import math
import os
import time
from datetime import datetime
from math import inf
from heapq import heappop, heappush
import collections
import functools
from collections import defaultdict
import heapq
import random
import networkx as nx
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import gurobipy as gp
from gurobipy import *
from shapely.geometry import Point,LineString
import geopandas as gpd
import osmnx as ox
class World:
"""
一个类
"""
    Observation = collections.namedtuple('Observation', 'traveltime origin destination')  # a single observed trip: its travel time, origin and destination
def __init__(self, type=0, num=100, sigma=0, reg=0, time_limit=0.6):
"""
nodeUrl: 图对象的点的标识信息和位置信息
edgeUrl: 图对象的弧的标识信息、位置信息以及连接信息
type: 选择图对象的类型,0为small,1为normal
超参数num,sigma,reg
"""
self.type = type
self.num = num
self.sigma = sigma
self.reg = reg
self.time_limit = time_limit
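    # Hypothetical usage sketch (not from the original code): build a World for the
    # small graph and load its ground-truth graph. The sigma/reg values below are
    # placeholder values, not ones taken from the original experiments.
    #   w = World(type=0, num=100, sigma=0.1, reg=0.01, time_limit=0.6)
    #   G = w.True_Graph()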
def True_Graph(self):
"""
如果type=0时,加载small_model的真实图。如果type=1时,加载normal_model的真实图。如果其他情况,加载manhattan的真实图。
:return: 返回一个加载好的的图G对象
"""
if self.type == 0:
            # <load data files>
            df_nodelist = pd.read_csv("../train_dataset/smallnodelist.csv")
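# --- Hypothetical illustration (not part of the original snippet, which is truncated here) ---
# A minimal sketch of how a node/edge CSV pair like the one being read above could be
# assembled into a networkx graph. The column names ("node_id", "x", "y", "u", "v",
# "length") are assumptions for illustration, not taken from the original data files.
def build_graph_from_csvs(node_csv, edge_csv):
    import pandas as pd
    import networkx as nx
    nodes = pd.read_csv(node_csv)
    edges = pd.read_csv(edge_csv)
    G = nx.DiGraph()
    for _, row in nodes.iterrows():
        G.add_node(row["node_id"], pos=(row["x"], row["y"]))
    for _, row in edges.iterrows():
        G.add_edge(row["u"], row["v"], length=row["length"])
    return G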
import filecmp
import os
import shutil
from pathlib import Path
import pandas as pd
import pytest
import sas7bdat_converter.converter as converter
from tests.conftest import bad_sas_file
current_dir = Path().absolute()
def test_batch_to_csv_path(tmp_path, sas_file_1, sas_file_2, sas_file_3):
converted_file_1 = tmp_path.joinpath("file1.csv")
converted_file_2 = tmp_path.joinpath("file2.csv")
converted_file_3 = tmp_path.joinpath("file3.csv")
file_dict = [
{"sas7bdat_file": sas_file_1, "export_file": converted_file_1},
{"sas7bdat_file": sas_file_2, "export_file": converted_file_2},
{"sas7bdat_file": sas_file_3, "export_file": converted_file_3},
]
converter.batch_to_csv(file_dict)
files_created = False
if converted_file_1.is_file() and converted_file_2.is_file() and converted_file_3.is_file():
files_created = True
assert files_created
def test_batch_to_csv_str(tmp_path, sas_file_1, sas_file_2, sas_file_3):
converted_file_1 = tmp_path.joinpath("file1.csv")
converted_file_2 = tmp_path.joinpath("file2.csv")
converted_file_3 = tmp_path.joinpath("file3.csv")
file_dict = [
{"sas7bdat_file": str(sas_file_1), "export_file": str(converted_file_1)},
{"sas7bdat_file": str(sas_file_2), "export_file": str(converted_file_2)},
{"sas7bdat_file": str(sas_file_3), "export_file": str(converted_file_3)},
]
converter.batch_to_csv(file_dict)
files_created = False
if converted_file_1.is_file() and converted_file_2.is_file() and converted_file_3.is_file():
files_created = True
assert files_created
def test_batch_to_csv_continue(tmp_path, caplog, sas_file_1):
bad_sas_file = tmp_path.joinpath("bad_file.sas7bdat")
bad_converted_file = tmp_path.joinpath("bad_file.csv")
converted_file = tmp_path.joinpath("file1.csv")
file_dict = [
{"sas7bdat_file": bad_sas_file, "export_file": bad_converted_file},
{"sas7bdat_file": sas_file_1, "export_file": converted_file},
]
converter.batch_to_csv(file_dict, continue_on_error=True)
assert converted_file.is_file()
assert "Error converting" in caplog.text
def test_batch_to_csv_no_continue(tmp_path, caplog, sas_file_1):
bad_sas_file = tmp_path.joinpath("bad_file.sas7bdat")
bad_converted_file = tmp_path.joinpath("bad_file.csv")
converted_file = tmp_path.joinpath("file1.csv")
file_dict = [
{"sas7bdat_file": bad_sas_file, "export_file": bad_converted_file},
{"sas7bdat_file": sas_file_1, "export_file": converted_file},
]
with pytest.raises(Exception) as execinfo:
converter.batch_to_csv(file_dict, continue_on_error=False)
assert execinfo.value
file_dicts = [
[{"bad_key": "test.sas7bdat", "export_file": "test.csv"}],
[{"sas7bdat_file": "test.sas7bdat", "bad_key": "test.csv"}],
[{"sas_bad_key": "test.sas7bdate", "export_bad_key": "test.csv"}],
]
@pytest.mark.parametrize("file_dict", file_dicts)
def test_batch_to_csv_invalid_key(file_dict):
with pytest.raises(KeyError) as execinfo:
converter.batch_to_csv(file_dict)
assert "Invalid key provided" in str(execinfo.value)
def test_batch_to_excel_path(tmp_path, sas_file_1, sas_file_2, sas_file_3):
converted_file_1 = tmp_path.joinpath("file1.xlsx")
converted_file_2 = tmp_path.joinpath("file2.xlsx")
converted_file_3 = tmp_path.joinpath("file3.xlsx")
file_dict = [
{"sas7bdat_file": sas_file_1, "export_file": converted_file_1},
{"sas7bdat_file": sas_file_2, "export_file": converted_file_2},
{"sas7bdat_file": sas_file_3, "export_file": converted_file_3},
]
converter.batch_to_excel(file_dict)
files_created = False
if converted_file_1.is_file() and converted_file_2.is_file() and converted_file_3.is_file():
files_created = True
assert files_created
def test_batch_to_excel_str(tmp_path, sas_file_1, sas_file_2, sas_file_3):
converted_file_1 = tmp_path.joinpath("file1.xlsx")
converted_file_2 = tmp_path.joinpath("file2.xlsx")
converted_file_3 = tmp_path.joinpath("file3.xlsx")
file_dict = [
{"sas7bdat_file": str(sas_file_1), "export_file": str(converted_file_1)},
{"sas7bdat_file": str(sas_file_2), "export_file": str(converted_file_2)},
{"sas7bdat_file": str(sas_file_3), "export_file": str(converted_file_3)},
]
converter.batch_to_excel(file_dict)
files_created = False
if converted_file_1.is_file() and converted_file_2.is_file() and converted_file_3.is_file():
files_created = True
assert files_created
def test_batch_to_excel_continue(tmp_path, caplog, sas_file_1):
bad_sas_file = tmp_path.joinpath("bad_file.sas7bdat")
bad_converted_file = tmp_path.joinpath("bad_file.xlsx")
converted_file = tmp_path.joinpath("file1.xlsx")
file_dict = [
{"sas7bdat_file": bad_sas_file, "export_file": bad_converted_file},
{"sas7bdat_file": sas_file_1, "export_file": converted_file},
]
converter.batch_to_excel(file_dict, continue_on_error=True)
assert converted_file.is_file()
assert "Error converting" in caplog.text
def test_batch_to_excel_no_continue(tmp_path, caplog, sas_file_1):
bad_sas_file = tmp_path.joinpath("bad_file.sas7bdat")
bad_converted_file = tmp_path.joinpath("bad_file.xlsx")
converted_file = tmp_path.joinpath("file1.xlsx")
file_dict = [
{"sas7bdat_file": bad_sas_file, "export_file": bad_converted_file},
{"sas7bdat_file": sas_file_1, "export_file": converted_file},
]
with pytest.raises(FileNotFoundError) as execinfo:
converter.batch_to_excel(file_dict, continue_on_error=False)
assert execinfo.value
file_dicts = [
[{"bad_key": "test.sas7bdat", "export_file": "test.xlsx"}],
[{"sas7bdat_file": "test.sas7bdat", "bad_key": "<KEY>"}],
[{"sas_bad_key": "test.sas7bdate", "export_bad_key": "<KEY>"}],
]
@pytest.mark.parametrize("file_dict", file_dicts)
def test_batch_to_excel_invalid_key(file_dict):
with pytest.raises(KeyError) as execinfo:
converter.batch_to_excel(file_dict)
assert "Invalid key provided" in str(execinfo.value)
def test_batch_to_json_path(tmp_path, sas_file_1, sas_file_2, sas_file_3):
converted_file_1 = tmp_path.joinpath("file1.json")
converted_file_2 = tmp_path.joinpath("file2.json")
converted_file_3 = tmp_path.joinpath("file3.json")
file_dict = [
{"sas7bdat_file": sas_file_1, "export_file": converted_file_1},
{"sas7bdat_file": sas_file_2, "export_file": converted_file_2},
{"sas7bdat_file": sas_file_3, "export_file": converted_file_3},
]
converter.batch_to_json(file_dict)
files_created = False
if converted_file_1.is_file() and converted_file_2.is_file() and converted_file_3.is_file():
files_created = True
assert files_created
def test_batch_to_json_path_str(tmp_path, sas_file_1, sas_file_2, sas_file_3):
converted_file_1 = tmp_path.joinpath("file1.json")
converted_file_2 = tmp_path.joinpath("file2.json")
converted_file_3 = tmp_path.joinpath("file3.json")
file_dict = [
{"sas7bdat_file": str(sas_file_1), "export_file": str(converted_file_1)},
{"sas7bdat_file": str(sas_file_2), "export_file": str(converted_file_2)},
{"sas7bdat_file": str(sas_file_3), "export_file": str(converted_file_3)},
]
converter.batch_to_json(file_dict)
files_created = False
if converted_file_1.is_file() and converted_file_2.is_file() and converted_file_3.is_file():
files_created = True
assert files_created
def test_batch_to_json_continue(tmp_path, caplog, sas_file_1):
bad_sas_file = tmp_path.joinpath("bad_file.sas7bdat")
bad_converted_file = tmp_path.joinpath("bad_file.json")
converted_file = tmp_path.joinpath("file1.json")
file_dict = [
{"sas7bdat_file": bad_sas_file, "export_file": bad_converted_file},
{"sas7bdat_file": sas_file_1, "export_file": converted_file},
]
converter.batch_to_json(file_dict, continue_on_error=True)
assert converted_file.is_file()
assert "Error converting" in caplog.text
def test_batch_to_json_no_continue(tmp_path, caplog, sas_file_1):
bad_sas_file = tmp_path.joinpath("bad_file.sas7bdat")
bad_converted_file = tmp_path.joinpath("bad_file.json")
converted_file = tmp_path.joinpath("file1.json")
file_dict = [
{"sas7bdat_file": bad_sas_file, "export_file": bad_converted_file},
{"sas7bdat_file": sas_file_1, "export_file": converted_file},
]
with pytest.raises(Exception) as execinfo:
converter.batch_to_json(file_dict, continue_on_error=False)
assert execinfo.value
file_dicts = [
[{"bad_key": "test.sas7bdat", "export_file": "test.json"}],
[{"sas7bdat_file": "test.sas7bdat", "bad_key": "test.json"}],
[{"sas_bad_key": "test.sas7bdate", "export_bad_key": "test.json"}],
]
@pytest.mark.parametrize("file_dict", file_dicts)
def test_batch_to_json_invalid_key(file_dict):
with pytest.raises(KeyError) as execinfo:
converter.batch_to_json(file_dict)
assert "Invalid key provided" in str(execinfo.value)
optionals = [
{},
{"root_node": "root"},
{"first_node": "item"},
{"root_node": "root", "first_node": "item"},
]
@pytest.mark.parametrize("optional", optionals)
def test_batch_to_xml_path(tmp_path, sas_file_1, sas_file_2, sas_file_3, optional):
converted_file_1 = tmp_path.joinpath("file1.xml")
converted_file_2 = tmp_path.joinpath("file2.xml")
converted_file_3 = tmp_path.joinpath("file3.xml")
if optional.get("root_node") and optional.get("first_node"):
file_dict = [
{
"sas7bdat_file": sas_file_1,
"export_file": converted_file_1,
"root_node": optional.get("root_node"),
"first_node": optional.get("first_node"),
},
{
"sas7bdat_file": sas_file_2,
"export_file": converted_file_2,
"root_node": optional.get("root_node"),
"first_node": optional.get("first_node"),
},
{
"sas7bdat_file": sas_file_3,
"export_file": converted_file_3,
"root_node": optional.get("root_node"),
"first_node": optional.get("first_node"),
},
]
elif optional.get("root_node"):
file_dict = [
{
"sas7bdat_file": sas_file_1,
"export_file": converted_file_1,
"root_node": optional.get("root_node"),
},
{
"sas7bdat_file": sas_file_2,
"export_file": converted_file_2,
"root_node": optional.get("root_node"),
},
{
"sas7bdat_file": sas_file_3,
"export_file": converted_file_3,
"root_node": optional.get("root_node"),
},
]
elif optional.get("first_node"):
file_dict = [
{
"sas7bdat_file": sas_file_1,
"export_file": converted_file_1,
"first_node": optional.get("first_node"),
},
{
"sas7bdat_file": sas_file_2,
"export_file": converted_file_2,
"first_node": optional.get("first_node"),
},
{
"sas7bdat_file": sas_file_3,
"export_file": converted_file_3,
"first_node": optional.get("first_node"),
},
]
else:
file_dict = [
{
"sas7bdat_file": sas_file_1,
"export_file": converted_file_1,
},
{
"sas7bdat_file": sas_file_2,
"export_file": converted_file_2,
},
{
"sas7bdat_file": sas_file_3,
"export_file": converted_file_3,
},
]
converter.batch_to_xml(file_dict)
files_created = False
if converted_file_1.is_file() and converted_file_2.is_file() and converted_file_3.is_file():
files_created = True
assert files_created
@pytest.mark.parametrize("optional", optionals)
def test_batch_to_xml_str(tmp_path, sas_file_1, sas_file_2, sas_file_3, optional):
converted_file_1 = tmp_path.joinpath("file1.xml")
converted_file_2 = tmp_path.joinpath("file2.xml")
converted_file_3 = tmp_path.joinpath("file3.xml")
if optional.get("root_node") and optional.get("first_node"):
file_dict = [
{
"sas7bdat_file": str(sas_file_1),
"export_file": str(converted_file_1),
"root_node": optional.get("root_node"),
"first_node": optional.get("first_node"),
},
{
"sas7bdat_file": str(sas_file_2),
"export_file": str(converted_file_2),
"root_node": optional.get("root_node"),
"first_node": optional.get("first_node"),
},
{
"sas7bdat_file": str(sas_file_3),
"export_file": str(converted_file_3),
"root_node": optional.get("root_node"),
"first_node": optional.get("first_node"),
},
]
elif optional.get("root_node"):
file_dict = [
{
"sas7bdat_file": str(sas_file_1),
"export_file": str(converted_file_1),
"root_node": optional.get("root_node"),
},
{
"sas7bdat_file": str(sas_file_2),
"export_file": str(converted_file_2),
"root_node": optional.get("root_node"),
},
{
"sas7bdat_file": str(sas_file_3),
"export_file": str(converted_file_3),
"root_node": optional.get("root_node"),
},
]
elif optional.get("first_node"):
file_dict = [
{
"sas7bdat_file": str(sas_file_1),
"export_file": str(converted_file_1),
"first_node": optional.get("first_node"),
},
{
"sas7bdat_file": str(sas_file_2),
"export_file": str(converted_file_2),
"first_node": optional.get("first_node"),
},
{
"sas7bdat_file": str(sas_file_3),
"export_file": str(converted_file_3),
"first_node": optional.get("first_node"),
},
]
else:
file_dict = [
{
"sas7bdat_file": str(sas_file_1),
"export_file": str(converted_file_1),
},
{
"sas7bdat_file": str(sas_file_2),
"export_file": str(converted_file_2),
},
{
"sas7bdat_file": str(sas_file_3),
"export_file": str(converted_file_3),
},
]
converter.batch_to_xml(file_dict)
files_created = False
if converted_file_1.is_file() and converted_file_2.is_file() and converted_file_3.is_file():
files_created = True
assert files_created
def test_batch_to_xml_continue(tmp_path, caplog, sas_file_1):
bad_sas_file = tmp_path.joinpath("bad_file.sas7bdat")
bad_converted_file = tmp_path.joinpath("bad_file.xml")
converted_file = tmp_path.joinpath("file1.xml")
file_dict = [
{"sas7bdat_file": bad_sas_file, "export_file": bad_converted_file},
{"sas7bdat_file": sas_file_1, "export_file": converted_file},
]
converter.batch_to_xml(file_dict, continue_on_error=True)
assert converted_file.is_file()
assert "Error converting" in caplog.text
def test_batch_to_xml_no_continue(tmp_path, caplog, sas_file_1):
bad_sas_file = tmp_path.joinpath("bad_file.sas7bdat")
bad_converted_file = tmp_path.joinpath("bad_file.xml")
converted_file = tmp_path.joinpath("file1.xml")
file_dict = [
{"sas7bdat_file": bad_sas_file, "export_file": bad_converted_file},
{"sas7bdat_file": sas_file_1, "export_file": converted_file},
]
with pytest.raises(Exception) as execinfo:
converter.batch_to_xml(file_dict, continue_on_error=False)
assert execinfo.value
file_dicts = [
[{"bad_key": "test.sas7bdat", "export_file": "test.xml"}],
[{"sas7bdat_file": "test.sas7bdat", "bad_key": "<KEY>"}],
[{"sas_bad_key": "test.sas7bdate", "export_bad_key": "<KEY>"}],
[
{
"sas7bdat_file": "test.sas7bdat",
"export_file": "test.xml",
"root_node": "test",
"bad": "test",
}
],
[
{
"sas7bdat_file": "test.sas7bdat",
"export_file": "test.xml",
"bad": "test",
"first_node": "test",
}
],
]
@pytest.mark.parametrize("file_dict", file_dicts)
def test_batch_to_xml_invalid_key(file_dict):
with pytest.raises(KeyError) as execinfo:
converter.batch_to_xml(file_dict)
assert "Invalid key provided" in str(execinfo.value)
def test_dir_to_csv_same_dir_path(tmp_path, sas7bdat_dir):
sas_files = [str(x) for x in sas7bdat_dir.iterdir()]
for sas_file in sas_files:
shutil.copy(sas_file, str(tmp_path))
converter.dir_to_csv(tmp_path)
sas_counter = len([name for name in tmp_path.iterdir() if name.suffix == ".sas7bdat"])
convert_counter = len([name for name in tmp_path.iterdir() if name.suffix == ".csv"])
assert sas_counter == convert_counter
def test_dir_to_csv_same_dir_str(tmpdir, sas7bdat_dir):
sas_files = [str(x) for x in sas7bdat_dir.iterdir()]
for sas_file in sas_files:
shutil.copy(sas_file, tmpdir)
converter.dir_to_csv(tmpdir)
sas_counter = len([name for name in Path(tmpdir).iterdir() if name.suffix == ".sas7bdat"])
convert_counter = len([name for name in Path(tmpdir).iterdir() if name.suffix == ".csv"])
assert sas_counter == convert_counter
def test_dir_to_csv_continue(tmp_path, caplog, sas7bdat_dir, bad_sas_file):
sas_files = [str(x) for x in sas7bdat_dir.iterdir()]
for sas_file in sas_files:
shutil.copy(sas_file, str(tmp_path))
shutil.copy(bad_sas_file, str(tmp_path))
converter.dir_to_csv(tmp_path, continue_on_error=True)
sas_counter = len([name for name in tmp_path.iterdir() if name.suffix == ".sas7bdat"]) - 1
convert_counter = len([name for name in tmp_path.iterdir() if name.suffix == ".csv"])
assert sas_counter == convert_counter
assert "Error converting" in caplog.text
def test_dir_to_csv_no_continue(tmp_path, sas7bdat_dir, bad_sas_file):
sas_files = [str(x) for x in sas7bdat_dir.iterdir()]
for sas_file in sas_files:
shutil.copy(sas_file, str(tmp_path))
shutil.copy(bad_sas_file, str(tmp_path))
with pytest.raises(Exception) as execinfo:
converter.dir_to_csv(tmp_path, continue_on_error=False)
assert execinfo.value
def test_dir_to_csv_different_dir_path(tmp_path, sas7bdat_dir):
converter.dir_to_csv(dir_path=sas7bdat_dir, export_path=tmp_path)
sas_counter = len([name for name in sas7bdat_dir.iterdir() if name.suffix == ".sas7bdat"])
convert_counter = len([name for name in tmp_path.iterdir() if name.suffix == ".csv"])
assert sas_counter == convert_counter
def test_dir_to_csv_different_dir_str(tmpdir, sas7bdat_dir):
converter.dir_to_csv(dir_path=str(sas7bdat_dir), export_path=tmpdir)
sas_counter = len([name for name in Path(sas7bdat_dir).iterdir() if name.suffix == ".sas7bdat"])
convert_counter = len([name for name in Path(tmpdir).iterdir() if name.suffix == ".csv"])
assert sas_counter == convert_counter
def test_dir_to_excel_same_dir_path(tmp_path, sas7bdat_dir):
sas_files = [x for x in sas7bdat_dir.iterdir()]
for sas_file in sas_files:
shutil.copy(sas_file, str(tmp_path))
converter.dir_to_excel(tmp_path)
sas_counter = len([name for name in tmp_path.iterdir() if name.suffix == ".sas7bdat"])
convert_counter = len([name for name in tmp_path.iterdir() if name.suffix == ".xlsx"])
assert sas_counter == convert_counter
def test_dir_to_excel_same_dir_str(tmpdir, sas7bdat_dir):
sas_files = [str(x) for x in sas7bdat_dir.iterdir()]
for sas_file in sas_files:
shutil.copy(sas_file, tmpdir)
converter.dir_to_excel(tmpdir)
sas_counter = len([name for name in Path(tmpdir).iterdir() if name.suffix == ".sas7bdat"])
convert_counter = len([name for name in Path(tmpdir).iterdir() if name.suffix == ".xlsx"])
assert sas_counter == convert_counter
def test_dir_to_excel_different_dir_path(tmp_path, sas7bdat_dir):
converter.dir_to_excel(dir_path=sas7bdat_dir, export_path=tmp_path)
sas_counter = len([name for name in sas7bdat_dir.iterdir() if name.suffix == ".sas7bdat"])
convert_counter = len([name for name in tmp_path.iterdir() if name.suffix == ".xlsx"])
assert sas_counter == convert_counter
def test_dir_to_excel_different_dir_str(tmpdir, sas7bdat_dir):
converter.dir_to_excel(dir_path=str(sas7bdat_dir), export_path=tmpdir)
sas_counter = len([name for name in Path(sas7bdat_dir).iterdir() if name.suffix == ".sas7bdat"])
convert_counter = len([name for name in Path(tmpdir).iterdir() if name.suffix == ".xlsx"])
assert sas_counter == convert_counter
def test_dir_to_excel_continue(tmp_path, caplog, sas7bdat_dir, bad_sas_file):
sas_files = [str(x) for x in sas7bdat_dir.iterdir()]
for sas_file in sas_files:
shutil.copy(sas_file, str(tmp_path))
shutil.copy(bad_sas_file, str(tmp_path))
converter.dir_to_excel(tmp_path, continue_on_error=True)
sas_counter = len([name for name in tmp_path.iterdir() if name.suffix == ".sas7bdat"]) - 1
convert_counter = len([name for name in tmp_path.iterdir() if name.suffix == ".xlsx"])
assert sas_counter == convert_counter
assert "Error converting" in caplog.text
def test_dir_to_excel_no_continue(tmp_path, sas7bdat_dir, bad_sas_file):
sas_files = [str(x) for x in sas7bdat_dir.iterdir()]
for sas_file in sas_files:
shutil.copy(sas_file, str(tmp_path))
shutil.copy(bad_sas_file, str(tmp_path))
with pytest.raises(Exception) as execinfo:
converter.dir_to_excel(tmp_path, continue_on_error=False)
assert execinfo.value
def test_dir_to_json_same_dir_path(tmp_path, sas7bdat_dir):
sas_files = [x for x in sas7bdat_dir.iterdir()]
for sas_file in sas_files:
shutil.copy(sas_file, tmp_path)
converter.dir_to_json(tmp_path)
sas_counter = len([name for name in tmp_path.iterdir() if name.suffix == ".sas7bdat"])
convert_counter = len([name for name in tmp_path.iterdir() if name.suffix == ".json"])
assert sas_counter == convert_counter
def test_dir_to_json_same_dir_str(tmpdir, sas7bdat_dir):
sas_files = [str(x) for x in sas7bdat_dir.iterdir()]
for sas_file in sas_files:
shutil.copy(sas_file, tmpdir)
converter.dir_to_json(tmpdir)
sas_counter = len([name for name in Path(tmpdir).iterdir() if name.suffix == ".sas7bdat"])
convert_counter = len([name for name in Path(tmpdir).iterdir() if name.suffix == ".json"])
assert sas_counter == convert_counter
def test_dir_to_json_different_dir_path(tmp_path, sas7bdat_dir):
converter.dir_to_json(sas7bdat_dir, tmp_path)
sas_counter = len([name for name in sas7bdat_dir.iterdir() if name.suffix == ".sas7bdat"])
convert_counter = len([name for name in tmp_path.iterdir() if name.suffix == ".json"])
assert sas_counter == convert_counter
def test_dir_to_json_different_dir_str(tmpdir, sas7bdat_dir):
converter.dir_to_json(str(sas7bdat_dir), tmpdir)
sas_counter = len([name for name in Path(sas7bdat_dir).iterdir() if name.suffix == ".sas7bdat"])
convert_counter = len([name for name in Path(tmpdir).iterdir() if name.suffix == ".json"])
assert sas_counter == convert_counter
def test_dir_to_json_continue(tmp_path, caplog, sas7bdat_dir, bad_sas_file):
sas_files = [str(x) for x in sas7bdat_dir.iterdir()]
for sas_file in sas_files:
shutil.copy(sas_file, str(tmp_path))
shutil.copy(bad_sas_file, str(tmp_path))
converter.dir_to_json(tmp_path, continue_on_error=True)
sas_counter = len([name for name in tmp_path.iterdir() if name.suffix == ".sas7bdat"]) - 1
convert_counter = len([name for name in tmp_path.iterdir() if name.suffix == ".json"])
assert sas_counter == convert_counter
assert "Error converting" in caplog.text
def test_dir_to_json_no_continue(tmp_path, sas7bdat_dir, bad_sas_file):
sas_files = [str(x) for x in sas7bdat_dir.iterdir()]
for sas_file in sas_files:
shutil.copy(sas_file, str(tmp_path))
shutil.copy(bad_sas_file, str(tmp_path))
with pytest.raises(Exception) as execinfo:
converter.dir_to_json(tmp_path, continue_on_error=False)
assert execinfo.value
def test_dir_to_xml_same_dir_path(tmp_path, sas7bdat_dir):
sas_files = [x for x in sas7bdat_dir.iterdir()]
for sas_file in sas_files:
shutil.copy(sas_file, str(tmp_path))
converter.dir_to_xml(tmp_path)
sas_counter = len([name for name in tmp_path.iterdir() if name.suffix == ".sas7bdat"])
convert_counter = len([name for name in tmp_path.iterdir() if name.suffix == ".xml"])
assert sas_counter == convert_counter
def test_dir_to_xml_same_dir_str(tmpdir, sas7bdat_dir):
sas_files = [str(x) for x in sas7bdat_dir.iterdir()]
for sas_file in sas_files:
shutil.copy(sas_file, tmpdir)
converter.dir_to_xml(tmpdir)
sas_counter = len([name for name in Path(tmpdir).iterdir() if name.suffix == ".sas7bdat"])
convert_counter = len([name for name in Path(tmpdir).iterdir() if name.suffix == ".xml"])
assert sas_counter == convert_counter
def test_dir_to_xml_different_dir_path(tmp_path, sas7bdat_dir):
converter.dir_to_xml(sas7bdat_dir, tmp_path)
sas_counter = len([name for name in sas7bdat_dir.iterdir() if name.suffix == ".sas7bdat"])
convert_counter = len([name for name in tmp_path.iterdir() if name.suffix == ".xml"])
assert sas_counter == convert_counter
def test_dir_to_xml_different_dir_str(tmpdir, sas7bdat_dir):
converter.dir_to_xml(str(sas7bdat_dir), tmpdir)
sas_counter = len([name for name in Path(sas7bdat_dir).iterdir() if name.suffix == ".sas7bdat"])
convert_counter = len([name for name in Path(tmpdir).iterdir() if name.suffix == ".xml"])
assert sas_counter == convert_counter
def test_dir_to_xml_continue(tmp_path, caplog, sas7bdat_dir, bad_sas_file):
sas_files = [str(x) for x in sas7bdat_dir.iterdir()]
for sas_file in sas_files:
shutil.copy(sas_file, str(tmp_path))
shutil.copy(bad_sas_file, str(tmp_path))
converter.dir_to_xml(tmp_path, continue_on_error=True)
sas_counter = len([name for name in tmp_path.iterdir() if name.suffix == ".sas7bdat"]) - 1
convert_counter = len([name for name in tmp_path.iterdir() if name.suffix == ".xml"])
assert sas_counter == convert_counter
assert "Error converting" in caplog.text
def test_dir_to_xml_no_continue(tmp_path, sas7bdat_dir, bad_sas_file):
sas_files = [str(x) for x in sas7bdat_dir.iterdir()]
for sas_file in sas_files:
shutil.copy(sas_file, str(tmp_path))
shutil.copy(bad_sas_file, str(tmp_path))
with pytest.raises(Exception) as execinfo:
converter.dir_to_xml(tmp_path, continue_on_error=False)
assert execinfo.value
exception_data = [
(
"sas7bdat conversion error - Valid extension for to_csv conversion is: .csv",
[".csv"],
"to_csv",
),
(
"sas7bdat conversion error - Valid extensions for to_csv conversion are: .csv, .txt",
[".csv", ".txt"],
"to_csv",
),
]
@pytest.mark.parametrize("exception", exception_data)
def test_file_extension_exception_message(exception):
valid_message = exception[0]
valid_extensions = exception[1]
test_message = converter._file_extension_exception_message(exception[2], valid_extensions)
assert valid_message == test_message
def test_invalid_key_exception_message_no_optional():
valid_message = "Invalid key provided, expected keys are: sas7bdat_file, export_file"
required_keys = ["sas7bdat_file", "export_file"]
test_message = converter._invalid_key_exception_message(required_keys=required_keys)
assert valid_message == test_message
def test_invalid_key_exception_message_optional():
valid_message = "Invalid key provided, expected keys are: sas7bdat_file, export_file and optional keys are: root_node, first_node"
required_keys = ["sas7bdat_file", "export_file"]
optional_keys = ["root_node", "first_node"]
test_message = converter._invalid_key_exception_message(
required_keys=required_keys, optional_keys=optional_keys
)
assert valid_message == test_message
@pytest.mark.parametrize(
"data",
[
(
(
".txt",
".csv",
),
".xml",
),
((".sas7bdat",), ".json"),
],
)
def test_is_valid_extension_false(data):
valid_extensions = data[0]
file_extension = data[1]
assert not converter._is_valid_extension(valid_extensions, file_extension)
@pytest.mark.parametrize(
"data",
[
(
(
".txt",
".csv",
),
".csv",
),
((".sas7bdat",), ".sas7bdat"),
],
)
def test_is_valid_extension_true(data):
valid_extensions = data[0]
file_extension = data[1]
assert converter._is_valid_extension(valid_extensions, file_extension)
@pytest.mark.parametrize("sas_file_fixture", ["sas_file_1", "sas_file_2", "sas_file_3"])
def test_to_csv_path(tmp_path, request, expected_dir, sas_file_fixture):
    sas_file = Path(request.getfixturevalue(sas_file_fixture))
converted_file = tmp_path.joinpath("file1.csv")
expected_file = expected_dir.joinpath("file1.csv")
converter.to_csv(sas_file, converted_file)
assert filecmp.cmp(converted_file, expected_file, shallow=False)
@pytest.mark.parametrize("sas_file_fixture", ["sas_file_1", "sas_file_2", "sas_file_3"])
def test_to_csv_str(tmpdir, request, expected_dir, sas_file_fixture):
    sas_file = request.getfixturevalue(sas_file_fixture)
converted_file = str(Path(tmpdir).joinpath("file1.csv"))
expected_file = expected_dir.joinpath("file1.csv")
converter.to_csv(sas_file, converted_file)
assert filecmp.cmp(converted_file, expected_file, shallow=False)
def test_to_csv_invalid_extension():
with pytest.raises(AttributeError) as execinfo:
converter.to_csv("test.sas7bdat", "test.bad")
assert "sas7bdat conversion error - Valid extension" in str(execinfo.value)
def test_to_dataframe(sas_file_1):
d = {
"integer_row": [
1.0,
2.0,
3.0,
4.0,
5.0,
],
"text_row": [
"Some text",
"Some more text",
"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nunc lobortis, risus nec euismod condimentum, lectus ligula porttitor massa, vel ornare mauris arcu vel augue. Maecenas rhoncus consectetur nisl, ac convallis enim pellentesque efficitur. Praesent tristique . End of textlectus a dolor sodales, in porttitor felis auctor. Etiam dui mauris, commodo at venenatis eu, lacinia nec tellus. Curabitur dictum tincidunt convallis. Duis vestibulum mauris quis felis euismod bibendum. Nulla eget nunc arcu. Nam quis est urna. In eleifend ultricies ultrices. In lacinia auctor ex, sed commodo nisl fringilla sed. Fusce iaculis viverra eros, nec elementum velit aliquam non. Aenean sollicitudin consequat libero, eget mattis.",
"Text",
"Test",
],
"float_row": [
2.5,
17.23,
3.21,
100.9,
98.6,
],
"date_row": [
"2018-01-02",
"2018-02-05",
"2017-11-21",
"2016-05-19",
"1999-10-25",
],
}
df = pd.DataFrame(data=d)
df["date_row"] = pd.to_datetime(df["date_row"])
df = df[["integer_row", "text_row", "float_row", "date_row"]]
df_file = converter.to_dataframe(sas_file_1)
pd.testing.assert_frame_equal(df, df_file, check_datetimelike_compat=True)
@pytest.mark.parametrize("sas_file_fixture", ["sas_file_1", "sas_file_2", "sas_file_3"])
def test_to_excel_path(tmp_path, request, expected_dir, sas_file_fixture):
    sas_file = Path(request.getfixturevalue(sas_file_fixture))
converted_file = tmp_path.joinpath("file1.xlsx")
expected_file = expected_dir.joinpath("file1.xlsx")
converter.to_excel(sas_file, converted_file)
df_expected = pd.read_excel(expected_file, engine="openpyxl")
df_converted = pd.read_excel(converted_file, engine="openpyxl")
pd.testing.assert_frame_equal(df_expected, df_converted)
@pytest.mark.parametrize("sas_file_fixture", ["sas_file_1", "sas_file_2", "sas_file_3"])
def test_to_excel_str(tmpdir, request, expected_dir, sas_file_fixture):
    sas_file = request.getfixturevalue(sas_file_fixture)
converted_file = str(Path(tmpdir).joinpath("file1.xlsx"))
expected_file = expected_dir.joinpath("file1.xlsx")
converter.to_excel(sas_file, converted_file)
df_expected = pd.read_excel(expected_file, engine="openpyxl")
df_converted = pd.read_excel(converted_file, engine="openpyxl")
    pd.testing.assert_frame_equal(df_expected, df_converted)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 27 09:23:58 2021
@author: nathanielgates
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import gekko_load_follow as g1
import gekko_co_gen as g2
import gekko_tri_gen as g3
import utilities as util
from time import time
#%% Run models
end = 11 # 5 # 14 # 11 # 9 # 7 # 9 # 5 # 11
time_steps = [int(2**i) for i in range(2, end)]
imodes = [6, 9]
# imodes = [6] # 0.76 min (for nodes = [0, 3, 4, 5] with end=5)
# imodes = [9] # 5.03 min (for nodes = [0, 1, 2, 3] with end=5)
# 0.91 min for imodes = [6], nodes = [3], end = 9
# 3.72 min for imodes = [6], nodes = [3], end = 11 (51 and 118 sec added)
# 2.45 min for imodes = [6], nodes = [3], end = 11 (with GEKKO server)
# 5.79 min for imodes = [6, 9], nodes = [2], end = 11, imode_9_lim = 64 (BYU)
# 16.56min for imodes = [6, 9], nodes = [2], end = 11, imode_9_lim = 128 (BYU)
# imode_9_lim = 128 # 64
imode_9_lim = 64
name1 = 'Load-follow'#'ing'
name2 = 'Co-gen'#'eration'
name3 = 'Tri-gen'#'eration'
names = [name1, name2, name3]
models = [g1, g2, g3]
numbers = [1, 2, 3]
# names = [
# # name1,
# name2,
# # name3
# ]
# models = [
# # g1,
# g2,
# # g3
# ]
# numbers = [
# # 1,
# 2,
# # 3
# ]
# nodes = [0, 1, 2, 3, 4, 5]
# 5min for N0-N5 and end=9
# nodes = [2, 3, 4, 5, 6]
nodes = [2]
d = {}
df = {}
for imode in imodes:
print('\n---- iMode {} ----'.format(imode))
d[imode] = {}
df[imode] = {}
time_start = time()
for n in time_steps:
if (imode == 9) & (n > imode_9_lim):
continue
print('Timesteps: {}'.format(n))
t = np.linspace(0, 1, n)
# Insert finer resolution at start
add = [0.01]#[0.01]#, 0.02]
t = np.array(list(sorted(set(list(t) + add))))
d[imode][n] = {}
df[imode][n] = {}
for node in nodes:
print(' Nodes: {}'.format(node))
df[imode][n][node] = {}
d[imode][n][node] = {}
time1 = time()
for model, name, number in zip(models, names, numbers):
print(' Model: {}-{}'.format(number, name))
time1_a = time()
                # Solve the optimization problem
sol, res = model.model(t, imode=imode, nodes=node, disp=True,
solver=3)
# solver=2) # Try this...
time2_a = time()
time_sum_a = time2_a - time1_a
print(' Time: {:.2f}s'.format(time_sum_a))
df[imode][n][node][number] = sol
d[imode][n][node][number] = res
time2 = time()
time_sum = time2 - time1
print(' Time: {:.2f}s'.format(time_sum))
time_end = time()
time_tot = time_end - time_start
print('Total time: {:.2f}min'.format(time_tot/60))
df_raw = df.copy()
#%% Process data
df = df_raw.copy()
for imode in imodes:
for n in time_steps:
if (imode == 9) and (n > imode_9_lim):
continue
for node in nodes:
df[imode][n][node] = (pd.DataFrame(df[imode][n][node])
.T
.reset_index()
.rename(columns={'index': 'number'})
)
df[imode][n] = pd.concat(df[imode][n])
df[imode] = pd.concat(df[imode])
df = pd.concat(df)
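# --- Hypothetical illustration (not in the original script) ---
# pd.concat applied to a dict of DataFrames turns the dict keys into an extra outer
# index level, so the nested concats above stack the results into a MultiIndex of
# (imode, timesteps, node, ...). A minimal, self-contained sketch of that behaviour:
def _concat_nested_dict_example():
    import pandas as pd
    inner = {1: pd.DataFrame({"obj": [0.5]}), 2: pd.DataFrame({"obj": [0.7]})}
    outer = {6: pd.concat(inner), 9: pd.concat(inner)}
    # index levels here: (outer key, inner key, original row index)
    return pd.concat(outer)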
# enables access to directories/files
import os
# for handling data
import numpy as np
from numpy import array
import pandas as pd
from pandas import ExcelWriter
from pandas import ExcelFile
# graphing
import matplotlib.pyplot as plt
from matplotlib.ticker import StrMethodFormatter
from matplotlib import colors
from matplotlib.ticker import PercentFormatter
import seaborn as sns
import ptitprince as pt  # assumed: pt.RainCloud in raincloud_plot_astros_groups comes from the ptitprince package
# statistics
from statsmodels.graphics.gofplots import qqplot
from scipy import stats
import scikit_posthocs as sp
from scipy.stats import zscore
from scipy.stats import ks_2samp
from statistics import mean
import statsmodels.api as sm
from statsmodels.formula.api import ols
from statsmodels.stats.multicomp import pairwise_tukeyhsd
from statsmodels.stats.multicomp import MultiComparison
from statsmodels.stats.anova import AnovaRM
from statsmodels.stats.libqsturng import psturng
import re
from ast import literal_eval
import more_itertools
import math
from matplotlib import lines
from matplotlib.offsetbox import AnchoredText
import imgkit
# machine learning
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from xgboost import XGBRegressor
from sklearn.metrics import explained_variance_score, r2_score
from sklearn.metrics import median_absolute_error
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.model_selection import KFold, GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import cross_val_score
from sklearn import preprocessing
import scipy.cluster.hierarchy as hac
import matplotlib.gridspec as gridspec
import random
import six
from sklearn.preprocessing import LabelEncoder
def generate_dictionary_for_telomere_length_data(patharg):
"""
USAGE:
telomere_data_dict = generate_dictionary_for_telomere_length_data(directory)
Where the directory contains images of files containing telomere length data in
a predefined format. This function is written specifically for the Excel file templates
that I use, and will provide in this repository, but could be altered for any format.
The individual telomere lengths column is extracted, cleansed of missing values & DAPI-intensity
values; outliers (3 std devs from mean of column) are removed; and the telomere length values are
standardized to each other by use of fluorescent beads which calibrate according to inherent
differences between microscope imaging sessions. The individual's ID & timepoint (from filename) (KEY)
is associated with its respective individual telomere length data (VALUE) as a KEY:VALUE pair
in the dictionary. The dictionary can then be looped over to initialize all timepoint data
for that individual for analysis, i.e visualizations, statistics, etc.
"""
# initialize dictionary to hold our data
dict_astro_individ_telos_dfs = {}
# loop through directory to grab files
for file in os.scandir(patharg):
if file.name.endswith('.xlsx') and file.name.startswith('~$') == False:
print(f'{file.name} telomere data acquisition in progress..')
try:
df = pd.read_excel(file)
except:
print(f'{file.name} File not found..')
return -1
df.rename(columns={'Unnamed: 3':'Individ Telos'}, inplace=True)
# these numbers correspond to rows containing information about the DAPI counterstain, NOT telomeres, so we drop
DAPI_values_to_drop=[5, 192, 379, 566, 753, 940, 1127, 1314, 1501, 1688, 1875, 2062,
2249, 2436, 2623, 2810, 2997, 3184, 3371, 3558, 3745, 3932, 4119, 4306, 4493,
4680, 4867, 5054, 5241, 5428]
# grabbing individual telomere length data from the file & dropping DAPI info
individual_telos_lengths = (df['Individ Telos'])
individual_telos_lengths = individual_telos_lengths.drop(labels=DAPI_values_to_drop)
# first pass at generating synthetic data for github exposition; to initialize actual
# data, comment out the line below, and uncomment the .iloc[] line
# individual_telos_lengths = individual_telos_lengths.sample(2500, random_state=1)
individual_telos_lengths = individual_telos_lengths.iloc[7:5611]
# ensure the telomere measurements are a numeric data type, drop any missing values,
# make data into a dataframe
telos_str_toNaN = pd.to_numeric(individual_telos_lengths, errors='coerce')
individual_telos_cleaned = telos_str_toNaN.dropna(axis=0, how='any')
telos_df = individual_telos_cleaned.to_frame(name=None)
# remove any telomere measurements that lie beyond 3 standard deviations of the mean
# the data is relatively normal in shape, & this process removes about ~10-20 telos from ~5520
# modest loss, acceptable to help standardize
telos_individ_df = telos_df[(np.abs(stats.zscore(telos_df)) < 3).all(axis=1)]
# logic clauses for recognizing which astronaut ID is in the sample name
# different astronauts were imaging at different times and thus associated with
# different Cy3 calibrations for the microscope, thus data is standardized according to Cy3
if ('5163' in file.name) or ('1536' in file.name):
telos_individ_df_cy3Cal = telos_individ_df.div(59.86)
elif '2171' in file.name or '4419' in file.name:
telos_individ_df_cy3Cal = telos_individ_df.div(80.5)
elif '7673' in file.name:
telos_individ_df_cy3Cal = telos_individ_df.div(2.11)
elif '2479' in file.name:
telos_individ_df_cy3Cal = telos_individ_df.div(2.18)
elif '1261' in file.name:
telos_individ_df_cy3Cal = telos_individ_df.div(2.16)
else:
telos_individ_df_cy3Cal = telos_individ_df
# average of all cy3 calibrated control telo measurements (11 age matched controls)
# telos_individ_df_cy3Cal = telos_individ_df_cy3Cal.div(116.1848153)
file_name_trimmed = file.name.replace('.xlsx', '')
dict_astro_individ_telos_dfs[file_name_trimmed] = telos_individ_df_cy3Cal
print('Done collecting all astronaut telomere length excel files')
return dict_astro_individ_telos_dfs
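# --- Hypothetical illustration (not part of the original module) ---
# The cleaning steps described in the docstring above, isolated on a toy Series:
# coerce to numeric, drop missing values, drop measurements beyond 3 standard
# deviations of the mean, then divide by a microscope-specific Cy3 calibration
# factor. The default factor (59.86) is just one of the values used above and is
# only an example input here.
def _example_clean_and_calibrate(raw_values, cy3_factor=59.86):
    import numpy as np
    import pandas as pd
    from scipy import stats
    telos = pd.to_numeric(pd.Series(raw_values), errors='coerce').dropna().to_frame()
    telos = telos[(np.abs(stats.zscore(telos)) < 3).all(axis=1)]
    return telos.div(cy3_factor)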
def astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, astroDF, astroquartile, astroname, axsNUMone, axsNUMtwo):
astroDF = astroDF.to_numpy()
astroquartile = astroquartile.to_numpy()
N, bins, patches = axs[axsNUMone,axsNUMtwo].hist(astroDF, bins=n_bins, range=(0, 400), edgecolor='black')
for a in range(len(patches)):
if bins[a] <= np.quantile(astroquartile, 0.25):
patches[a].set_facecolor('#fdff38')
elif np.quantile(astroquartile, 0.25) < bins[a] and bins[a] <= np.quantile(astroquartile, 0.50):
patches[a].set_facecolor('#d0fefe')
elif np.quantile(astroquartile, 0.50) < bins[a] and bins[a] <= np.quantile(astroquartile, 0.75):
patches[a].set_facecolor('#d0fefe')
elif bins[a] > np.quantile(astroquartile, 0.75):
patches[a].set_facecolor('#ffbacd')
axs[axsNUMone,axsNUMtwo].set_title(f"Histogram of {astroname}'s Telomeres")
axs[axsNUMone,axsNUMtwo].set_xlabel('Bins of Individ. Telomeres')
axs[axsNUMone,axsNUMtwo].set_ylabel('Freqs of Individ. Telomeres')
axs[axsNUMone,axsNUMtwo].xaxis.set_major_locator(plt.MaxNLocator(12))
def astronaut_histogram_stylizer_divyBins_byQuartile_2Stacked(fig, axs, n_bins, astroDF, astroquartile, astroname, axsNUMone):
astroDF = astroDF.to_numpy()
astroquartile = astroquartile.to_numpy()
N, bins, patches = axs[axsNUMone].hist(astroDF, bins=n_bins, range=(0, 400), edgecolor='black')
for a in range(len(patches)):
if bins[a] <= np.quantile(astroquartile, 0.25):
patches[a].set_facecolor('#fdff38')
elif np.quantile(astroquartile, 0.25) < bins[a] and bins[a] <= np.quantile(astroquartile, 0.50):
patches[a].set_facecolor('#d0fefe')
elif np.quantile(astroquartile, 0.50) < bins[a] and bins[a] <= np.quantile(astroquartile, 0.75):
patches[a].set_facecolor('#d0fefe')
elif bins[a] > np.quantile(astroquartile, 0.75):
patches[a].set_facecolor('#ffbacd')
axs[axsNUMone].set_title(f'Histogram of Individual Telomeres for {astroname}')
axs[axsNUMone].set_xlabel('Bins of Individ. Telomeres')
axs[axsNUMone].set_ylabel('Freqs of Individ. Telomeres')
axs[axsNUMone].xaxis.set_major_locator(plt.MaxNLocator(19))
def gen_missing_values_andimpute_or_randomsampledown(n_cells, telosPercell, astro_df, option=None):
if astro_df.size > 5520:
astro_dfsampled = astro_df.sample(5520)
return astro_dfsampled
if astro_df.size > 25 and astro_df.size <= 2760:
missing_data_difference = abs( (n_cells * telosPercell) - astro_df.size )
rsampled = astro_df.sample(missing_data_difference, replace=True, random_state=28)
concat_ed = pd.concat([rsampled, astro_df], sort=False)
np.random.shuffle(concat_ed.to_numpy())
concat_ed.reset_index(drop=True, inplace=True)
return concat_ed
if astro_df.size > 25 and astro_df.size < 5520:
missing_data_difference = abs( (n_cells * telosPercell) - astro_df.size )
if option == 'rsamp':
rsampled = astro_df.sample(missing_data_difference, random_state=28)
concat_ed = pd.concat([rsampled, astro_df], sort=False)
np.random.shuffle(concat_ed.to_numpy())
concat_ed.reset_index(drop=True, inplace=True)
return concat_ed
else:
return astro_df
else:
return astro_df
def statistics_between_timepoints(astro_pre, astro_mid1, astro_mid2, astro_post,
astro_prename, astro_mid1name, astro_mid2name, astro_postname, test):
print( astro_prename + ' vs ' + astro_mid1name,
test(astro_pre, astro_mid1), '\n',
astro_prename + ' vs ' + astro_mid2name,
test(astro_pre, astro_mid2),'\n',
astro_mid1name + ' vs ' + astro_postname,
test(astro_mid1, astro_post),'\n',
astro_mid1name + ' vs ' + astro_mid2name,
test(astro_mid1, astro_mid2),'\n',
astro_mid2name + ' vs ' + astro_postname,
test(astro_mid2, astro_post),'\n',
astro_prename + ' vs ' + astro_postname,
test(astro_pre, astro_post),'\n', )
def statistics_between_timepoints_prepost_only(astro_pre, astro_post, astro_prename, astro_postname):
print(astro_prename + ' compared vs ' + astro_postname,
stats.mannwhitneyu(astro_pre, astro_post),'\n', )
def get_astro_number_from_id(astro_id):
astro_num = ''
if astro_id == '5163':
astro_num = 99
synth = 'synthetic 1'
elif astro_id == '1536':
astro_num = 1
synth = 'synthetic 2'
elif astro_id == '7673':
astro_num = 2
synth = 'synthetic 3'
elif astro_id == '2479':
astro_num = 3
synth = 'synthetic 4'
elif astro_id == '2171':
astro_num = 4
synth = 'synthetic 5'
elif astro_id == '1261':
astro_num = 5
synth = 'synthetic 7'
elif astro_id == '3228':
astro_num = 6
synth = 'synthetic 8'
elif astro_id == '2381':
astro_num = 98
synth = 'synthetic 9'
elif astro_id == '4819':
astro_num = 7
synth = 'synthetic 10'
elif astro_id == '1062':
astro_num = 8
synth = 'synthetic 11'
elif astro_id == '2494':
astro_num = 9
synth = 'synthetic 12'
elif astro_id == '4419':
astro_num = 1011
synth = 'synthetic 99'
return astro_num, synth
def relative_flight_timepoint(name_key):
if 'L' in name_key:
flight_status = 'Pre-Flight'
elif 'FD' in name_key:
flight_status = 'Mid-Flight'
elif 'R' in name_key:
flight_status = 'Post-Flight'
return flight_status
def quartile_cts_rel_to_df1(df1, df2):
df1 = pd.DataFrame(df1)
df2 = pd.DataFrame(df2)
quartile_1 = df2[df2 <= df1.quantile(0.25)].count()
quartile_2_3 = df2[(df2 > df1.quantile(0.25)) & (df2 < df1.quantile(0.75))].count()
quartile_4 = df2[df2 >= df1.quantile(0.75)].count()
return quartile_1.values, quartile_2_3.values, quartile_4.values
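# --- Hypothetical worked example (not in the original module) ---
# quartile_cts_rel_to_df1 counts how many values of df2 fall at or below the 25th
# percentile of df1, strictly between its 25th and 75th percentiles, and at or above
# its 75th percentile. On a toy baseline of 1..100 versus six later measurements,
# each bucket ends up with two values.
def _example_quartile_counts():
    import pandas as pd
    baseline = pd.Series(range(1, 101))
    later = pd.Series([10, 20, 50, 60, 90, 95])
    return quartile_cts_rel_to_df1(baseline, later)  # -> two values per bucket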
def get_timepoint(name_key):
timepoint_5_char = ['L-270', 'L-180', 'FD140', 'FD260', 'R+105', 'R+180', 'R+270']
timepoint_4_char = ['L-60', 'FD45', 'FD90', 'R+60']
timepoint_3_char = ['R+5', 'R+7']
for timepoint in timepoint_5_char:
if timepoint in name_key:
timepoint = name_key[-5:]
return timepoint.strip()
for timepoint in timepoint_4_char:
if timepoint in name_key:
timepoint = name_key[-4:]
return timepoint.strip()
for timepoint in timepoint_3_char:
if timepoint in name_key:
timepoint = name_key[-3:]
return timepoint.strip()
def make_quartiles_columns(astro_df):
pos_1, pos_2, pos_3 = 6, 7, 8
astro_id, timepoint, flight, telo_data = 1, 2, 3, 4
for i, row in astro_df.iterrows():
astro_id_4digit = row[astro_id]
if row[flight] == 'Pre-Flight' and row[timepoint] == 'L-270':
preFlight_telos = row[telo_data]
astro_df.iat[i, pos_1], astro_df.iat[i, pos_2], astro_df.iat[i, pos_3] = (quartile_cts_rel_to_df1(preFlight_telos, preFlight_telos))
elif row[flight] == 'Pre-Flight' and row[timepoint] == 'L-180':
if 'L-270' in list(astro_df[astro_df['astro id'] == astro_id_4digit]['timepoint']):
astro_df.iat[i, pos_1], astro_df.iat[i, pos_2], astro_df.iat[i, pos_3] = (quartile_cts_rel_to_df1(preFlight_telos, row[telo_data]))
elif 'L-270' not in list(astro_df[astro_df['astro id'] == astro_id_4digit]['timepoint']):
preFlight_telos = row[telo_data]
astro_df.iat[i, pos_1], astro_df.iat[i, pos_2], astro_df.iat[i, pos_3] = (quartile_cts_rel_to_df1(preFlight_telos, preFlight_telos))
elif row[flight] == 'Pre-Flight':
astro_df.iat[i, pos_1], astro_df.iat[i, pos_2], astro_df.iat[i, pos_3] = (quartile_cts_rel_to_df1(preFlight_telos, row[telo_data]))
elif row[flight] == 'Mid-Flight':
astro_df.iat[i, pos_1], astro_df.iat[i, pos_2], astro_df.iat[i, pos_3] = (quartile_cts_rel_to_df1(preFlight_telos, row[telo_data]))
elif row[flight] == 'Post-Flight':
astro_df.iat[i, pos_1], astro_df.iat[i, pos_2], astro_df.iat[i, pos_3] = (quartile_cts_rel_to_df1(preFlight_telos, row[telo_data]))
else:
print('unknown label in row[1] of the all patients df.. please check patient timepoint names')
return astro_df
def graphing_statistics_telomere_data(dict_astro_individ_telos_dfs):
astro_list_of_IDs = ['5163', '2171', '1536', '7673', '4819', '3228',
'2494', '2479', '2381', '1261', '1062']
timepoint_series = ['L-270', 'L-180', 'L-60', 'FD45', 'FD90', 'FD140',
'FD260', 'R+5', 'R+7', 'R+60', 'R+180', 'R+270']
n=0
for idNO in astro_list_of_IDs:
n+=1
# #initialize blank list of timepoints
data = [[1, 0, 0, 0], [0]]
emptydata = pd.DataFrame(data)
astro_L270 = pd.DataFrame(data)
astro_L180 = pd.DataFrame(data)
astro_L60 = pd.DataFrame(data)
astro_Mid1 = pd.DataFrame(data)
astro_Mid2 = pd.DataFrame(data)
astro_R7 = pd.DataFrame(data)
astro_R60 = pd.DataFrame(data)
astro_R180 = pd.DataFrame(data)
astro_R270 = pd.DataFrame(data)
astro_L270name = ''
astro_L180name = ''
astro_L60name = ''
astro_Mid1name = ''
astro_Mid2name = ''
astro_R7name = ''
astro_R60name = ''
astro_R180name = ''
astro_R270name = ''
for j in timepoint_series:
for i in dict_astro_individ_telos_dfs.keys():
if (idNO in i) and j == 'L-270' and ('L-270' in i):
astro_L270 = dict_astro_individ_telos_dfs[i]
astro_L270name = i.replace('mphase TeloFISH', '').replace('.xlsx', '')
elif (idNO in i) and j == 'L-180' and ('L-180' in i):
astro_L180 = dict_astro_individ_telos_dfs[i]
astro_L180name = i.replace('mphase TeloFISH', '').replace('.xlsx', '')
elif (idNO in i) and j == 'L-60' and ('L-60' in i):
astro_L60 = dict_astro_individ_telos_dfs[i]
astro_L60name = i.replace('mphase TeloFISH', '').replace('.xlsx', '')
elif (idNO in i) and (j == 'FD45' or j == 'FD90') and (j in i):
astro_Mid1 = dict_astro_individ_telos_dfs[i]
astro_Mid1name = i.replace('mphase TeloFISH', '').replace('.xlsx', '')
elif (idNO in i) and (j == 'FD140' or j == 'FD260') and (j in i):
astro_Mid2 = dict_astro_individ_telos_dfs[i]
astro_Mid2name = i.replace('mphase TeloFISH', '').replace('.xlsx', '')
elif (idNO in i) and j == 'R+7' and (j in i):
astro_R7 = dict_astro_individ_telos_dfs[i]
astro_R7name = i.replace('mphase TeloFISH', '').replace('.xlsx', '')
elif (idNO in i) and j == 'R+60' and (j in i):
astro_R60 = dict_astro_individ_telos_dfs[i]
astro_R60name = i.replace('mphase TeloFISH', '').replace('.xlsx', '')
elif (idNO in i) and j == 'R+180' and (j in i):
astro_R180 = dict_astro_individ_telos_dfs[i]
astro_R180name = i.replace('mphase TeloFISH', '').replace('.xlsx', '')
elif (idNO in i) and j == 'R+270' and (j in i):
astro_R270 = dict_astro_individ_telos_dfs[i]
astro_R270name = i.replace('mphase TeloFISH', '').replace('.xlsx', '')
else:
continue
if idNO == '5163' or idNO == '2171' or idNO == '1536':
if (astro_L270.size > 25 or astro_L180.size > 25) and (astro_Mid1.size > 25 and astro_Mid2.size > 25 ) and (astro_R180.size > 25 or astro_R270.size > 25):
n_cells = 30
astro_L270 = gen_missing_values_andimpute_or_randomsampledown(n_cells, 184, astro_L270, 'rsamp')
astro_L180 = gen_missing_values_andimpute_or_randomsampledown(n_cells, 184, astro_L180, 'rsamp')
astro_Mid1 = gen_missing_values_andimpute_or_randomsampledown(n_cells, 184, astro_Mid1, 'rsamp')
astro_Mid2 = gen_missing_values_andimpute_or_randomsampledown(n_cells, 184, astro_Mid2, 'rsamp')
astro_R180 = gen_missing_values_andimpute_or_randomsampledown(n_cells, 184, astro_R180, 'rsamp')
astro_R270 = gen_missing_values_andimpute_or_randomsampledown(n_cells, 184, astro_R270, 'rsamp')
n_bins = 30
fig, axs = plt.subplots(2,2, sharey=True, tight_layout=False, figsize = (16, 12))
if astro_L270name != '':
if astro_R270name != '':
astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, astro_L270, astro_L270, astro_L270name, 0, 0)
astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, astro_Mid1, astro_L270, astro_Mid1name, 0, 1)
astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, astro_Mid2, astro_L270, astro_Mid2name, 1, 0)
astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, astro_R270, astro_L270, astro_R270name, 1, 1)
# print('stats')
# statistics_between_timepoints(astro_L270, astro_Mid1, astro_Mid2, astro_R270,
# astro_L270name, astro_Mid1name, astro_Mid2name, astro_R270name)
elif astro_R270name == '':
astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, astro_L270, astro_L270, astro_L270name, 0, 0)
astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, astro_Mid1, astro_L270, astro_Mid1name, 0, 1)
astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, astro_Mid2, astro_L270, astro_Mid2name, 1, 0)
astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, astro_R180, astro_L270, astro_R180name, 1, 1)
# print('stats')
# statistics_between_timepoints(astro_L270, astro_Mid1, astro_Mid2, astro_R180,
# astro_L270name, astro_Mid1name, astro_Mid2name, astro_R180name)
elif astro_L270name == '':
if astro_R270name == '':
astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, astro_L180, astro_L180, astro_L180name, 0, 0)
astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, astro_Mid1, astro_L180, astro_Mid1name, 0, 1)
astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, astro_Mid2, astro_L180, astro_Mid2name, 1, 0)
astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, astro_R180, astro_L180, astro_R180name, 1, 1)
# print('randomly sampled stats')
# statistics_between_timepoints(astro_L180, astro_Mid1, astro_Mid2, astro_R180,
# astro_L180name, astro_Mid1name, astro_Mid2name, astro_R180name)
elif astro_R270name != '':
astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, astro_L180, astro_L180, astro_L180name, 0, 0)
astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, astro_Mid1, astro_L180, astro_Mid1name, 0, 1)
astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, astro_Mid2, astro_L180, astro_Mid2name, 1, 0)
astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, astro_R270, astro_L180, astro_R270name, 1, 1)
# print('randomly sampled stats')
# statistics_between_timepoints(astro_L180, astro_Mid1, astro_Mid2, astro_R270,
# astro_L180name, astro_Mid1name, astro_Mid2name, astro_R270name)
else:
continue
# plt.savefig('Final telomere histogram random sampling dso'+idNO+'.pdf')
plt.show()
if idNO in ['7673', '4819', '3228', '2494', '2479', '2381', '1261', '1062']:
if (astro_L270.size > 25) and (astro_R270.size > 25):
n_cells = 30
# astro_L270name = f'synthetic astronaut {n} L+270'
# astro_R270name = f'synthetic astronaut {n} R+270'
astro_L270 = gen_missing_values_andimpute_or_randomsampledown(n_cells, 184, astro_L270, 'rsamp')
astro_R270 = gen_missing_values_andimpute_or_randomsampledown(n_cells, 184, astro_R270, 'rsamp')
n_bins = 30
fig, axs = plt.subplots(2, sharey=True, tight_layout=False, figsize = (12, 14))
astronaut_histogram_stylizer_divyBins_byQuartile_2Stacked(fig, axs, n_bins, astro_L270, astro_L270, astro_L270name, 0)
astronaut_histogram_stylizer_divyBins_byQuartile_2Stacked(fig, axs, n_bins, astro_R270, astro_L270, astro_R270name, 1)
# statistics_between_timepoints_prepost_only(astro_L270, astro_R270, astro_L270name, astro_R270name)
else:
continue
# plt.savefig('Resampled telomere histogram dso'+idNO+'.pdf')
plt.show()
def grab_control_values_generate_dictionary(patharg):
dict_mean_individ_telos_dfs = {}
for file in os.scandir(patharg):
if file.name.endswith('.xlsx') and file.name.startswith('~$') == False:
print(file.name, 'telomere data acquisition in progress..')
try:
df = pd.read_excel(file)
except:
print('File not found..')
return -1
df.rename(columns={'Unnamed: 3':'Mean Individ Telos'}, inplace=True)
mean_values_of_individual_telomere_lengths = (df['Mean Individ Telos'])
mean_values_of_individual_telomere_lengths = mean_values_of_individual_telomere_lengths.drop(labels=[5, 192, 379, 566, 753, 940, 1127, 1314,
1501, 1688, 1875, 2062, 2249, 2436, 2623, 2810, 2997, 3184, 3371, 3558, 3745, 3932, 4119, 4306, 4493, 4680, 4867, 5054, 5241, 5428])
mean_values_of_individual_telomere_lengths = mean_values_of_individual_telomere_lengths.iloc[7:5611]
meantelos_str_toNaN = pd.to_numeric(mean_values_of_individual_telomere_lengths, errors='coerce')
mean_individual_telos_cleaned = meantelos_str_toNaN.dropna(axis=0, how='any')
mean_individ_df = mean_individual_telos_cleaned.to_frame(name=None)
mean_individ_df = mean_individ_df[(np.abs(stats.zscore(mean_individ_df)) < 3).all(axis=1)]
if '0397' in file.name:
mean_individ_df_cy3Cal = mean_individ_df.div(2.285)
elif '3907' in file.name:
mean_individ_df_cy3Cal = mean_individ_df.div(2.179)
elif '1826' in file.name:
mean_individ_df_cy3Cal = mean_individ_df.div(2.143)
elif '0100' in file.name:
mean_individ_df_cy3Cal = mean_individ_df.div(59.86)
elif '0912' in file.name:
mean_individ_df_cy3Cal = mean_individ_df.div(80.5)
elif '0646' in file.name:
mean_individ_df_cy3Cal = mean_individ_df.div(80.5)
else:
mean_individ_df_cy3Cal = mean_individ_df
file_name_trimmed = file.name.replace('.xlsx', '')
mean_individ_df_cy3Cal = gen_missing_values_andimpute_or_randomsampledown(30, 184, mean_individ_df_cy3Cal, 'rsamp')
dict_mean_individ_telos_dfs[file_name_trimmed] = mean_individ_df_cy3Cal
print('data collection complete')
return dict_mean_individ_telos_dfs
def grab_control_telo_values_per_cell_generate_dictionary(patharg):
dict_mean_individ_telos_dfs = {}
for file in os.scandir(patharg):
if file.name.endswith('.xlsx') and file.name.startswith('~$') == False:
print(file.name, 'telomere data acquisition in progress..')
try:
df = pd.read_excel(file, skiprows=3)
df = df.iloc[0:30, 12].to_frame()
except:
print('File not found..')
return -1
mean_individ_df = df.dropna(axis=0, how='any')
if '0397' in file.name:
mean_individ_df_cy3Cal = mean_individ_df.div(2.285)
elif '3907' in file.name:
mean_individ_df_cy3Cal = mean_individ_df.div(2.179)
elif '1826' in file.name:
mean_individ_df_cy3Cal = mean_individ_df.div(2.143)
elif '0100' in file.name:
mean_individ_df_cy3Cal = mean_individ_df.div(59.86)
elif '0912' in file.name:
mean_individ_df_cy3Cal = mean_individ_df.div(80.5)
elif '0646' in file.name:
mean_individ_df_cy3Cal = mean_individ_df.div(80.5)
else:
mean_individ_df_cy3Cal = mean_individ_df
file_name_trimmed = file.name.replace('.xlsx', '')
mean_individ_df_cy3Cal = mean_individ_df_cy3Cal.div(116.1848153)
dict_mean_individ_telos_dfs[file_name_trimmed] = mean_individ_df_cy3Cal
print('data collection complete')
return dict_mean_individ_telos_dfs
def grab_astro_telo_values_per_cell_generate_dictionary(patharg):
dict_astro_individ_telos_dfs = {}
for file in os.scandir(patharg):
if file.name.endswith('.xlsx') and file.name.startswith('~$') == False:
print(f'{file.name} telomere data acquisition in progress..')
try:
df = pd.read_excel(file, skiprows=3)
df = df.iloc[0:30, 12].to_frame()
except:
print(f'{file.name} File not found..')
return -1
telos_individ_df = df.dropna(axis=0, how='any')
if ('5163' in file.name) or ('1536' in file.name):
telos_individ_df_cy3Cal = telos_individ_df.div(59.86)
elif '2171' in file.name:
telos_individ_df_cy3Cal = telos_individ_df.div(80.5)
elif '7673' in file.name:
telos_individ_df_cy3Cal = telos_individ_df.div(2.11)
elif '2479' in file.name:
telos_individ_df_cy3Cal = telos_individ_df.div(2.18)
elif '1261' in file.name:
telos_individ_df_cy3Cal = telos_individ_df.div(2.16)
else:
telos_individ_df_cy3Cal = telos_individ_df
telos_individ_df_cy3Cal = telos_individ_df_cy3Cal.div(116.1848153)
file_name_trimmed = file.name.replace('.xlsx', '')
dict_astro_individ_telos_dfs[file_name_trimmed] = telos_individ_df_cy3Cal
print('Done collecting all astronaut telomere length excel files')
return dict_astro_individ_telos_dfs
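# Hypothetical usage sketch (the directory path and variable names below are assumptions,
# not from the original source): build the per-cell telomere dict from a folder of
# excel exports, then feed it into the dataframe constructors defined further down.
# astro_telo_dict = grab_astro_telo_values_per_cell_generate_dictionary('../data/raw data/astro telo excel files/')
# astro_cell_df = make_astronaut_cell_data_dataframe(astro_telo_dict)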
def raincloud_plot_astros_groups(x=None, y=None, data=None,
groupby=None, iterable=None):
group_df = data.groupby(groupby)
for item in iterable:
plot_df = group_df.get_group(item)
if x == 'timepoint':
#this line only needed for timepoint
plot_df[x].cat.remove_unused_categories(inplace=True)
ax = sns.set(font_scale=1)
#bw = sigma
ax = pt.RainCloud(x = x, y = y, data = plot_df, palette = "Set2", bw = .20,
width_viol = .8, figsize = (8,6), move=0.21, orient = "h")
plt.title(f'{item} telos', fontsize=16)
def make_astronaut_dataframe(dict_astro_individ_telos_dfs):
data = []
for name_key, telo_value in dict_astro_individ_telos_dfs.items():
astro_id = name_key[3:7]
astro_num, synth = get_astro_number_from_id(astro_id)
time_point = get_timepoint(name_key)
flight_status = relative_flight_timepoint(name_key)
telo_value = gen_missing_values_andimpute_or_randomsampledown(30, 184, pd.Series(telo_value.values.reshape(-1,)), 'rsamp')
data.append([astro_num, astro_id, time_point, flight_status, telo_value, np.mean(telo_value.values)])
astro_df = pd.DataFrame(data, columns = ['astro number', 'astro id', 'timepoint', 'flight status', 'telo data', 'telo means'])
sorter = ['L-270', 'L-180', 'L-60', 'FD45', 'FD90', 'FD140', 'FD260', 'R+5', 'R+7', 'R+60', 'R+105', 'R+180', 'R+270']
astro_df['timepoint'] = astro_df['timepoint'].astype('category')
astro_df['timepoint'].cat.set_categories(sorter, inplace=True)
astro_df['Q1'] = 'telos preF Q1 <0.25'
astro_df['Q2-3'] = 'telos preF Q2-3 >0.25 & <0.75'
astro_df['Q4'] = 'telos preF Q4 >0.75'
astro_df = astro_df.sort_values(['astro number', 'timepoint']).reset_index(drop=True)
return astro_df
def make_astronaut_cell_data_dataframe(dict_astro_individ_telos_dfs):
data = []
for name_key, telo_value in dict_astro_individ_telos_dfs.items():
astro_id = name_key[3:7]
astro_num, synth = get_astro_number_from_id(astro_id)
time_point = get_timepoint(name_key)
flight_status = relative_flight_timepoint(name_key)
telo_value = pd.Series(telo_value.values.reshape(-1,))
data.append([astro_num, astro_id, time_point, flight_status, telo_value, np.mean(telo_value.values)])
astro_df = pd.DataFrame(data, columns = ['astro number', 'astro id', 'timepoint', 'flight status', 'telo data per cell', 'telo means'])
sorter = ['L-270', 'L-180', 'L-60', 'FD45', 'FD90', 'FD140', 'FD260', 'R+5', 'R+7', 'R+60', 'R+105', 'R+180', 'R+270']
astro_df['timepoint'] = astro_df['timepoint'].astype('category')
astro_df['timepoint'].cat.set_categories(sorter, inplace=True)
astro_df['Q1'] = 'telos preF Q1 <0.25'
astro_df['Q2-3'] = 'telos preF Q2-3 >0.25 & <0.75'
astro_df['Q4'] = 'telos preF Q4 >0.75'
astro_df = astro_df.sort_values(['astro number', 'timepoint']).reset_index(drop=True)
return astro_df
def make_control_dataframe(dict_astro_individ_telos_dfs):
data = []
for name_key, telo_value in dict_astro_individ_telos_dfs.items():
astro_id = name_key[3:7]
# astro_num, synth = get_astro_number_from_id(astro_id)
time_point = get_timepoint(name_key)
flight_status = relative_flight_timepoint(name_key)
telo_value = pd.Series(telo_value.values.reshape(-1,))
data.append([astro_id, time_point, flight_status, telo_value, np.mean(telo_value.values)])
astro_df = pd.DataFrame(data, columns = ['control id', 'timepoint', 'flight status controls', 'telo data', 'telo means'])
sorter = ['L-270', 'L-180', 'L-60', 'FD45', 'FD90', 'FD140', 'FD260', 'R+5', 'R+7', 'R+60', 'R+105', 'R+180', 'R+270']
astro_df['timepoint'] = astro_df['timepoint'].astype('category')
astro_df['timepoint'].cat.set_categories(sorter, inplace=True)
astro_df = astro_df.sort_values(['control id', 'timepoint']).reset_index(drop=True)
return astro_df
def make_control_cell_data_dataframe(dict_astro_individ_telos_dfs):
data = []
for name_key, telo_value in dict_astro_individ_telos_dfs.items():
astro_id = name_key[3:7]
# astro_num, synth = get_astro_number_from_id(astro_id)
time_point = get_timepoint(name_key)
flight_status = relative_flight_timepoint(name_key)
telo_value = pd.Series(telo_value.values.reshape(-1,))
data.append([astro_id, time_point, flight_status, telo_value, np.mean(telo_value.values)])
astro_df = pd.DataFrame(data, columns = ['control id', 'timepoint', 'flight status controls', 'telo data per cell', 'telo means'])
sorter = ['L-270', 'L-180', 'L-60', 'FD45', 'FD90', 'FD140', 'FD260', 'R+5', 'R+7', 'R+60', 'R+105', 'R+180', 'R+270']
astro_df['timepoint'] = astro_df['timepoint'].astype('category')
astro_df['timepoint'].cat.set_categories(sorter, inplace=True)
astro_df = astro_df.sort_values(['control id', 'timepoint']).reset_index(drop=True)
return astro_df
def mid_split(row):
if 'FD90' in row or 'FD45' in row:
return 'Mid-Flight 1'
elif 'FD140' in row or 'FD260' in row:
return 'Mid-Flight 2'
elif 'L' in row:
return 'Pre-Flight'
elif 'R' in row:
return 'Post-Flight'
def histogram_plot_groups(x=None, data=None,
groupby=None, iterable=None):
group_df = data.groupby(groupby)
for item in iterable:
plot_df = group_df.get_group(item)
non_irrad = plot_df[plot_df['timepoint'] == '1 non irrad'][x]
irrad_4_Gy = plot_df[plot_df['timepoint'] == '2 irrad @ 4 Gy'][x]
three_B = plot_df[plot_df['timepoint'] == '3 B'][x]
four_C = plot_df[plot_df['timepoint'] == '4 C'][x]
n_bins = 70
fig, axs = plt.subplots(2, 2, sharey=True, tight_layout=False, figsize=(20, 13))
ax = sns.set_style(style="darkgrid",rc= {'patch.edgecolor': 'black'})
ax = sns.set(font_scale=1)
telo_mrp.histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, non_irrad, non_irrad, f'patient #{item} 1 non rad', 0, 0)
telo_mrp.histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, irrad_4_Gy, non_irrad, f'patient #{item} 2 irrad @ 4 Gy', 0, 1)
telo_mrp.histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, three_B, non_irrad, f'patient #{item} 3 B', 1, 0)
telo_mrp.histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, four_C, non_irrad, f'patient #{item} 4 C', 1, 1)
def initialize_telo_data_1st_timepoint_variable(timepoint=None, df=None):
if timepoint in list(df['timepoint'].unique()):
variable = df[df['timepoint'] == str(timepoint)]['telo data exploded']
return variable
elif timepoint not in list(df['timepoint'].unique()):
variable = pd.DataFrame([[0,1],[0,1]])
return variable
def initialize_telo_data_timepoint_or_blank(timepoint, df):
if timepoint in list(df['timepoint'].unique()):
timepoint_telo_data = df[df['timepoint'] == str(timepoint)]['telo data exploded']
name_id = str(df['astro id'].unique()[0])
name_timepoint = f' {timepoint}'
name_total = 'dso' + name_id + name_timepoint
return name_total, timepoint_telo_data
elif timepoint not in list(df['timepoint'].unique()):
timepoint_telo_data = pd.DataFrame([0,1],[0,1])
name = ''
return name, timepoint_telo_data
########################################################################################################################
########################################################################################################################
# FUNCTIONS FOR GRAPHING INDIVIDUAL TELOMERES
########################################################################################################################
########################################################################################################################
def graph_four_histograms(quartile_ref, n_bins, df1, df2, df3, df4,
name1, name2, name3, name4):
n_bins = n_bins
fig, axs = plt.subplots(2,2, sharey=True, sharex=True, constrained_layout=True, figsize = (8, 6))
sns.set_style(style="darkgrid",rc= {'patch.edgecolor': 'black'})
fig.add_subplot(111, frameon=False)
plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
plt.grid(False)
plt.rc('xtick',labelsize=16)
plt.rc('ytick',labelsize=16)
astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, df1, quartile_ref, name1, 0, 0)
astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, df2, quartile_ref, name2, 0, 1)
astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, df3, quartile_ref, name3, 1, 0)
astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, df4, quartile_ref, name4, 1, 1)
def graph_two_histograms(quartile_ref, n_bins, df1, df2,
name1, name2, controls=None):
n_bins = n_bins
fig, axs = plt.subplots(2, sharey=True, constrained_layout=True, figsize = (8, 6))
sns.set_style(style="darkgrid",rc= {'patch.edgecolor': 'black'})
for ax in axs.flat:
ax.label_outer()
plt.rc('xtick',labelsize=16)
plt.rc('ytick',labelsize=16)
astronaut_histogram_stylizer_divyBins_byQuartile_2Stacked(fig, axs, n_bins, df1, quartile_ref, name1, 0)
astronaut_histogram_stylizer_divyBins_byQuartile_2Stacked(fig, axs, n_bins, df2, quartile_ref, name2, 1)
# csfont = {'fontname':'sans-serif'}
# plt.suptitle(f"Individual Telomere Length Distributions at Pre and Post-Flight: {name1[0:8]}",
# y=.95, fontsize=14, **csfont)
# if controls == True:
# csfont = {'fontname':'sans-serif'}
# plt.suptitle(f"Individual Telomere Length Distributions at Pre and Post-Flight: All Control Samples",
# y=.95, fontsize=14, **csfont)
def astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, astroDF, astroquartile, astroname, axsNUMone, axsNUMtwo):
astroDF = astroDF.to_numpy()
astroquartile = astroquartile.to_numpy()
N, bins, patches = axs[axsNUMone,axsNUMtwo].hist(astroDF, bins=n_bins, range=(0, 400), edgecolor='black')
for a in range(len(patches)):
if bins[a] <= np.quantile(astroquartile, 0.25):
patches[a].set_facecolor('#fdff38')
elif np.quantile(astroquartile, 0.25) < bins[a] and bins[a] <= np.quantile(astroquartile, 0.50):
patches[a].set_facecolor('#d0fefe')
elif np.quantile(astroquartile, 0.50) < bins[a] and bins[a] <= np.quantile(astroquartile, 0.75):
patches[a].set_facecolor('#d0fefe')
elif bins[a] > np.quantile(astroquartile, 0.75):
patches[a].set_facecolor('#ffbacd')
modified_astroname = astroname.replace('astro', '')
axs[axsNUMone,axsNUMtwo].set_title(f"{modified_astroname}", fontsize=16,)
font_axes=16
if axsNUMone == 0 and axsNUMtwo == 0:
axs[axsNUMone,axsNUMtwo].set_ylabel("Individual Telomere Counts", fontsize=font_axes)
if axsNUMone == 1 and axsNUMtwo == 0:
axs[axsNUMone,axsNUMtwo].set_ylabel("Individual Telomere Counts", fontsize=font_axes)
axs[axsNUMone,axsNUMtwo].set_xlabel("Bins of Individual Telomeres (RFI)", fontsize=font_axes)
if axsNUMone == 1 and axsNUMtwo == 1:
axs[axsNUMone,axsNUMtwo].set_xlabel("Bins of Individual Telomeres (RFI)", fontsize=font_axes)
axs[axsNUMone,axsNUMtwo].xaxis.set_major_locator(plt.MaxNLocator(7))
def astronaut_histogram_stylizer_divyBins_byQuartile_2Stacked(fig, axs, n_bins, astroDF, astroquartile, astroname, axsNUMone):
astroDF = astroDF.to_numpy()
astroquartile = astroquartile.to_numpy()
N, bins, patches = axs[axsNUMone].hist(astroDF, bins=n_bins, range=(0, 400), edgecolor='black')
for a in range(len(patches)):
if bins[a] <= np.quantile(astroquartile, 0.25):
patches[a].set_facecolor('#fdff38')
elif np.quantile(astroquartile, 0.25) < bins[a] and bins[a] <= np.quantile(astroquartile, 0.50):
patches[a].set_facecolor('#d0fefe')
elif np.quantile(astroquartile, 0.50) < bins[a] and bins[a] <= np.quantile(astroquartile, 0.75):
patches[a].set_facecolor('#d0fefe')
elif bins[a] > np.quantile(astroquartile, 0.75):
patches[a].set_facecolor('#ffbacd')
axs[axsNUMone].set_title(f"{astroname}", fontsize=16,)
font_axes=16
if axsNUMone == 0 or axsNUMone == 1:
axs[axsNUMone].set_ylabel("Individual Telomere Counts", fontsize=font_axes)
if axsNUMone == 1:
axs[axsNUMone].set_xlabel("Bins of Individual Telomeres (RFI)", fontsize=font_axes)
axs[axsNUMone].xaxis.set_major_locator(plt.MaxNLocator(7))
def make_histograms_colored_by_quartile_for_astronauts(exploded_telos_df=None, astro_ids=None, nbins=45):
# astro_ids = ['5163', '2171', '1536', '7673', '4819', '3228', '2494', '2479', '2381', '1261', '1062']
grouped_data = exploded_telos_df.groupby('astro id')
# by looping through astronaut ids, we'll pull out their respective dataframes
# once we have the astronauts respective dfs, we'll figure out the quartile df &
for astro_id_num in astro_ids:
if astro_id_num not in grouped_data.groups.keys():
break
plot_df = grouped_data.get_group(astro_id_num)
for timepoint in ['L-270', 'L-180']:
first_timepoint = initialize_telo_data_1st_timepoint_variable(timepoint=timepoint, df=plot_df)
if first_timepoint.size > 30:
break
quartile_ref = first_timepoint
# okay, now we have the first timepoint as the reference for making quartile cutoffs!
        # now need to initialize the other timepoint values!
name_L270, astro_L270 = initialize_telo_data_timepoint_or_blank('L-270', plot_df)
name_L180, astro_L180 = initialize_telo_data_timepoint_or_blank('L-180', plot_df)
if '5163' == astro_id_num or '1536' == astro_id_num:
name_Mid1, astro_Mid1 = initialize_telo_data_timepoint_or_blank('FD90', plot_df)
name_Mid2, astro_Mid2 = initialize_telo_data_timepoint_or_blank('FD140', plot_df)
if '2171' == astro_id_num:
name_Mid1, astro_Mid1 = initialize_telo_data_timepoint_or_blank('FD45', plot_df)
name_Mid2, astro_Mid2 = initialize_telo_data_timepoint_or_blank('FD260', plot_df)
name_R180, astro_R180 = initialize_telo_data_timepoint_or_blank('R+180', plot_df)
name_R270, astro_R270 = initialize_telo_data_timepoint_or_blank('R+270', plot_df)
if ('5163' == astro_id_num) or ('2171' == astro_id_num) or ('1536' == astro_id_num):
            n_bins = nbins
if name_L270 != '':
if name_R270 != '':
graph_four_histograms(quartile_ref, n_bins, astro_L270, astro_Mid1, astro_Mid2, astro_R270,
name_L270, name_Mid1, name_Mid2, name_R270)
elif name_R270 == '':
graph_four_histograms(quartile_ref, n_bins, astro_L270, astro_Mid1, astro_Mid2, astro_R180,
name_L270, name_Mid1, name_Mid2, name_R180)
elif name_L270 == '':
if name_R270 != '':
graph_four_histograms(quartile_ref, n_bins, astro_L180, astro_Mid1, astro_Mid2, astro_R270,
name_L180, name_Mid1, name_Mid2, name_R270)
elif name_R270 == '':
graph_four_histograms(quartile_ref, n_bins, astro_L180, astro_Mid1, astro_Mid2, astro_R180,
name_L180, name_Mid1, name_Mid2, name_R180)
elif astro_id_num in ['7673', '4819', '3228', '2494', '2479', '2381', '1261', '1062']:
n_bins = 60
graph_two_histograms(quartile_ref, n_bins, astro_L270, astro_R270, name_L270, name_R270)
plt.savefig(f'../individual telomere length histogram distributions/png/dso{astro_id_num} histogram of individual telomere length distributions.png', dpi=600)
plt.savefig(f'../individual telomere length histogram distributions/svg/dso{astro_id_num} histogram of individual telomere length distributions.svg', format='svg', dpi=1500)
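# Hypothetical usage sketch (assumes an exploded per-telomere dataframe with
# 'astro id', 'timepoint', and 'telo data exploded' columns, as used above):
# make_histograms_colored_by_quartile_for_astronauts(exploded_telos_df=exploded_telos_df,
#                                                    astro_ids=['5163', '2171', '1536'],
#                                                    nbins=45)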
def initialize_encoded_telo_data_timepoint_or_blank(timepoint, df):
if timepoint in list(df['timepoint'].unique()):
timepoint_telo_data = df[df['timepoint'] == str(timepoint)]['telo data exploded']
name_id = str(df['encoded astro id'].unique()[0])
name_timepoint = f' {timepoint}'
name_total = 'astro ' + name_id + name_timepoint
return name_total, timepoint_telo_data
elif timepoint not in list(df['timepoint'].unique()):
timepoint_telo_data = pd.DataFrame([0,1],[0,1])
name = ''
return name, timepoint_telo_data
def make_histograms_colored_by_quartile_for_encoded_astronauts(exploded_telos_df=None, astro_ids=None, n_bins=60, save=True):
grouped_data = exploded_telos_df.groupby('encoded astro id')
for astro_id_num in astro_ids:
if astro_id_num not in grouped_data.groups.keys():
break
plot_df = grouped_data.get_group(astro_id_num)
for timepoint in ['L-270', 'L-180']:
first_timepoint = initialize_telo_data_1st_timepoint_variable(timepoint=timepoint, df=plot_df)
if first_timepoint.size > 30:
break
quartile_ref = first_timepoint
name_L270, astro_L270 = initialize_encoded_telo_data_timepoint_or_blank('L-270', plot_df)
name_L180, astro_L180 = initialize_encoded_telo_data_timepoint_or_blank('L-180', plot_df)
if 'A' == astro_id_num or 'C' == astro_id_num:
name_Mid1, astro_Mid1 = initialize_encoded_telo_data_timepoint_or_blank('FD90', plot_df)
name_Mid2, astro_Mid2 = initialize_encoded_telo_data_timepoint_or_blank('FD140', plot_df)
if 'B' == astro_id_num:
name_Mid1, astro_Mid1 = initialize_encoded_telo_data_timepoint_or_blank('FD45', plot_df)
name_Mid2, astro_Mid2 = initialize_encoded_telo_data_timepoint_or_blank('FD260', plot_df)
name_R180, astro_R180 = initialize_encoded_telo_data_timepoint_or_blank('R+180', plot_df)
name_R270, astro_R270 = initialize_encoded_telo_data_timepoint_or_blank('R+270', plot_df)
if ('B' == astro_id_num) or ('A' == astro_id_num) or ('C' == astro_id_num):
n_bins = n_bins
if name_L270 != '':
if name_R270 != '':
graph_four_histograms(quartile_ref, n_bins, astro_L270, astro_Mid1, astro_Mid2, astro_R270,
name_L270, name_Mid1, name_Mid2, name_R270)
elif name_R270 == '':
graph_four_histograms(quartile_ref, n_bins, astro_L270, astro_Mid1, astro_Mid2, astro_R180,
name_L270, name_Mid1, name_Mid2, name_R180)
elif name_L270 == '':
if name_R270 != '':
graph_four_histograms(quartile_ref, n_bins, astro_L180, astro_Mid1, astro_Mid2, astro_R270,
name_L180, name_Mid1, name_Mid2, name_R270)
elif name_R270 == '':
graph_four_histograms(quartile_ref, n_bins, astro_L180, astro_Mid1, astro_Mid2, astro_R180,
name_L180, name_Mid1, name_Mid2, name_R180)
if save:
plt.savefig(f'../MANUSCRIPT 2 ASTROS/figures/dso{astro_id_num} histogram of individual telomere length distributions.png',
bbox_inches='tight', dpi=600)
########################################################################################################################
########################################################################################################################
# FUNCTIONS FOR CORRELATING TELOMERES WITH ANALYTE DATA
########################################################################################################################
########################################################################################################################
def select_astros_of_interest(analyte_df, telomere_df, astro_ids_of_interest, target):
if 'astro id' in telomere_df.columns:
telomere_df['astro id'] = telomere_df['astro id'].astype('str')
if 'astro id' in analyte_df.columns:
analyte_df['astro id'] = analyte_df['astro id'].astype('str')
if 'sample type' in analyte_df.columns:
analyte_df.drop('sample type', axis=1, inplace=True)
# dropping unnecessary cols from telo df
for col in ['astro number', 'timepoint']:
if col in telomere_df.columns:
telomere_df.drop([col], axis=1, inplace=True)
trim_astro_df = telomere_df.copy()
if 'all astros' in astro_ids_of_interest:
# i.e as of 10/7/19 I only have n=4 (contains astro id col) & n=11 (no astro id) dataframes for analytes
# I think when I received n=3 astros.. just type astro ids for astro_ids_of_interest, it will work properly
# or.. if i receive n=11 dataframe with labeled astros..
# just rewrite this area to accept n=11 df w/ astro id col
if 'astro id' in analyte_df.columns:
(print("Possible error.. the astro id column is present.. all astros were requested but this df potentially"
"contains less than all 11 astros.. drop astro id col and retry"))
return
else:
# retain all astro ids
selected_astros = trim_astro_df
id_values = ['flight status']
elif 'all astros' not in astro_ids_of_interest:
# subset astro ids of interest
selected_astros = trim_astro_df[trim_astro_df['astro id'].isin(astro_ids_of_interest)].reset_index(drop=True)
id_values = ['astro id', 'flight status']
return analyte_df, selected_astros, id_values
def merge_analyte_telomere_data(analyte_df, selected_astros, id_values, telos_percent_change, target):
# take mean telomere length values of all astronauts or per astros of interest & merge with analytes
mean_selected_astros = selected_astros.groupby(id_values).agg('mean').reset_index()
if telos_percent_change == 'yes':
mean_selected_astros[target] = (mean_selected_astros[target]
.apply(lambda row: make_telos_percent_change(row)))
merge_analyte_df = analyte_df.merge(mean_selected_astros, on=id_values)
# prepare to drop any columns w/ missing data
indexer=['timepoint', target]
for id_value in id_values:
indexer.append(id_value)
return merge_analyte_df, indexer
def how_drop_missing_values(merge_analyte_df, how_drop_missing, indexer):
# drop every analyte (columns) with missing data
if how_drop_missing == 'by column':
pivot_merge = (merge_analyte_df.pivot_table(index=indexer, columns='biochemistry analyte',
values='measured analyte').reset_index())
pivot_merge.dropna(axis=1, inplace=True)
cleaned_data = pivot_merge.melt(id_vars=indexer, var_name='biochemistry analyte',
value_name='measured analyte').reset_index(drop=True)
# drop missing data on per analyte/timepoint/astro (row) basis
elif how_drop_missing == 'by melted row':
cleaned_data = merge_analyte_df.dropna(axis=0)
return cleaned_data
def retain_flight_status(cleaned_data, retain_what_flight_status):
# retaining analytes for which flight status
if retain_what_flight_status == 'any':
retained_data = cleaned_data
elif bool(set(retain_what_flight_status) & set(['Pre-Flight', 'Mid-Flight', 'Post-Flight'])) == True:
retained_data = cleaned_data[cleaned_data['flight status'].isin(retain_what_flight_status)].copy()
elif retain_what_flight_status == 'require at least one per status':
total_analytes = list(cleaned_data['biochemistry analyte'].unique())
analytes_3_unique_flight = []
groupby_analyte = cleaned_data.groupby('biochemistry analyte')
for analyte in total_analytes:
# make groups by analyte
get_group_by_analyte = groupby_analyte.get_group(analyte)
# look at unique flight status values per analyte
g_f_s_t = list(get_group_by_analyte['flight status'].unique())
# if pre, mid, and post flight values in unique value list per analyte, then add this analyte to a list
if 'Pre-Flight' in g_f_s_t and 'Mid-Flight' in g_f_s_t and 'Post-Flight' in g_f_s_t:
analytes_3_unique_flight.append(analyte)
# retain only analytes with at least one measurement per flight status
analytes_only_3_unique_df = cleaned_data[cleaned_data['biochemistry analyte'].isin(analytes_3_unique_flight)].copy()
return analytes_only_3_unique_df
return retained_data
def make_telos_percent_change(row):
percent_chg_telos = ((row - 0.938117) / 0.938117) * 100
return percent_chg_telos
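# Worked example: the hardcoded 0.938117 is presumably the pre-flight baseline mean
# telomere length used as the reference (an assumption; the constant is not documented
# in the source). A value of 1.0 then maps to:
# ((1.0 - 0.938117) / 0.938117) * 100   # -> ~6.6% above baseline
# make_telos_percent_change(0.938117)   # -> 0.0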
def correlate_astro_analytes_telomeres_pipeline(analyte_df=None, telomere_df=None, target=None,
astro_ids_of_interest=None,
how_drop_missing=None, retain_what_flight_status=None,
telos_percent_change='no'):
"""
    Merges astronaut biochemical analyte data with telomere data, handles missing values, and subsets by flight status for correlation analysis.
Args:
analyte_df (pandas dataframe): Contains either n=4 or n=11 biochemical analyte data in tidy data format.
telomere_df (pandas dataframe): Must contain complete telomere length data in tidy data format.
astro_ids_of_interest (str or list of str): Accepts either 'all astros' as str, whereby all astronaut data is
used for correlating telo/analyte data, or a list of astro ids to subset data for analysis.
how_drop_missing (str): Accepts either 'by column', which drops any analyte containing at least one missing value,
or 'by melted row', which drops only single instances of missing values.
    retain_what_flight_status (str or list of str): decides how to subset individual analytes based on what
flight status labels they have
Accepts: 'any', whereby no subselection is placed on analytes based on flight status,
or: subset data by flight status (list of str) for all analytes as a GROUP i.e ['Pre-Flight'] or ['Pre-Flight', 'Post-Flight']
or: 'require at least one per status', where EACH analytes must have at least one measurement per flight status
Returns:
retained_data (pandas dataframe): Data subject to the processing steps described above.
"""
# selecting astros of interest & capturing id values for handling merges
analyte_df, selected_astros, id_values = select_astros_of_interest(analyte_df, telomere_df, astro_ids_of_interest, target)
# merging analyte & telomere data, capturing indexer for handling missing data
merge_analyte_df, indexer = merge_analyte_telomere_data(analyte_df, selected_astros, id_values, telos_percent_change, target)
# dropping missing values based on input
cleaned_data = how_drop_missing_values(merge_analyte_df, how_drop_missing, indexer)
# subsetting values based on flight status labels
retained_data = retain_flight_status(cleaned_data, retain_what_flight_status)
return retained_data
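# Hypothetical usage sketch (dataframe names are assumptions): correlate mean Telo-FISH
# values with the analyte panel, keeping only analytes measured at least once per
# flight status. The argument values mirror the call made inside
# analyze_biochem_analytes_target below.
# merged = correlate_astro_analytes_telomeres_pipeline(analyte_df=melt_biochem_df,
#                                                      telomere_df=astro_df,
#                                                      target='telo means',
#                                                      astro_ids_of_interest='all astros',
#                                                      how_drop_missing='by melted row',
#                                                      retain_what_flight_status='require at least one per status',
#                                                      telos_percent_change='no')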
def find_high_correlates_analytes_mean_telos(merged_analyte_blood_tidy_df, corr_cutoff, corr_loc=0, astro_ids=False, target=None):
if astro_ids == False:
corr_value_tests = []
grouped_by_analyte = merged_analyte_blood_tidy_df.groupby('biochemistry analyte')
for group in list(merged_analyte_blood_tidy_df['biochemistry analyte'].unique()):
corr_value = grouped_by_analyte.get_group(group).corr()[target][corr_loc]
if abs(corr_value) > corr_cutoff:
corr_value_tests.append([group, corr_value])
# print(f"{group}: {corr_value:.4f}")
return corr_value_tests
elif astro_ids == True:
corr_value_requested = input('Please state index for correlation value in corr().. 0 or 1')
corr_value_tests = []
astro_ids = list(merged_analyte_blood_tidy_df['astro id'].unique())
astro_id_group = merged_analyte_blood_tidy_df.groupby('astro id')
for astro in astro_ids:
individ_astro_df = astro_id_group.get_group(astro)
analyte_grouped_by_individ = individ_astro_df.groupby('biochemistry analyte')
analytes = list(individ_astro_df['biochemistry analyte'].unique())
for analyte in analytes:
corr_value = analyte_grouped_by_individ.get_group(analyte).corr()[target][int(corr_value_requested)]
corr_value_tests.append([astro, analyte, corr_value])
return corr_value_tests
def plot_diverging_correlations(list_correlates=None, target_name=None, figsize=(11,7),
dpi=600, color1='black', color2='green', fontsize=16,
y_label_name='Blood biochemistry analytes',
path_labels='', save=True):
df = list_correlates.copy()
x = df['correlation value']
df['colors'] = [color2 if x < 0 else color1 for x in df['correlation value']]
df.sort_values('correlation value', inplace=True)
df.reset_index(inplace=True, drop=True)
plt.figure(figsize=figsize, dpi=dpi)
plt.hlines(y=df.index, xmin=0, xmax=df['correlation value'], color=df['colors'], alpha=0.6, linewidth=7)
# Decorations
plt.yticks(df.index, df['biochemistry analyte'], fontsize=fontsize)
plt.xticks(fontsize=fontsize)
plt.xlabel(target_name, fontsize=fontsize)
plt.ylabel(y_label_name, fontsize=fontsize)
plt.grid(linestyle='-', alpha=.2, color='black')
plt.tight_layout()
my_xticks = np.array([-1, -.5, 0, .5, 1])
plt.xticks(my_xticks[::1])
if save:
plt.savefig(f'../MANUSCRIPT 11 ASTROS/figures/11 astros diverging bars {y_label_name} {target_name} {path_labels} n=11.png',
dpi=dpi, bbox_inches='tight')
def analyze_biochem_analytes_target(df=None, target=None, melt_biochem_df=None,
merge_telomere_biochem_data=False, astro_ids_of_interest='all astros',
parse_correlation_values=True, abs_value_corr=0.6,
parse_corr_min=0, parse_corr_max=0.8,
color1='black', color2='green', fontsize=16,
figsize=(9,5), y_label_name='Blood biochemistry analytes',
path_labels='', save=True):
if merge_telomere_biochem_data == True:
# merge analyte & telomere data
merged_df = correlate_astro_analytes_telomeres_pipeline(analyte_df=melt_biochem_df, telomere_df=df,
target=target, astro_ids_of_interest=astro_ids_of_interest,
how_drop_missing='by melted row',
retain_what_flight_status='require at least one per status',
telos_percent_change='no')
elif merge_telomere_biochem_data == False:
merged_df = df.copy()
# find highly correlated analytes
corr_value_tests = find_high_correlates_analytes_mean_telos(merged_df, abs_value_corr, corr_loc=0,
astro_ids=False, target=target)
# turn correlated analytes/mean telomere length into dataframe
blood_n11_high_corr_values = pd.DataFrame(corr_value_tests, columns=['biochemistry analyte', 'correlation value'])
if parse_correlation_values:
blood_n11_high_corr_values = blood_n11_high_corr_values[(blood_n11_high_corr_values['correlation value'] < parse_corr_min) |
(blood_n11_high_corr_values['correlation value'] > parse_corr_max)].copy()
# plot diverging bars correlates
plot_diverging_correlations(list_correlates=blood_n11_high_corr_values,
target_name=target, figsize=figsize,
color1=color1, color2=color2, save=save,
y_label_name=y_label_name, fontsize=fontsize,
path_labels=path_labels)
return merged_df
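# Hypothetical usage sketch (dataframe names are assumptions): run the full merge ->
# correlation -> diverging-bar plot in one call, keeping analytes with |r| above 0.6.
# merged_df = analyze_biochem_analytes_target(df=astro_df, target='telo means',
#                                             melt_biochem_df=melt_biochem_df,
#                                             merge_telomere_biochem_data=True,
#                                             abs_value_corr=0.6, save=False)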
def scipy_anova_post_hoc_tests(df=None, flight_status_col='flight status', target='telo data per cell',
sig_test=stats.f_oneway, post_hoc=sp.posthoc_ttest):
g_1 = df[df[flight_status_col] == 'Pre-Flight'][target]
g_2 = df[df[flight_status_col] == 'Mid-Flight'][target]
g_3 = df[df[flight_status_col] == 'Post-Flight'][target]
statistic, p_value = sig_test(g_1, g_2, g_3)
print(f'ONE WAY ANOVA for telomere length: {p_value}')
# if anova detects sig diff, perform post-hoc tests
if p_value <= 0.05:
        print('post-hoc pairwise t-tests (unadjusted p-values)')
display(sp.posthoc_ttest(df, val_col=target, group_col=flight_status_col, equal_var=True,
p_adjust=None))
def telos_scipy_anova_post_hoc_tests(df0=None, time_col='flight status', target='individual telomeres',
sig_test=stats.f_oneway, post_hoc=None, repeated_measures=False):
df = df0.copy()
df.rename({'telo data per cell': 'telo_data_per_cell',
'flight status': 'flight_status',
'Mean Telomere Length (qPCR)': 'Mean_Telomere_Length_(qPCR)',
               'Telomerase Activity (qPCR)': 'Telomerase_Activity_(qPCR)',
'astro id': 'astro_id'}, axis=1, inplace=True)
if ' ' in time_col:
time_col = time_col.replace(' ', '_')
if ' ' in target:
target = target.replace(' ', '_')
if repeated_measures == False:
g_1 = df[df[time_col] == 'Pre-Flight'][target]
g_2 = df[df[time_col] == 'Mid-Flight'][target]
g_3 = df[df[time_col] == 'Post-Flight'][target]
statistic, p_value = sig_test(g_1, g_2, g_3)
print(f'ONE WAY ANOVA for telomere length: {p_value}')
elif repeated_measures:
results = AnovaRM(df, target, 'astro_id',
within=[time_col], aggregate_func='mean').fit()
# pvalue
p_value = results.anova_table['Pr > F'][0]
print(f'REPEATED MEASURES ANOVA for telomere length: {p_value}')
# if anova detects sig diff, perform post-hoc tests
if p_value <= 0.05:
mc = MultiComparison(df[target], df[time_col])
mc_results = mc.tukeyhsd()
print(mc_results)
res = mc_results
print(f'TukeyHSD pvalues: {list(psturng(np.abs(res.meandiffs / res.std_pairs), len(res.groupsunique), res.df_total))}')
# print('\nbonferroni pvalues')
# display(sp.posthoc_ttest(df, val_col=target, group_col=time_col, equal_var=False,
# p_adjust='bonferroni'))
def id_encode_letters(row):
if row == '1536':
row = 'A'
elif row == '2171':
row = 'B'
return row
def eval_make_test_comparisons(df=None, timepoints=None, test=None, test_name=None,
target='individual telos'):
timepoints = list(df['timepoint'].unique())
timept_pairs = []
row = []
df_list = []
for timept in timepoints:
df_list.append(df[df['timepoint'] == timept][target])
    for iter1, series_1 in zip(timepoints, df_list):
        for iter2, i in zip(timepoints, range(len(df_list))):
            pair1, pair2 = f"{iter1}:{iter2}", f"{iter2}:{iter1}"
            if iter1 != iter2 and pair1 not in timept_pairs and pair2 not in timept_pairs:
                stat, pvalue = test(series_1, df_list[i])
print(f'{test_name} | {iter1} vs {iter2} {pvalue}')
timept_pairs.append(pair1)
timept_pairs.append(pair2)
row.append([test_name, iter1, iter2, pvalue])
return timept_pairs, row
def make_post_flight_df_and_merge(astro_df=None, exploded_telos=None, timepoint=None):
"""
parse out mean telomere length & #s short/long telomeres from specific post-flight (R+7, R+60, ... R+270) timepoints
and merge with exploded_telos dataframe for machine learning prep
"""
# parsing out post-flight data of interest
timepoint_df = astro_df[astro_df['timepoint'] == timepoint].copy()
for col in ['telo means', 'Q1', 'Q4']:
timepoint_df.rename({col: f'{timepoint} {col}'}, axis=1, inplace=True)
timepoint_df.drop(['astro number', 'timepoint', 'flight status'], axis=1, inplace=True)
# extracting pre-flight individual telomere data only
exploded_telos_pref = exploded_telos[exploded_telos['flight status'] == 'Pre-Flight'].copy()
exploded_telos_pref.drop(['astro number', 'flight status'], axis=1, inplace=True)
merge_df = exploded_telos_pref.merge(timepoint_df, on=['astro id'])
return merge_df
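# Hypothetical usage sketch (dataframe names are assumptions): pair pre-flight
# individual telomeres with each astronaut's R+7 summary values for machine learning prep.
# r7_ml_df = make_post_flight_df_and_merge(astro_df=astro_df,
#                                          exploded_telos=exploded_telos_df,
#                                          timepoint='R+7')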
class make_features(BaseEstimator, TransformerMixin):
def __init__(self, make_log_individ_telos=False, make_log_target=False):
self.make_log_individ_telos = make_log_individ_telos
self.make_log_target = make_log_target
def fit(self, X, y=None):
return self
def create_log_individ_telos(self, X, y=None):
X['individual telos'] = np.log1p(X['individual telos'])
return X
def create_log_target(self, X, y=None):
X['4 C telo means'] = np.log1p(X['4 C telo means'])
return X
def transform(self, X, y=None):
if self.make_log_individ_telos:
X = self.create_log_individ_telos(X)
if self.make_log_target:
X = self.create_log_target(X)
return X
class make_dummies(BaseEstimator, TransformerMixin):
def __init__(self, drop_first=True, cols_to_dummify=['timepoint'], how_dummify='encode'):
self.drop_first = drop_first
self.cols_to_dummify = cols_to_dummify
self.how_dummify=how_dummify
def fit(self, X, y=None):
return self
def transf_dummies(self, X, y=None):
dummies = pd.get_dummies(X, drop_first=self.drop_first, columns=self.cols_to_dummify)
return dummies
def label_encode(self, X, y=None):
label_encoder = preprocessing.LabelEncoder()
X['encoded_timepoint'] = label_encoder.fit_transform(X[self.cols_to_dummify].values.ravel())
X.drop(['timepoint'], axis=1, inplace=True)
return X
def transform(self, X, y=None):
if self.how_dummify == 'get_dummies':
X = self.transf_dummies(X)
elif self.how_dummify == 'encode':
X = self.label_encode(X)
return X
class clean_data(BaseEstimator, TransformerMixin):
def __init__(self, drop_astro_id=True, timepoint='R+7', target='telo means'):
self.drop_astro_id = drop_astro_id
self.timepoint_target = f'{timepoint} {target}'
self.timepoint = timepoint
self.target = target
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
# enforcing col types
cols = list(X.columns)
for col in cols:
if 'individual telomeres' in col or 'telo means' in col:
X[col] = X[col].astype('float64')
else:
X[col] = X[col].astype('int64')
if self.drop_astro_id:
X.drop(['astro id'], axis=1, inplace=True)
X.reset_index(drop=True, inplace=True)
target_cols = ['telo means', 'Q1', 'Q4']
target_cols.remove(self.target)
for item in target_cols:
for col in X.columns:
if item in col:
X.drop([col], axis=1, inplace=True)
# if 'telo means' in self.target:
# X.drop([f'{timepoint} Q1', f'{timepoint} Q4'], axis=1, inplace=True)
# elif 'Q1' in self.target:
# X.drop([f'{timepoint} Q1', f'{timepoint} Q4'], axis=1, inplace=True)
# X = X[['encoded_timepoint', 'individual telomeres', self.timepoint_target]].copy()
return X
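# Hypothetical usage sketch: the three transformers above follow the scikit-learn
# BaseEstimator/TransformerMixin pattern, so they can be chained in a Pipeline before
# the train/test split (the pipeline name and argument values are assumptions, not
# from the original source; r7_ml_df refers to the sketch after make_post_flight_df_and_merge).
# from sklearn.pipeline import Pipeline
# preprocess = Pipeline([('features', make_features(make_log_individ_telos=False, make_log_target=False)),
#                        ('dummies', make_dummies(how_dummify='encode')),
#                        ('clean', clean_data(drop_astro_id=True, timepoint='R+7', target='telo means'))])
# cleaned = preprocess.fit_transform(r7_ml_df)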
def cv_score_fit_mae_test(train_set=None, test_set=None, target=None,
model=None, cv=5, scoring='neg_mean_absolute_error', verbose=True):
random.seed(888)
row = []
features = [col for col in train_set.columns if col != target and col != 'astro id']
X_train = train_set[features].copy()
X_test = test_set[features].copy()
y_train = train_set[target].copy()
y_test = test_set[target].copy()
# cv
    scores = -1 * cross_val_score(model, X_train, y_train, cv=cv, scoring=scoring)
if verbose:
print(f'MAE per CV fold: \n{scores} \n')
print(f'MEAN of MAE all folds: {scores.mean()}')
print(f'STD of MAE all folds: {scores.std()}\n')
# fitting the model
model.fit(X_train, y_train)
    # predict y_test from X_test - this is using the train/test split w/o shuffling
predict_y_test = model.predict(X_test)
if verbose:
print(f"MAE of predict_y_test & y_test: {mean_absolute_error(y_test, predict_y_test)}")
print(f'R2 between predict_y_test & y_test: {r2_score(y_test, predict_y_test)}')
row.append(['XGBoost', features, target, round(scores.mean(), 4),
round(scores.std(), 4),
round(mean_absolute_error(y_test, predict_y_test), 4),
round(r2_score(y_test, predict_y_test), 4)])
return model, row
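# Hypothetical usage sketch (model choice and split parameters are assumptions; the
# 'XGBoost' row label above suggests an XGBoost regressor was used): cross-validate on
# the train split, then report MAE/R2 on the held-out split. 'cleaned' refers to the
# pipeline sketch after the clean_data transformer.
# from sklearn.model_selection import train_test_split
# from xgboost import XGBRegressor
# train_set, test_set = train_test_split(cleaned, test_size=0.2, random_state=888)
# model, row = cv_score_fit_mae_test(train_set=train_set, test_set=test_set,
#                                    target='R+7 telo means',
#                                    model=XGBRegressor(objective='reg:squarederror'), cv=5)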
def myMetric(x, y):
r = stats.pearsonr(x, y)[0]
return 1 - r
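# myMetric turns the Pearson correlation into a distance for hierarchical clustering:
# perfectly correlated profiles -> 0, uncorrelated -> 1, anti-correlated -> 2.
# myMetric([1, 2, 3], [2, 4, 6])   # -> 0.0
# myMetric([1, 2, 3], [3, 2, 1])   # -> 2.0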
def plot_dendogram(Z, target=None, indexer=None):
with plt.style.context('fivethirtyeight' ):
plt.figure(figsize=(10, 2.5))
plt.title(f'Dendrogram of clusters by {target}', fontsize=22, fontweight='bold')
plt.xlabel('astro IDs', fontsize=22, fontweight='bold')
plt.ylabel('distance', fontsize=22, fontweight='bold')
hac.dendrogram(Z, labels=indexer, leaf_rotation=90., # rotates the x axis labels
leaf_font_size=15., ) # font size for the x axis labels
plt.show()
def plot_results2(timeSeries, D, cut_off_level, y_size, x_size, verbose, time, target):
result = pd.Series(hac.fcluster(D, cut_off_level, criterion='maxclust'))
if verbose:
clusters = result.unique()
fig = plt.subplots(figsize=(x_size, y_size))
mimg = math.ceil(cut_off_level/2.0)
gs = gridspec.GridSpec(mimg,2, width_ratios=[1,1])
cluster_indexed = pd.concat([result, timeSeries.reset_index(drop=True)], axis=1)
columns = list(cluster_indexed.columns[1:])
columns = ['clusters'] + columns
cluster_indexed.columns = columns
for ipic, c in enumerate(clusters):
clustered = cluster_indexed[cluster_indexed['clusters'] == c].copy()
print(ipic, "Cluster number %d has %d elements" % (c, len(clustered['astro id'])))
melt = clustered.melt(id_vars=['astro id', 'clusters'], var_name=time,value_name=target)
ax1 = plt.subplot(gs[ipic])
sns.lineplot(x=time, y=target, hue='astro id', data=melt, legend=False, ax=ax1)
ax1.set_title((f'Cluster number {c}'), fontsize=15, fontweight='bold')
plt.tight_layout()
return result
def cluster_data_return_df(df, target='inversions', time='timepoint', cut_off_n=4,
metric=myMetric, method='single',
y_size=6, x_size=10, verbose=True):
df = df.copy()
label_enc = LabelEncoder()
labels = list(df[time])
encoded_labels = list(LabelEncoder().fit_transform(df[time]))
cypher_dict = dict(zip(encoded_labels, labels))
df[time] = LabelEncoder().fit_transform(df[time])
df = df.pivot(index='astro id', columns=time, values=target).reset_index()
# run the clustering
cluster_Z = hac.linkage(df, method=method, metric=metric)
if verbose:
plot_dendogram(cluster_Z, target=target, indexer=df.index)
# return df bearing cluster groups
indexed_clusters = plot_results2(df, cluster_Z, cut_off_n, y_size, x_size, verbose, time, target)
# concat clusters to original df and return
ready_concat = df.reset_index(drop=True)
clustered_index_df = pd.concat([ready_concat, indexed_clusters], axis=1)
clustered_index_df.columns = list(clustered_index_df.columns[:-1]) + [f'{target} cluster groups']
melted = clustered_index_df.melt(id_vars=['astro id', f'{target} cluster groups'], var_name=time, value_name=target)
melted[time] = melted[time].apply(lambda row: cypher_dict[row])
return melted
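# Hypothetical usage sketch (the input dataframe columns and cluster count are
# assumptions): cluster astronauts by their longitudinal mean telomere length
# trajectories using the Pearson-based distance above, then plot the cluster groups.
# clustered = cluster_data_return_df(pull_telofish_df(), target='telo means',
#                                    time='timepoint', cut_off_n=3,
#                                    metric=myMetric, method='single')
# graph_cluster_groups(clustered, time='timepoint', target='telo means',
#                      hue='telo means cluster groups', save=False)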
def fish_assign_clustering(row):
cluster_dict = {'5163': 1,
'2381': 1,
'2494': 1,
'1261': 2,
'1536': 2,
'7673': 2,
'2171': 2,
'4819': 2,
'3228': 3,
'1062': 3,
'2479': 3}
return cluster_dict[row]
def qpcr_assign_cluster(row):
cluster_grp_dict = {'2381': 1,
'4819': 2,
'5163': 2,
'2171': 2,
'3228': 2,
'1062': 2,
'2494': 2,
'7673': 2,
'2479': 3,
'1261': 3,
'1536': 3}
return cluster_grp_dict[row]
def graph_cluster_groups(df, time=None, target=None, hue=None, colors='Set1',
n_cols=3, y_label_name=None, figsize=(7,3.2),
fontsize=14, save=True, bbox_to_anchor=(0.5, 1.21),
y_lim=None, path_labels='11 astros',
markersize=13, markerscale=2, handlelength=1.22):
colors = sns.color_palette(colors)
plt.figure(figsize=figsize)
ax = sns.lineplot(x=time, y=target, data=df, hue=hue, markers=True,
palette=sns.color_palette(colors[:len(df[hue].unique())]),
style=hue, **{'markersize': markersize, 'mec': 'black', 'mew': 1})
plt.setp(ax.get_xticklabels(),
# rotation=45,
fontsize=fontsize)
ax.set_ylabel(f'{y_label_name}', fontsize=fontsize)
ax.set_xlabel('', fontsize=fontsize)
ax.tick_params(labelsize=fontsize)
legend = ax.legend()
legend.texts[0].set_text('Cluster groups')
if y_lim != None:
ax.set_ylim(y_lim)
plt.legend(loc='upper center', bbox_to_anchor=bbox_to_anchor, handlelength=handlelength,
ncol=n_cols, fancybox=True, fontsize=fontsize, markerscale=markerscale)
if save:
plt.savefig(f'../MANUSCRIPT 11 ASTROS/figures/{path_labels} lineplot {target} clustering.png',
dpi=600, bbox_inches = "tight")
def convert_mid_timepoint(row):
if row == 'FD45' or row == 'FD90':
return 'Mid-1'
elif row == 'FD140' or row == 'FD260':
return 'Mid-2'
else:
return row
def set_categories_sort(telomere_df=None, time='timepoint', sort_list=None):
df = telomere_df.copy()
if sort_list == None:
sort_list = ['L-270', 'L-180', 'L-60', 'R+7', 'R+60', 'R+180', 'R+270']
df[time] = df[time].astype('category')
df[time].cat.set_categories(sort_list, inplace=True)
return df
def ext_telo_data_longitudinal_clustering(telomere_df=None,
astro_id='astro id',
telomere_col_name='telo means',
col_to_pivot='timepoint',
timepts_of_interest=None):
df = telomere_df.copy()
if timepts_of_interest == None:
timepts_of_interest = ['L-270', 'L-180', 'L-60', 'R+7', 'R+60', 'R+180', 'R+270']
# parse cols of interest
parsed_df = df[[astro_id, col_to_pivot, telomere_col_name]].copy()
parsed_df[col_to_pivot] = parsed_df[col_to_pivot].astype('str')
# pivot out timepoints
pivot_df = parsed_df.pivot_table(index=[astro_id], columns=col_to_pivot, values=telomere_col_name).reset_index()
pivot_df.set_index(astro_id, inplace=True)
cluster_ready_df = pivot_df[timepts_of_interest].copy()
return cluster_ready_df
def rename_imputed_df(imputed_df=None, original_df=None):
imputed_df.columns = original_df.columns
imputed_df.index = original_df.index
imputed_df.columns.name = ''
return imputed_df
def clustermap_plot(df=None, method='single', metric='correlation',
color_map='PRGn', col_cluster=False, fontsize=14, z_score=0,
y_label='Mean Telomere Length (Telo-FISH)', path_labels='11 astros',
save=True):
g = sns.clustermap(df, method=method, metric=metric, z_score=z_score, figsize=(7,7),
cmap=color_map, col_cluster=col_cluster)
# colorbar
g.cax.set_position([-0.05, .2, .03, .45])
g.cax.set_ylabel(y_label, rotation=90, fontsize=fontsize)
g.cax.tick_params(labelsize=12)
# modifying y axis
g.ax_heatmap.set_ylabel('Astronaut ID', fontsize=fontsize)
g.ax_heatmap.set_xlabel('')
labels = g.ax_heatmap.yaxis.get_majorticklabels()
plt.setp(g.ax_heatmap.yaxis.get_majorticklabels(), fontsize=fontsize)
plt.setp(g.ax_heatmap.yaxis.get_minorticklabels(), fontsize=fontsize)
g.ax_heatmap.set_yticklabels(labels, rotation=0, fontsize=fontsize, va="center")
# modifying x axis
plt.setp(g.ax_heatmap.xaxis.get_majorticklabels(), rotation=45, fontsize=fontsize)
for a in g.ax_row_dendrogram.collections:
a.set_linewidth(1)
for a in g.ax_col_dendrogram.collections:
a.set_linewidth(1)
if save:
plt.savefig(f'../MANUSCRIPT 11 ASTROS/figures/{path_labels} {y_label} cluster map.png', dpi=600, bbox_inches = "tight")
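# Hypothetical usage sketch: pivot the longitudinal telomere data to one row per
# astronaut, impute missing timepoints (KNNImputer here is an assumption -- the
# imputer actually used is not shown in this file), restore labels with
# rename_imputed_df, and draw the clustermap.
# from sklearn.impute import KNNImputer
# cluster_ready = ext_telo_data_longitudinal_clustering(telomere_df=astro_df,
#                                                       telomere_col_name='telo means')
# imputed = pd.DataFrame(KNNImputer(n_neighbors=2).fit_transform(cluster_ready))
# imputed = rename_imputed_df(imputed_df=imputed, original_df=cluster_ready)
# clustermap_plot(df=imputed, method='single', metric='correlation', save=False)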
def flight_status(row):
if 'FD90' in row or 'FD45' in row:
return 'Mid-Flight'
elif 'FD140' in row or 'FD260' in row:
return 'Mid-Flight'
elif 'L' in row:
return 'Pre-Flight'
elif 'R' in row:
return 'Post-Flight'
# def encode_timepts(row):
# encode_dict = {'L-270' : 1,
# 'R+7': 2,
# 'R+270': 3}
# return encode_dict[row]
# def myMetric(x, y):
# r = stats.pearsonr(x, y)[0]
# return 1 - r
# def plot_dendogram(Z, target=None, indexer=None):
# with plt.style.context('fivethirtyeight' ):
# plt.figure(figsize=(10, 2.5))
# plt.title(f'Dendrogram of clusters by {target}', fontsize=22, fontweight='bold')
# plt.xlabel('astro IDs', fontsize=22, fontweight='bold')
# plt.ylabel('distance', fontsize=22, fontweight='bold')
# hac.dendrogram(Z, labels=indexer, leaf_rotation=90., # rotates the x axis labels
# leaf_font_size=15., ) # font size for the x axis labels
# plt.show()
# def plot_results(timeSeries, D, cut_off_level, y_size, x_size, verbose):
# result = pd.Series(hac.fcluster(D, cut_off_level, criterion='maxclust'))
# if verbose:
# clusters = result.unique()
# fig = plt.subplots(figsize=(x_size, y_size))
# mimg = math.ceil(cut_off_level/2.0)
# gs = gridspec.GridSpec(mimg,2, width_ratios=[1,1])
# cluster_indexed = pd.concat([result, timeSeries.reset_index()], axis=1)
# cluster_indexed.rename({0: 'clusters'}, axis=1, inplace=True)
# for ipic, c in enumerate(clusters):
# clustered = cluster_indexed[cluster_indexed['clusters'] == c].copy()
# print(ipic, "Cluster number %d has %d elements" % (c, len(clustered['astro id'])))
# clustered.drop(['index'], axis=1, inplace=True)
# melt = clustered.melt(id_vars=['astro id', 'clusters'], var_name='timepoint',value_name='telo means')
# melt = set_categories_sort(telomere_df=melt, sort_list=['L-270', 'R+7', 'R+270'])
# ax1 = plt.subplot(gs[ipic])
# melt
# sns.lineplot(x='timepoint', y='telo means', hue='astro id', data=melt, legend=False, ax=ax1)
# ax1.set_title((f'Cluster number {c}'), fontsize=15, fontweight='bold')
# plt.tight_layout()
# return result
# def cluster_telomere_data_return_df(df=None, target='telo means', cut_off_n=4,
# metric=myMetric, method='single',
# y_size=6, x_size=10, verbose=True):
# # astro_ids = df.index
# # knn_telo_qpcr.reset_index(drop=True, inplace=True)
# # run the clustering
# cluster_Z = hac.linkage(df, method=method, metric=metric)
# if verbose:
# plot_dendogram(cluster_Z, target=target, indexer=df.index)
# # return df bearing cluster groups
# df0 = df.copy().reset_index()
# indexed_clusters = plot_results(df0, cluster_Z, cut_off_n, y_size=y_size, x_size=x_size, verbose=verbose)
# # concat clusters to original df and return
# ready_concat = df.reset_index()
# clustered_index_df = pd.concat([ready_concat, indexed_clusters], axis=1)
# clustered_index_df.rename(columns={clustered_index_df.columns[-1]: f'{target} cluster groups',
# 1: 'L-270',
# 2: 'R+7',
# 3: 'R+270'}, inplace=True)
# melted = clustered_index_df.melt(id_vars=['astro id', f'{target} cluster groups'],
# var_name='timepoint', value_name=target)
# return melted
# def graph_cluster_groups(df, target=None, hue=None, figsize=(7,3.2), ncol=3):
# flatui = ["#9b59b6", "#2ecc71", "#e74c3c", "#95a5a6", "#34495e", "#3498db"]
# plt.figure(figsize=figsize)
# ax = sns.lineplot(x='timepoint', y=target, data=df, hue=hue,
# palette=sns.color_palette(flatui[:len(df[hue].unique())]),
# style=hue)
# plt.setp(ax.get_xticklabels(),
# # rotation=45,
# fontsize=14)
# if target == 'telo means':
# ax.set_ylabel('Mean Telomere Length (Telo-FISH)', fontsize=14)
# elif '(qPCR)' in target:
# ax.set_ylabel('Mean Telomere Length (qPCR)', fontsize=14)
# ax.set_xlabel('', fontsize=14)
# ax.tick_params(labelsize=14)
# legend = ax.legend()
# legend.texts[0].set_text('Cluster groups')
# plt.legend(loc='upper center', bbox_to_anchor=(0.5, 1.18),
# ncol=ncol, fancybox=True, fontsize=14)
# plt.savefig(f'11 astronauts CLUSTERING {target}.png',
# dpi=600, bbox_inches = "tight")
def combine_midflight(row):
if 'mid-flight 1' in row or 'mid-flight 2' in row:
row = 'mid-flight'
return row
else:
return row
def scipy_anova_post_hoc_tests(df=None, flight_status_col='flight status new',
sig_test=stats.f_oneway, post_hoc=sp.posthoc_ttest,
equal_var=False, pool_sd=False, repeated_measures=False):
"""
df should be melted by aberration type
"""
# make list of aberrations
aberrations = list(df['aberration type'].unique())
# loop through aberrations & perform anovas between pre/mid/post
for aberr in aberrations:
if repeated_measures == False:
g_1 = df[(df[flight_status_col] == 'Pre-Flight') & (df['aberration type'] == aberr)]['count per cell']
g_2 = df[(df[flight_status_col] == 'Mid-Flight') & (df['aberration type'] == aberr)]['count per cell']
g_3 = df[(df[flight_status_col] == 'Post-Flight') & (df['aberration type'] == aberr)]['count per cell']
statistic, p_value = sig_test(g_1, g_2, g_3)
print(aberr, p_value)
elif repeated_measures:
results = AnovaRM(df[df['aberration type'] == aberr].copy(), 'count per cell', 'astro id',
within=[flight_status_col], aggregate_func='mean').fit()
# pvalue
p_value = results.anova_table['Pr > F'][0]
# if anova detects sig diff, perform post-hoc tests
if p_value <= 0.05:
display(sp.posthoc_ttest(df[df['aberration type'] == aberr], val_col='count per cell',
group_col='flight status new', equal_var=equal_var, p_adjust='bonferroni',
pool_sd=pool_sd))
print('\n')
def rename_aberr(row):
if row == 'sister chromatid exchanges':
return 'classic SCEs'
elif row == 'total inversions':
return 'inversions'
elif row == 'satellite associations':
return 'sat. associations'
else:
return row
def rename_flights(row):
if row == 'pre-flight':
return 'Pre-Flight'
elif row == 'mid-flight':
return 'Mid-Flight'
elif row == 'post-flight':
return 'Post-Flight'
def pull_telofish_df():
telof_df = pd.read_csv('../data/compiled and processed data/exploded_cells_astros_df.csv')
telof_df_grouped = telof_df.groupby(by=['astro id', 'timepoint', 'flight status']).agg('mean').reset_index()
telof_df_grouped['astro id'] = telof_df_grouped['astro id'].astype('int64')
return telof_df_grouped
def pull_qpcr_df():
# astronauts telomere qpcr df
qpcr_df = pd.read_excel('../data/raw data/qpcr_telomere_astros.xlsx', usecols=[0, 1, 2])
qpcr_df.dropna(axis=0, inplace=True)
qpcr_df['astro id'] = qpcr_df['astro id'].astype('int64')
qpcr_df['flight status'] = qpcr_df['timepoint'].apply(lambda row: flight_status(row))
qpcr_grouped = qpcr_df.groupby(by=['astro id', 'timepoint', 'flight status']).agg('mean').reset_index()
qpcr_grouped['timepoint'] = qpcr_grouped['timepoint'].apply(lambda row: convert_mid_timepoint(row))
return qpcr_grouped
def pull_aberr_df():
    melt_all_astro_chr_aberr = pd.read_csv('../data/compiled and processed data/All_astronauts_chromosome_aberration_data_tidy_data.csv')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 9 17:02:59 2018
@author: bruce
Compared with version 1.6.4, this update revises the correlation coefficient calculation.
"""
import pandas as pd
import numpy as np
from scipy import fftpack
from scipy import signal
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
def correlation_matrix(corr_mx, cm_title):
from matplotlib import pyplot as plt
from matplotlib import cm as cm
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(corr_mx, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
#plt.title('cross correlation of test and retest')
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
# Add colorbar, make sure to specify tick locations to match desired ticklabels
#fig.colorbar(cax, ticks=[.75,.8,.85,.90,.95,1])
# show digit in matrix
corr_mx_array = np.asarray(corr_mx)
for i in range(22):
for j in range(22):
c = corr_mx_array[j,i]
ax1.text(i, j, round(c,2), va='center', ha='center')
plt.show()
def correlation_matrix_01(corr_mx, cm_title):
# find the maximum in each row
# input corr_mx is a dataframe
# need to convert it into a array first
#otherwise it is not working
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cs = ax1.matshow(output, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cs)
ax1.grid(False)
plt.title(cm_title)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
plt.show()
def correlation_matrix_rank(corr_mx, cm_title):
temp = corr_mx
#output = (temp == temp.max(axis=1)[:,None]) # along row
output = temp.rank(axis=1, ascending=False)
fig, ax1 = plt.subplots()
im1 = ax1.matshow(output, cmap=plt.cm.Wistia)
#cs = ax1.matshow(output)
fig.colorbar(im1)
ax1.grid(False)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
plt.title(cm_title)
# show digit in matrix
output = np.asarray(output)
for i in range(22):
for j in range(22):
c = output[j,i]
ax1.text(i, j, int(c), va='center', ha='center')
plt.show()
def correlation_matrix_comb(corr_mx, cm_title):
fig, (ax2, ax3) = plt.subplots(1, 2)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
'''
# graph 1 grayscale
im1 = ax1.matshow(corr_mx, cmap='gray')
# colorbar need numpy version 1.13.1
#fig.colorbar(im1, ax=ax1)
ax1.grid(False)
ax1.set_title(cm_title)
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
# show digit in matrix
corr_mx_array = np.asarray(corr_mx)
for i in range(22):
for j in range(22):
c = corr_mx_array[j,i]
ax1.text(i, j, round(c,2), va='center', ha='center')
'''
# graph 2 yellowscale
corr_mx_rank = corr_mx.rank(axis=1, ascending=False)
cmap_grey = LinearSegmentedColormap.from_list('mycmap', ['white', 'black'])
im2 = ax2.matshow(corr_mx, cmap='viridis')
# colorbar need numpy version 1.13.1
fig.colorbar(im2, ax=ax2)
ax2.grid(False)
ax2.set_title(cm_title)
ax2.set_xticks(np.arange(len(xlabels)))
ax2.set_yticks(np.arange(len(ylabels)))
ax2.set_xticklabels(xlabels,fontsize=6)
ax2.set_yticklabels(ylabels,fontsize=6)
# Add colorbar, make sure to specify tick locations to match desired ticklabels
# show digit in matrix
corr_mx_rank = np.asarray(corr_mx_rank)
for i in range(22):
for j in range(22):
c = corr_mx_rank[j,i]
ax2.text(i, j, int(c), va='center', ha='center')
# graph 3
# find the maximum in each row
# input corr_mx is a dataframe
# need to convert it into a array first
#otherwise it is not working
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
im3 = ax3.matshow(output, cmap='gray')
# colorbar need numpy version 1.13.1
#fig.colorbar(im3, ax=ax3)
ax3.grid(False)
ax3.set_title(cm_title)
ax3.set_xticks(np.arange(len(xlabels)))
ax3.set_yticks(np.arange(len(ylabels)))
ax3.set_xticklabels(xlabels,fontsize=6)
ax3.set_yticklabels(ylabels,fontsize=6)
plt.show()
def correlation_matrix_tt_01(corr_mx, cm_title):
# find the maximum in each row
# input corr_mx is a dataframe
# need to convert it into a array first
#otherwise it is not working
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(output, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
plt.show()
def correlation_matrix_rr_01(corr_mx, cm_title):
# find the maximum in each row
# input corr_mx is a dataframe
# need to convert it into an array first,
# otherwise it does not work
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(output, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
ylabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
plt.show()
# shrink value for correlation matrix
# in order to use colormap -> 10 scale
def shrink_value_03_1(corr_in1):
corr_out1 = corr_in1.copy()
# DataFrame.copy() must be used here, otherwise changing the output would also change the input
for i in range (22):
for j in range(22):
if corr_in1.iloc[i, j] < 0.3:
corr_out1.iloc[i, j] = 0.3
return corr_out1
def shrink_value_05_1(corr_in2):
corr_out2 = corr_in2.copy()
# DataFrame.copy() must be used here, otherwise changing the output would also change the input
for i2 in range (22):
for j2 in range(22):
if corr_in2.iloc[i2, j2] < 0.5:
corr_out2.iloc[i2, j2] = 0.5
return corr_out2
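# The two shrink_value_* helpers above differ only in the floor constant (0.3 vs 0.5).
# A generalised sketch of the same operation for the 22x22 correlation frames used here
# (an illustrative alternative, not part of the original pipeline): DataFrame.clip(lower=...)
# performs the same element-wise flooring as the explicit double loop.
def shrink_value(corr_in, floor):
    # clip returns a new DataFrame, so the input is left untouched
    return corr_in.clip(lower=floor)
# e.g. shrink_value(corr_mx, 0.3) is expected to match shrink_value_03_1(corr_mx)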
# not used!!!!!!!!!!!!
# normalize the complex signal series
def normalize_complex_arr(a):
a_oo = a - a.real.min() - 1j*a.imag.min() # origin offsetted
return a_oo/np.abs(a_oo).max()
def improved_PCC(signal_in):
output_corr = pd.DataFrame()
for i in range(44):
row_pcc_notremovemean = []
for j in range(44):
sig_1 = signal_in.iloc[i, :]
sig_2 = signal_in.iloc[j, :]
pcc_notremovemean = np.abs(np.sum(sig_1 * sig_2) / np.sqrt(np.sum(sig_1*sig_1) * np.sum(sig_2 * sig_2)))
row_pcc_notremovemean = np.append(row_pcc_notremovemean, pcc_notremovemean)
output_corr = output_corr.append(pd.DataFrame(row_pcc_notremovemean.reshape(1,44)), ignore_index=True)
output_corr = output_corr.iloc[22:44, 0:22]
return output_corr
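# improved_PCC above computes |<x, y>| / (||x|| * ||y||) for every pair of the 44 rows
# (a PCC-style similarity without mean removal) and keeps the 22x22 retest-vs-test block.
# A vectorised sketch of the same quantity (assumed equivalent, offered for reference only):
def improved_PCC_vectorized(signal_in):
    x = np.asarray(signal_in, dtype=float)            # shape (44, n_samples)
    norms = np.sqrt((x * x).sum(axis=1))              # per-row Euclidean norms
    corr = np.abs(x @ x.T) / np.outer(norms, norms)   # full 44 x 44 similarity matrix
    return pd.DataFrame(corr).iloc[22:44, 0:22]       # retest rows vs test columns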
###############################################################################
# import the pkl file
#pkl_file=pd.read_pickle('/Users/bruce/Documents/uOttawa/Project/audio_brainstem_response/Data_BruceSunMaster_Studies/study2/study2DataFrame.pkl')
df_EFR=pd.read_pickle('/home/bruce/Dropbox/4.Project/4.Code for Linux/df_EFR.pkl')
# Mac
# df_EFR=pd.read_pickle('/Users/bruce/Documents/uOttawa/Master‘s Thesis/4.Project/4.Code for Linux/df_EFR.pkl')
# remove DC offset
df_EFR_detrend = pd.DataFrame()
for i in range(1408):
# combine next two rows later
df_EFR_detrend_data = pd.DataFrame(signal.detrend(df_EFR.iloc[i: i+1, 0:1024], type='constant').reshape(1,1024))
df_EFR_label = pd.DataFrame(df_EFR.iloc[i, 1024:1031].values.reshape(1,7))
df_EFR_detrend = df_EFR_detrend.append(pd.concat([df_EFR_detrend_data, df_EFR_label], axis=1, ignore_index=True))
# set the title of columns
df_EFR_detrend.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_EFR_detrend = df_EFR_detrend.reset_index(drop=True)
df_EFR = df_EFR_detrend
# Define window function
win_kaiser = signal.kaiser(1024, beta=14)
win_hamming = signal.hamming(1024)
# average the df_EFR
df_EFR_avg = pd.DataFrame()
df_EFR_avg_win = pd.DataFrame()
# average test1 and test2
for i in range(704):
# combine next two rows later
df_EFR_avg_t = pd.DataFrame(df_EFR.iloc[2*i: 2*i+2, 0:1024].mean(axis=0).values.reshape(1,1024)) # average those two rows
# without window function
df_EFR_avg_t = pd.DataFrame(df_EFR_avg_t.iloc[0,:].values.reshape(1,1024)) # without window function
# implement the window function
df_EFR_avg_t_window = pd.DataFrame((df_EFR_avg_t.iloc[0,:] * win_hamming).values.reshape(1,1024))
df_EFR_label = pd.DataFrame(df_EFR.iloc[2*i, 1024:1031].values.reshape(1,7))
df_EFR_avg = df_EFR_avg.append(pd.concat([df_EFR_avg_t, df_EFR_label], axis=1, ignore_index=True))
df_EFR_avg_win = df_EFR_avg_win.append(pd.concat([df_EFR_avg_t_window, df_EFR_label], axis=1, ignore_index=True))
# set the title of columns
df_EFR_avg.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_EFR_avg = df_EFR_avg.sort_values(by=["Condition", "Subject"])
df_EFR_avg = df_EFR_avg.reset_index(drop=True)
df_EFR_avg_win.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_EFR_avg_win = df_EFR_avg_win.sort_values(by=["Condition", "Subject"])
df_EFR_avg_win = df_EFR_avg_win.reset_index(drop=True)
# average all the subjects , test and retest and keep one sound levels
# filter by 'a vowel and 85Db'
df_EFR_avg_sorted = df_EFR_avg.sort_values(by=["Sound Level", "Vowel","Condition", "Subject"])
df_EFR_avg_sorted = df_EFR_avg_sorted.reset_index(drop=True)
df_EFR_avg_win_sorted = df_EFR_avg_win.sort_values(by=["Sound Level", "Vowel","Condition", "Subject"])
df_EFR_avg_win_sorted = df_EFR_avg_win_sorted.reset_index(drop=True)
# filter out the 55, 65 and 75 dB sound levels and keep 85 dB
# keep vowel condition and subject
df_EFR_avg_85 = pd.DataFrame(df_EFR_avg_sorted.iloc[528:, :])
df_EFR_avg_85 = df_EFR_avg_85.reset_index(drop=True)
df_EFR_avg_win_85 = pd.DataFrame(df_EFR_avg_win_sorted.iloc[528:, :])
df_EFR_avg_win_85 = df_EFR_avg_win_85.reset_index(drop=True)
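# Row arithmetic behind the iloc[528:, :] slice above: the averaging loop produced 704 rows,
# i.e. 4 sound levels x 176 rows each, and sorting by "Sound Level" leaves the 85 dB rows
# last, so rows 528:704 are exactly the 85 dB recordings (528 = 3 * 176).
# Quick sanity checks under that assumption:
assert len(df_EFR_avg_85) == 176
assert len(df_EFR_avg_win_85) == 176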
# this part was replaced by the block above, based on what was needed here
'''
# average all the subjects , test and retest, different sound levels
# filter by 'a vowel and 85Db'
df_EFR_avg_sorted = df_EFR_avg.sort_values(by=["Vowel","Condition", "Subject", "Sound Level"])
df_EFR_avg_sorted = df_EFR_avg_sorted.reset_index(drop=True)
# average sound levels and
# keep vowel condition and subject
df_EFR_avg_vcs = pd.DataFrame()
for i in range(176):
# combine next two rows later
df_EFR_avg_vcs_t = pd.DataFrame(df_EFR_avg_sorted.iloc[4*i: 4*i+4, 0:1024].mean(axis=0).values.reshape(1,1024)) # average those two rows
df_EFR_avg_vcs_label = pd.DataFrame(df_EFR_avg_sorted.iloc[4*i, 1024:1031].values.reshape(1,7))
df_EFR_avg_vcs = df_EFR_avg_vcs.append(pd.concat([df_EFR_avg_vcs_t, df_EFR_avg_vcs_label], axis=1, ignore_index=True), ignore_index=True)
# set the title of columns
df_EFR_avg_vcs.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
#df_EFR_avg_vcs = df_EFR_avg_vcs.sort_values(by=["Condition", "Subject"])
'''
'''
# filter by 'a vowel and 85Db'
df_EFR_a_85_test1 = df_EFR[(df_EFR['Vowel'] == 'a vowel') & (df_EFR['Sound Level'] == '85')]
df_EFR_a_85_test1 = df_EFR_a_85_test1.reset_index(drop=True)
df_EFR_a_85_avg = pd.DataFrame()
# average test1 and test2
for i in range(44):
df_EFR_a_85_avg_t = pd.DataFrame(df_EFR_a_85_test1.iloc[2*i: 2*i+2, 0:1024].mean(axis=0).values.reshape(1,1024))
df_EFR_a_85_label = pd.DataFrame(df_EFR_a_85_test1.iloc[2*i, 1024:1031].values.reshape(1,7))
df_EFR_a_85_avg = df_EFR_a_85_avg.append(pd.concat([df_EFR_a_85_avg_t, df_EFR_a_85_label], axis=1, ignore_index=True))
# set the title of columns
df_EFR_a_85_avg.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_EFR_a_85_avg = df_EFR_a_85_avg.sort_values(by=["Condition", "Subject"])
df_EFR_a_85_avg = df_EFR_a_85_avg.reset_index(drop=True)
'''
##################################################
# Frequency Domain
# parameters
sampling_rate = 9606 # fs
# sampling_rate = 9596.623
n = 1024
k = np.arange(n)
T = n/sampling_rate # time of signal
frq = k/T
freq = frq[range(int(n/2))]
n2 = 9606
k2 = np.arange(n2)
T2 = n2/sampling_rate
frq2 = k2/T2
freq2 = frq2[range(int(n2/2))]
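# Frequency-axis bookkeeping for the lengths defined above: the bin spacing is fs / N,
# so N = 1024 gives 9606 / 1024 ~ 9.38 Hz per bin and N = 9606 gives exactly 1 Hz per bin;
# the zero padding in the next block stretches a 1024-sample epoch to 96060 samples,
# refining the plotted grid to 9606 / 96060 = 0.1 Hz per bin without adding information.
# Minimal sketch of how these axes pair with an FFT (illustrative only, one common scaling):
def epoch_magnitude_spectrum(epoch, n_samples=n):
    # single-sided amplitude spectrum to be plotted against the matching freq/freq2 axis
    return np.abs(np.fft.fft(epoch, n=n_samples))[:int(n_samples / 2)] / n_samples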
# zero padding
# for df_EFR
df_EFR_data = df_EFR.iloc[:, :1024]
df_EFR_label = df_EFR.iloc[:, 1024:]
df_EFR_mid = pd.DataFrame(np.zeros((1408, 95036)))
df_EFR_withzero = pd.concat([df_EFR_data, df_EFR_mid, df_EFR_label], axis=1)
# rename columns
df_EFR_withzero.columns = np.append(np.arange(96060), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
# for df_EFR_avg_85
df_EFR_avg_85_data = df_EFR_avg_85.iloc[:, :1024]
df_EFR_avg_85_label = df_EFR_avg_85.iloc[:, 1024:]
df_EFR_avg_85_mid = pd.DataFrame(np.zeros((176, 8582)))
df_EFR_avg_85_withzero = | pd.concat([df_EFR_avg_85_data, df_EFR_avg_85_mid, df_EFR_avg_85_label], axis=1) | pandas.concat |
import pandas as pd
import dash
import dash_cytoscape as cyto
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output
data= | pd.read_csv('EDGE.csv') | pandas.read_csv |
import pandas as pd
import numpy as np
def build_raw():
df = pd.DataFrame()
# Create columns to store data
df.insert(0, "Iteracion", pd.Series([], dtype=int))
df.insert(1, "pos_gain", pd.Series([], dtype=float))
df.insert(2, "vel_gain", pd.Series([], dtype=float))
df.insert(3, "vel_integrator_gain", pd.Series([], dtype=float))
df.insert(4, "pos_estimate", pd.Series([], dtype=object))
df.insert(5, "input_pos", pd.Series([], dtype=object))
df.insert(6, "Iq_measured", pd.Series([], dtype=object))
df.insert(7, "vel_estimate", pd.Series([], dtype=object))
return df
def add_raw(df, id, kp, kv, kvi, estimates, inputs, currents, vels):
row = [id, kp, kv, kvi, estimates, inputs, currents, vels]
df.loc[len(df)] = row
return df
def clean_data(data):
gains = data.iloc[:, :4]
estimates = data.iloc[:, 4]
inputs = data.iloc[:, 5]
currents = data.iloc[:, 6]
vels = data.iloc[:, 7]
#errors = ([np.subtract(inputs[i], estimates[i]) for i in range(inputs.size)])
sample_half = len(estimates[0])//2
errors = list(map(lambda i,e: np.subtract(i,e), inputs, estimates))
lag_error = [sum(filter(lambda e: e>0, iter)) for iter in errors[0:sample_half]] + [sum(filter(lambda e: e<0, iter)) for iter in errors[sample_half:]]
ahead_error = [sum(filter(lambda e: e<0, iter)) for iter in errors[0:sample_half]] + [sum(filter(lambda e: e>0, iter)) for iter in errors[sample_half:]]
err_sum = list(map(lambda l,a: np.add(np.abs(l), np.abs(a)), lag_error, ahead_error))
overshoot_error = list(map(lambda i,e: max(e)-max(i), inputs, estimates))
current_sum = [sum(np.abs(iter)) for iter in currents]
curr_vel = list(map(lambda c,v: np.mean(np.abs(c))/np.mean(np.abs(v)), currents, vels))
df = gains
df.insert(4, "lag_error", lag_error)
df.insert(5, "ahead_error", ahead_error)
df.insert(6, "error_sum", err_sum)
df.insert(7, "overshoot_error", overshoot_error)
df.insert(8, "current_sum", current_sum)
df.insert(9, "curr_vel", curr_vel)
return df
def get_results(clean):
top = clean[clean.error_sum == clean.error_sum.max()]
top.insert(10, "mass", np.mean(clean.loc[:, 'curr_vel']))
return top
def export_raw(rawdf):
df = pd.DataFrame()
# Create columns to store data
id = pd.Series([], dtype=int)
pos_gain = pd.Series([], dtype=float)
vel_gain = pd.Series([], dtype=float)
vel_integrator_gain = | pd.Series([], dtype=float) | pandas.Series |
import sys, os
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.realpath(__file__))))
from scipy import stats
from scipy.stats import distributions
from fisher import pvalue
import numpy as np
import pandas as pd
from decimal import Decimal
import multiple_testing
from multiple_testing import Bonferroni, Sidak, HolmBonferroni, BenjaminiHochberg, BH_fast_v3
import ratio
class EnrichmentStudy(object):
"""
ToDo: change examples on website
add REST API examples
unify output of genome method vs compare samples
Runs Fisher's exact test, as well as multiple corrections
abundance_correction: Foreground vs Background abundance corrected
genome: Foreground vs Proteome (no abundance correction)
compare_samples: Foreground vs Background (no abundance correction)
compare_groups: Foreground(replicates) vs Background(replicates), --> foreground_n and background_n need to be set
characterize_foreground: Foreground only
"""
def __init__(self, pqo, args_dict, ui, assoc_dict, enrichment_method="genome", entity_type="-51",
o_or_u_or_both="overrepresented",
multitest_method="benjamini_hochberg", alpha=0.05,
association_2_count_dict_background=None, background_n=None,
indent=False):
self.pqo = pqo
self.args_dict = args_dict
self.ui = ui
self.method = enrichment_method
self.assoc_dict = assoc_dict
# self.obo_dag = obo_dag
self.alpha = alpha
self.multitest_method = multitest_method
self.results = []
self.o_or_u_or_both = o_or_u_or_both
self.entity_type = entity_type
self.indent = indent # prepend GO-terms with a "." for each level
### prepare run for everyone but "rank_enrichment"
if self.method != "rank_enrichment":
self.an_set_foreground = self.ui.get_foreground_an_set()
self.association_2_count_dict_foreground, self.association_2_ANs_dict_foreground, self.foreground_n = ratio.count_terms_v3(self.an_set_foreground, self.assoc_dict)
if self.method == "genome":
self.run_genome(association_2_count_dict_background, background_n)
elif self.method == "rank_enrichment":
self.df = self.run_rank_enrichment()
elif self.method == "abundance_correction":
self.run_abundance_correction()
elif self.method == "compare_samples":
self.run_compare_samples()
elif self.method == "compare_groups":
self.run_compare_groups()
elif self.method == "characterize_foreground":
self.run_characterize_foreground()
else:
raise NotImplementedError
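# For orientation: every branch above eventually reduces to a per-term 2x2 Fisher's exact
# test of foreground vs background counts. A sketch of that table (an assumption about the
# shape of the test, not the actual run_study implementation, which is defined elsewhere):
#
#                   has term             lacks term
#   foreground      foreground_count     foreground_n - foreground_count
#   background      background_count     background_n - background_count
#
# p_uncorrected = pvalue(foreground_count,
#                        foreground_n - foreground_count,
#                        background_count,
#                        background_n - background_count).two_tail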
def run_abundance_correction(self):
self.background_n = self.foreground_n
self.association_2_count_dict_background, self.association_2_ANs_dict_background = ratio.count_terms_abundance_corrected(self.ui, self.assoc_dict)
self.df = self.run_study(self.association_2_count_dict_foreground, self.association_2_count_dict_background, self.foreground_n, self.background_n)
def run_genome(self, association_2_count_dict_background, background_n):
self.association_2_count_dict_background, self.background_n = association_2_count_dict_background, background_n
self.df = self.run_study_genome(self.association_2_count_dict_foreground, self.association_2_count_dict_background, self.foreground_n, self.background_n)
def get_result(self, FDR_cutoff=None, fold_enrichment_for2background=None, p_value_uncorrected=None):
self.df = self.filter_results(self.df, FDR_cutoff, fold_enrichment_for2background, p_value_uncorrected)
# if self.method != "characterize_foreground": # since no p-values available
# self.df["p_value"] = self.df["p_value"].apply(lambda x: "{:.2E}".format(Decimal(x)))
# self.df["FDR"] = self.df["FDR"].apply(lambda x: "{:.2E}".format(Decimal(x)))
return self.df
def run_compare_samples(self):
self.an_set_background = self.ui.get_background_an_set()
self.association_2_count_dict_background, self.association_2_ANs_dict_background, self.background_n = ratio.count_terms_v3(self.an_set_background, self.assoc_dict)
self.df = self.run_study(self.association_2_count_dict_foreground, self.association_2_count_dict_background, self.foreground_n, self.background_n)
def run_compare_groups(self):
self.foreground_n = self.ui.get_foreground_n()
self.background_n = self.ui.get_background_n()
self.an_redundant_foreground = self.ui.get_an_redundant_foreground()
self.an_redundant_background = self.ui.get_an_redundant_background()
self.association_2_count_dict_foreground, self.association_2_ANs_dict_foreground, unused_an_count = ratio.count_terms_v3(self.an_redundant_foreground, self.assoc_dict)
self.association_2_count_dict_background, self.association_2_ANs_dict_background, unused_an_count = ratio.count_terms_v3(self.an_redundant_background, self.assoc_dict)
self.df = self.run_study(self.association_2_count_dict_foreground, self.association_2_count_dict_background, self.foreground_n, self.background_n)
def run_characterize_foreground(self):
self.an_redundant_foreground = self.ui.get_an_redundant_foreground()
self.association_2_count_dict_foreground, self.association_2_ANs_dict_foreground, unused_an_count = ratio.count_terms_v3(self.an_redundant_foreground, self.assoc_dict)
self.df = self.characterize_foreground(self.association_2_count_dict_foreground, self.foreground_n)
def characterize_foreground(self, association_2_count_dict_foreground, foreground_n):
term_list, description_list, foreground_ids_list, foreground_count_list, ratio_in_foreground_list = [], [], [], [], []
for association, foreground_count in association_2_count_dict_foreground.items():
term_list.append(association)
# description_list.append(self.pqo.function_an_2_description_dict[association])
ratio_in_foreground_list.append(self.calc_ratio(foreground_count, foreground_n))
foreground_ids_list.append(';'.join(self.association_2_ANs_dict_foreground[association]))
foreground_count_list.append(foreground_count)
df = | pd.DataFrame({"term": term_list, "ratio_in_foreground": ratio_in_foreground_list, "foreground_ids": foreground_ids_list, "foreground_count": foreground_count_list}) | pandas.DataFrame |
from __future__ import absolute_import
import numpy as np
import pandas as pd
qty=10000
gauss = {'oneA': np.random.randn(qty),
'oneB': np.random.randn(qty),
'cats': np.random.randint(0,5,size=qty),
'hundredA': np.random.randn(qty)*100,
'hundredB': np.random.randn(qty)*100}
gauss = pd.DataFrame(gauss)
uniform = {'oneA': np.random.rand(qty),
'oneB': np.random.rand(qty),
'hundredA': np.random.rand(qty)*100,
'hundredB': np.random.rand(qty)*100}
uniform = pd.DataFrame(uniform)
bivariate = {'A1': np.hstack([np.random.randn(qty//2), np.random.randn(qty//2)+1]),
'A2': np.hstack([np.random.randn(qty//2), np.random.randn(qty//2)+2]),
'A3': np.hstack([np.random.randn(qty//2), np.random.randn(qty//2)+3]),
'A4': np.hstack([np.random.randn(qty//2), np.random.randn(qty//2)+4]),
'A5': np.hstack([np.random.randn(qty//2), np.random.randn(qty//2)+5]),
'B': np.random.randn(qty),
'C': np.hstack([np.zeros(qty//2), np.ones(qty//2)])}
bivariate = | pd.DataFrame(bivariate) | pandas.DataFrame |
"""
Import as:
import core.test.test_statistics as cttsta
"""
import logging
from typing import List
import numpy as np
import pandas as pd
import pytest
import core.artificial_signal_generators as casgen
import core.finance as cfinan
import core.signal_processing as csproc
import core.statistics as cstati
import helpers.printing as hprint
import helpers.unit_test as hut
_LOG = logging.getLogger(__name__)
class TestComputeMoments(hut.TestCase):
def test1(self) -> None:
series = self._get_series(seed=1)
actual = cstati.compute_moments(series)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test2(self) -> None:
series = self._get_series(seed=1)
actual = cstati.compute_moments(series, prefix="moments_")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
# Smoke test for empty input.
def test3(self) -> None:
series = pd.Series([])
cstati.compute_moments(series)
def test4(self) -> None:
series = self._get_series(seed=1)
# Place some `NaN` values in the series.
series[:5] = np.nan
series[8:10] = np.nan
actual = cstati.compute_moments(series)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test5(self) -> None:
series = self._get_series(seed=1)
# Place some `NaN` values in the series.
series[:5] = np.nan
series[8:10] = np.nan
actual = cstati.compute_moments(series, nan_mode="ffill_and_drop_leading")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
# Smoke test for input of `np.nan`s.
def test6(self) -> None:
series = pd.Series([np.nan for i in range(10)])
cstati.compute_moments(series)
def test7(self) -> None:
"""
Test series with `inf`.
"""
series = self._get_series(seed=1)
# Place some `NaN` values in the series.
series[4] = np.inf
actual = cstati.compute_moments(series, nan_mode="ffill_and_drop_leading")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
@staticmethod
def _get_series(seed: int) -> pd.Series:
arparams = np.array([0.75, -0.25])
maparams = np.array([0.65, 0.35])
arma_process = casgen.ArmaProcess(arparams, maparams)
date_range = {"start": "1/1/2010", "periods": 40, "freq": "M"}
series = arma_process.generate_sample(
date_range_kwargs=date_range, seed=seed
)
return series
class TestComputeFracZero(hut.TestCase):
def test1(self) -> None:
data = [0.466667, 0.2, 0.13333, 0.2, 0.33333]
index = [0, 1, 2, 3, 4]
expected = pd.Series(data=data, index=index)
actual = cstati.compute_frac_zero(self._get_df(seed=1))
pd.testing.assert_series_equal(actual, expected, check_less_precise=3)
def test2(self) -> None:
data = [
0.4,
0.0,
0.2,
0.4,
0.4,
0.2,
0.4,
0.0,
0.6,
0.4,
0.6,
0.2,
0.0,
0.0,
0.2,
]
index = pd.date_range(start="1-04-2018", periods=15, freq="30T")
expected = pd.Series(data=data, index=index)
actual = cstati.compute_frac_zero(self._get_df(seed=1), axis=1)
pd.testing.assert_series_equal(actual, expected, check_less_precise=3)
def test3(self) -> None:
# Equals 20 / 75 = num_zeros / num_points.
expected = 0.266666
actual = cstati.compute_frac_zero(self._get_df(seed=1), axis=None)
np.testing.assert_almost_equal(actual, expected, decimal=3)
def test4(self) -> None:
series = self._get_df(seed=1)[0]
expected = 0.466667
actual = cstati.compute_frac_zero(series)
np.testing.assert_almost_equal(actual, expected, decimal=3)
def test5(self) -> None:
series = self._get_df(seed=1)[0]
expected = 0.466667
actual = cstati.compute_frac_zero(series, axis=0)
np.testing.assert_almost_equal(actual, expected, decimal=3)
# Smoke test for empty input.
def test6(self) -> None:
series = pd.Series([])
cstati.compute_frac_zero(series)
@staticmethod
def _get_df(seed: int) -> pd.DataFrame:
nrows = 15
ncols = 5
num_nans = 15
num_infs = 5
num_zeros = 20
#
np.random.seed(seed=seed)
mat = np.random.randn(nrows, ncols)
mat.ravel()[np.random.choice(mat.size, num_nans, replace=False)] = np.nan
mat.ravel()[np.random.choice(mat.size, num_infs, replace=False)] = np.inf
mat.ravel()[np.random.choice(mat.size, num_infs, replace=False)] = -np.inf
mat.ravel()[np.random.choice(mat.size, num_zeros, replace=False)] = 0
#
index = pd.date_range(start="01-04-2018", periods=nrows, freq="30T")
df = pd.DataFrame(data=mat, index=index)
return df
class TestComputeFracNan(hut.TestCase):
def test1(self) -> None:
data = [0.4, 0.133333, 0.133333, 0.133333, 0.2]
index = [0, 1, 2, 3, 4]
expected = pd.Series(data=data, index=index)
actual = cstati.compute_frac_nan(self._get_df(seed=1))
pd.testing.assert_series_equal(actual, expected, check_less_precise=3)
def test2(self) -> None:
data = [
0.4,
0.0,
0.2,
0.4,
0.2,
0.2,
0.2,
0.0,
0.4,
0.2,
0.6,
0.0,
0.0,
0.0,
0.2,
]
index = pd.date_range(start="1-04-2018", periods=15, freq="30T")
expected = pd.Series(data=data, index=index)
actual = cstati.compute_frac_nan(self._get_df(seed=1), axis=1)
pd.testing.assert_series_equal(actual, expected, check_less_precise=3)
def test3(self) -> None:
# Equals 15 / 75 = num_nans / num_points.
expected = 0.2
actual = cstati.compute_frac_nan(self._get_df(seed=1), axis=None)
np.testing.assert_almost_equal(actual, expected, decimal=3)
def test4(self) -> None:
series = self._get_df(seed=1)[0]
expected = 0.4
actual = cstati.compute_frac_nan(series)
np.testing.assert_almost_equal(actual, expected, decimal=3)
def test5(self) -> None:
series = self._get_df(seed=1)[0]
expected = 0.4
actual = cstati.compute_frac_nan(series, axis=0)
np.testing.assert_almost_equal(actual, expected, decimal=3)
# Smoke test for empty input.
def test6(self) -> None:
series = pd.Series([])
cstati.compute_frac_nan(series)
@staticmethod
def _get_df(seed: int) -> pd.DataFrame:
nrows = 15
ncols = 5
num_nans = 15
num_infs = 5
num_zeros = 20
#
np.random.seed(seed=seed)
mat = np.random.randn(nrows, ncols)
mat.ravel()[np.random.choice(mat.size, num_infs, replace=False)] = np.inf
mat.ravel()[np.random.choice(mat.size, num_infs, replace=False)] = -np.inf
mat.ravel()[np.random.choice(mat.size, num_zeros, replace=False)] = 0
mat.ravel()[np.random.choice(mat.size, num_nans, replace=False)] = np.nan
#
index = pd.date_range(start="01-04-2018", periods=nrows, freq="30T")
df = pd.DataFrame(data=mat, index=index)
return df
class TestComputeNumFiniteSamples(hut.TestCase):
@staticmethod
# Smoke test for empty input.
def test1() -> None:
series = pd.Series([])
cstati.count_num_finite_samples(series)
class TestComputeNumUniqueValues(hut.TestCase):
@staticmethod
# Smoke test for empty input.
def test1() -> None:
series = pd.Series([])
cstati.count_num_unique_values(series)
class TestComputeDenominatorAndPackage(hut.TestCase):
@staticmethod
# Smoke test for empty input.
def test1() -> None:
series = pd.Series([])
cstati._compute_denominator_and_package(reduction=1, data=series)
class TestTTest1samp(hut.TestCase):
# Smoke test for empty input.
def test1(self) -> None:
series = | pd.Series([]) | pandas.Series |
# This is the (main) script which runs the beta and club convergence tests
# Before running this, you should download the data from NASS, per the instructions in the paper, and run prep_tool.py
# Importing required modules
import pandas as pd
import numpy as np
import statsmodels.api as stats
from matplotlib import pyplot as plt
import plotly.figure_factory as ff
# Importing the time series data
data_all = pd.read_csv('C:/Users/User/Documents/Data/Cornvergence/Corn_Counties_Yield_All_TimeSeries.csv')
data_belt = pd.read_csv('C:/Users/User/Documents/Data/Cornvergence/Corn_Counties_Yield_Belt_TimeSeries.csv')
# Creating counties directories for future use
counties_all = data_all.County
counties_belt = data_belt.County
# Creating the dataframes for the beta convergence test
df_belt = pd.DataFrame(columns = ['County', 'Rate', 'Initial'])
for i in range(len(data_belt.County)):
rate = np.log(data_belt['2015'][i] / data_belt['1991'][i])
init = np.log(data_belt['1991'][i])
row = np.transpose([data_belt.County[i], rate, init]).reshape(1,3)
row = pd.DataFrame(row, columns = df_belt.columns)
df_belt = pd.concat([df_belt, row], axis = 0)
df_all = pd.DataFrame(columns = ['County', 'Rate', 'Initial'])
for i in range(len(data_all.County)):
rate = np.log(data_all['2015'][i] / data_all['1991'][i])
init = np.log(data_all['1991'][i])
row = np.transpose([data_all.County[i], rate, init]).reshape(1,3)
row = pd.DataFrame(row, columns = df_all.columns)
df_all = pd.concat([df_all, row], axis = 0)
Y_belt = df_belt.Rate.astype(float) / 24
Y_belt = pd.DataFrame(Y_belt, columns = ['Rate'])
X_belt = df_belt['Initial']
X_belt = stats.add_constant(X_belt)
Y_all = df_all.Rate.astype(float) / 24
Y_all = pd.DataFrame(Y_all, columns = ['Rate'])
X_all = df_all['Initial']
X_all = stats.add_constant(X_all)
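# The regression fitted below is the usual absolute beta-convergence equation:
#   (1/T) * ln(y_i,2015 / y_i,1991) = alpha + beta * ln(y_i,1991) + e_i,   with T = 24,
# which is exactly how Y_* (Rate / 24) and X_* (Initial plus a constant) were constructed
# above. Beta convergence is supported when beta < 0 and its p-value falls below the threshold.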
# Performing the beta convergence test and recording results
model_belt = stats.OLS(Y_belt.astype(float), X_belt.astype(float))
results_belt = model_belt.fit()
print(results_belt.summary())
file = open('C:/Users/User/Documents/Data/Cornvergence/beta_convergence_test_results_belt.txt', 'w')
file.write(results_belt.summary().as_text())
file.close()
threshold = 0.05
if (results_belt.params.Initial < 0) and (results_belt.pvalues.Initial < threshold):
print('\n---> There is sufficient evidence to support the hypothesis of beta convergence in the corn belt! <---\n')
model_all = stats.OLS(Y_all.astype(float), X_all.astype(float))
results_all = model_all.fit()
print(results_all.summary())
file = open('C:/Users/User/Documents/Data/Cornvergence/beta_convergence_test_results_all.txt', 'w')
file.write(results_all.summary().as_text())
file.close()
threshold = 0.05
if (results_all.params.Initial < 0) and (results_all.pvalues.Initial < threshold):
print('\n---> There is sufficient evidence to support the hypothesis of beta convergence everywhere! <---\n')
# Drawing the scatter plots
x_belt = X_belt['Initial'].values.astype(float)
y_belt = Y_belt['Rate'].values.astype(float)
plt.figure(figsize = (6,5))
plt.scatter(x_belt, y_belt, marker = '.', color = 'black')
plt.xlabel('Initial Level (Ln)')
plt.ylabel('Growth Rate')
plt.title('Yield Growth Rate as a function of Initial Yield', fontsize = 13)
plt.savefig('C:/Users/User/Documents/Data/Cornvergence/beta_convergence_plot_belt.eps')
xx_belt = stats.add_constant(x_belt)
mod_belt = stats.OLS(y_belt, xx_belt)
res_belt = mod_belt.fit()
b = res_belt.params[0]
m = res_belt.params[1]
line = m * x_belt + b
plt.plot(x_belt, line, color = 'black')
plt.savefig('C:/Users/User/Documents/Data/Cornvergence/beta_convergence_plot_belt_line.eps')
x_all = X_all['Initial'].values.astype(float)
y_all = Y_all['Rate'].values.astype(float)
plt.figure(figsize = (6,5))
plt.scatter(x_all, y_all, marker = '.', color = 'red')
plt.scatter(x_belt, y_belt, marker = '.', color = 'black')
plt.xlabel('Initial Level (Ln)')
plt.ylabel('Growth Rate')
plt.title('Yield Growth Rate as a function of Initial Yield', fontsize = 13)
plt.savefig('C:/Users/User/Documents/Data/Cornvergence/beta_convergence_plot_all.eps')
xx_all = stats.add_constant(x_all)
mod_all = stats.OLS(y_all, xx_all)
res_all = mod_all.fit()
b = res_all.params[0]
m = res_all.params[1]
line2 = m * x_all + b
plt.plot(x_belt, line, color = 'black')
plt.plot(x_all, line2, color = 'red')
plt.savefig('C:/Users/User/Documents/Data/Cornvergence/beta_convergence_plot_all_line.eps')
# Now we move on to sigma convergence by calculating annual sample standard deviations and coefficients of variation
sigma_belt = [np.std(data_belt[str(i)]) for i in range(1991,2016)]
cv_belt = [np.std(data_belt[str(i)]) / np.mean(data_belt[str(i)]) for i in range(1991,2016)]
sigma_all = [np.std(data_all[str(i)]) for i in range(1991,2016)]
cv_all = [np.std(data_all[str(i)]) / np.mean(data_all[str(i)]) for i in range(1991,2016)]
# Write sigma convergence results to file
sigmastats = [['Corn Belt - STD', sigma_belt[0] - sigma_belt[len(sigma_belt)-1]], ['Corn Belt - CV', cv_belt[0] - cv_belt[len(cv_belt)-1]], ['All - STD', sigma_all[0] - sigma_all[len(sigma_all)-1]], ['All - CV', cv_all[0] - cv_all[len(cv_all)-1]]]
cols = ['Measure', 'Difference']
for i in range(1991,2016):
cols.append(str(i))
sigmastats[0].append(sigma_belt[i-1991])
sigmastats[1].append(cv_belt[i-1991])
sigmastats[2].append(sigma_all[i-1991])
sigmastats[3].append(cv_all[i-1991])
sigma_df = pd.DataFrame(sigmastats, columns = cols)
sigma_df.to_csv('C:/Users/User/Documents/Data/Cornvergence/sigma_stats.txt', index = False)
# Club Convergence (via Phillips and Sul, 2007)
# Resetting the index for convenience
data_all = data_all.set_index('County')
data_belt = data_belt.set_index('County')
# First, create the matrix X_{it}
years = [i for i in range(1991, 2016)]
vals_all = [sum(data_all[str(year)]) for year in years]
vals_belt = [sum(data_belt[str(year)]) for year in years]
little_h_all = np.zeros(np.shape(data_all))
for i in range(len(data_all)):
for j in range(len(years)):
little_h_all[i,j] = data_all.values[i,j] / ((1 / len(data_all)) * vals_all[j])
little_h_belt = np.zeros(np.shape(data_belt))
for i in range(len(data_belt)):
for j in range(len(years)):
little_h_belt[i,j] = data_belt.values[i,j] / ((1 / len(data_belt)) * vals_belt[j])
# Second, find the cross sectional variance of X_{it}
BIG_H_ALL = np.zeros((1, len(years)))
BIG_H_BELT = np.zeros((1, len(years)))
for i in range(len(years)):
s_all = 0
s_belt = 0
for j in range(len(data_all)):
s_all += (little_h_all[j,i] - 1) ** 2
BIG_H_ALL[0,i] = s_all / len(data_all)
for j in range(len(data_belt)):
s_belt += (little_h_belt[j,i] - 1) ** 2
BIG_H_BELT[0,i] = s_belt / len(data_belt)
# Third, run regression to obtain estiamtes \hat{a} and \hat{b} -- we choose var = 5 based on observing sigma_all and sigma_belt
var = 5
ratios_all = [np.log(BIG_H_ALL[0,0] / BIG_H_ALL[0,t]) for t in range(var,len(years))]
ratios_belt = [np.log(BIG_H_BELT[0,0] / BIG_H_BELT[0,t]) for t in range(var,len(years))]
LHS_all = [ratios_all[t] - 2*(np.log(np.log(t+var))) for t in range(len(ratios_all))]
LHS_all = pd.DataFrame(LHS_all, columns = ['LHS'])
RHS_all = [np.log(t) for t in range(var,len(years))]
RHS_all = pd.DataFrame(RHS_all, columns = ['RHS'])
RHS_all = stats.add_constant(RHS_all)
LHS_belt = [ratios_belt[t] - 2*(np.log(np.log(t+var))) for t in range(len(ratios_belt))]
LHS_belt = pd.DataFrame(LHS_belt, columns = ['LHS'])
RHS_belt = [np.log(t) for t in range(var,len(years))]
RHS_belt = pd.DataFrame(RHS_belt, columns = ['RHS'])
RHS_belt = stats.add_constant(RHS_belt)
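# The series built above follow the Phillips and Sul (2007) "log t" regression:
#   log(H_1 / H_t) - 2 * log(log(t)) = a + b * log(t) + u_t,
# where H_t is the cross-sectional variance of the relative transition paths h_it computed
# earlier. A significantly negative b rejects overall convergence, which is why the club
# algorithm further below keeps growing a club only while its fitted beta stays positive.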
club_model_all = stats.OLS(LHS_all, RHS_all)
club_results_all = club_model_all.fit()
print(club_results_all.summary())
file = open('C:/Users/User/Documents/Data/Cornvergence/club_results_all.txt', 'w')
file.write(club_results_all.summary().as_text())
file.close()
club_model_belt = stats.OLS(LHS_belt, RHS_belt)
club_results_belt = club_model_belt.fit()
print(club_results_belt.summary())
file = open('C:/Users/User/Documents/Data/Cornvergence/club_results_belt.txt', 'w')
file.write(club_results_belt.summary().as_text())
file.close()
# Given the results for \hat{a} and \hat{b}, we may use a club convergence algorithm to determine club membership
# We define a function for determining convergence club membership
def clubbing(idx, dataset):
club_vals = [sum(dataset[str(year)][0:idx+1]) for year in years]
club_h = np.zeros(np.shape(dataset))
for i in range(idx):
for j in range(len(years)):
club_h[i,j] = dataset.values[i,j] / ((1 / (idx+1)) * club_vals[j])
club_H = np.zeros((1, len(years)))
for i in range(len(years)):
s = 0
for j in range(idx):
s += (club_h[j,i] - 1) ** 2
club_H[0,i] = s / idx
var = 5
ratios = [np.log(club_H[0,0] / club_H[0,t]) for t in range(var,len(years))]
LHS = [ratios[t] - 2*(np.log(np.log(t+var))) for t in range(len(ratios))]
LHS = pd.DataFrame(LHS, columns = ['LHS'])
RHS = [np.log(t) for t in range(var,len(years))]
RHS = pd.DataFrame(RHS, columns = ['RHS'])
RHS = stats.add_constant(RHS)
club_model = stats.OLS(LHS, RHS)
club_results = club_model.fit()
beta = club_results.params[1]
return beta
# Creating the convergence clubs
# Plots of the ranked final year ordered yields
basis_all = [i for i in range(1,len(data_all)+1)]
plt.figure(figsize = (6,5))
plt.plot(basis_all, data_all['2015'], color = 'black')
plt.xlabel('County Rank')
plt.ylabel('Yield (BU / ACRE)')
plt.title('Ordered Plot of County Yields - All', fontsize = 13)
plt.savefig('C:/Users/User/Documents/Data/Cornvergence/2015_plot_all.eps')
basis_belt = [i for i in range(1,len(data_belt)+1)]
plt.figure(figsize = (6,5))
plt.plot(basis_belt, data_belt['2015'], color = 'black')
plt.xlabel('County Rank')
plt.ylabel('Yield (BU / ACRE)')
plt.title('Ordered Plot of County Yields - Corn Belt', fontsize = 13)
plt.savefig('C:/Users/User/Documents/Data/Cornvergence/2015_plot_belt.eps')
# Club membership algorithm using the function above - all counties
idx = 0
a = 0
clubs = []
remaining = [i for i in range(len(data_all))]
while len(remaining) > 0:
beta = 1
while beta > 0:
idx += 1
print(a+idx)
if (a + idx) == max(remaining):
clubs.append(remaining)
remaining = []
beta = -1
else:
beta = clubbing(idx, data_all)
if beta < 0:
club = [i for i in range(a,a+idx)]
clubs.append(club)
for item in club:
remaining.remove(item)
data_all = data_all.iloc[idx:len(data_all), :]
print(data_all)
a += idx
idx = 0
# List names of members of clubs
all_clubs = clubs
all_members = []
for club in clubs:
group = [counties_all[idx] for idx in club]
all_members.append(group)
# Club membership algorithm using the function above - corn belt
idx = 1
a = 0
clubs = []
remaining = [i for i in range(len(data_belt))]
while len(remaining) > 0:
beta = 1
while beta > 0:
idx += 1
print(a+idx)
if (a + idx) == max(remaining):
clubs.append(remaining)
remaining = []
beta = -1
else:
beta = clubbing(idx, data_belt)
if beta < 0:
if idx == 2:
idx = 1
club = [i for i in range(a,a+idx)]
clubs.append(club)
for item in club:
remaining.remove(item)
data_belt = data_belt.iloc[idx:len(data_belt), :]
print(data_belt)
a += idx
idx = 1
# List names of members of clubs
belt_clubs = clubs
belt_members = []
for club in clubs:
group = [counties_belt[idx] for idx in club]
belt_members.append(group)
# Write clubs with members to txt file for reference
all_members = | pd.DataFrame(all_members) | pandas.DataFrame |
"""
timedelta support tools
"""
import re
from datetime import timedelta
import numpy as np
import pandas.tslib as tslib
from pandas import compat, _np_version_under1p7
from pandas.core.common import (ABCSeries, is_integer, is_integer_dtype, is_timedelta64_dtype,
_values_from_object, is_list_like, isnull, _ensure_object)
repr_timedelta = tslib.repr_timedelta64
repr_timedelta64 = tslib.repr_timedelta64
def to_timedelta(arg, box=True, unit='ns'):
"""
Convert argument to timedelta
Parameters
----------
arg : string, timedelta, array of strings (with possible NAs)
box : boolean, default True
If True returns a Series of the results, if False returns ndarray of values
unit : unit of the arg (D, h, m, s, ms, us, ns); denotes the unit when arg is an integer/float number
Returns
-------
ret : timedelta64/arrays of timedelta64 if parsing succeeded
"""
if _np_version_under1p7:
raise ValueError("to_timedelta is not support for numpy < 1.7")
def _convert_listlike(arg, box, unit):
if isinstance(arg, (list,tuple)):
arg = np.array(arg, dtype='O')
if is_timedelta64_dtype(arg):
value = arg.astype('timedelta64[ns]')
elif is_integer_dtype(arg):
unit = _validate_timedelta_unit(unit)
# these are shortcutable
value = arg.astype('timedelta64[{0}]'.format(unit)).astype('timedelta64[ns]')
else:
try:
value = tslib.array_to_timedelta64(_ensure_object(arg),unit=unit)
except:
value = np.array([ _coerce_scalar_to_timedelta_type(r, unit=unit) for r in arg ])
if box:
from pandas import Series
value = Series(value,dtype='m8[ns]')
return value
if arg is None:
return arg
elif isinstance(arg, ABCSeries):
from pandas import Series
values = _convert_listlike(arg.values, box=False, unit=unit)
return Series(values, index=arg.index, name=arg.name, dtype='m8[ns]')
elif is_list_like(arg):
return _convert_listlike(arg, box=box, unit=unit)
# ...so it must be a scalar value. Return scalar.
return _coerce_scalar_to_timedelta_type(arg, unit=unit)
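# Illustrative conversions, as implied by the branches above (behaviour sketched, not asserted):
#   to_timedelta('1 days 06:05:01.00003') -> a single timedelta64[ns] scalar
#   to_timedelta(np.arange(5), unit='s') -> a Series of 0..4 seconds with dtype 'm8[ns]'
#   to_timedelta(np.arange(5), unit='s', box=False) -> a raw ndarray of timedelta64[ns] values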
def _validate_timedelta_unit(arg):
""" provide validation / translation for timedelta short units """
if re.search("Y|W|D",arg,re.IGNORECASE) or arg == 'M':
return arg.upper()
elif re.search("h|m|s|ms|us|ns",arg,re.IGNORECASE):
return arg.lower()
raise ValueError("invalid timedelta unit {0} provided".format(arg))
_short_search = re.compile(
r"^\s*(?P<neg>-?)\s*(?P<value>\d*\.?\d*)\s*(?P<unit>d|s|ms|us|ns)?\s*$", re.IGNORECASE)
_full_search = re.compile(
r"^\s*(?P<neg>-?)\s*(?P<days>\d+)?\s*(days|d|day)?,?\s*(?P<time>\d{2}:\d{2}:\d{2})?(?P<frac>\.\d+)?\s*$", re.IGNORECASE)
_nat_search = re.compile(
r"^\s*(nat|nan)\s*$", re.IGNORECASE)
_whitespace = re.compile(r'^\s*$')
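# Examples of the strings each pattern is meant to capture (as implied by the groups above):
#   _short_search : "2d", "-1.5us", "300ms", "4"        (a number plus an optional short unit)
#   _full_search  : "1 day, 06:05:01.0003", "-2 days"   (days and/or hh:mm:ss with a fraction)
#   _nat_search   : "NaT", "nan"                        (missing-value spellings)
#   _whitespace   : ""                                  (blank input, converted to iNaT below)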
def _coerce_scalar_to_timedelta_type(r, unit='ns'):
""" convert strings to timedelta; coerce to np.timedelta64"""
if isinstance(r, compat.string_types):
# we are already converting to nanoseconds
converter = _get_string_converter(r, unit=unit)
r = converter()
unit='ns'
return tslib.convert_to_timedelta(r,unit)
def _get_string_converter(r, unit='ns'):
""" return a string converter for r to process the timedelta format """
# treat as a nan
if _whitespace.search(r):
def convert(r=None, unit=None):
return tslib.iNaT
return convert
m = _short_search.search(r)
if m:
def convert(r=None, unit=unit, m=m):
if r is not None:
m = _short_search.search(r)
gd = m.groupdict()
r = float(gd['value'])
u = gd.get('unit')
if u is not None:
unit = u.lower()
if gd['neg']:
r *= -1
return tslib.cast_from_unit(r, unit)
return convert
m = _full_search.search(r)
if m:
def convert(r=None, unit=None, m=m):
if r is not None:
m = _full_search.search(r)
gd = m.groupdict()
# convert to seconds
value = float(gd['days'] or 0) * 86400
time = gd['time']
if time:
(hh,mm,ss) = time.split(':')
value += float(hh)*3600 + float(mm)*60 + float(ss)
frac = gd['frac']
if frac:
value += float(frac)
if gd['neg']:
value *= -1
return tslib.cast_from_unit(value, 's')
return convert
m = _nat_search.search(r)
if m:
def convert(r=None, unit=None, m=m):
return tslib.iNaT
return convert
# no converter
raise ValueError("cannot create timedelta string converter for [{0}]".format(r))
def _possibly_cast_to_timedelta(value, coerce=True):
""" try to cast to timedelta64, if already a timedeltalike, then make
sure that we are [ns] (as numpy 1.6.2 is very buggy in this regards,
don't force the conversion unless coerce is True
if coerce='compat' force a compatibilty coercerion (to timedeltas) if needeed
"""
# coercion compatability
if coerce == 'compat' and _np_version_under1p7:
def convert(td, dtype):
# we have an array with a non-object dtype
if hasattr(td,'item'):
td = td.astype(np.int64).item()
if td == tslib.iNaT:
return td
if dtype == 'm8[us]':
td *= 1000
return td
if | isnull(td) | pandas.core.common.isnull |
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn.preprocessing import scale
class Preprocessing():
def __init__(self, train, test):
#classification column
self._clsTrain = train.columns[-1]
#only categorical features
obj_dfTrain = train.select_dtypes(include=['object']).copy()
self._objectListTrain = obj_dfTrain.columns.values
#remove classification column
self._objectListTrain = np.delete(self._objectListTrain, -1)
#classification test column
self._clsTest = test.columns[-1]
# only categorical features from the test set
obj_dfTest = test.select_dtypes(include=['object']).copy()
self._objectListTest= obj_dfTest.columns.values
#remove classification column from test set
self._objectListTest = np.delete(self._objectListTest, -1)
''' def __init__(self):
print("Preprocessing void")
'''
def getCls(self):
return self._clsTrain, self._clsTest
#one-hot encoding function
def preprocessingOneHot(self,train,test):
#print(self._objectListTrain)
train = | pd.get_dummies(train, columns=self._objectListTrain) | pandas.get_dummies |
import unittest
import pandas as pd
from featurefilter import FeatureCorrelationFilter
def test_fit_high_continuous_correlation():
train_df = pd.DataFrame({'A': [0, 1],
'B': [0, 1]})
filter_ = FeatureCorrelationFilter()
train_df = filter_.fit(train_df)
assert filter_.columns_to_drop == ['B']
def test_excluding_target_column():
train_df = pd.DataFrame({'A': [0, 1],
'B': [0, 1],
'Y': [0, 1]})
filter_ = FeatureCorrelationFilter(target_column='Y')
train_df = filter_.fit(train_df)
assert filter_.columns_to_drop == ['B']
def test_high_negative_continuous_correlation():
train_df = pd.DataFrame({'A': [0, 1], 'B': [0, -1], 'Y': [0, 1]})
test_df = pd.DataFrame({'A': [0, 0], 'B': [0, 0], 'Y': [0, 1]})
filter_ = FeatureCorrelationFilter(target_column='Y')
train_df = filter_.fit_transform(train_df)
test_df = filter_.transform(test_df)
assert train_df.equals(pd.DataFrame({'A': [0, 1], 'Y': [0, 1]}))
assert test_df.equals( | pd.DataFrame({'A': [0, 0], 'Y': [0, 1]}) | pandas.DataFrame |
import os
import json
from collections import Counter, defaultdict
import pandas as pd
import networkx as nx
import statistics
import math
from sklearn.metrics import cohen_kappa_score
import seaborn as sns
import pandas
import matplotlib.pyplot as plt
ANNOTATION_TASKS = ["participants", "subevents"]
TASK_TO_INDEX = {
'participants' : 0,
'subevents' : 1
}
def load_user_annotations(user_annotations_folder, annotation_task, batches, verbose=0):
"""
Load the user annotations for
a) one annotation task
b) n batches
:rtype: dict
:return: mapping from (src, tgt) -> value
"""
user_annotations = dict()
index = TASK_TO_INDEX[annotation_task]
for batch in batches:
folder_path = os.path.join(user_annotations_folder, batch)
anno_json_path = os.path.join(folder_path, 'annotations', 'annotations.json')
index_json_path = os.path.join(folder_path, 'annotations', 'id_to_edge.json')
assert os.path.exists(anno_json_path)
with open(anno_json_path) as infile:
anno = json.load(infile)
with open(index_json_path) as infile:
id_to_edge = json.load(infile)
for id_, values in anno.items():
if id_ == 'the_end':
continue
if values == False:
string_value = 'dk'
elif type(values) == list:
string_value = values[index]
edge = id_to_edge[id_] # edges is (parent, child)
if string_value in {'dk', 'ns'}:
value = string_value
elif string_value in {'1', '2', '3', '4', '5', '6', '7'}:
value = int(string_value)
else:
raise Exception(f'provided annotation {string_value} for id {id_} is not valid. Please inspect.')
key = tuple(edge)
if key in user_annotations:
print()
print(f'found existing annotation for {key}: skipping')
continue
user_annotations[tuple(edge)] = value
if verbose >= 1:
print()
print(f'folder {user_annotations_folder}')
print(f'annotation task: {annotation_task}')
print(f'batches: {batches}')
print(f'# of items annotated: {len(user_annotations)}')
print(Counter(user_annotations.values()))
return user_annotations
def combine_annotations(users, batches, main_anno_folder, verbose=0):
edge_to_user_to_task_to_value = dict()
for user in users:
for task in ANNOTATION_TASKS:
print()
print(f'working on task {task} for user {user}')
user_annotations_folder = os.path.join(main_anno_folder, user)
edge_to_value = load_user_annotations(user_annotations_folder=user_annotations_folder,
annotation_task=task,
batches=batches,
verbose=verbose)
for edge, value in edge_to_value.items():
if edge not in edge_to_user_to_task_to_value:
info = {user : {task : None}
for user in users
for task in ANNOTATION_TASKS}
edge_to_user_to_task_to_value[edge] = info
edge_to_user_to_task_to_value[edge][user][task] = value
return edge_to_user_to_task_to_value
def obtain_kappa_score(output_folder, users, annotation_task):
"""
:param output_folder:
:param annotation_task:
:return:
"""
user_one, user_two = users
path = os.path.join(output_folder, f'{annotation_task}_{user_one}.json')
with open(path) as infile:
info_user_one = json.load(infile)
path = os.path.join(output_folder, f'{annotation_task}_{user_two}.json')
with open(path) as infile:
info_user_two = json.load(infile)
assert set(info_user_one) == set(info_user_two)
labels_user_one = []
labels_user_two = []
for key in info_user_one:
labels_user_one.append(info_user_one[key])
labels_user_two.append(info_user_two[key])
kappa = cohen_kappa_score(y1=labels_user_one, y2=labels_user_two)
return kappa
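# cohen_kappa_score implements Cohen's kappa, kappa = (p_o - p_e) / (1 - p_e), where p_o is
# the observed agreement between the two label lists and p_e the agreement expected by chance
# from each annotator's label distribution. Illustrative call on made-up labels (not project data):
#   cohen_kappa_score(y1=[1, 2, 3, 3], y2=[1, 2, 3, 2])  # -> roughly 0.64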
def compute_agreement(edge_to_user_to_task_to_value,
annotation_task,
output_folder,
verbose=0):
"""
create a table in which the user agreement for a particular task is shown
"""
category_to_edges = defaultdict(list)
filtered_user_to_edge_to_value = defaultdict(dict)
num_cat_other = 0
for edge, user_to_task_to_value in edge_to_user_to_task_to_value.items():
user_to_value = {user: task_to_value[annotation_task]
for user, task_to_value in user_to_task_to_value.items()}
values = list(user_to_value.values())
if values == ['dk' , 'dk']:
if verbose >= 2:
print(f'discarded {edge} {annotation_task} because both annotators indicated "dk"')
continue
if values == ['ns' , 'ns']:
if verbose >= 2:
print(f'discarded {edge} {annotation_task} because both annotators indicated "ns"')
continue
category = "other"
if all([type(value) == int
for value in values]):
category = abs(values[0] - values[1]) # we focus on two annotators
if category == "other":
num_cat_other += 1
continue
category_to_edges[category].append(edge)
for user, value in user_to_value.items():
edge_string = '---'.join([id_ for id_ in edge])
filtered_user_to_edge_to_value[user][edge_string] = value
num_annotations = []
for user, annotations in filtered_user_to_edge_to_value.items():
num_annotations.append(len(annotations))
assert len(set(num_annotations)) == 1, f'this set should only have one value: {num_annotations}'
for user, annotations in filtered_user_to_edge_to_value.items():
json_path = os.path.join(output_folder, f'{annotation_task}_{user}.json')
with open(json_path, 'w') as outfile:
json.dump(annotations, outfile)
if verbose:
print()
print(f'number of items in category "other": {num_cat_other}')
print('i.e., one annotator specified an integer and the other dk or ns')
# create table
list_of_lists = []
headers = ['Delta between annotations', 'Number of items']
for category, edges in category_to_edges.items():
one_row = [category, len(edges)]
list_of_lists.append(one_row)
df = pd.DataFrame(list_of_lists, columns=headers)
df = df.sort_values('Delta between annotations')
# cumulative relative frequency
num_items = sum(df['Number of items'])
cum_rel_freq_values = []
cum_rel_freq = 0
for index, row in df.iterrows():
rel_freq = 100 * (row['Number of items'] / num_items)
cum_rel_freq += rel_freq
cum_rel_freq_values.append(cum_rel_freq)
df['Cumulative Rel Freq'] = cum_rel_freq_values
# export table
excel_path = os.path.join(output_folder, f'agreement_{annotation_task}.xlsx')
df.to_excel(excel_path, index=False)
if verbose >= 1:
print()
print(f'saved agreement for {annotation_task} to {excel_path}')
latex_path = os.path.join(output_folder, f'agreement_{annotation_task}.tex')
df.to_latex(latex_path, index=False)
if verbose >= 1:
print()
print(f'saved agreement for {annotation_task} to {latex_path}')
def load_graph_from_edgelist(path_to_edge_list, verbose=0):
"""
:param str path_to_edge_list: load graph from edge list
"""
g = nx.read_edgelist(path_to_edge_list, create_using=nx.DiGraph())
if verbose:
print()
print(f'loaded edge list from: {path_to_edge_list}')
print(nx.info(g))
return g
def update_sample_graph_with_annotations(sample_graph,
edge_to_user_to_task_to_value,
verbose=0):
"""
:param sample_graph: the directed graph selected for annotation
:param edge_to_user_to_task_to_value: a mapping
from edge to user to task to value
:return: the same graph but with the annotations added
as attributes to the edges
"""
all_edge_attrs = {}
for edge, user_to_task_to_value in edge_to_user_to_task_to_value.items():
edge_attrs = {task : {}
for task in ANNOTATION_TASKS}
for user, task_to_value in user_to_task_to_value.items():
for task, value in task_to_value.items():
edge_attrs[task][user] = value
all_edge_attrs[edge] = edge_attrs
nx.set_edge_attributes(sample_graph, all_edge_attrs)
if verbose:
print()
print(f'update edge attributes for {len(all_edge_attrs)} edges')
return sample_graph
def get_average_edge_value(g, edge, annotation_task, users, verbose=0):
"""
:param g:
:param edge:
:param annotation_task:
:return:
"""
values = []
u, v = edge
attrs = g.get_edge_data(u, v)
if attrs:
for user, value in attrs[annotation_task].items():
if type(value) == int:
values.append(value)
if len(values) != len(users):
values = []
if values:
avg = sum(values) / len(values)
else:
avg = None
if verbose >= 4:
if values:
print(values)
return avg
def determine_candidate_basic_levels(g, annotation_task, users, verbose=0):
"""
Determine nodes with:
a) annotations in edges from children to candidate basic level
b) annotations in edges from candidate basic levels to superordinate events
for debugging purposes:
a) edge sport:67
-from Q13406554 (sports competition)
-to Q16510064 (sporting event)
-edge in JSON ["Q13406554", "Q16510064"]
-participants: Piek: 3, Antske: 3
-subevents: Piek: 3, Antske: 3
b) edge sport:34
-from Q16510064 (sporting event)
-to Q46190676 (tennis event)
- edge in JSON ["Q16510064", "Q46190676"]
-participants: Piek: 3, Antske: 2
-subevents: Piek: 3, Antske: 4
:rtype: dict
:return: mapping from event_id ->
{
“children” -> avg from edges,
“parents” -> avg from edges
}
"""
ev_to_anno_info = {}
for node in g.nodes():
children = g.successors(node)
parents = g.predecessors(node)
children_edges = [(node, child)
for child in children]
assert len(children_edges) == len(set(children_edges))
parent_edges = [(parent, node)
for parent in parents]
assert len(parent_edges) == len(set(parent_edges))
for parent, child in children_edges + parent_edges:
assert g.has_edge(parent, child), f'{(parent, child)} not found in graph'
if any([not children_edges,
not parent_edges]):
continue
children_avgs = []
children_edges_to_value = dict()
for children_edge in children_edges:
child_edge_avg = get_average_edge_value(g, children_edge, annotation_task, users, verbose=verbose)
if child_edge_avg is not None:
children_avgs.append(child_edge_avg)
children_edges_to_value[children_edge] = child_edge_avg
parent_avgs = []
parent_edges_to_value = dict()
for parent_edge in parent_edges:
parent_edge_avg = get_average_edge_value(g, parent_edge, annotation_task, users, verbose=verbose)
if parent_edge_avg is not None:
parent_avgs.append(parent_edge_avg)
parent_edges_to_value[parent_edge] = parent_edge_avg
if any([not children_avgs,
not parent_avgs]):
continue
children_value = sum(children_avgs) / len(children_avgs)
parent_value = sum(parent_avgs) / len(parent_avgs)
delta = children_value - parent_value
result = {
'children': children_value,
'children_edges_to_value' : children_edges_to_value,
'parents': parent_value,
'parents_edges_to_value' : parent_edges_to_value,
'delta' : delta
}
if verbose >= 3:
print()
print(node)
print('children', children_edges)
print('children averages', children_avgs)
print('parents', parent_edges)
print('parent averages', parent_avgs)
print(result)
ev_to_anno_info[node] = result
if verbose:
print()
print(f'collected relevant BLE annotation information for {len(ev_to_anno_info)} nodes')
return ev_to_anno_info
def ble_analysis(candidate_ble_info,
node_to_depth,
output_folder,
verbose=0):
"""
:param candidate_ble_info:
:param annotation_task:
:param output_folder:
"""
list_of_lists = []
headers = ['Node ID', 'Node Depth', 'Delta subevents', 'Delta participants']
for node in candidate_ble_info['subevents']: # ugly hack to get iterable of relevant nodes
delta_subevents = candidate_ble_info['subevents'][node]['delta']
delta_participants = candidate_ble_info['participants'][node]['delta']
one_row = [node,
node_to_depth[node],
round(delta_subevents, 1),
round(delta_participants, 1),
]
list_of_lists.append(one_row)
df = pd.DataFrame(list_of_lists, columns=headers)
df = df.sort_values('Delta subevents', ascending=False)
excel_path = f'{output_folder}/ble_delta.xlsx'
df.to_excel(excel_path, index=False)
tex_path = f'{output_folder}/ble_delta.tex'
df.to_latex(tex_path, index=False)
if verbose:
print()
print('saved BLE delta table to')
print(excel_path)
print(tex_path)
return df
def analyze_df(df,
turning_point,
annotation_task,
verbose=0):
"""
:param df:
:return:
"""
low = []
high = []
for index, row in df.iterrows():
task_value = row[f'Delta {annotation_task}']
depth = row['Node Depth']
if task_value >= turning_point:
high.append(depth)
else:
low.append(depth)
high_avg_depth = statistics.mean(high)
low_avg_depth = statistics.mean(low)
if verbose >= 1:
print()
print('turning point', turning_point)
print('on or above turning point', len(high))
print('below turning point', len(low))
print('average depth')
print('high', round(high_avg_depth,2))
print('low', round(low_avg_depth,2))
print('high distribution', sorted(Counter(high).items()))
print('high standard deviation', statistics.stdev(high))
print('low distribution', sorted(Counter(low).items()))
print('low standard deviation', statistics.stdev(low))
def create_dot_of_ble_candidate(ble_candidate_info,
ev_coll_obj,
output_path=None,
verbose=0):
"""
:param dict ble_candidate_info: see output determine_candidate_basic_levels
{'children': 5.25,
'children_edges_to_value': {('Q3270632', 'Q4582333'): 6.5,
('Q3270632', 'Q1152547'): 4.0},
'parents': 5.5,
'parents_edges_to_value': {('Q18608583', 'Q3270632'): 4.5,
('Q1079023', 'Q3270632'): 6.5},
'delta': -0.25}}
"""
g = nx.DiGraph()
keys = ['children_edges_to_value',
'parents_edges_to_value']
nodes = set()
for key in keys:
for (u, v) in ble_candidate_info[key]:
nodes.update((u, v))
for node in nodes:
uri = f'http://www.wikidata.org/entity/{node}'
ev_obj = ev_coll_obj.event_type_id_to_event_type_obj[uri]
g.add_node(node, label=ev_obj.label_to_show)
for key in keys:
edges = ble_candidate_info[key]
for (u, v), weight in edges.items():
g.add_edge(u, v, label=weight)
if output_path is not None:
p = nx.drawing.nx_pydot.to_pydot(g)
p.write_png(output_path)
if verbose >= 3:
print()
print(f'written output to {output_path}')
def create_heatmap(piek_json,
antske_json,
output_path=None,
verbose=0):
"""
"""
# initialize dataframe
likert_values = [1, 2, 3, 4, 5, 6, 7]
df = | pandas.DataFrame() | pandas.DataFrame |
#code will get the proper values like emyield, marketcap, cacl, etc, and supply a string and value to put back into the dataframe.
import pandas as pd
import numpy as np
import logging
import inspect
from scipy import stats
from dateutil.relativedelta import relativedelta
from datetime import datetime
from scipy import stats
import math
class quantvaluedata: #just contains functions, will NEVER actually get the data
def __init__(self,allitems=None):
if allitems is None:
self.allitems=[]
else:
self.allitems=allitems
return
def get_value(self,origdf,key,i=-1):
if key not in origdf.columns and key not in self.allitems and key not in ['timedepositsplaced','fedfundssold','interestbearingdepositsatotherbanks']:
logging.error(key+' not found in allitems')
#logging.error(self.allitems)
return None
df=origdf.copy()
df=df.sort_values('yearquarter')
if len(df)==0:
##logging.error("empty dataframe")
return None
if key not in df.columns:
#logging.error("column not found:"+key)
return None
interested_quarter=df['yearquarter'].iloc[-1]+i+1#because if we want the last quarter we need them equal
if not df['yearquarter'].isin([interested_quarter]).any(): #if the quarter we are interested in is not there
return None
s=df['yearquarter']==interested_quarter
df=df[s]
if len(df)>1:
logging.error(df)
logging.error("to many rows in df")
exit()
pass
value=df[key].iloc[0]
if pd.isnull(value):
return None
return float(value)
def get_sum_quarters(self,df,key,seed,length):
values=[]
        #BIG BUG: this was originally -length-1, which was always truncating the array and producing nans.
periods=range(seed,seed-length,-1)
for p in periods:
values.append(self.get_value(df,key,p))
#logging.info('values:'+str(values))
if pd.isnull(values).any(): #return None if any of the values are None
return None
else:
return float(np.sum(values))
def get_market_cap(self,statements_df,prices_df,seed=-1):
total_shares=self.get_value(statements_df,'weightedavedilutedsharesos',seed)
if pd.isnull(total_shares):
return None
end_date=statements_df['end_date'].iloc[seed]
if seed==-1: #get the latest price but see if there was a split between the end date and now
s=pd.to_datetime(prices_df['date'])>pd.to_datetime(end_date)
tempfd=prices_df[s]
splits=tempfd['split_ratio'].unique()
adj=pd.Series(splits).product() #multiply all the splits together to get the total adjustment factor from the last total_shares
total_shares=total_shares*adj
last_price=prices_df.sort_values('date').iloc[-1]['close']
price=float(last_price)
market_cap=price*float(total_shares)
return market_cap
else:
marketcap=self.get_value(statements_df,'marketcap',seed)
if pd.isnull(marketcap):
return None
else:
return marketcap
def get_netdebt(self,statements_df,seed=-1):
shorttermdebt=self.get_value(statements_df,'shorttermdebt',seed)
longtermdebt=self.get_value(statements_df,'longtermdebt',seed)
capitalleaseobligations=self.get_value(statements_df,'capitalleaseobligations',seed)
cashandequivalents=self.get_value(statements_df,'cashandequivalents',seed)
restrictedcash=self.get_value(statements_df,'restrictedcash',seed)
fedfundssold=self.get_value(statements_df,'fedfundssold',seed)
interestbearingdepositsatotherbanks=self.get_value(statements_df,'interestbearingdepositsatotherbanks',seed)
timedepositsplaced=self.get_value(statements_df,'timedepositsplaced',seed)
s=pd.Series([shorttermdebt,longtermdebt,capitalleaseobligations,cashandequivalents,restrictedcash,fedfundssold,interestbearingdepositsatotherbanks,timedepositsplaced]).astype('float')
if pd.isnull(s).all(): #return None if everything is null
return None
m=pd.Series([1,1,1,-1,-1,-1,-1])
netdebt=s.multiply(m).sum()
return float(netdebt)
def get_enterprise_value(self,statements_df,prices_df,seed=-1):
#calculation taken from https://intrinio.com/data-tag/enterprisevalue
marketcap=self.get_market_cap(statements_df,prices_df,seed)
netdebt=self.get_netdebt(statements_df,seed)
totalpreferredequity=self.get_value(statements_df,'totalpreferredequity',seed)
noncontrollinginterests=self.get_value(statements_df,'noncontrollinginterests',seed)
redeemablenoncontrollinginterest=self.get_value(statements_df,'redeemablenoncontrollinginterest',seed)
s=pd.Series([marketcap,netdebt,totalpreferredequity,noncontrollinginterests,redeemablenoncontrollinginterest])
if pd.isnull(s).all() or pd.isnull(marketcap):
return None
return float(s.sum())
def get_ebit(self,df,seed=-1,length=4):
ebit=self.get_sum_quarters(df,'totaloperatingincome',seed,length)
if pd.notnull(ebit):
return float(ebit)
totalrevenue=self.get_sum_quarters(df,'totalrevenue',seed,length)
provisionforcreditlosses=self.get_sum_quarters(df,'provisionforcreditlosses',seed,length)
totaloperatingexpenses=self.get_sum_quarters(df,'totaloperatingexpenses',seed,length)
s=pd.Series([totalrevenue,provisionforcreditlosses,totaloperatingexpenses])
if pd.isnull(s).all():
return None
ebit=(s.multiply(pd.Series([1,-1,-1]))).sum()
if pd.notnull(ebit):
return float(ebit)
return None
def get_emyield(self,statements_df,prices_df,seed=-1,length=4):
ebit=self.get_ebit(statements_df,seed,length)
enterprisevalue=self.get_enterprise_value(statements_df,prices_df,seed)
if pd.isnull([ebit,enterprisevalue]).any() or enterprisevalue==0:
return None
return float(ebit/enterprisevalue)
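    # Illustrative check (hypothetical figures, not from real data): with trailing-four-quarter EBIT
    # of 50 and an enterprise value of 1,000, get_emyield returns 50 / 1000 = 0.05.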
def get_scalednetoperatingassets(self,statements_df,seed=-1):
"""
        SNOA = (Operating Assets - Operating Liabilities) / Total Assets
        where
        OA = total assets - cash and equivalents
        OL = total assets - ST debt - LT debt - minority interest - preferred stock - book common equity
oa=ttmsdfcompany.iloc[-1]['totalassets']-ttmsdfcompany.iloc[-1]['cashandequivalents']
ol=ttmsdfcompany.iloc[-1]['totalassets']-ttmsdfcompany.iloc[-1]['netdebt']-ttmsdfcompany.iloc[-1]['totalequityandnoncontrollinginterests']
snoa=(oa-ol)/ttmsdfcompany.iloc[-1]['totalassets']
"""
totalassets=self.get_value(statements_df,'totalassets',seed)
cashandequivalents=self.get_value(statements_df,'cashandequivalents',seed)
netdebt=self.get_netdebt(statements_df,seed)
totalequityandnoncontrollinginterests=self.get_value(statements_df,'totalequityandnoncontrollinginterests',seed)
if pd.isnull(totalassets) or totalassets==0:
return None
s=pd.Series([totalassets,cashandequivalents])
m=pd.Series([1,-1])
oa=s.multiply(m).sum()
s=pd.Series([totalassets,netdebt,totalequityandnoncontrollinginterests])
m=pd.Series([1,-1,-1])
ol=s.multiply(m).sum()
scalednetoperatingassets=(oa-ol)/totalassets
return float(scalednetoperatingassets)
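    # Illustrative numbers for the SNOA formula above (hypothetical, not from any real filing):
    # with total assets = 100, cash = 10, net debt = 20 and total equity incl. non-controlling
    # interests = 50, OA = 100 - 10 = 90, OL = 100 - 20 - 50 = 30, so SNOA = (90 - 30) / 100 = 0.6.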
def get_scaledtotalaccruals(self,statements_df,seed=-1,length=4):
netincome=self.get_sum_quarters(statements_df,'netincome',seed,length)
netcashfromoperatingactivities=self.get_sum_quarters(statements_df,'netcashfromoperatingactivities',seed,length)
start_assets=self.get_value(statements_df,'cashandequivalents',seed-length)
end_assets=self.get_value(statements_df,'cashandequivalents',seed)
if pd.isnull([start_assets,end_assets]).any():
return None
totalassets=np.mean([start_assets,end_assets])
if pd.isnull(totalassets):
return None
num=pd.Series([netincome,netcashfromoperatingactivities])
if pd.isnull(num).all():
return None
m=pd.Series([1,-1])
num=num.multiply(m).sum()
den=totalassets
if den==0:
return None
scaledtotalaccruals=num/den
return float(scaledtotalaccruals)
def get_grossmargin(self,statements_df,seed=-1,length=4):
totalrevenue=self.get_sum_quarters(statements_df, 'totalrevenue', seed, length)
totalcostofrevenue=self.get_sum_quarters(statements_df, 'totalcostofrevenue', seed, length)
if pd.isnull([totalrevenue,totalcostofrevenue]).any() or totalcostofrevenue==0:
return None
grossmargin=(totalrevenue-totalcostofrevenue)/totalcostofrevenue
return float(grossmargin)
def get_margingrowth(self,statements_df,seed=-1,length1=20,length2=4):
grossmargins=[]
for i in range(seed,seed-length1,-1):
grossmargins.append(self.get_grossmargin(statements_df, i, length2))
grossmargins=pd.Series(grossmargins)
if pd.isnull(grossmargins).any():
return None
growth=grossmargins.pct_change(periods=1)
growth=growth[pd.notnull(growth)]
if len(growth)==0:
return None
grossmargingrowth=stats.gmean(1+growth)-1
if pd.isnull(grossmargingrowth):
return None
return float(grossmargingrowth)
def get_marginstability(self,statements_df,seed=-1,length1=20,length2=4):
        #length1 = how far back to go (number of quarters to evaluate, e.g. 20 quarters)
        #length2 = for each quarter, how many trailing quarters to aggregate (e.g. 4 quarters)
grossmargins=[]
for i in range(seed,seed-length1,-1):
grossmargins.append(self.get_grossmargin(statements_df, i, length2))
grossmargins=pd.Series(grossmargins)
if pd.isnull(grossmargins).any() or grossmargins.std()==0:
return None
marginstability=grossmargins.mean()/grossmargins.std()
if pd.isnull(marginstability):
return None
return float(marginstability)
def get_cacl(self,df,seed=-1):
a=self.get_value(df,'totalcurrentassets',seed)
l=self.get_value(df,'totalcurrentliabilities',seed)
if pd.isnull([a,l]).any() or l==0:
return None
else:
return a/l
def get_tatl(self,df,seed=-1):
a=self.get_value(df,'totalassets',seed)
l=self.get_value(df,'totalliabilities',seed)
if pd.isnull([a,l]).any() or l==0:
return None
else:
return a/l
def get_longterm_cacl(self,df,seed=-1,length=20):
ltcacls=[]
for i in range(seed,seed-length,-1):
ltcacls.append(self.get_cacl(df,i))
ltcacls=pd.Series(ltcacls)
if pd.isnull(ltcacls).any():
return None
return stats.gmean(1+ltcacls)-1 #not totally sure we need the 1+, and the -1 11/9/17
def get_longterm_tatl(self,df,seed=-1,length=20):
lttatls=[]
for i in range(seed,seed-length,-1):
lttatls.append(self.get_tatl(df,i))
lttatls=pd.Series(lttatls)
if pd.isnull(lttatls).any():
return None
return stats.gmean(1+lttatls)-1 #not totally sure we need the 1+, and the -1 11/9/17
def get_capex(self,df,seed=-1,length=4):
purchaseofplantpropertyandequipment=self.get_sum_quarters(df,'purchaseofplantpropertyandequipment',seed,length)
saleofplantpropertyandequipment=self.get_sum_quarters(df,'saleofplantpropertyandequipment',seed,length)
s=pd.Series([purchaseofplantpropertyandequipment,saleofplantpropertyandequipment])
if pd.isnull(s).all():
return None
m=pd.Series([-1,-1])
capex=(s*m).sum()
if capex is None:
return None
return float(capex)
def get_freecashflow(self,df,seed=-1):
netcashfromoperatingactivities=self.get_value(df,'netcashfromoperatingactivities',seed)
capex=self.get_capex(df,seed,length=1)
s=pd.Series([netcashfromoperatingactivities,capex])
if pd.isnull(s).all():
return None
m=pd.Series([1,-1])
fcf=(s*m).sum()
return float(fcf)
    #add a length2 parameter so we take the sums of cash flows
def get_cashflowonassets(self,df,seed=-1,length1=20,length2=4):
cfoas=[]
for i in range(seed,seed-length1,-1):
start_assets=self.get_value(df,'totalassets',i-length2)
end_assets=self.get_value(df,'totalassets',i)
fcfs=[]
for k in range(i,i-length2,-1):
fcf=self.get_freecashflow(df,k)
fcfs.append(fcf)
if pd.isnull(fcfs).any():
return None
total_fcf=pd.Series(fcfs).sum()
avg_assets=pd.Series([start_assets,end_assets]).mean()
if pd.isnull([total_fcf,avg_assets]).any() or avg_assets==0:
return None
else:
cfoas.append(total_fcf/avg_assets)
if pd.isnull(cfoas).any():
return None
else:
if pd.isnull(stats.gmean(1+pd.Series(cfoas))-1):
return None
else:
                return stats.gmean(1+pd.Series(cfoas))-1 #we want to punish variability because the higher the number, the better
def get_roa(self,df,seed=-1,length=4):
netincome=self.get_sum_quarters(df,'netincome',seed,length)
start_assets=self.get_value(df,'totalassets',seed-length)
end_assets=self.get_value(df,'totalassets',seed)
if pd.isnull([start_assets,end_assets]).any():
return None
totalassets=pd.Series([start_assets,end_assets]).mean()
if pd.isnull([netincome,totalassets]).any() or totalassets==0:
return None
roa=netincome/totalassets
return float(roa)
def get_roc(self,df,seed=-1,length=4):
ebit=self.get_ebit(df,seed,length)
dividends=self.get_sum_quarters(df,'paymentofdividends',seed,length)
start_debt=self.get_netdebt(df,seed-length)
end_debt=self.get_netdebt(df,seed)
netdebt=pd.Series([start_debt,end_debt]).mean()
start_equity=self.get_value(df,'totalequity',seed-length)
end_equity=self.get_value(df,'totalequity',seed)
totalequity=pd.Series([start_equity,end_equity]).mean()
num= | pd.Series([ebit,dividends]) | pandas.Series |
# coding=utf-8
# !/usr/bin/env python3
import os, re
import numpy as np
import pandas as pd
def svLen(sv_data):
data_grab = re.compile("^.*SVLEN=(?P<sv_len>-?[0-9]+).*$")
if 'SVLEN' in str(sv_data['INFO'].iloc[0]):
data_info = data_grab.search(sv_data['INFO'].iloc[0]).groupdict()
sv_len = data_info['sv_len']
else:
# if the sv_type is not DEL, INS, DUP or INV, we prefer to preserve it thus default sv_len 51 (>50).
sv_len = 51
return int(sv_len)
def svType(sv_data):
data_grab = re.compile("^.*SVTYPE=(?P<sv_type>[a-zA-Z]+).*$")
if 'SVTYPE' in str(sv_data['INFO'].iloc[0]):
data_info = data_grab.search(sv_data['INFO'].iloc[0]).groupdict()
sv_type = data_info['sv_type']
else:
sv_type = 'None'
return sv_type
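# Minimal sketch of how svType and svLen parse the INFO column (hypothetical one-row dataframe,
# not taken from any real VCF):
# >>> toy = pd.DataFrame({'INFO': ['SVTYPE=DEL;SVLEN=-120;END=10120']})
# >>> svType(toy)   # -> 'DEL'
# >>> svLen(toy)    # -> -120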
def readvcf(file_name):
count_num = 0
with open(file_name,'r') as f1:
for row in f1:
if '#' in row:
count_num = count_num + 1
# print(count_num)
rawData = pd.read_csv(file_name,skiprows=count_num-1,sep='\t')
rawData = rawData.set_index('#CHROM')
rawData.index.name = 'CHROM'
# print(rawData.loc['chr1'])
return rawData
def typeCalculate(file_name):
if 'vcf' in file_name:
sv_data = readvcf(file_name)
else:
sv_data = pd.read_csv(file_name)
# print(sv_data)
# dnsv_filter_data =pd.DataFrame(columns=dnsv_data.columns)
sv_type_list = []
for i in range(sv_data.shape[0]):
print(i)
# sv_len =svLen(sv_data.iloc[[i]])
# if sv_len>10000:
sv_type = svType(sv_data.iloc[[i]])
sv_type_list.append(sv_type)
sv_type_list = pd.Series(sv_type_list)
print(sv_type_list.value_counts())
return
def process_bar(i):
num = i // 2
if i == 100:
process = "\r[%3s%%]: |%-50s|\n" % (i, '|' * num)
else:
process = "\r[%3s%%]: |%-50s|" % (i, '|' * num)
print(process, end='', flush=True)
def calcultateImprecise(file_name):
data = pd.read_csv(file_name)
imprecise_ins = pd.DataFrame(columns=data.columns)
imprecise_del = pd.DataFrame(columns=data.columns)
imprecise = pd.DataFrame(columns=data.columns)
process_count = 0; process_path = data.shape[0]/100
for i in range(data.shape[0]):
if i >= process_path * process_count:
process_bar(process_count+1)
process_count = process_count + 1
sv_type =svType(data.iloc[[i]])
if 'IMPRECISE' in data['INFO'].iloc[i]:
imprecise = pd.concat([imprecise, data.iloc[[i]]])
if sv_type == 'INS':
imprecise_ins = pd.concat([imprecise_ins, data.iloc[[i]]])
elif sv_type == 'DEL':
imprecise_del = pd.concat([imprecise_del , data.iloc[[i]]])
print('ins',imprecise_ins)
print('del',imprecise_del)
print('all',imprecise)
# deimprecise.to_csv(out_dir,index=None)
return
def filterImprecise(file_name,out_dir):
data = pd.read_csv(file_name)
deimprecise_ins = pd.DataFrame(columns=data.columns)
deimprecise_del = pd.DataFrame(columns=data.columns)
deimprecise = pd.DataFrame(columns=data.columns)
process_count = 0; process_path = data.shape[0]/100
for i in range(data.shape[0]):
if i >= process_path * process_count:
process_bar(process_count+1)
process_count = process_count + 1
sv_type =svType(data.iloc[[i]])
if 'IMPRECISE' not in data['INFO'].iloc[i]:
deimprecise = pd.concat([deimprecise, data.iloc[[i]]])
if sv_type == 'INS':
deimprecise_ins = | pd.concat([deimprecise_ins, data.iloc[[i]]]) | pandas.concat |
##
# Many of my features are taken from or inspired by public kernels. The
# following is a probably incomplete list of these kernels:
# - https://www.kaggle.com/ggeo79/j-coupling-lightbgm-gpu-dihedral-angle for
# the idea to use dihedral angles on 3J couplings.
# - https://www.kaggle.com/titericz/giba-r-data-table-simple-features-1-17-lb
# mostly for distance features.
# - https://www.kaggle.com/kmat2019/effective-feature provides the idea to
# compute cosine angles between scalar coupling atoms and their nearest
# neighbors.
# - https://www.kaggle.com/seriousran/just-speed-up-calculate-distance-from-benchmark
# for an efficient distance calculation between scalar coupling atoms.
#
# Running this script will give some warnings related to the
# 'explicit valence..' rdkit error. The problem is discussed here
# https://www.kaggle.com/c/champs-scalar-coupling/discussion/94274#latest-572435
# I hadn't gotten around to implementing the proper solutions discussed there.
import gc
import os
import numpy as np
import pandas as pd
from itertools import combinations
from glob import glob
import deepchem as dc
from rdkit.Chem import rdmolops, ChemicalFeatures
from xyz2mol import read_xyz_file, xyz2mol
from utils import print_progress
import constants as C
mol_feat_columns = ['ave_bond_length', 'std_bond_length', 'ave_atom_weight']
xyz_filepath_list = os.listdir(C.RAW_DATA_PATH + 'structures')
xyz_filepath_list.sort()
## Functions to create the RDKit mol objects
def mol_from_xyz(filepath, add_hs=True, compute_dist_centre=False):
"""Wrapper function for calling xyz2mol function."""
charged_fragments = True # alternatively radicals are made
# quick is faster for large systems but requires networkx
# if you don't want to install networkx set quick=False and
# uncomment 'import networkx as nx' at the top of the file
quick = True
atomicNumList, charge, xyz_coordinates = read_xyz_file(filepath)
mol, dMat = xyz2mol(atomicNumList, charge, xyz_coordinates,
charged_fragments, quick, check_chiral_stereo=False)
return mol, np.array(xyz_coordinates), dMat
def get_molecules():
"""
    Constructs rdkit mol objects derived from the .xyz files. Also returns:
- mol ids (unique numerical ids)
- set of molecule level features
- arrays of xyz coordinates
- euclidean distance matrices
- graph distance matrices.
All objects are returned in dictionaries with 'mol_name' as keys.
"""
mols, mol_ids, mol_feats = {}, {}, {}
xyzs, dist_matrices, graph_dist_matrices = {}, {}, {}
print('Create molecules and distance matrices.')
for i in range(C.N_MOLS):
print_progress(i, C.N_MOLS)
filepath = xyz_filepath_list[i]
mol_name = filepath.split('/')[-1][:-4]
mol, xyz, dist_matrix = mol_from_xyz(filepath)
mols[mol_name] = mol
xyzs[mol_name] = xyz
dist_matrices[mol_name] = dist_matrix
mol_ids[mol_name] = i
# make padded graph distance matrix dataframes
n_atoms = len(xyz)
graph_dist_matrix = pd.DataFrame(np.pad(
rdmolops.GetDistanceMatrix(mol),
[(0, 0), (0, C.MAX_N_ATOMS - n_atoms)], 'constant'
))
graph_dist_matrix['molecule_id'] = n_atoms * [i]
graph_dist_matrices[mol_name] = graph_dist_matrix
# compute molecule level features
adj_matrix = rdmolops.GetAdjacencyMatrix(mol)
atomic_num_list, _, _ = read_xyz_file(filepath)
dists = dist_matrix.ravel()[np.tril(adj_matrix).ravel()==1]
mol_feats[mol_name] = pd.Series(
[np.mean(dists), np.std(dists), np.mean(atomic_num_list)],
index=mol_feat_columns
)
return mols, mol_ids, mol_feats, xyzs, dist_matrices, graph_dist_matrices
## Functions to create features at the scalar coupling level.
def map_atom_info(df, atom_idx, struct_df):
"""Adds xyz-coordinates of atom_{atom_idx} to 'df'."""
df = pd.merge(df, struct_df, how = 'left',
left_on = ['molecule_name', f'atom_index_{atom_idx}'],
right_on = ['molecule_name', 'atom_index'])
df = df.drop('atom_index', axis=1)
df = df.rename(columns={'atom': f'atom_{atom_idx}',
'x': f'x_{atom_idx}',
'y': f'y_{atom_idx}',
'z': f'z_{atom_idx}'})
return df
def add_dist(df, struct_df):
"""Adds euclidean distance between scalar coupling atoms to 'df'."""
df = map_atom_info(df, 0, struct_df)
df = map_atom_info(df, 1, struct_df)
p_0 = df[['x_0', 'y_0', 'z_0']].values
p_1 = df[['x_1', 'y_1', 'z_1']].values
df['dist'] = np.linalg.norm(p_0 - p_1, axis=1)
df.drop(columns=['x_0', 'y_0', 'z_0', 'x_1', 'y_1', 'z_1'], inplace=True)
return df
def transform_per_atom_group(df, a_idx, col='dist', trans='mean'):
"""Apply transformation 'trans' on feature in 'col' to scalar coupling
constants grouped at the atom level."""
return df.groupby(
['molecule_name', f'atom_index_{a_idx}'])[col].transform(trans)
def inv_dist_per_atom(df, a_idx, d_col='dist', power=3):
"""Compute sum of inverse distances of scalar coupling constants grouped at
the atom level."""
trans = lambda x: 1 / sum(x ** -power)
return transform_per_atom_group(df, a_idx, d_col, trans=trans)
def inv_dist_harmonic_mean(df, postfix=''):
"""Compute the harmonic mean of inverse distances of atom_0 and atom_1."""
c0, c1 = 'inv_dist0' + postfix, 'inv_dist1' + postfix
return (df[c0] * df[c1]) / (df[c0] + df[c1])
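# For example (hypothetical values): if inv_dist0 = 2 and inv_dist1 = 6 for a given row, the
# combined value is (2 * 6) / (2 + 6) = 1.5.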
def add_atom_counts(df, struct_df):
"""Add atom counts (total and per type) to 'df'."""
pd.options.mode.chained_assignment = None
atoms_per_mol_df = struct_df.groupby(['molecule_name', 'atom']).count()
atoms_per_mol_map = atoms_per_mol_df['atom_index'].unstack().fillna(0)
atoms_per_mol_map = atoms_per_mol_map.astype(int).to_dict()
df['num_atoms'] = 0
for a in atoms_per_mol_map:
df[f'num_{a}_atoms'] = df['molecule_name'].map(atoms_per_mol_map[a])
df['num_atoms'] += df[f'num_{a}_atoms']
return df
# source: https://stackoverflow.com/questions/20305272/dihedral-torsion-angle-from-four-points-in-cartesian-coordinates-in-python
def dihedral(p):
"""Praxeolitic formula: 1 sqrt, 1 cross product"""
p0 = p[0]
p1 = p[1]
p2 = p[2]
p3 = p[3]
b0 = -1.0*(p1 - p0)
b1 = p2 - p1
b2 = p3 - p2
# normalize b1 so that it does not influence magnitude of vector
# rejections that come next
b1 /= np.linalg.norm(b1)
# vector rejections
v = b0 - np.dot(b0, b1)*b1
w = b2 - np.dot(b2, b1)*b1
# angle between v and w in a plane is the torsion angle
# v and w may not be normalized but that's fine since tan is y/x
x = np.dot(v, w)
y = np.dot(np.cross(b1, v), w)
return np.arctan2(y, x)
def cosine_angle(p):
p0, p1, p2 = p[0], p[1], p[2]
v1, v2 = p0 - p1, p2 - p1
return np.dot(v1, v2) / np.sqrt(np.dot(v1, v1) * np.dot(v2, v2))
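# Quick sanity checks for the two angle helpers (hypothetical coordinates, using the numpy import above):
# >>> pts = np.array([[0., 0., 0.], [1., 0., 0.], [1., 1., 0.], [1., 1., 1.]])
# >>> dihedral(pts)          # ~ +pi/2: the outer bonds are rotated 90 degrees about the middle bond
# >>> cosine_angle(pts[:3])  # 0.0: the angle at pts[1] between pts[0] and pts[2] is 90 degrees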
def add_sc_angle_features(df, xyzs, dist_matrices):
"""
Adds the following angle features to 'df':
- diangle: for 3J couplings
- cos_angle: for 2J couplings, angle between sc atom 0, atom in between sc
atoms and sc atom 1
- cos_angle0: for all coupling types, cos angle between sc atoms and atom
closest to atom 0 (except for 1J coupling)
- cos_angle1: for all coupling types, cos angle between sc atoms and atom
closest to atom 1
"""
df['diangle'] = 0.0
df['cos_angle'] = 0.0
df['cos_angle0'] = 0.0
df['cos_angle1'] = 0.0
diangles, cos_angles, cos_angles0, cos_angles1 = {}, {}, {}, {}
print('Add scalar coupling angle based features.')
n = len(df)
for i, (idx, row) in enumerate(df.iterrows()):
print_progress(i, n, 500000)
mol_name = row['molecule_name']
mol, xyz = mols[mol_name], xyzs[mol_name]
dist_matrix = dist_matrices[mol_name]
adj_matrix = rdmolops.GetAdjacencyMatrix(mol)
idx0, idx1 = row['atom_index_0'], row['atom_index_1']
atom_ids = rdmolops.GetShortestPath(mol, idx0, idx1)
if len(atom_ids)==4:
diangles[idx] = dihedral(xyz[atom_ids,:])
elif len(atom_ids)==3:
cos_angles[idx] = cosine_angle(xyz[atom_ids,:])
if row['type'] not in [0, 2]:
neighbors0 = np.where(adj_matrix[idx0]==1)[0]
if len(neighbors0) > 0:
idx0_closest = neighbors0[
dist_matrix[idx0][neighbors0].argmin()]
cos_angles0[idx] = cosine_angle(
xyz[[idx0_closest, idx0, idx1],:])
neighbors1 = np.setdiff1d(np.where(adj_matrix[idx1]==1)[0], [idx0])
if len(neighbors1) > 0:
idx1_closest = neighbors1[
dist_matrix[idx1][neighbors1].argmin()]
cos_angles1[idx] = cosine_angle(
xyz[[idx0, idx1, idx1_closest],:])
df['diangle'] = pd.Series(diangles).abs()
df['cos_angle'] = pd.Series(cos_angles)
df['cos_angle0'] = pd.Series(cos_angles0)
df['cos_angle1'] = pd.Series(cos_angles1)
df.fillna(0., inplace=True)
return df
def add_sc_features(df, structures_df, mol_feats, xyzs, dist_matrices, mol_ids):
"""Add scalar coupling edge and molecule level features to 'df'."""
# add euclidean distance between scalar coupling atoms
df = add_dist(df, structures_df)
# compute distance normalized by scalar coupling type mean and std
gb_type_dist = df.groupby('type')['dist']
df['normed_dist'] = ((df['dist'] - gb_type_dist.transform('mean'))
/ gb_type_dist.transform('std'))
# add distance features adjusted for atom radii and electronegativity
df['R0'] = df['atom_0'].map(C.ATOMIC_RADIUS)
df['R1'] = df['atom_1'].map(C.ATOMIC_RADIUS)
df['E0'] = df['atom_0'].map(C.ELECTRO_NEG)
df['E1'] = df['atom_1'].map(C.ELECTRO_NEG)
df['dist_min_rad'] = df['dist'] - df['R0'] - df['R1']
df['dist_electro_neg_adj'] = df['dist'] * (df['E0'] + df['E1']) / 2
df.drop(columns=['R0','R1','E0','E1'], inplace=True)
# map scalar coupling types to integers and add dummy variables
df['type'] = df['type'].map(C.TYPES_MAP)
df = pd.concat((df, pd.get_dummies(df['type'], prefix='type')), axis=1)
# add angle related features
df = add_sc_angle_features(df, xyzs, dist_matrices)
# add molecule level features
mol_feat_df = pd.concat(mol_feats, axis=1).T
mol_feat_dict = mol_feat_df.to_dict()
for f in mol_feat_columns:
df[f] = df['molecule_name'].map(mol_feat_dict[f])
# add atom counts per molecule
df = add_atom_counts(df, structures_df)
# add molecule ids
df['molecule_id'] = df['molecule_name'].map(mol_ids)
return df
def store_train_and_test(all_df):
"""Split 'all_df' back to train and test and store the resulting dfs."""
train_df = all_df.iloc[:C.N_SC_TRAIN]
test_df = all_df.iloc[C.N_SC_TRAIN:]
train_df.drop(columns='molecule_name', inplace=True)
test_df.drop(columns='molecule_name', inplace=True)
test_df.drop(columns='scalar_coupling_constant', inplace=True)
# Add scalar coupling contributions to train and normalize
contribs_df = pd.read_csv(
C.RAW_DATA_PATH + 'scalar_coupling_contributions.csv')
train_df = pd.concat((train_df, contribs_df[C.CONTRIB_COLS]), axis=1)
train_df[[C.TARGET_COL, 'fc']] = \
(train_df[[C.TARGET_COL, 'fc']] - C.SC_MEAN) / C.SC_STD
train_df[C.CONTRIB_COLS[1:]] = train_df[C.CONTRIB_COLS[1:]] / C.SC_STD
train_df.to_csv(C.PROC_DATA_PATH + 'train_proc_df.csv')
test_df.to_csv(C.PROC_DATA_PATH + 'test_proc_df.csv')
## Functions to create atom and bond level features
def one_hot_encoding(x, set):
one_hot = [int(x == s) for s in set]
return one_hot
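# For example, assuming C.SYMBOLS is the atom vocabulary ['H', 'C', 'N', 'O', 'F'] (an assumption,
# since constants.py is not shown here):
# >>> one_hot_encoding('C', ['H', 'C', 'N', 'O', 'F'])
# [0, 1, 0, 0, 0]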
def get_bond_features(mol, eucl_dist):
"""
Compute the following features for each bond in 'mol':
- bond type: categorical {1: single, 2: double, 3: triple,
4: aromatic} (one-hot)
- is conjugated: bool {0, 1}
- is in ring: bool {0, 1}
- euclidean distance: float
- normalized eucl distance: float
"""
n_bonds = mol.GetNumBonds()
features = np.zeros((n_bonds, C.N_BOND_FEATURES))
bond_idx = np.zeros((n_bonds, 2))
for n, e in enumerate(mol.GetBonds()):
i = e.GetBeginAtomIdx()
j = e.GetEndAtomIdx()
dc_e_feats = dc.feat.graph_features.bond_features(e).astype(int)
features[n, :6] = dc_e_feats
features[n, 6] = eucl_dist[i, j]
bond_idx[n] = i, j
sorted_idx = bond_idx[:,0].argsort()
dists = features[:, 6]
features[:, 7] = (dists - dists.mean()) / dists.std() # normed_dist
return features[sorted_idx], bond_idx[sorted_idx]
def get_atom_features(mol, dist_matrix):
"""
Compute the following features for each atom in 'mol':
- atom type: H, C, N, O, F (one-hot)
- degree: 1, 2, 3, 4, 5 (one-hot)
- Hybridization: SP, SP2, SP3, UNSPECIFIED (one-hot)
- is aromatic: bool {0, 1}
- formal charge: int
- atomic number: float
- average bond length: float
    - average weight of neighboring atoms: float
- donor: bool {0, 1}
- acceptor: bool {0, 1}
"""
n_atoms = mol.GetNumAtoms()
features = np.zeros((n_atoms, C.N_ATOM_FEATURES))
adj_matrix = rdmolops.GetAdjacencyMatrix(mol)
for a in mol.GetAtoms():
idx = a.GetIdx()
if sum(adj_matrix[idx]) > 0:
ave_bond_length = np.mean(dist_matrix[idx][adj_matrix[idx]==1])
ave_neighbor_wt = np.mean(
[n.GetAtomicNum() for n in a.GetNeighbors()])
else:
ave_bond_length, ave_neighbor_wt = 0.0, 0.0
sym = a.GetSymbol()
a_feats = one_hot_encoding(sym, C.SYMBOLS) \
+ one_hot_encoding(a.GetDegree(), C.DEGREES) \
+ one_hot_encoding(a.GetHybridization(), C.HYBRIDIZATIONS) \
+ [a.GetIsAromatic(), a.GetFormalCharge(), a.GetAtomicNum(),
ave_bond_length, ave_neighbor_wt]
features[idx, :len(a_feats)] = np.array(a_feats)
feat_factory = ChemicalFeatures.BuildFeatureFactory(C.FDEF)
try:
chem_feats = feat_factory.GetFeaturesForMol(mol)
for t in range(len(chem_feats)):
if chem_feats[t].GetFamily() == 'Donor':
for i in chem_feats[t].GetAtomIds():
features[i, -2] = 1
elif chem_feats[t].GetFamily() == 'Acceptor':
for i in chem_feats[t].GetAtomIds():
features[i, -1] = 1
except RuntimeError as e:
print(e)
return features
def get_atom_and_bond_features(mols, mol_ids, dist_matrices):
atom_features, bond_features = [], []
bond_idx, atom_to_m_id, bond_to_m_id = [], [], []
print('Get atom and bond features.')
for it, m_name in enumerate(mols):
print_progress(it, C.N_MOLS)
m_id, mol = mol_ids[m_name], mols[m_name]
dist_matrix = dist_matrices[m_name]
n_atoms, n_bonds = mol.GetNumAtoms(), mol.GetNumBonds()
atom_features.append(get_atom_features(mol, dist_matrix))
e_feats, b_idx = get_bond_features(mol, dist_matrix)
bond_features.append(e_feats)
bond_idx.append(b_idx)
atom_to_m_id.append(np.repeat(m_id, n_atoms))
bond_to_m_id.append(np.repeat(m_id, n_bonds))
atom_features = pd.DataFrame(
np.concatenate(atom_features), columns=C.ATOM_FEATS)
bond_features = pd.DataFrame(
np.concatenate(bond_features), columns=C.BOND_FEATS)
bond_idx = np.concatenate(bond_idx)
bond_features['idx_0'] = bond_idx[:,0]
bond_features['idx_1'] = bond_idx[:,1]
atom_features['molecule_id'] = np.concatenate(atom_to_m_id)
bond_features['molecule_id'] = np.concatenate(bond_to_m_id)
return atom_features, bond_features
def store_atom_and_bond_features(atom_df, bond_df):
atom_df.to_csv(C.PROC_DATA_PATH + 'atom_df.csv')
bond_df.to_csv(C.PROC_DATA_PATH + 'bond_df.csv')
## Functions to store distance matrices
def store_graph_distances(graph_dist_matrices):
graph_dist_df = pd.concat(graph_dist_matrices)
graph_dist_df.reset_index(drop=True, inplace=True)
graph_dist_df.replace(1e8, 10, inplace=True) # fix for one erroneous atom
graph_dist_df = graph_dist_df.astype(int)
graph_dist_df.to_csv(C.PROC_DATA_PATH + 'graph_dist_df.csv')
def store_eucl_distances(dist_matrices, atom_df):
dist_df = pd.DataFrame(np.concatenate(
[np.pad(dm, [(0,0), (0, C.MAX_N_ATOMS-dm.shape[1])], mode='constant')
for dm in dist_matrices.values()]
))
dist_df['molecule_id'] = atom_df['molecule_id']
dist_df.to_csv(C.PROC_DATA_PATH + 'dist_df.csv')
## Functions to compute cosine angles for all bonds
def _get_combinations(idx_0_group):
s = list(idx_0_group['idx_1'])[1:]
return [list(combinations(s, r))[-1] for r in range(len(s), 0, -1)]
def get_all_cosine_angles(bond_df, structures_df, mol_ids, store=True):
"""Compute cosine angles between all bonds. Grouped at the bond level."""
bond_idx = bond_df[['molecule_id', 'idx_0', 'idx_1']].astype(int)
in_out_idx = pd.concat((
bond_idx,
bond_idx.rename(columns={'idx_0': 'idx_1', 'idx_1': 'idx_0'})
), sort=False)
gb_mol_0_bond_idx = in_out_idx.groupby(['molecule_id', 'idx_0'])
angle_idxs = []
print('Get cosine angle indices.')
for it, (mol_id, idx_0) in enumerate(gb_mol_0_bond_idx.groups):
# iterate over all atoms (atom_{idx0})
print_progress(it, gb_mol_0_bond_idx.ngroups, print_iter=500000)
idx_0_group = gb_mol_0_bond_idx.get_group((mol_id, idx_0))
combs = _get_combinations(idx_0_group)
for i, comb in enumerate(combs):
# iterate over all bonds of the atom_{idx0} (bond_{idx_0, idx_1})
idx_1 = idx_0_group['idx_1'].iloc[i]
for idx_2 in comb:
# iterate over all angles between bonds with bond_{idx_0, idx_1}
# as base
angle_idxs.append((mol_id, idx_0, idx_1, idx_2))
angle_cols = ['molecule_id', 'atom_index_0', 'atom_index_1', 'atom_index_2']
angle_df = pd.DataFrame(angle_idxs, columns=angle_cols)
angle_df['molecule_name'] = angle_df['molecule_id'].map(
{v:k for k,v in mol_ids.items()})
angle_df.drop(columns='molecule_id', inplace=True)
for i in range(3): angle_df = map_atom_info(angle_df, i, structures_df)
drop_cols = ['atom_0', 'atom_1', 'atom_2', 'molecule_id_x', 'molecule_id_y']
angle_df.drop(columns=drop_cols, inplace=True)
for c in ['x', 'y', 'z']:
angle_df[f'{c}_0_1'] = \
angle_df[f'{c}_0'].values - angle_df[f'{c}_1'].values
angle_df[f'{c}_0_2'] = \
angle_df[f'{c}_0'].values - angle_df[f'{c}_2'].values
def cos_angles(v1, v2):
return (v1 * v2).sum(1) / np.sqrt((v1 ** 2).sum(1) * (v2 ** 2).sum(1))
angle_df['cos_angle'] = cos_angles(
angle_df[['x_0_1', 'y_0_1', 'z_0_1']].values,
angle_df[['x_0_2', 'y_0_2', 'z_0_2']].values
)
angle_df = angle_df[['molecule_id', 'atom_index_0', 'atom_index_1',
'atom_index_2', 'cos_angle']]
gb_mol_angle = angle_df.groupby('molecule_id')
gb_mol_bond_idx = bond_idx.groupby('molecule_id')
angle_to_in_bond, angle_to_out_bond = [], []
print('Get cosine angles.')
for i, mol_id in enumerate(mol_ids.values()):
print_progress(i, C.N_MOLS)
b_df = gb_mol_bond_idx.get_group(mol_id)
a_df = gb_mol_angle.get_group(mol_id)
b_in_idxs = b_df[['idx_0', 'idx_1']].values
b_out_idxs = b_df[['idx_1', 'idx_0']].values
a1 = a_df[['atom_index_0', 'atom_index_1', 'cos_angle']].values
a2 = a_df[['atom_index_0', 'atom_index_2', 'cos_angle']].values
for a in np.concatenate((a1, a2)):
if any(np.all(b_in_idxs==a[:2], axis=1)):
a_to_in_idx = np.where(np.all(b_in_idxs==a[:2], axis=1))[0][0]
angle_to_in_bond.append((mol_id, a_to_in_idx, a[-1]))
if any(np.all(b_out_idxs==a[:2], axis=1)):
a_to_out_idx = np.where(np.all(b_out_idxs==a[:2], axis=1))[0][0]
angle_to_out_bond.append((mol_id, a_to_out_idx, a[-1]))
angle_in_df = pd.DataFrame(
angle_to_in_bond, columns=['molecule_id', 'b_idx', 'cos_angle'])
angle_out_df = pd.DataFrame(
angle_to_out_bond, columns=['molecule_id', 'b_idx', 'cos_angle'])
if store: store_angles(angle_in_df, angle_out_df)
return angle_in_df, angle_out_df
def store_angles(angle_in_df, angle_out_df):
angle_in_df.to_csv(C.PROC_DATA_PATH + 'angle_in_df.csv')
angle_out_df.to_csv(C.PROC_DATA_PATH + 'angle_out_df.csv')
def process_and_store_structures(structures_df, mol_ids):
structures_df['molecule_id'] = structures_df['molecule_name'].map(mol_ids)
structures_df.to_csv(C.PROC_DATA_PATH + 'structures_proc_df.csv')
return structures_df
def _clear_memory(var_strs):
for var_str in var_strs: del globals()[var_str]
gc.collect()
if __name__=='__main__':
# import data
train_df = pd.read_csv(C.RAW_DATA_PATH + 'train.csv', index_col=0)
test_df = pd.read_csv(C.RAW_DATA_PATH + 'test.csv', index_col=0)
structures_df = | pd.read_csv(C.RAW_DATA_PATH + 'structures.csv') | pandas.read_csv |
import numpy as np
import pandas as pd
from . import util as DataUtil
from . import cols as DataCol
"""
The main data loader.
TODO: population & common special dates
"""
class DataCenter:
def __init__(self):
self.__kabko = None
self.__dates_global = pd.DataFrame([], columns=DataCol.DATES_GLOBAL)
self.__dates_local = pd.DataFrame([], columns=DataCol.DATES_LOCAL)
self.__date_names_global = np.array([])
self.__date_names_local = np.array([])
self.__population_global = None
self.__covid_local = None
self.raw_global = None
self.data_global = None
def load_covid_local(
self,
df,
kabko_col="kabko",
date_col="date",
rename_cols={
"infected": DataCol.I,
"infectious": DataCol.I,
"recovered": DataCol.R,
"dead": DataCol.D
},
drop_cols=["infected_total"],
drop_first_col=False,
exclude_kabkos=[
"AWAK BUAH KAPAL",
"RS LAPANGAN INDRAPURA"
]
):
df = df.copy()
labels = [DataCol.I, DataCol.R, DataCol.D]
df.loc[:, date_col] = pd.to_datetime(df[date_col])
drop_cols = [df.columns[0], *drop_cols] if drop_first_col else drop_cols
df.drop(columns=drop_cols, axis=1, inplace=True)
df.drop(df.index[df[kabko_col].isin(exclude_kabkos)], inplace=True)
rename_cols = {
kabko_col: DataCol.KABKO,
date_col: DataCol.DATE,
**rename_cols
}
df.rename(columns=rename_cols, inplace=True)
df.loc[:, labels] = df[labels].astype(DataUtil.DEFAULT_DTYPE)
self.__covid_local = df
self.__kabko = df[kabko_col].unique()
return self.__covid_local
@property
def kabko(self):
if self.__kabko is None:
if self.__covid_local is None:
raise Exception("Please set/load covid data first")
self.load_covid_local(self.__covid_local)
return self.__kabko
@property
def covid_local(self):
return self.__covid_local
def load_vaccine(
self,
df,
date_col="date",
labels_orig=[
"people_vaccinated",
"people_fully_vaccinated"
]
):
if labels_orig:
df = df[[date_col, *labels_orig]]
df = df.copy()
df.loc[:, date_col] = pd.to_datetime(df[date_col])
rename_cols = {
date_col: DataCol.DATE,
**dict(zip(labels_orig, [
DataCol.VAC_PEOPLE,
DataCol.VAC_FULL
]))
}
df.rename(columns=rename_cols, inplace=True)
df.set_index(DataCol.DATE, inplace=True)
self.__vaccine = df
return self.__vaccine
@property
def vaccine(self):
return self.__vaccine
def load_test(
self,
df,
date_col="Date",
label_orig="Cumulative total"
):
if label_orig:
df = df[[date_col, label_orig]]
df = df.copy()
df.loc[:, date_col] = pd.to_datetime(df[date_col])
rename_cols = {
date_col: DataCol.DATE,
label_orig: DataCol.TEST
}
df.rename(columns=rename_cols, inplace=True)
df.set_index(DataCol.DATE, inplace=True)
self.__test = df
return self.__test
@property
def test(self):
return self.__test
def load_covid_global(
self,
df,
date_col="date",
label_orig="total_cases"
):
if label_orig:
df = df[[date_col, label_orig]]
df = df.copy()
df.loc[:, date_col] = pd.to_datetime(df[date_col])
rename_cols = {
date_col: DataCol.DATE,
label_orig: DataCol.I_TOT_GLOBAL
}
df.rename(columns=rename_cols, inplace=True)
df.set_index(DataCol.DATE, inplace=True)
self.__covid_global = df
return self.__covid_global
@property
def covid_global(self):
return self.__covid_global
def load_dates(
self,
df,
name=None,
name_col="name",
start_col="start", end_col="end",
val_col="value"
):
if name is None and DataCol.NAME not in df.columns and name_col not in df.columns:
raise Exception("Provide name argument if dataframe doesn't have name column")
if name is not None and (DataCol.NAME in df.columns or name_col in df.columns):
raise Exception("Dataframe already has name column but name argument was given")
df = DataUtil.prepare_dates(
df,
name_col=name_col,
start_col=start_col,
end_col=end_col,
val_col=val_col
)
if name is not None and DataCol.NAME not in df.columns:
df[DataCol.NAME] = pd.Series(np.array(len(df) * [name]), dtype=str)
if DataCol.KABKO not in df.columns:
df = df[DataCol.DATES_GLOBAL]
self.__dates_global = pd.concat([self.__dates_global, df])
self.__date_names_global = self.__dates_global[DataCol.NAME].unique()
else:
df = df[DataCol.DATES_LOCAL]
self.__dates_local = pd.concat([self.__dates_local, df])
self.__date_names_local= self.__dates_local[DataCol.NAME].unique()
self.__date_names = np.unique(np.concatenate([self.__date_names_global, self.__date_names_local]))
return df
@property
def dates_global(self):
return self.__dates_global
@property
def dates_local(self):
return self.__dates_local
@property
def date_names_global(self):
return self.__date_names_global
@property
def date_names_local(self):
return self.__date_names_local
@property
def date_names(self):
return self.__date_names
def get_covid_kabko(
self,
kabko
):
covid = self.covid_local.loc[
self.covid_local[DataCol.KABKO] == kabko,
[
DataCol.DATE,
*DataCol.IRD
]
].copy()
# del covid["kabko"]
covid.set_index(DataCol.DATE, inplace=True)
covid.sort_index(ascending=True, inplace=True)
return covid
def get_dates_kabko(
self,
kabko
):
dates = pd.concat([
self.dates_global.copy(),
self.dates_local[self.dates_local[DataCol.KABKO] == kabko][DataCol.DATES_GLOBAL]
])
return dates
def load_population(
self,
df,
kabko_col="kabko",
label_orig="semua"
):
if label_orig:
df = df[[kabko_col, label_orig]]
df = df.copy()
rename_cols = {
kabko_col: DataCol.KABKO,
label_orig: DataCol.N
}
df.rename(columns=rename_cols, inplace=True)
self.__population = df
self.__population_global = self.get_population_kabko("INDONESIA")
return self.__population
@property
def population(self):
return self.__population
@property
def population_global(self):
return self.__population_global
def get_population_kabko(
self,
kabko
):
return self.population[self.population[DataCol.KABKO] == kabko][DataCol.N].values[0]
def set_global_ts(
self,
vaccine,
test,
covid_global
):
self.__vaccine = vaccine
self.__test = test
self.__covid_global = covid_global
# Full of defaults
# For custom, DIY
def load_excel(
self,
path,
):
self.load_covid_global(pd.read_excel(path, sheet_name="covid_indo"))
self.load_covid_local(pd.read_excel(path, sheet_name="covid_jatim"))
self.load_vaccine( | pd.read_excel(path, sheet_name="vaccine") | pandas.read_excel |
from django.shortcuts import render
from django.contrib import messages
from .models import iabgInputForm
from .custom_apps.intersight_rest import intersight_get
from .custom_apps.as_built import create_word_doc_paragraph, create_word_doc_table, create_word_doc_title
import pandas as pd
import os
from .forms import iabgForm
# Create your views here.
def index(request):
return render(request,'is_abg/index.html')
def charts(request):
return render(request,'is_abg/charts.html')
def broken(request):
return render(request,'is_abg/broke.html')
def ua(request):
return render(request,'is_abg/utilities-animation.html')
def ub(request):
return render(request,'is_abg/utilities-border.html')
def uo(request):
return render(request,'is_abg/utilities-other.html')
def uc(request):
return render(request,'is_abg/utilities-color.html')
def buttons(request):
return render(request,'is_abg/buttons.html')
def cards(request):
return render(request,'is_abg/cards.html')
def tables(request):
return render(request,'is_abg/tables.html')
def abg(request):
# if this is a POST request we need to process the form data
if request.method == 'POST':
# create a form instance and populate it with data from the request:
form = iabgForm(request.POST)
# check whether it's valid:
if form.is_valid():
for i in iabgInputForm.objects.all():
i.delete()
post = form.save(commit=False)
post.save()
for i in iabgInputForm.objects.all():
blade_server = intersight_get(resource_path='/compute/Blades',
private_key=i.private_api_key, public_key=i.public_api_key)
compute_summary = intersight_get(
resource_path='/compute/PhysicalSummaries',
private_key=i.private_api_key, public_key=i.public_api_key,
query_params={
"$select": "AvailableMemory,CpuCapacity,Dn,Firmware,Model,Name,OperPowerState,OperState"
})
rack_server = intersight_get(resource_path='/compute/RackUnits',
private_key=i.private_api_key, public_key=i.public_api_key)
physical_ports = intersight_get(
resource_path='/ether/PhysicalPorts',
private_key=i.private_api_key, public_key=i.public_api_key,
query_params={
"$filter": "OperState eq 'up'",
"$orderby": "Dn",
"$select": "AggregatePortId,Mode,Dn,OperSpeed,OperState,PeerDn,TransceiverType"
})
fc_ports = intersight_get(resource_path='/fc/PhysicalPorts',
private_key=i.private_api_key,
public_key=i.public_api_key,
query_params={
"$filter": "OperState eq 'up'",
"$orderby": "Dn",
"$select": "PortChannelId,OperSpeed,Dn,Mode,OperState,Wwn"
})
firmware_running = intersight_get(resource_path='/firmware/RunningFirmwares',
private_key=i.private_api_key,
public_key=i.public_api_key,
query_params={
"$orderby": "Type",
"$select": "Dn,Type,Version,PackageVersion,ObjectType,Component"})
hyperflex_cluster = intersight_get(resource_path='/hyperflex/Clusters',
private_key=i.private_api_key,
public_key=i.public_api_key,
query_params={
"$select": "Summary"})
hyperflex_node = intersight_get(resource_path='/hyperflex/Nodes',
private_key=i.private_api_key,
public_key=i.public_api_key,
query_params={
"$select": "DisplayVersion,HostName,Ip,ModelNumber,Role,SerialNumber,Status,Version"})
hyperflex_health = intersight_get(resource_path='/hyperflex/Healths',
private_key=i.private_api_key,
public_key=i.public_api_key,
query_params={"$select": "ResiliencyDetails"})
service_profile = intersight_get(resource_path='/ls/ServiceProfiles',
private_key=i.private_api_key,
public_key=i.public_api_key,
query_params={"$filter": "AssignState eq 'assigned'", "$orderby": "OperState"})
management_address = intersight_get(resource_path='/management/Interfaces',
private_key=i.private_api_key,
public_key=i.public_api_key,
query_params={"$select": "Dn,Ipv4Address,Ipv4Mask,Ipv4Gateway,MacAddress"})
if firmware_running == None:
return render(request, 'is_abg/broke.html')
management_df = pd.DataFrame.from_dict(management_address['Results'])
management_df = management_df.drop(columns=['ClassId', 'Moid', 'ObjectType'])
service_profile_df = pd.DataFrame.from_dict(service_profile['Results'])
service_profile_df = service_profile_df.drop(columns=['ClassId', 'Moid', 'ObjectType',
'Owners', 'DeviceMoId', 'DomainGroupMoid','CreateTime','PermissionResources',
'RegisteredDevice', 'Rn', 'SharedScope', 'Tags', 'ModTime', 'AccountMoid'])
hyperflex_health_df = pd.DataFrame.from_dict(hyperflex_health['Results'])
hyperflex_node_df = pd.DataFrame.from_dict(hyperflex_node['Results'])
hyperflex_node_df = hyperflex_node_df.drop(columns=['ClassId', 'Moid', 'ObjectType'])
firmware_running_df = pd.DataFrame.from_dict(firmware_running['Results'])
firmware_running_df = firmware_running_df.drop(columns=['ClassId', 'Moid', 'ObjectType'])
fc_ports_df = | pd.DataFrame.from_dict(fc_ports['Results']) | pandas.DataFrame.from_dict |
import requests
import json
import pandas as pd
import os.path
from warnings import warn
from bs4 import BeautifulSoup
from openpyxl import load_workbook
from openpyxl.styles import PatternFill
import seaborn as sns
class SessionWithUrlBase(requests.Session):
"""
from https://github.com/quintel/third-party/blob/master/Python_ETM_API/ETM_API.py
Helper class to store the base url. This allows us to only type the
relevant additional information.
from: https://stackoverflow.com/questions/42601812/python-requests-url-base-in-session
"""
def __init__(self, url_base=None, *args, **kwargs):
super(SessionWithUrlBase, self).__init__(*args, **kwargs)
if url_base is None:
url_base = "https://engine.energytransitionmodel.com/api/v3"
self.url_base = url_base
def request(self, method, url, **kwargs):
modified_url = self.url_base + url
return super(SessionWithUrlBase, self).request(method, modified_url, **kwargs)
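# Usage sketch (hypothetical scenario id): with the default url_base, a relative request such as
#   SessionWithUrlBase().get("/scenarios/123456")
# is sent to "https://engine.energytransitionmodel.com/api/v3/scenarios/123456".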
class ETMapi:
def __init__(
self,
area_code="nl",
scenario_id=None,
end_year=2050,
source="api-WB",
url_base=None,
):
self.session = SessionWithUrlBase(url_base=url_base)
self.source = source
self.area_code = area_code
self.end_year = end_year
self.browse_url = "https://pro.energytransitionmodel.com/scenarios/"
self.headers = {
"Content-Type": "application/json",
}
self.verbose = True
self.debug = False
self.user_values = dict()
# some default metrics to query
self.gqueries = [
"dashboard_reduction_of_co2_emissions_versus_1990",
"dashboard_renewability",
"dashboard_energy_import_netto",
]
self.scenario_id = scenario_id
@property
def scenario_id(self):
return self._scenario_id
@scenario_id.setter
def scenario_id(self, value):
self._scenario_id = self.id_extractor(value)
def get_title(self, scenario_id):
if scenario_id is not None:
r = self.session.get(f"/scenarios/{scenario_id}/")
if r.status_code != 200:
raise ValueError("Response not 200")
di = json.loads(r.content)
title = di["title"]
else:
title = ""
return title
def create_new_scenario(self, title, use_custom_values=False):
"""
POST-method to create a scenario in ETM using ETM v3 API. Converts non-strings to strings.
        Pushes the setup as json in the correct format for the ETM v3 API.
arguments:
area_code (optional, one of the existing area codes (use etm.area_codes() to obtain))
title (title of scenario)
end_year (interger end year)
source (paper trail, recommended to use)
scenario_id (refference scenario)
verbose (false turns off all print and warn)
returns:
requests response object
"""
scenario_setup = {
"scenario": {
"area_code": str(self.area_code),
"title": str(title),
"end_year": str(self.end_year),
"source": str(self.source),
"scenario_id": None,
}
}
url = "/scenarios/"
r = self.session.post(url, json=scenario_setup, headers=self.headers)
if r.status_code == 200:
api_url = json.loads(r.content)["url"]
self.scenario_id = api_url.split("/")[-1]
self.browse_url += self.scenario_id
if use_custom_values:
p = self._change_inputs()
if not p.status_code == 200:
raise ValueError(f"Response not succesful: {p.json()['errors']}")
if self.verbose:
print()
print("Browsable URL to scenario:")
print(self.browse_url)
if not r.status_code == 200:
raise ValueError(f"Response not succesful: {r.status_code}")
if self.debug:
return r
else:
pass
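    # Usage sketch (hypothetical title and slider key; assumes the ETM engine is reachable):
    #   etm = ETMapi(area_code="nl", end_year=2050)
    #   etm.user_values = {"some_slider_key": 42.0}   # slider key is illustrative only
    #   etm.create_new_scenario("my test scenario", use_custom_values=True)
    #   print(etm.browse_url)   # browsable link to the newly created scenario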
def _change_inputs(self):
"""
Change inputs to ETM bases on self.user_values.
"""
input_data = {
"scenario": {"user_values": self.user_values},
"detailed": True,
"gqueries": self.gqueries,
}
url = "/scenarios/"
p = self.session.put(
url + self.scenario_id, json=input_data, headers=self.headers
)
# update metrics
self.metrics = pd.DataFrame.from_dict(p.json()["gqueries"], orient="index")
return p
def update_inputs(self, debug=False):
"""Basicly ._change_inputs but does not return p out of debug"""
p = self._change_inputs()
if debug:
return p
else:
pass
def get_areas(self, filepath=None, refresh=False, save_csv=True):
"""
Use ETM v3 API to request all areas and from that extract only the area codes, to use in
        other ETM v3 API requests.
arguments:
filepath:
specify location to save to / read from (default: /etm_ref/)
refresh:
specify true to force refresh, checks for existing file on path location to prevent
errors from trying to load non-existing files
            save_csv:
caches the api response to a csv to speed up the process if called again.
returns
            set with areas (as attribute)
(saves csv to save requests and allow browsing of codes)
"""
if filepath is None:
filepath = "etm_ref/areacodes.csv"
if refresh or not os.path.isfile(filepath):
url = "/areas/"
r = self.session.get(url)
r_list = json.loads(r.content)
self.areas_raw = r_list
# create a set to refer to if needed (drops dupes & sorts)
areas = set([item["area"] for item in r_list if item["useable"]])
df = pd.DataFrame(r_list)
if save_csv:
df.to_csv(filepath)
else:
df = pd.read_csv(filepath, index_col=0)
areas = set(df["area"])
self.areas = areas
return areas
    @staticmethod
    def get_area_settings(area_code):
"""
Gets the area settings based on the area code.
argument:
area_code (as defined by ETM)
returns:
requests response object
"""
request_url = (
f"https://engine.energytransitionmodel.com/api/v3/areas/{area_code}"
)
response = requests.get(request_url)
return response
def generate_input_worksheet(
self, filepath=None, scenario_list=None, prettify=False
):
if scenario_list is None:
scenario_list = [self.scenario_id]
if filepath is None:
filepath = "latest_generated_worksheet.xlsx"
if self.scenario_id is not None:
filepath = f"latest_generated_worksheet_{self.scenario_id}.xlsx"
# load in front-end variables based on scraped contents
ref = pd.read_csv("etm_ref/clean_scraped_inputs.csv", index_col=0)
# take only what we are interested in
df = ref[["key", "group", "subgroup", "translated_name", "unit"]].copy()
df.columns = ["key", "Section", "Subsection", "Slider name", "Unit"]
for sid in scenario_list:
self.scenario_id = sid # use setter
col_name = self.title + f" ({self.scenario_id})"
df.loc[:, col_name] = | pd.Series(dtype="float64") | pandas.Series |
#General
import streamlit as st
import pandas as pd
import math
#Models
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.linear_model import LogisticRegressionCV
from sklearn.svm import LinearSVC
#Preprocessing
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
#Metrics
from sklearn.metrics import confusion_matrix
from sklearn.metrics import ConfusionMatrixDisplay, accuracy_score,recall_score,precision_score
from tempfile import NamedTemporaryFile
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
from sklearn.tree import plot_tree
import base64
from sklearn.metrics import plot_confusion_matrix
from fpdf import FPDF
#SESSION STATE INITIALIZATION
if "light" not in st.session_state:
st.session_state.light = "red"
if "warning" not in st.session_state:
st.session_state.warning = "Undefined Error"
if "model" not in st.session_state:
st.session_state.model= "To_be_selected"
if 'page' not in st.session_state:
st.session_state.page = 0
if "selected_var" not in st.session_state:
st.session_state.selected_var=[]
if "df" not in st.session_state:
st.session_state.df= 0
if "model_type" not in st.session_state:
st.session_state.model_type="To_be_selected"
##################################################
#IMPORT CSS
#with open('style.css') as f:
#st.markdown(f'<style>{f.read()}</style>', unsafe_allow_html=True)
####################################################
#Train-test Split by date
#Assumes the column Target to exist
def train_test_by_var(X,y,var):
df= pd.concat([X,y],axis=1)
df.sort_values(by=var,inplace=True)
y = df["Target"]
X= df.drop(["Target"],axis=1)
train_instances= int(math.modf((X.shape[0]/100)*80)[1])
return X.iloc[0:train_instances,:], X.iloc[train_instances: ,:] , y[0:train_instances], y[train_instances :]
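# Usage sketch (hypothetical frames): given predictors X that include a 'Month' column and a target
# Series y named 'Target', the chronologically first 80% of rows become the training set:
#   X_train, X_test, y_train, y_test = train_test_by_var(X, y, "Month")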
# Disabled: Metrics_explanation is only defined (commented out) inside app(), so this module-level
# block would raise a NameError on import.
# if Metrics_explanation:
#     st.write("**Accuracy**")
##############################################################
#Application
def app():
#Progress bar
st.markdown(
"""
<style>
.stProgress > div > div > div > div {
background-image: linear-gradient(to right, #5b61f9 , #5b61f9);
}
</style>""",
unsafe_allow_html=True,
)
my_bar = st.progress(100)
#Title and description
st.markdown('<h1 style="color: #5b61f9;">Model results</h1>',
unsafe_allow_html=True)
st.write('Those are the performances of your Model.')
#Metrics_explanation = st.button("Find More about Metrics")
st.write("**Accuracy**" +": " + "Percentage of well classified rows.")
st.write("**Precision**" +": " + "Probability that an object predicted to be true is actually true.")
st.write("**Recall**" +": " + "Measure of how many true elements were detected.")
# Import dataframe from session state, adding month for sort the data and check if it is already there to solve strange bug
to_keep= st.session_state.selected_var
if "Month" not in to_keep:
to_keep.append("Month")
df= st.session_state.df[to_keep]
#Divided Target and Predictors
X=df.drop(["Target"],axis=1)
y= df["Target"]
#identify numeric and not numeric
categorical_var= X.select_dtypes(exclude='number').columns.tolist()# include other datatypes
numerical_var= X.select_dtypes(include="number").columns.tolist()
if "Month" in categorical_var:
categorical_var.remove("Month")
if "Month" in categorical_var:
numerical_var.remove("Month")
# Top 9 classes + "other"
for el in X[categorical_var].columns:
if len(list(X[categorical_var][el].value_counts())) > 10:
a = list(X[categorical_var][el].value_counts()[:10].index.tolist())
X[el][X[categorical_var][el].isin(a) == False] = "Other"
#Select the model to be used- Here you can change/add/delete models
models={
"Perceptron": MLPClassifier(solver='adam', alpha=1e-5),
"SVM": LinearSVC(),
"Ensemble": GradientBoostingClassifier(),
"Logistic Regression": LogisticRegressionCV(),
"Simple Tree": DecisionTreeClassifier(),
"Random Forest": RandomForestClassifier()
}
model= models[st.session_state.model]
#Pipeline creation
#1) Steps
numerical_steps=[("Imputer_num",SimpleImputer(strategy="mean"))]
categorical_steps=[("Impupter_cat",SimpleImputer(strategy= "most_frequent")),("Encoder",OneHotEncoder(sparse=False))]
#2) Pipes
numerical_pipe= Pipeline(numerical_steps)
categorical_pipe= Pipeline(categorical_steps)
#3) Transformer
transformer= ColumnTransformer([("Numerical Transformation",numerical_pipe,numerical_var),
("Categorical Transformation",categorical_pipe,categorical_var)],
remainder="passthrough")
Final_Pipe= Pipeline([("Transformer",transformer),("Model",model)])
#Splitting data
#X_train, X_test, y_train, y_test = train_test_split(X, y)
X_train, X_test, y_train, y_test= train_test_by_var(X,y,"Month")
X_train,X_test = X_train.drop(["Month"],axis=1), X_test.drop(["Month"],axis=1)
#Fitting model
Final_Pipe.fit(X_train, y_train)
predictions = Final_Pipe.predict(X_test)
#######################################################
#START REPORT
########################################################
#General Metrics
st.markdown('<h2 style="color: #5B61F9;">Metrics</h2>',
unsafe_allow_html=True)
col1, col2, col3 = st.columns(3)
col1.metric("Accuracy", str(accuracy_score(y_test,predictions))[:4])
col2.metric("Precision", str(precision_score(y_test, predictions))[:4])
col3.metric("Recall", str(recall_score(y_test, predictions))[:4])
st.markdown('<h2 style="color: #5B61F9;">Confusion Matrix</h2>',
unsafe_allow_html=True)
a = plot_confusion_matrix(Final_Pipe, X_test, y_test,cmap="Purples")
st.pyplot(a.figure_)
figs=[]
figs.append(a.figure_)
#Trees specific metrics:
if st.session_state.model=="Random Forest" or st.session_state.model=="Simple Tree":
st.markdown('<h2 style="color: #5B61F9;">Features Importance</h2>',
unsafe_allow_html=True)
df1 = pd.get_dummies(X_train)
feature_importance = (Final_Pipe[1].feature_importances_)
forest_importances = | pd.Series(feature_importance, index=df1.columns) | pandas.Series |
# Licensed to Elasticsearch B.V under one or more agreements.
# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information
import warnings
import numpy as np
import pandas as pd
from pandas.core.dtypes.common import (
is_float_dtype,
is_bool_dtype,
is_integer_dtype,
is_datetime_or_timedelta_dtype,
is_string_dtype,
)
from pandas.core.dtypes.inference import is_list_like
from typing import NamedTuple, Optional
class Field(NamedTuple):
"""Holds all information on a particular field in the mapping"""
index: str
es_field_name: str
is_source: bool
es_dtype: str
es_date_format: Optional[str]
pd_dtype: type
is_searchable: bool
is_aggregatable: bool
is_scripted: bool
aggregatable_es_field_name: str
@property
def is_numeric(self) -> bool:
return is_integer_dtype(self.pd_dtype) or is_float_dtype(self.pd_dtype)
@property
def is_timestamp(self) -> bool:
return is_datetime_or_timedelta_dtype(self.pd_dtype)
@property
def is_bool(self) -> bool:
        return is_bool_dtype(self.pd_dtype)
#!/usr/bin/env python
"""Simple check of a csv file against the ledger file
Just to make sure nothing slipped through. It might have a high false-positive
rate, since it only compares some absolute numbers without checking any
additional info, but I think this could be good enough to make sure that I do
not forget any major expense :) Do this once every 4 weeks!
"""
import subprocess
from io import StringIO
import argparse
import pandas as pd
parser = argparse.ArgumentParser()
parser.add_argument("file", help="CSV file to check against the default ledger file")
parser.add_argument("--verbose", "-v", action="store_true", help="More output")
args = parser.parse_args()
VERBOSE = args.verbose
csv_file = args.file
# 1. Read the CSV into a pd.DataFrame
df = pd.read_csv(
csv_file,
skiprows=12,
skipfooter=2,
skip_blank_lines=True,
encoding="ISO-8859-1",
delimiter=";",
decimal=",",
thousands=".",
dtype=dict(Buchungstag=str, Valuta=str),
engine="python",
)
df.Buchungstag = pd.to_datetime(df.Buchungstag, format="%d%m%Y")
df.Valuta = pd.to_datetime(df.Valuta, format="%d%m%Y")
df["SollHaben"] = df[" "]
df["signed-amount"] = [
u * (1 if s == "H" else -1) for u, s in zip(df["Umsatz"], df["SollHaben"])
]
df["ledger-string"] = [f"{a:.2f}€" for a in df["signed-amount"]]
if not VERBOSE:
df.drop(
[
"Konto-Nr.",
"BLZ",
"Valuta",
"Kundenreferenz",
" ",
"IBAN",
"BIC",
"Währung",
"ledger-string",
"Umsatz",
"SollHaben",
],
axis=1,
inplace=True,
)
# 2. Check that each entry has a corresponding ledger entry
for row in df[::-1].iterrows():
date = row[1].Buchungstag
query = [
"ledger",
"csv",
"--begin",
        (date - pd.Timedelta(days=4)
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = tm.box_expected([False, True, True], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[2]
expected = tm.box_expected([True, True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, True, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[0]
expected = tm.box_expected([True, False, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[2]
expected = tm.box_expected([False, False, False], xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"datetimelike",
[
Timestamp("20130101"),
datetime(2013, 1, 1),
np.datetime64("2013-01-01T00:00", "ns"),
],
)
@pytest.mark.parametrize(
"op,expected",
[
(operator.lt, [True, False, False, False]),
(operator.le, [True, True, False, False]),
(operator.eq, [False, True, False, False]),
(operator.gt, [False, False, False, True]),
],
)
def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
# GH#17965, test for ability to compare datetime64[ns] columns
# to datetimelike
ser = Series(
[
Timestamp("20120101"),
Timestamp("20130101"),
np.nan,
Timestamp("20130103"),
],
name="A",
)
result = op(ser, datetimelike)
expected = Series(expected, name="A")
tm.assert_series_equal(result, expected)
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = comparison_op(arr, element)
index_result = comparison_op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
xbox = get_upcast_box(left, right, True)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == NaT, expected)
tm.assert_equal(NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != NaT, expected)
tm.assert_equal(NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < NaT, expected)
tm.assert_equal(NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
didx2 = DatetimeIndex(
["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np.datetime64("2014-02-01 00:00"),
np.datetime64("2014-03-01 00:00"),
np.datetime64("nat"),
np.datetime64("nat"),
np.datetime64("2014-06-01 00:00"),
np.datetime64("2014-07-01 00:00"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
box = box_with_array
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box)
dz = tm.box_expected(dz, box)
if box is pd.DataFrame:
tolist = lambda x: x.astype(object).values.tolist()[0]
else:
tolist = list
if op not in [operator.eq, operator.ne]:
msg = (
r"Invalid comparison between dtype=datetime64\[ns.*\] "
"and (Timestamp|DatetimeArray|list|ndarray)"
)
with pytest.raises(TypeError, match=msg):
op(dr, dz)
with pytest.raises(TypeError, match=msg):
op(dr, tolist(dz))
with pytest.raises(TypeError, match=msg):
op(dr, np.array(tolist(dz), dtype=object))
with pytest.raises(TypeError, match=msg):
op(dz, dr)
with pytest.raises(TypeError, match=msg):
op(dz, tolist(dr))
with pytest.raises(TypeError, match=msg):
op(dz, np.array(tolist(dr), dtype=object))
# The aware==aware and naive==naive comparisons should *not* raise
assert np.all(dr == dr)
assert np.all(dr == tolist(dr))
assert np.all(tolist(dr) == dr)
assert np.all(np.array(tolist(dr), dtype=object) == dr)
assert np.all(dr == np.array(tolist(dr), dtype=object))
assert np.all(dz == dz)
assert np.all(dz == tolist(dz))
assert np.all(tolist(dz) == dz)
assert np.all(np.array(tolist(dz), dtype=object) == dz)
assert np.all(dz == np.array(tolist(dz), dtype=object))
def test_comparison_tzawareness_compat_scalars(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box_with_array)
dz = tm.box_expected(dz, box_with_array)
# Check comparisons against scalar Timestamps
ts = Timestamp("2000-03-14 01:59")
ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
assert np.all(dr > ts)
msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
assert np.all(dz > ts_tz)
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dz, ts)
if op not in [operator.eq, operator.ne]:
# GH#12601: Check comparison against Timestamps and DatetimeIndex
with pytest.raises(TypeError, match=msg):
op(ts, dz)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
# Bug in NumPy? https://github.com/numpy/numpy/issues/13841
# Raising in __eq__ will fallback to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
self, comparison_op, other, tz_aware_fixture, box_with_array
):
op = comparison_op
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
xbox = get_upcast_box(dtarr, other, True)
if op in [operator.eq, operator.ne]:
exbool = op is operator.ne
expected = np.array([exbool, exbool], dtype=bool)
expected = tm.box_expected(expected, xbox)
result = op(dtarr, other)
tm.assert_equal(result, expected)
result = op(other, dtarr)
tm.assert_equal(result, expected)
else:
msg = (
r"Invalid comparison between dtype=datetime64\[ns, .*\] "
f"and {type(other).__name__}"
)
with pytest.raises(TypeError, match=msg):
op(dtarr, other)
with pytest.raises(TypeError, match=msg):
op(other, dtarr)
def test_nat_comparison_tzawareness(self, comparison_op):
# GH#19276
# tzaware DatetimeIndex should not raise when compared to NaT
op = comparison_op
dti = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
expected = np.array([op == operator.ne] * len(dti))
result = op(dti, NaT)
tm.assert_numpy_array_equal(result, expected)
result = op(dti.tz_localize("US/Pacific"), NaT)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
# GH#22074
# regardless of tz, we expect these comparisons are valid
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
pd.timedelta_range("1D", periods=10),
pd.timedelta_range("1D", periods=10).to_series(),
pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
],
ids=lambda x: type(x).__name__,
)
def test_dti_cmp_tdi_tzawareness(self, other):
# GH#22074
        # regression test that we _don't_ call _assert_tzawareness_compat
# when comparing against TimedeltaIndex
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
result = dti == other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = dti != other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
msg = "Invalid comparison between"
with pytest.raises(TypeError, match=msg):
dti < other
with pytest.raises(TypeError, match=msg):
dti <= other
with pytest.raises(TypeError, match=msg):
dti > other
with pytest.raises(TypeError, match=msg):
dti >= other
def test_dti_cmp_object_dtype(self):
# GH#22074
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
other = dti.astype("O")
result = dti == other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
other = dti.tz_localize(None)
result = dti != other
tm.assert_numpy_array_equal(result, expected)
other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
result = dti == other
expected = np.array([True] * 5 + [False] * 5)
tm.assert_numpy_array_equal(result, expected)
msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
# This class is intended for "finished" tests that are fully parametrized
# over DataFrame/Series/Index/DatetimeArray
# -------------------------------------------------------------
# Addition/Subtraction of timedelta-like
@pytest.mark.arm_slow
def test_dt64arr_add_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
# GH#22005, GH#22163 check DataFrame doesn't raise TypeError
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng + two_hours
tm.assert_equal(result, expected)
rng += two_hours
tm.assert_equal(rng, expected)
def test_dt64arr_sub_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng - two_hours
tm.assert_equal(result, expected)
rng -= two_hours
tm.assert_equal(rng, expected)
# TODO: redundant with test_dt64arr_add_timedeltalike_scalar
def test_dt64arr_add_td64_scalar(self, box_with_array):
# scalar timedeltas/np.timedelta64 objects
# operate with np.timedelta64 correctly
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
)
dtarr = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(1, "s")
tm.assert_equal(result, expected)
result = np.timedelta64(1, "s") + dtarr
tm.assert_equal(result, expected)
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(5, "ms")
tm.assert_equal(result, expected)
result = np.timedelta64(5, "ms") + dtarr
tm.assert_equal(result, expected)
def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
# GH#23320 special handling for timedelta64("NaT")
tz = tz_naive_fixture
dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS")
other = np.timedelta64("NaT")
expected = DatetimeIndex(["NaT"] * 9, tz=tz)
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
other - obj
def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + tdarr
tm.assert_equal(result, expected)
result = tdarr + dtarr
tm.assert_equal(result, expected)
expected = date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)
expected = tm.box_expected(expected, box_with_array)
result = dtarr - tdarr
tm.assert_equal(result, expected)
msg = "cannot subtract|(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
tdarr - dtarr
# -----------------------------------------------------------------
# Subtraction of datetime-like scalars
@pytest.mark.parametrize(
"ts",
[
Timestamp("2013-01-01"),
Timestamp("2013-01-01").to_pydatetime(),
Timestamp("2013-01-01").to_datetime64(),
],
)
def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
# GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
idx = date_range("2013-01-01", periods=3)._with_freq(None)
idx = tm.box_expected(idx, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = idx - ts
tm.assert_equal(result, expected)
def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
# GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
# for DataFrame operation
dt64 = np.datetime64("2013-01-01")
assert dt64.dtype == "datetime64[D]"
dti = date_range("20130101", periods=3)._with_freq(None)
dtarr = tm.box_expected(dti, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = dtarr - dt64
tm.assert_equal(result, expected)
result = dt64 - dtarr
tm.assert_equal(result, -expected)
def test_dt64arr_sub_timestamp(self, box_with_array):
ser = date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
ser = ser._with_freq(None)
ts = ser[0]
ser = tm.box_expected(ser, box_with_array)
delta_series = Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
expected = tm.box_expected(delta_series, box_with_array)
tm.assert_equal(ser - ts, expected)
tm.assert_equal(ts - ser, -expected)
def test_dt64arr_sub_NaT(self, box_with_array):
# GH#18808
dti = DatetimeIndex([NaT, Timestamp("19900315")])
ser = tm.box_expected(dti, box_with_array)
result = ser - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
dti_tz = dti.tz_localize("Asia/Tokyo")
ser_tz = tm.box_expected(dti_tz, box_with_array)
result = ser_tz - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
# -------------------------------------------------------------
# Subtraction of datetime-like array-like
def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
dti = date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
expected = dti - dti
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
result = obj - obj.astype(object)
tm.assert_equal(result, expected)
def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
dti = date_range("2016-01-01", periods=3, tz=None)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
expected = dtarr - dtarr
result = dtarr - dt64vals
tm.assert_equal(result, expected)
result = dt64vals - dtarr
tm.assert_equal(result, expected)
def test_dt64arr_aware_sub_dt64ndarray_raises(
self, tz_aware_fixture, box_with_array
):
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
msg = "subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dtarr - dt64vals
with pytest.raises(TypeError, match=msg):
dt64vals - dtarr
# -------------------------------------------------------------
# Addition of datetime-like others (invalid)
def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
assert_cannot_add(dtarr, dt64vals)
def test_dt64arr_add_timestamp_raises(self, box_with_array):
# GH#22163 ensure DataFrame doesn't cast Timestamp to i8
idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
ts = idx[0]
idx = tm.box_expected(idx, box_with_array)
assert_cannot_add(idx, ts)
# -------------------------------------------------------------
# Other Invalid Addition/Subtraction
@pytest.mark.parametrize(
"other",
[
3.14,
np.array([2.0, 3.0]),
# GH#13078 datetime +/- Period is invalid
Period("2011-01-01", freq="D"),
# https://github.com/pandas-dev/pandas/issues/10329
time(1, 2, 3),
],
)
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
dtarr = tm.box_expected(dti, box_with_array)
msg = "|".join(
[
"unsupported operand type",
"cannot (add|subtract)",
"cannot use operands with types",
"ufunc '?(add|subtract)'? cannot use operands with types",
"Concatenation operation is not implemented for NumPy arrays",
]
)
assert_invalid_addsub_type(dtarr, other, msg)
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_parr(
self, dti_freq, pi_freq, box_with_array, box_with_array2
):
# GH#20049 subtracting PeriodIndex should raise TypeError
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
pi = dti.to_period(pi_freq)
dtarr = tm.box_expected(dti, box_with_array)
parr = tm.box_expected(pi, box_with_array2)
msg = "|".join(
[
"cannot (add|subtract)",
"unsupported operand",
"descriptor.*requires",
"ufunc.*cannot use operands",
]
)
assert_invalid_addsub_type(dtarr, parr, msg)
def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):
# https://github.com/pandas-dev/pandas/issues/10329
tz = tz_naive_fixture
obj1 = date_range("2012-01-01", periods=3, tz=tz)
obj2 = [time(i, i, i) for i in range(3)]
obj1 = tm.box_expected(obj1, box_with_array)
obj2 = tm.box_expected(obj2, box_with_array)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
# If `x + y` raises, then `y + x` should raise here as well
msg = (
r"unsupported operand type\(s\) for -: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 - obj2
msg = "|".join(
[
"cannot subtract DatetimeArray from ndarray",
"ufunc (subtract|'subtract') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 - obj1
msg = (
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 + obj2
msg = "|".join(
[
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'",
"ufunc (add|'add') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 + obj1
class TestDatetime64DateOffsetArithmetic:
# -------------------------------------------------------------
# Tick DateOffsets
# TODO: parametrize over timezone?
def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser + pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser - pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = -pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
pd.offsets.Second(5) - ser
@pytest.mark.parametrize(
"cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
)
def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):
# GH#4532
# smoke tests for valid DateOffsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
ser = tm.box_expected(ser, box_with_array)
offset_cls = getattr(pd.offsets, cls_name)
ser + offset_cls(5)
offset_cls(5) + ser
ser - offset_cls(5)
def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
# GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
tz = tz_aware_fixture
if tz == "US/Pacific":
dates = date_range("2012-11-01", periods=3, tz=tz)
offset = dates + pd.offsets.Hour(5)
assert dates[0] + pd.offsets.Hour(5) == offset[0]
dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="H")
expected = DatetimeIndex(
["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],
freq="H",
tz=tz,
)
dates = tm.box_expected(dates, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# TODO: sub?
for scalar in [pd.offsets.Hour(5), np.timedelta64(5, "h"), timedelta(hours=5)]:
offset = dates + scalar
tm.assert_equal(offset, expected)
offset = scalar + dates
tm.assert_equal(offset, expected)
# -------------------------------------------------------------
# RelativeDelta DateOffsets
def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
# GH#10699
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
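        # For DataFrame, tm.box_expected transposes to a wide two-row frame, so take the
        # first row to recover the original timestamps for the pointwise expectations below.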
# DateOffset relativedelta fastpath
relative_kwargs = [
("years", 2),
("months", 5),
("days", 3),
("hours", 5),
("minutes", 10),
("seconds", 2),
("microseconds", 5),
]
for i, (unit, value) in enumerate(relative_kwargs):
off = DateOffset(**{unit: value})
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
off = DateOffset(**dict(relative_kwargs[: i + 1]))
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
off - vec
# -------------------------------------------------------------
# Non-Tick, Non-RelativeDelta DateOffsets
# TODO: redundant with test_dt64arr_add_sub_DateOffset? that includes
# tz-aware cases which this does not
@pytest.mark.parametrize(
"cls_and_kwargs",
[
"YearBegin",
("YearBegin", {"month": 5}),
"YearEnd",
("YearEnd", {"month": 5}),
"MonthBegin",
"MonthEnd",
"SemiMonthEnd",
"SemiMonthBegin",
"Week",
("Week", {"weekday": 3}),
"Week",
("Week", {"weekday": 6}),
"BusinessDay",
"BDay",
"QuarterEnd",
"QuarterBegin",
"CustomBusinessDay",
"CDay",
"CBMonthEnd",
"CBMonthBegin",
"BMonthBegin",
"BMonthEnd",
"BusinessHour",
"BYearBegin",
"BYearEnd",
"BQuarterBegin",
("LastWeekOfMonth", {"weekday": 2}),
(
"FY5253Quarter",
{
"qtr_with_extra_week": 1,
"startingMonth": 1,
"weekday": 2,
"variation": "nearest",
},
),
("FY5253", {"weekday": 0, "startingMonth": 2, "variation": "nearest"}),
("WeekOfMonth", {"weekday": 2, "week": 2}),
"Easter",
("DateOffset", {"day": 4}),
("DateOffset", {"month": 5}),
],
)
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("n", [0, 5])
def test_dt64arr_add_sub_DateOffsets(
self, box_with_array, n, normalize, cls_and_kwargs
):
# GH#10699
# assert vectorized operation matches pointwise operations
if isinstance(cls_and_kwargs, tuple):
# If cls_name param is a tuple, then 2nd entry is kwargs for
# the offset constructor
cls_name, kwargs = cls_and_kwargs
else:
cls_name = cls_and_kwargs
kwargs = {}
if n == 0 and cls_name in [
"WeekOfMonth",
"LastWeekOfMonth",
"FY5253Quarter",
"FY5253",
]:
# passing n = 0 is invalid for these offset classes
return
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
offset_cls = getattr(pd.offsets, cls_name)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
offset = offset_cls(n, normalize=normalize, **kwargs)
expected = DatetimeIndex([x + offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + offset)
expected = DatetimeIndex([x - offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - offset)
expected = DatetimeIndex([offset + x for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, offset + vec)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
offset - vec
def test_dt64arr_add_sub_DateOffset(self, box_with_array):
# GH#10699
s = date_range("2000-01-01", "2000-01-31", name="a")
s = tm.box_expected(s, box_with_array)
result = s + DateOffset(years=1)
result2 = DateOffset(years=1) + s
exp = date_range("2001-01-01", "2001-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
result = s - DateOffset(years=1)
exp = date_range("1999-01-01", "1999-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.Day()
result2 = pd.offsets.Day() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-16 00:15:00", tz="US/Central"),
Timestamp("2000-02-16", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.MonthEnd()
result2 = pd.offsets.MonthEnd() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-31 00:15:00", tz="US/Central"),
Timestamp("2000-02-29", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
@pytest.mark.parametrize(
"other",
[
np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]),
np.array([pd.offsets.DateOffset(years=1), pd.offsets.MonthEnd()]),
np.array( # matching offsets
[pd.offsets.DateOffset(years=1), pd.offsets.DateOffset(years=1)]
),
],
)
@pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])
@pytest.mark.parametrize("box_other", [True, False])
def test_dt64arr_add_sub_offset_array(
self, tz_naive_fixture, box_with_array, box_other, op, other
):
# GH#18849
# GH#10699 array of offsets
tz = tz_naive_fixture
dti = date_range("2017-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
expected = DatetimeIndex([op(dti[n], other[n]) for n in range(len(dti))])
expected = tm.box_expected(expected, box_with_array)
if box_other:
other = tm.box_expected(other, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
res = op(dtarr, other)
tm.assert_equal(res, expected)
@pytest.mark.parametrize(
"op, offset, exp, exp_freq",
[
(
"__add__",
DateOffset(months=3, days=10),
[
Timestamp("2014-04-11"),
Timestamp("2015-04-11"),
Timestamp("2016-04-11"),
Timestamp("2017-04-11"),
],
None,
),
(
"__add__",
DateOffset(months=3),
[
Timestamp("2014-04-01"),
Timestamp("2015-04-01"),
Timestamp("2016-04-01"),
Timestamp("2017-04-01"),
],
"AS-APR",
),
(
"__sub__",
DateOffset(months=3, days=10),
[
Timestamp("2013-09-21"),
Timestamp("2014-09-21"),
Timestamp("2015-09-21"),
Timestamp("2016-09-21"),
],
None,
),
(
"__sub__",
DateOffset(months=3),
[
Timestamp("2013-10-01"),
Timestamp("2014-10-01"),
Timestamp("2015-10-01"),
Timestamp("2016-10-01"),
],
"AS-OCT",
),
],
)
def test_dti_add_sub_nonzero_mth_offset(
self, op, offset, exp, exp_freq, tz_aware_fixture, box_with_array
):
# GH 26258
tz = tz_aware_fixture
date = date_range(start="01 Jan 2014", end="01 Jan 2017", freq="AS", tz=tz)
date = tm.box_expected(date, box_with_array, False)
mth = getattr(date, op)
result = mth(offset)
expected = DatetimeIndex(exp, tz=tz)
expected = tm.box_expected(expected, box_with_array, False)
tm.assert_equal(result, expected)
class TestDatetime64OverflowHandling:
# TODO: box + de-duplicate
def test_dt64_overflow_masking(self, box_with_array):
# GH#25317
left = Series([Timestamp("1969-12-31")])
right = Series([NaT])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
expected = TimedeltaIndex([NaT])
expected = tm.box_expected(expected, box_with_array)
result = left - right
tm.assert_equal(result, expected)
def test_dt64_series_arith_overflow(self):
# GH#12534, fixed by GH#19024
dt = Timestamp("1700-01-31")
td = Timedelta("20000 Days")
dti = date_range("1949-09-30", freq="100Y", periods=4)
ser = Series(dti)
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
ser - dt
with pytest.raises(OverflowError, match=msg):
dt - ser
with pytest.raises(OverflowError, match=msg):
ser + td
with pytest.raises(OverflowError, match=msg):
td + ser
ser.iloc[-1] = NaT
expected = Series(
["2004-10-03", "2104-10-04", "2204-10-04", "NaT"], dtype="datetime64[ns]"
)
res = ser + td
tm.assert_series_equal(res, expected)
res = td + ser
tm.assert_series_equal(res, expected)
ser.iloc[1:] = NaT
expected = Series(["91279 Days", "NaT", "NaT", "NaT"], dtype="timedelta64[ns]")
res = ser - dt
tm.assert_series_equal(res, expected)
res = dt - ser
tm.assert_series_equal(res, -expected)
def test_datetimeindex_sub_timestamp_overflow(self):
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
tsneg = Timestamp("1950-01-01")
ts_neg_variants = [
tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype("datetime64[ns]"),
tsneg.to_datetime64().astype("datetime64[D]"),
]
tspos = Timestamp("1980-01-01")
ts_pos_variants = [
tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype("datetime64[ns]"),
tspos.to_datetime64().astype("datetime64[D]"),
]
msg = "Overflow in int64 addition"
for variant in ts_neg_variants:
with pytest.raises(OverflowError, match=msg):
dtimax - variant
expected = Timestamp.max.value - tspos.value
for variant in ts_pos_variants:
res = dtimax - variant
assert res[1].value == expected
expected = Timestamp.min.value - tsneg.value
for variant in ts_neg_variants:
res = dtimin - variant
assert res[1].value == expected
for variant in ts_pos_variants:
with pytest.raises(OverflowError, match=msg):
dtimin - variant
def test_datetimeindex_sub_datetimeindex_overflow(self):
# GH#22492, GH#22508
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
ts_neg = pd.to_datetime(["1950-01-01", "1950-01-01"])
ts_pos = pd.to_datetime(["1980-01-01", "1980-01-01"])
# General tests
expected = Timestamp.max.value - ts_pos[1].value
result = dtimax - ts_pos
assert result[1].value == expected
expected = Timestamp.min.value - ts_neg[1].value
result = dtimin - ts_neg
assert result[1].value == expected
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
dtimax - ts_neg
with pytest.raises(OverflowError, match=msg):
dtimin - ts_pos
# Edge cases
tmin = pd.to_datetime([Timestamp.min])
t1 = tmin + Timedelta.max + Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
t1 - tmin
tmax = pd.to_datetime([Timestamp.max])
t2 = tmax + Timedelta.min - Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
tmax - t2
class TestTimestampSeriesArithmetic:
def test_empty_series_add_sub(self):
# GH#13844
a = Series(dtype="M8[ns]")
b = Series(dtype="m8[ns]")
tm.assert_series_equal(a, a + b)
tm.assert_series_equal(a, a - b)
tm.assert_series_equal(a, b + a)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
b - a
def test_operators_datetimelike(self):
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[
Timestamp("20111230"),
Timestamp("20120101"),
Timestamp("20120103"),
]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[
Timestamp("20111231"),
Timestamp("20120102"),
Timestamp("20120104"),
]
)
dt1 - dt2
dt2 - dt1
        # datetime64 with timedelta
dt1 + td1
td1 + dt1
dt1 - td1
        # timedelta with datetime64
td1 + dt1
dt1 + td1
def test_dt64ser_sub_datetime_dtype(self):
ts = Timestamp(datetime(1993, 1, 7, 13, 30, 00))
dt = datetime(1993, 6, 22, 13, 30)
ser = Series([ts])
result = pd.to_timedelta(np.abs(ser - dt))
assert result.dtype == "timedelta64[ns]"
# -------------------------------------------------------------
# TODO: This next block of tests came from tests.series.test_operators,
# needs to be de-duplicated and parametrized over `box` classes
def test_operators_datetimelike_invalid(self, all_arithmetic_operators):
        # these are all TypeError ops
op_str = all_arithmetic_operators
def check(get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
op = getattr(get_ser, op_str, None)
# Previously, _validate_for_numeric_binop in core/indexes/base.py
# did this for us.
with pytest.raises(
TypeError, match="operate|[cC]annot|unsupported operand"
):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[Timestamp("20111230"), Timestamp("20120101"), Timestamp("20120103")]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[Timestamp("20111231"), Timestamp("20120102"), Timestamp("20120104")]
)
if op_str not in ["__sub__", "__rsub__"]:
check(dt1, dt2)
        # ## datetime64 with timedelta ###
# TODO(jreback) __rsub__ should raise?
if op_str not in ["__add__", "__radd__", "__sub__"]:
check(dt1, td1)
# 8260, 10763
# datetime64 with tz
tz = "US/Eastern"
dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
td2 = td1.copy()
td2.iloc[1] = np.nan
if op_str not in ["__add__", "__radd__", "__sub__", "__rsub__"]:
check(dt2, td2)
def test_sub_single_tz(self):
# GH#12290
s1 = Series([Timestamp("2016-02-10", tz="America/Sao_Paulo")])
s2 = Series([Timestamp("2016-02-08", tz="America/Sao_Paulo")])
result = s1 - s2
expected = Series([Timedelta("2days")])
tm.assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta("-2days")])
tm.assert_series_equal(result, expected)
def test_dt64tz_series_sub_dtitz(self):
# GH#19071 subtracting tzaware DatetimeIndex from tzaware Series
# (with same tz) raises, fixed by #19024
dti = date_range("1999-09-30", periods=10, tz="US/Pacific")
ser = Series(dti)
expected = Series(TimedeltaIndex(["0days"] * 10))
res = dti - ser
tm.assert_series_equal(res, expected)
res = ser - dti
tm.assert_series_equal(res, expected)
def test_sub_datetime_compat(self):
# see GH#14088
s = Series([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), NaT])
dt = datetime(2016, 8, 22, 12, tzinfo=pytz.utc)
exp = Series([Timedelta("1 days"), NaT])
tm.assert_series_equal(s - dt, exp)
tm.assert_series_equal(s - Timestamp(dt), exp)
def test_dt64_series_add_mixed_tick_DateOffset(self):
# GH#4532
# operate with pd.offsets
s = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series(
[Timestamp("20130101 9:06:00.005"), Timestamp("20130101 9:07:00.005")]
)
tm.assert_series_equal(result, expected)
def test_datetime64_ops_nat(self):
# GH#11349
datetime_series = Series([NaT, Timestamp("19900315")])
nat_series_dtype_timestamp = Series([NaT, NaT], dtype="datetime64[ns]")
single_nat_dtype_datetime = Series([NaT], dtype="datetime64[ns]")
# subtraction
tm.assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + datetime_series
tm.assert_series_equal(
-NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + nat_series_dtype_timestamp
# addition
tm.assert_series_equal(
nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
)
tm.assert_series_equal(
NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
tm.assert_series_equal(
nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
)
tm.assert_series_equal(
NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
# -------------------------------------------------------------
# Invalid Operations
# TODO: this block also needs to be de-duplicated and parametrized
@pytest.mark.parametrize(
"dt64_series",
[
Series([Timestamp("19900315"), Timestamp("19900315")]),
Series([NaT, Timestamp("19900315")]),
Series([NaT, NaT], dtype="datetime64[ns]"),
],
)
@pytest.mark.parametrize("one", [1, 1.0, np.array(1)])
def test_dt64_mul_div_numeric_invalid(self, one, dt64_series):
# multiplication
msg = "cannot perform .* with this index type"
with pytest.raises(TypeError, match=msg):
dt64_series * one
with pytest.raises(TypeError, match=msg):
one * dt64_series
# division
with pytest.raises(TypeError, match=msg):
dt64_series / one
with pytest.raises(TypeError, match=msg):
one / dt64_series
# TODO: parametrize over box
def test_dt64_series_add_intlike(self, tz_naive_fixture):
# GH#19123
tz = tz_naive_fixture
dti = DatetimeIndex(["2016-01-02", "2016-02-03", "NaT"], tz=tz)
ser = Series(dti)
other = Series([20, 30, 40], dtype="uint8")
msg = "|".join(
[
"Addition/subtraction of integers and integer-arrays",
"cannot subtract .* from ndarray",
]
)
assert_invalid_addsub_type(ser, 1, msg)
assert_invalid_addsub_type(ser, other, msg)
assert_invalid_addsub_type(ser, np.array(other), msg)
assert_invalid_addsub_type(ser, pd.Index(other), msg)
# -------------------------------------------------------------
# Timezone-Centric Tests
def test_operators_datetimelike_with_timezones(self):
tz = "US/Eastern"
dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
td2 = td1.copy()
td2.iloc[1] = np.nan
assert td2._values.freq is None
result = dt1 + td1[0]
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 + td2[0]
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
# odd numpy behavior with scalar timedeltas
result = td1[0] + dt1
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = td2[0] + dt2
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt1 - td1[0]
exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
td1[0] - dt1
result = dt2 - td2[0]
exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
with pytest.raises(TypeError, match=msg):
td2[0] - dt2
result = dt1 + td1
exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 + td2
exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt1 - td1
exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 - td2
exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
msg = "cannot (add|subtract)"
with pytest.raises(TypeError, match=msg):
td1 - dt1
with pytest.raises(TypeError, match=msg):
td2 - dt2
class TestDatetimeIndexArithmetic:
# -------------------------------------------------------------
# Binary operations DatetimeIndex and int
def test_dti_addsub_int(self, tz_naive_fixture, one):
# Variants of `one` for #19012
tz = tz_naive_fixture
rng = date_range("2000-01-01 09:00", freq="H", periods=10, tz=tz)
msg = "Addition/subtraction of integers"
with pytest.raises(TypeError, match=msg):
rng + one
with pytest.raises(TypeError, match=msg):
rng += one
with pytest.raises(TypeError, match=msg):
rng - one
with pytest.raises(TypeError, match=msg):
rng -= one
# -------------------------------------------------------------
# __add__/__sub__ with integer arrays
@pytest.mark.parametrize("freq", ["H", "D"])
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_tick(self, int_holder, freq):
# GH#19959
dti = date_range("2016-01-01", periods=2, freq=freq)
other = int_holder([4, -1])
msg = "|".join(
["Addition/subtraction of integers", "cannot subtract DatetimeArray from"]
)
assert_invalid_addsub_type(dti, other, msg)
@pytest.mark.parametrize("freq", ["W", "M", "MS", "Q"])
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_non_tick(self, int_holder, freq):
# GH#19959
dti = date_range("2016-01-01", periods=2, freq=freq)
other = int_holder([4, -1])
msg = "|".join(
["Addition/subtraction of integers", "cannot subtract DatetimeArray from"]
)
assert_invalid_addsub_type(dti, other, msg)
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_no_freq(self, int_holder):
# GH#19959
dti = DatetimeIndex(["2016-01-01", "NaT", "2017-04-05 06:07:08"])
other = int_holder([9, 4, -1])
msg = "|".join(
["cannot subtract DatetimeArray from", "Addition/subtraction of integers"]
)
assert_invalid_addsub_type(dti, other, msg)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and TimedeltaIndex/array
def test_dti_add_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz)
expected = expected._with_freq(None)
# add with TimedeltaIndex
result = dti + tdi
tm.assert_index_equal(result, expected)
result = tdi + dti
tm.assert_index_equal(result, expected)
# add with timedelta64 array
result = dti + tdi.values
tm.assert_index_equal(result, expected)
result = tdi.values + dti
tm.assert_index_equal(result, expected)
def test_dti_iadd_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz)
expected = expected._with_freq(None)
# iadd with TimedeltaIndex
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result += tdi
tm.assert_index_equal(result, expected)
result = pd.timedelta_range("0 days", periods=10)
result += dti
tm.assert_index_equal(result, expected)
# iadd with timedelta64 array
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result += tdi.values
tm.assert_index_equal(result, expected)
result = pd.timedelta_range("0 days", periods=10)
result += dti
tm.assert_index_equal(result, expected)
def test_dti_sub_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
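# dti repeats a single date, so subtracting 0..9 days yields a descending daily range (freq="-1D")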
expected = date_range("2017-01-01", periods=10, tz=tz, freq="-1D")
expected = expected._with_freq(None)
# sub with TimedeltaIndex
result = dti - tdi
tm.assert_index_equal(result, expected)
msg = "cannot subtract .*TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dti
# sub with timedelta64 array
result = dti - tdi.values
tm.assert_index_equal(result, expected)
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi.values - dti
def test_dti_isub_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz, freq="-1D")
expected = expected._with_freq(None)
# isub with TimedeltaIndex
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result -= tdi
tm.assert_index_equal(result, expected)
# DTA.__isub__ GH#43904
dta = dti._data.copy()
dta -= tdi
tm.assert_datetime_array_equal(dta, expected._data)
out = dti._data.copy()
np.subtract(out, tdi, out=out)
tm.assert_datetime_array_equal(out, expected._data)
msg = "cannot subtract .* from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi -= dti
# isub with timedelta64 array
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result -= tdi.values
tm.assert_index_equal(result, expected)
msg = "cannot subtract DatetimeArray from ndarray"
with pytest.raises(TypeError, match=msg):
tdi.values -= dti
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi._values -= dti
# -------------------------------------------------------------
# Binary Operations DatetimeIndex and datetime-like
# TODO: A couple other tests belong in this section. Move them in
# A PR where there isn't already a giant diff.
@pytest.mark.parametrize(
"addend",
[
datetime(2011, 1, 1),
DatetimeIndex(["2011-01-01", "2011-01-02"]),
DatetimeIndex(["2011-01-01", "2011-01-02"]).tz_localize("US/Eastern"),
np.datetime64("2011-01-01"),
Timestamp("2011-01-01"),
],
ids=lambda x: type(x).__name__,
)
@pytest.mark.parametrize("tz", [None, "US/Eastern"])
def test_add_datetimelike_and_dtarr(self, box_with_array, addend, tz):
# GH#9631
dti = DatetimeIndex(["2011-01-01", "2011-01-02"]).tz_localize(tz)
dtarr = tm.box_expected(dti, box_with_array)
msg = "cannot add DatetimeArray and"
assert_cannot_add(dtarr, addend, msg)
# -------------------------------------------------------------
def test_dta_add_sub_index(self, tz_naive_fixture):
# Check that DatetimeArray defers to Index classes
dti = date_range("20130101", periods=3, tz=tz_naive_fixture)
dta = dti.array
result = dta - dti
expected = dti - dti
tm.assert_index_equal(result, expected)
tdi = result
result = dta + tdi
expected = dti + tdi
tm.assert_index_equal(result, expected)
result = dta - tdi
expected = dti - tdi
tm.assert_index_equal(result, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range("20130101", periods=3)
dti_tz = date_range("20130101", periods=3).tz_localize("US/Eastern")
dti_tz2 = date_range("20130101", periods=3).tz_localize("UTC")
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
msg = "DatetimeArray subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dti_tz - dti
with pytest.raises(TypeError, match=msg):
dti - dti_tz
with pytest.raises(TypeError, match=msg):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range("20130101", periods=3)
dti2 = date_range("20130101", periods=4)
msg = "cannot add indices of unequal length"
with pytest.raises(ValueError, match=msg):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(["2012-01-01", np.nan, "2012-01-03"])
dti2 = DatetimeIndex(["2012-01-02", "2012-01-03", np.nan])
expected = TimedeltaIndex(["1 days", np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
# -------------------------------------------------------------------
# TODO: Most of this block is moved from series or frame tests, needs
# cleanup, box-parametrization, and de-duplication
@pytest.mark.parametrize("op", [operator.add, operator.sub])
def test_timedelta64_equal_timedelta_supported_ops(self, op, box_with_array):
ser = Series(
[
Timestamp("20130301"),
Timestamp("20130228 23:00:00"),
Timestamp("20130228 22:00:00"),
Timestamp("20130228 21:00:00"),
]
)
obj = box_with_array(ser)
intervals = ["D", "h", "m", "s", "us"]
def timedelta64(*args):
# see casting notes in NumPy gh-12927
return np.sum(list(starmap(np.timedelta64, zip(args, intervals))))
for d, h, m, s, us in product(*([range(2)] * 5)):
nptd = timedelta64(d, h, m, s, us)
pytd = timedelta(days=d, hours=h, minutes=m, seconds=s, microseconds=us)
lhs = op(obj, nptd)
rhs = op(obj, pytd)
tm.assert_equal(lhs, rhs)
def test_ops_nat_mixed_datetime64_timedelta64(self):
# GH#11349
timedelta_series = Series([NaT, Timedelta("1s")])
datetime_series = Series([NaT, Timestamp("19900315")])
nat_series_dtype_timedelta = Series([NaT, NaT], dtype="timedelta64[ns]")
nat_series_dtype_timestamp = Series([NaT, NaT], dtype="datetime64[ns]")
single_nat_dtype_datetime = Series([NaT], dtype="datetime64[ns]")
single_nat_dtype_timedelta = Series([NaT], dtype="timedelta64[ns]")
# subtraction
tm.assert_series_equal(
datetime_series - single_nat_dtype_datetime, nat_series_dtype_timedelta
)
tm.assert_series_equal(
datetime_series - single_nat_dtype_timedelta, nat_series_dtype_timestamp
)
tm.assert_series_equal(
-single_nat_dtype_timedelta + datetime_series, nat_series_dtype_timestamp
)
# without a Series wrapping the NaT, it is ambiguous
# whether it is a datetime64 or timedelta64
# defaults to interpreting it as timedelta64
tm.assert_series_equal(
nat_series_dtype_timestamp - single_nat_dtype_datetime,
nat_series_dtype_timedelta,
)
tm.assert_series_equal(
nat_series_dtype_timestamp - single_nat_dtype_timedelta,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
-single_nat_dtype_timedelta + nat_series_dtype_timestamp,
nat_series_dtype_timestamp,
)
msg = "cannot subtract a datelike"
with pytest.raises(TypeError, match=msg):
timedelta_series - single_nat_dtype_datetime
# addition
tm.assert_series_equal(
nat_series_dtype_timestamp + single_nat_dtype_timedelta,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
single_nat_dtype_timedelta + nat_series_dtype_timestamp,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
nat_series_dtype_timestamp + single_nat_dtype_timedelta,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
single_nat_dtype_timedelta + nat_series_dtype_timestamp,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
nat_series_dtype_timedelta + single_nat_dtype_datetime,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
single_nat_dtype_datetime + nat_series_dtype_timedelta,
nat_series_dtype_timestamp,
)
def test_ufunc_coercions(self):
idx = date_range("2011-01-01", periods=3, freq="2D", name="x")
delta = np.timedelta64(1, "D")
exp = date_range("2011-01-02", periods=3, freq="2D", name="x")
for result in [idx + delta, np.add(idx, delta)]:
assert isinstance(result, DatetimeIndex)
tm.assert_index_equal(result, exp)
assert result.freq == "2D"
exp = date_range("2010-12-31", periods=3, freq="2D", name="x")
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
tm.assert_index_equal(result, exp)
assert result.freq == "2D"
# When adding/subtracting an ndarray (which has no .freq), the result
# does not infer freq
idx = idx._with_freq(None)
delta = np.array(
[np.timedelta64(1, "D"), np.timedelta64(2, "D"), np.timedelta64(3, "D")]
)
exp = DatetimeIndex(["2011-01-02", "2011-01-05", "2011-01-08"], name="x")
for result in [idx + delta, np.add(idx, delta)]:
tm.assert_index_equal(result, exp)
assert result.freq == exp.freq
exp = DatetimeIndex(["2010-12-31", "2011-01-01", "2011-01-02"], name="x")
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
tm.assert_index_equal(result, exp)
assert result.freq == exp.freq
def test_dti_add_series(self, tz_naive_fixture, names):
# GH#13905
tz = tz_naive_fixture
index = DatetimeIndex(
["2016-06-28 05:30", "2016-06-28 05:31"], tz=tz, name=names[0]
)
ser = Series([Timedelta(seconds=5)] * 2, index=index, name=names[1])
expected = Series(index + Timedelta(seconds=5), index=index, name=names[2])
# passing name arg isn't enough when names[2] is None
expected.name = names[2]
assert expected.dtype == index.dtype
result = ser + index
tm.assert_series_equal(result, expected)
result2 = index + ser
tm.assert_series_equal(result2, expected)
expected = index + Timedelta(seconds=5)
result3 = ser.values + index
tm.assert_index_equal(result3, expected)
result4 = index + ser.values
| tm.assert_index_equal(result4, expected) | pandas._testing.assert_index_equal |
#!/usr/bin/env python
# coding: utf-8
# systems tools
import os
import shutil
import sys
import time
import signal
import random
# multiprocess
import threading
import psutil
#format
import string
import json
#sqlite
import sqlite3
#args
import argparse
#maths
import numpy as np
#FIFO
from collections import deque
#plot
import pandas as pd
import seaborn as sns
from IPython.display import display
import matplotlib
#aux lib
import statsTools
#initialization: parse command-line arguments
def init():
#parsing arguments
parser = argparse.ArgumentParser(description='plot graphs with the data processed in a json file.')
parser.add_argument('--dir',
default="./results",
help='directory to parse (one directory per experiment)')
args = parser.parse_args()
print("DIR:{0}".format(args.dir))
return(args)
#plot the figures for cexample stats
def cexample_plot(flowStats_pd):
#Pandas values (separating anycast and unicast)
print("-- Cexample flows")
#flowStats_pd = flowStats_pd[flowStats_pd['nbmotes'] < 200].reset_index()
#common sns config
sns.set_theme(style="ticks")
#PDR
plot = sns.violinplot(x='nbmotes', y='pdr', hue='sixtop_anycast',cut=0, legend_out=True, data=flowStats_pd)
plot.legend(handles=plot.legend_.legendHandles, labels=['without anycast', 'with anycast'])
plot.set_xlabel("Number of motes")
plot.set_ylabel("Packet Delivery Ratio")
#plot.set(yscale="log")
#plot.set(ylim=(1e-2,1))
plot.set(ylim=(0,1))
plot.figure.savefig("plots/cexample_pdr.pdf")
plot.set_xticks(np.arange(50, 100, 10))
plot.figure.clf()
#Delay
plot = sns.violinplot(x='nbmotes', y='delay_ms', hue='sixtop_anycast',cut=0,data=flowStats_pd)
plot.legend(handles=plot.legend_.legendHandles, labels=['without anycast', 'with anycast'])
plot.set_xlabel("Number of motes")
plot.set_ylabel("Delay (in ms)")
plot.figure.savefig("plots/cexample_delay.pdf")
plot.figure.clf()
#Efficiency
plot = sns.violinplot(x='nbmotes', y='nb_l2tx_raw', hue='sixtop_anycast',cut=0,data=flowStats_pd)
plot.legend(handles=plot.legend_.legendHandles, labels=['without anycast', 'with anycast'])
plot.set_xlabel("Number of motes")
plot.set_ylabel("Number of transmissions per message")
plot.figure.savefig("plots/cexample_nb_l2tx.pdf")
plot.figure.clf()
print("")
print("")
#plot the figures for l2tx (link-layer transmission) stats
def l2tx_plot(l2txStats_pd):
#Pandas values
print("-- l2txStats statistics")
print(l2txStats_pd)
#common sns config
sns.set_theme(style="ticks")
#PDR for acks vs. data packets
plot = sns.scatterplot(x='PDRData', y='PDRAck', data=l2txStats_pd)
plot.set_xlabel("Packet Delivery Ratio (data)")
plot.set_ylabel("Packet Delivery Ratio (ack)")
plot.figure.savefig("plots/l2tx_pdr_bidirect.pdf")
plot.figure.clf()
#hidden receiver problem
secondaryrx_pd = l2txStats_pd[(l2txStats_pd['priority_rx'] == 1)]
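# rows where this node acted as a secondary receiver (assuming priority_rx == 1 marks the lower-priority copy)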
plot = sns.scatterplot(x='PDRData', y='RatioHiddenRx', data=secondaryrx_pd)
plot.set_xlabel("Packet Delivery Ratio (data)")
plot.set_ylabel("Ratio of False Negatives")
plot.set(yscale="log")
plot.set(ylim=(1e-3,1))
plot.figure.savefig("plots/l2tx_hidden_receiver_falseneg.pdf")
plot.figure.clf()
#CCA / SFD identification vs. link PDR
plot = sns.scatterplot(x='PDRData', y='intrpt_RatioCCA', data=secondaryrx_pd)
display(secondaryrx_pd)
plot.set_xlabel("Packet Delivery Ratio (data)")
plot.set_ylabel("Ratio CCA / Start of Frame interruptions")
plot.set(ylim=(0,1))
plot.figure.savefig("plots/l2tx_ratioCCA-SFD_PDR.pdf")
plot.figure.clf()
#CCA ratio vs. hidden receivers
plot = sns.scatterplot(x='RatioHiddenRx', y='intrpt_RatioCCA', data=secondaryrx_pd)
display(secondaryrx_pd)
plot.set_xlabel("Ratio of false negatives ACK detection (hidden receivers)")
plot.set_ylabel("Ratio CCA / Start of Frame interruptions")
plot.set(ylim=(0,1))
plot.figure.savefig("plots/l2tx_ratioCCA-SFD_hiddenrx.pdf")
plot.figure.clf()
print("")
print("")
if __name__ == "__main__":
#no type 3 font
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
#parameters
args = init()
#init
flowStats = None
l2txStats = None
#prepare the stats for pandas
for experiment in os.listdir(args.dir):
json_filename = os.path.join(args.dir, experiment, "stats.json")
if os.path.isfile(json_filename) is True:
print(json_filename)
with open(json_filename) as json_file:
datafile = json.load(json_file)
#organizes the stats for cexample
flowStats = statsTools.cexample_compute_agg(experiment, datafile, flowStats)
#compute the stats for l2tx
l2txStats = statsTools.l2tx_compute(datafile, l2txStats)
cex_packets_pd = pd.DataFrame.from_dict(datafile['cex_packets'])
for index, row in cex_packets_pd[(cex_packets_pd['cex_src'] == '054332ff03d88982')].iterrows():
print("seqnum {}".format(row['seqnum']))
display(row['l2_transmissions'])
#plot the figures for cexample
flowStats_pd = pd.DataFrame.from_dict(flowStats)
display(flowStats_pd)
#cexample_plot(flowStats_pd)
#plot the figures for link stats
l2txStats_pd = | pd.DataFrame.from_dict(l2txStats) | pandas.DataFrame.from_dict |
''' Note at bottom '''
# Listing all the imports
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import time
import imutils
import math
# pipeline is the module which has all the below functions written
import pipeline as ppl
# image height and image width ----> GLOBAL
img_ht = 512
img_wd = 512
path_toCollect = './test_images'
path_toSave = './prc_test_images'
train_data = pd.read_csv('sample_submission.csv')
newDataframe_cols = ['id_code','diagnosis']
trained_data = | pd.DataFrame(columns=newDataframe_cols) | pandas.DataFrame |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2022, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import collections
import unittest
import warnings
import pandas as pd
import numpy as np
from qiime2 import Artifact
from qiime2.metadata import (Metadata, CategoricalMetadataColumn,
NumericMetadataColumn)
from qiime2.core.testing.util import get_dummy_plugin, ReallyEqualMixin
class TestInvalidMetadataConstruction(unittest.TestCase):
def test_non_dataframe(self):
with self.assertRaisesRegex(
TypeError, 'Metadata constructor.*DataFrame.*not.*Series'):
Metadata(pd.Series([1, 2, 3], name='col',
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_no_ids(self):
with self.assertRaisesRegex(ValueError, 'Metadata.*at least one ID'):
Metadata(pd.DataFrame({}, index=pd.Index([], name='id')))
with self.assertRaisesRegex(ValueError, 'Metadata.*at least one ID'):
Metadata(pd.DataFrame({'column': []},
index=pd.Index([], name='id')))
def test_invalid_id_header(self):
# default index name
with self.assertRaisesRegex(ValueError, r'Index\.name.*None'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]}, index=pd.Index(['a', 'b', 'c'])))
with self.assertRaisesRegex(ValueError, r'Index\.name.*my-id-header'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', 'b', 'c'], name='my-id-header')))
def test_non_str_id(self):
with self.assertRaisesRegex(
TypeError, 'non-string metadata ID.*type.*float.*nan'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', np.nan, 'c'], name='id')))
def test_non_str_column_name(self):
with self.assertRaisesRegex(
TypeError, 'non-string metadata column name.*type.*'
'float.*nan'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3],
np.nan: [4, 5, 6]},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_empty_id(self):
with self.assertRaisesRegex(
ValueError, 'empty metadata ID.*at least one character'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]}, index=pd.Index(['a', '', 'c'], name='id')))
def test_empty_column_name(self):
with self.assertRaisesRegex(
ValueError, 'empty metadata column name.*'
'at least one character'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3],
'': [4, 5, 6]}, index=pd.Index(['a', 'b', 'c'], name='id')))
def test_pound_sign_id(self):
with self.assertRaisesRegex(
ValueError, "metadata ID.*begins with a pound sign.*'#b'"):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', '#b', 'c'], name='id')))
def test_id_conflicts_with_id_header(self):
with self.assertRaisesRegex(
ValueError, "metadata ID 'sample-id'.*conflicts.*reserved.*"
"ID header"):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', 'sample-id', 'c'], name='id')))
def test_column_name_conflicts_with_id_header(self):
with self.assertRaisesRegex(
ValueError, "metadata column name 'featureid'.*conflicts.*"
"reserved.*ID header"):
Metadata(pd.DataFrame(
{'col': [1, 2, 3],
'featureid': [4, 5, 6]},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_duplicate_ids(self):
with self.assertRaisesRegex(ValueError, "Metadata IDs.*unique.*'a'"):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', 'b', 'a'], name='id')))
def test_duplicate_column_names(self):
data = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
with self.assertRaisesRegex(ValueError,
"Metadata column names.*unique.*'col1'"):
Metadata(pd.DataFrame(data, columns=['col1', 'col2', 'col1'],
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_unsupported_column_dtype(self):
with self.assertRaisesRegex(
TypeError, "Metadata column 'col2'.*unsupported.*dtype.*bool"):
Metadata(pd.DataFrame(
{'col1': [1, 2, 3],
'col2': [True, False, True]},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_categorical_column_unsupported_type(self):
with self.assertRaisesRegex(
TypeError, "CategoricalMetadataColumn.*strings or missing "
r"values.*42\.5.*float.*'col2'"):
Metadata(pd.DataFrame(
{'col1': [1, 2, 3],
'col2': ['foo', 'bar', 42.5]},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_categorical_column_empty_str(self):
with self.assertRaisesRegex(
ValueError, "CategoricalMetadataColumn.*empty strings.*"
"column 'col2'"):
Metadata(pd.DataFrame(
{'col1': [1, 2, 3],
'col2': ['foo', '', 'bar']},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_numeric_column_infinity(self):
with self.assertRaisesRegex(
ValueError, "NumericMetadataColumn.*positive or negative "
"infinity.*column 'col2'"):
Metadata(pd.DataFrame(
{'col1': ['foo', 'bar', 'baz'],
'col2': [42, float('+inf'), 4.3]},
index=pd.Index(['a', 'b', 'c'], name='id')))
class TestMetadataConstructionAndProperties(unittest.TestCase):
def assertEqualColumns(self, obs_columns, exp):
obs = [(name, props.type) for name, props in obs_columns.items()]
self.assertEqual(obs, exp)
def test_minimal(self):
md = Metadata(pd.DataFrame({}, index=pd.Index(['a'], name='id')))
self.assertEqual(md.id_count, 1)
self.assertEqual(md.column_count, 0)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('a',))
self.assertEqualColumns(md.columns, [])
def test_single_id(self):
index = pd.Index(['id1'], name='id')
df = pd.DataFrame({'col1': [1.0], 'col2': ['a'], 'col3': ['foo']},
index=index)
md = Metadata(df)
self.assertEqual(md.id_count, 1)
self.assertEqual(md.column_count, 3)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('id1',))
self.assertEqualColumns(md.columns,
[('col1', 'numeric'), ('col2', 'categorical'),
('col3', 'categorical')])
def test_no_columns(self):
index = pd.Index(['id1', 'id2', 'foo'], name='id')
df = pd.DataFrame({}, index=index)
md = Metadata(df)
self.assertEqual(md.id_count, 3)
self.assertEqual(md.column_count, 0)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('id1', 'id2', 'foo'))
self.assertEqualColumns(md.columns, [])
def test_single_column(self):
index = pd.Index(['id1', 'a', 'my-id'], name='id')
df = pd.DataFrame({'column': ['foo', 'bar', 'baz']}, index=index)
md = Metadata(df)
self.assertEqual(md.id_count, 3)
self.assertEqual(md.column_count, 1)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('id1', 'a', 'my-id'))
self.assertEqualColumns(md.columns, [('column', 'categorical')])
def test_retains_column_order(self):
# Supply DataFrame constructor with explicit column ordering instead of
# a dict.
index = pd.Index(['id1', 'id2', 'id3'], name='id')
columns = ['z', 'a', 'ch']
data = [
[1.0, 'a', 'foo'],
[2.0, 'b', 'bar'],
[3.0, 'c', '42']
]
df = pd.DataFrame(data, index=index, columns=columns)
md = Metadata(df)
self.assertEqual(md.id_count, 3)
self.assertEqual(md.column_count, 3)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('id1', 'id2', 'id3'))
self.assertEqualColumns(md.columns,
[('z', 'numeric'), ('a', 'categorical'),
('ch', 'categorical')])
def test_supported_id_headers(self):
case_insensitive = {
'id', 'sampleid', 'sample id', 'sample-id', 'featureid',
'feature id', 'feature-id'
}
exact_match = {
'#SampleID', '#Sample ID', '#OTUID', '#OTU ID', 'sample_name'
}
# Build a set of supported headers, including exact matches and headers
# with different casing.
headers = set()
for header in case_insensitive:
headers.add(header)
headers.add(header.upper())
headers.add(header.title())
for header in exact_match:
headers.add(header)
count = 0
for header in headers:
index = pd.Index(['id1', 'id2'], name=header)
df = pd.DataFrame({'column': ['foo', 'bar']}, index=index)
md = Metadata(df)
self.assertEqual(md.id_header, header)
count += 1
# Since this test case is a little complicated, make sure that the
# expected number of comparisons is happening.
self.assertEqual(count, 26)
def test_recommended_ids(self):
index = pd.Index(['c6ca034a-223f-40b4-a0e0-45942912a5ea', 'My.ID'],
name='id')
df = pd.DataFrame({'col1': ['foo', 'bar']}, index=index)
md = Metadata(df)
self.assertEqual(md.id_count, 2)
self.assertEqual(md.column_count, 1)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids,
('c6ca034a-223f-40b4-a0e0-45942912a5ea', 'My.ID'))
self.assertEqualColumns(md.columns, [('col1', 'categorical')])
def test_non_standard_characters(self):
index = pd.Index(['©id##1', '((id))2', "'id_3<>'", '"id#4"',
'i d\r\t\n5'], name='id')
columns = ['↩c@l1™', 'col(#2)', "#col'3", '"<col_4>"', 'col\t \r\n5']
data = [
['ƒoo', '(foo)', '#f o #o', 'fo\ro', np.nan],
["''2''", 'b#r', 'ba\nr', np.nan, np.nan],
['b"ar', 'c\td', '4\r\n2', np.nan, np.nan],
['b__a_z', '<42>', '>42', np.nan, np.nan],
['baz', np.nan, '42']
]
df = pd.DataFrame(data, index=index, columns=columns)
md = Metadata(df)
self.assertEqual(md.id_count, 5)
self.assertEqual(md.column_count, 5)
self.assertEqual(md.id_header, 'id')
self.assertEqual(
md.ids, ('©id##1', '((id))2', "'id_3<>'", '"id#4"', 'i d\r\t\n5'))
self.assertEqualColumns(md.columns, [('↩c@l1™', 'categorical'),
('col(#2)', 'categorical'),
("#col'3", 'categorical'),
('"<col_4>"', 'categorical'),
('col\t \r\n5', 'numeric')])
def test_missing_data(self):
index = pd.Index(['None', 'nan', 'NA', 'foo'], name='id')
df = pd.DataFrame(collections.OrderedDict([
('col1', [1.0, np.nan, np.nan, np.nan]),
('NA', [np.nan, np.nan, np.nan, np.nan]),
('col3', ['null', 'N/A', np.nan, 'NA']),
('col4', np.array([np.nan, np.nan, np.nan, np.nan],
dtype=object))]),
index=index)
md = Metadata(df)
self.assertEqual(md.id_count, 4)
self.assertEqual(md.column_count, 4)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('None', 'nan', 'NA', 'foo'))
self.assertEqualColumns(md.columns, [('col1', 'numeric'),
('NA', 'numeric'),
('col3', 'categorical'),
('col4', 'categorical')])
def test_does_not_cast_ids_or_column_names(self):
index = pd.Index(['0.000001', '0.004000', '0.000000'], dtype=object,
name='id')
columns = ['42.0', '1000', '-4.2']
data = [
[2.0, 'b', 2.5],
[1.0, 'b', 4.2],
[3.0, 'c', -9.999]
]
df = pd.DataFrame(data, index=index, columns=columns)
md = Metadata(df)
self.assertEqual(md.id_count, 3)
self.assertEqual(md.column_count, 3)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('0.000001', '0.004000', '0.000000'))
self.assertEqualColumns(md.columns, [('42.0', 'numeric'),
('1000', 'categorical'),
('-4.2', 'numeric')])
def test_mixed_column_types(self):
md = Metadata(
pd.DataFrame({'col0': [1.0, 2.0, 3.0],
'col1': ['a', 'b', 'c'],
'col2': ['foo', 'bar', '42'],
'col3': ['1.0', '2.5', '-4.002'],
'col4': [1, 2, 3],
'col5': [1, 2, 3.5],
'col6': [1e-4, -0.0002, np.nan],
'col7': ['cat', np.nan, 'dog'],
'col8': ['a', 'a', 'a'],
'col9': [0, 0, 0]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
self.assertEqual(md.id_count, 3)
self.assertEqual(md.column_count, 10)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('id1', 'id2', 'id3'))
self.assertEqualColumns(md.columns, [('col0', 'numeric'),
('col1', 'categorical'),
('col2', 'categorical'),
('col3', 'categorical'),
('col4', 'numeric'),
('col5', 'numeric'),
('col6', 'numeric'),
('col7', 'categorical'),
('col8', 'categorical'),
('col9', 'numeric')])
def test_case_insensitive_duplicate_ids(self):
index = pd.Index(['a', 'b', 'A'], name='id')
df = pd.DataFrame({'column': ['1', '2', '3']}, index=index)
metadata = Metadata(df)
self.assertEqual(metadata.ids, ('a', 'b', 'A'))
def test_case_insensitive_duplicate_column_names(self):
index = pd.Index(['a', 'b', 'c'], name='id')
df = pd.DataFrame({'column': ['1', '2', '3'],
'Column': ['4', '5', '6']}, index=index)
metadata = Metadata(df)
self.assertEqual(set(metadata.columns), {'column', 'Column'})
def test_categorical_column_leading_trailing_whitespace_value(self):
md1 = Metadata(pd.DataFrame(
{'col1': [1, 2, 3],
'col2': ['foo', ' bar ', 'baz']},
index=pd.Index(['a', 'b', 'c'], name='id')))
md2 = Metadata(pd.DataFrame(
{'col1': [1, 2, 3],
'col2': ['foo', 'bar', 'baz']},
index=pd.Index(['a', 'b', 'c'], name='id')))
self.assertEqual(md1, md2)
def test_leading_trailing_whitespace_id(self):
md1 = Metadata(pd.DataFrame(
{'col1': [1, 2, 3], 'col2': [4, 5, 6]},
index=pd.Index(['a', ' b ', 'c'], name='id')))
md2 = Metadata(pd.DataFrame(
{'col1': [1, 2, 3], 'col2': [4, 5, 6]},
index=pd.Index(['a', 'b', 'c'], name='id')))
self.assertEqual(md1, md2)
def test_leading_trailing_whitespace_column_name(self):
md1 = Metadata(pd.DataFrame(
{'col1': [1, 2, 3], ' col2 ': [4, 5, 6]},
index=pd.Index(['a', 'b', 'c'], name='id')))
md2 = Metadata(pd.DataFrame(
{'col1': [1, 2, 3], 'col2': [4, 5, 6]},
index=pd.Index(['a', 'b', 'c'], name='id')))
self.assertEqual(md1, md2)
class TestSourceArtifacts(unittest.TestCase):
def setUp(self):
self.md = Metadata(pd.DataFrame(
{'col': [1, 2, 3]}, index=pd.Index(['a', 'b', 'c'], name='id')))
def test_no_source_artifacts(self):
self.assertEqual(self.md.artifacts, ())
def test_add_zero_artifacts(self):
self.md._add_artifacts([])
self.assertEqual(self.md.artifacts, ())
def test_add_artifacts(self):
# First two artifacts have the same data but different UUIDs.
artifact1 = Artifact.import_data('Mapping', {'a': '1', 'b': '3'})
self.md._add_artifacts([artifact1])
artifact2 = Artifact.import_data('Mapping', {'a': '1', 'b': '3'})
artifact3 = Artifact.import_data('IntSequence1', [1, 2, 3, 4])
self.md._add_artifacts([artifact2, artifact3])
self.assertEqual(self.md.artifacts, (artifact1, artifact2, artifact3))
def test_add_non_artifact(self):
artifact = Artifact.import_data('Mapping', {'a': '1', 'b': '3'})
with self.assertRaisesRegex(TypeError, "Artifact object.*42"):
self.md._add_artifacts([artifact, 42])
# Test that the object hasn't been mutated.
self.assertEqual(self.md.artifacts, ())
def test_add_duplicate_artifact(self):
artifact1 = Artifact.import_data('Mapping', {'a': '1', 'b': '3'})
artifact2 = Artifact.import_data('IntSequence1', [1, 2, 3, 4])
self.md._add_artifacts([artifact1, artifact2])
with self.assertRaisesRegex(
ValueError, "Duplicate source artifacts.*artifact: Mapping"):
self.md._add_artifacts([artifact1])
# Test that the object hasn't been mutated.
self.assertEqual(self.md.artifacts, (artifact1, artifact2))
class TestRepr(unittest.TestCase):
def test_singular(self):
md = Metadata(pd.DataFrame({'col1': [42]},
index=pd.Index(['a'], name='id')))
obs = repr(md)
self.assertIn('Metadata', obs)
self.assertIn('1 ID x 1 column', obs)
self.assertIn("col1: ColumnProperties(type='numeric')", obs)
def test_plural(self):
md = Metadata(pd.DataFrame({'col1': [42, 42], 'col2': ['foo', 'bar']},
index=pd.Index(['a', 'b'], name='id')))
obs = repr(md)
self.assertIn('Metadata', obs)
self.assertIn('2 IDs x 2 columns', obs)
self.assertIn("col1: ColumnProperties(type='numeric')", obs)
self.assertIn("col2: ColumnProperties(type='categorical')", obs)
def test_column_name_padding(self):
data = [[0, 42, 'foo']]
index = pd.Index(['my-id'], name='id')
columns = ['col1', 'longer-column-name', 'c']
md = Metadata(pd.DataFrame(data, index=index, columns=columns))
obs = repr(md)
self.assertIn('Metadata', obs)
self.assertIn('1 ID x 3 columns', obs)
self.assertIn(
"col1: ColumnProperties(type='numeric')", obs)
self.assertIn(
"longer-column-name: ColumnProperties(type='numeric')", obs)
self.assertIn(
"c: ColumnProperties(type='categorical')", obs)
class TestEqualityOperators(unittest.TestCase, ReallyEqualMixin):
def setUp(self):
get_dummy_plugin()
def test_type_mismatch(self):
md = Metadata(
pd.DataFrame({'col1': [1.0, 2.0, 3.0],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
mdc = md.get_column('col1')
self.assertIsInstance(md, Metadata)
self.assertIsInstance(mdc, NumericMetadataColumn)
self.assertReallyNotEqual(md, mdc)
def test_id_header_mismatch(self):
data = {'col1': ['foo', 'bar'], 'col2': [42, 43]}
md1 = Metadata(pd.DataFrame(
data, index=pd.Index(['id1', 'id2'], name='id')))
md2 = Metadata(pd.DataFrame(
data, index=pd.Index(['id1', 'id2'], name='ID')))
self.assertReallyNotEqual(md1, md2)
def test_source_mismatch(self):
# Metadata created from an artifact vs not shouldn't compare equal,
# even if the data is the same.
artifact = Artifact.import_data('Mapping', {'a': '1', 'b': '2'})
md_from_artifact = artifact.view(Metadata)
md_no_artifact = Metadata(md_from_artifact.to_dataframe())
pd.testing.assert_frame_equal(md_from_artifact.to_dataframe(),
md_no_artifact.to_dataframe())
self.assertReallyNotEqual(md_from_artifact, md_no_artifact)
def test_artifact_mismatch(self):
# Metadata created from different artifacts shouldn't compare equal,
# even if the data is the same.
artifact1 = Artifact.import_data('Mapping', {'a': '1', 'b': '2'})
artifact2 = Artifact.import_data('Mapping', {'a': '1', 'b': '2'})
md1 = artifact1.view(Metadata)
md2 = artifact2.view(Metadata)
pd.testing.assert_frame_equal(md1.to_dataframe(), md2.to_dataframe())
self.assertReallyNotEqual(md1, md2)
def test_id_mismatch(self):
md1 = Metadata(pd.DataFrame({'a': '1', 'b': '2'},
index=pd.Index(['0'], name='id')))
md2 = Metadata(pd.DataFrame({'a': '1', 'b': '2'},
index=pd.Index(['1'], name='id')))
self.assertReallyNotEqual(md1, md2)
def test_column_name_mismatch(self):
md1 = Metadata(pd.DataFrame({'a': '1', 'b': '2'},
index=pd.Index(['0'], name='id')))
md2 = Metadata(pd.DataFrame({'a': '1', 'c': '2'},
index=pd.Index(['0'], name='id')))
self.assertReallyNotEqual(md1, md2)
def test_column_type_mismatch(self):
md1 = Metadata(pd.DataFrame({'col1': ['42', '43']},
index=pd.Index(['id1', 'id2'], name='id')))
md2 = Metadata(pd.DataFrame({'col1': [42, 43]},
index=pd.Index(['id1', 'id2'], name='id')))
self.assertReallyNotEqual(md1, md2)
def test_column_order_mismatch(self):
index = pd.Index(['id1', 'id2'], name='id')
md1 = Metadata(pd.DataFrame([[42, 'foo'], [43, 'bar']], index=index,
columns=['z', 'a']))
md2 = Metadata(pd.DataFrame([['foo', 42], ['bar', 43]], index=index,
columns=['a', 'z']))
self.assertReallyNotEqual(md1, md2)
def test_data_mismatch(self):
md1 = Metadata(pd.DataFrame({'a': '1', 'b': '3'},
index=pd.Index(['0'], name='id')))
md2 = Metadata(pd.DataFrame({'a': '1', 'b': '2'},
index=pd.Index(['0'], name='id')))
self.assertReallyNotEqual(md1, md2)
def test_equality_without_artifact(self):
md1 = Metadata(pd.DataFrame({'a': '1', 'b': '3'},
index=pd.Index(['0'], name='id')))
md2 = Metadata(pd.DataFrame({'a': '1', 'b': '3'},
index=pd.Index(['0'], name='id')))
self.assertReallyEqual(md1, md2)
def test_equality_with_artifact(self):
artifact = Artifact.import_data('Mapping', {'a': '1', 'b': '2'})
md1 = artifact.view(Metadata)
md2 = artifact.view(Metadata)
self.assertReallyEqual(md1, md2)
def test_equality_with_missing_data(self):
md1 = Metadata(pd.DataFrame(
{'col1': [1, np.nan, 4.2],
'col2': [np.nan, 'foo', np.nan]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
md2 = Metadata(pd.DataFrame(
{'col1': [1, np.nan, 4.2],
'col2': [np.nan, 'foo', np.nan]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
self.assertReallyEqual(md1, md2)
class TestToDataframe(unittest.TestCase):
def test_minimal(self):
df = pd.DataFrame({}, index=pd.Index(['id1'], name='id'))
md = Metadata(df)
obs = md.to_dataframe()
pd.testing.assert_frame_equal(obs, df)
def test_id_header_preserved(self):
df = pd.DataFrame({'col1': [42, 2.5], 'col2': ['foo', 'bar']},
index=pd.Index(['id1', 'id2'], name='#SampleID'))
md = Metadata(df)
obs = md.to_dataframe()
pd.testing.assert_frame_equal(obs, df)
self.assertEqual(obs.index.name, '#SampleID')
def test_dataframe_copy(self):
df = pd.DataFrame({'col1': [42, 2.5], 'col2': ['foo', 'bar']},
index=pd.Index(['id1', 'id2'], name='id'))
md = Metadata(df)
obs = md.to_dataframe()
pd.testing.assert_frame_equal(obs, df)
self.assertIsNot(obs, df)
def test_retains_column_order(self):
index = pd.Index(['id1', 'id2'], name='id')
columns = ['z', 'a', 'ch']
data = [
[1.0, 'a', 'foo'],
[2.0, 'b', 'bar']
]
df = pd.DataFrame(data, index=index, columns=columns)
md = Metadata(df)
obs = md.to_dataframe()
pd.testing.assert_frame_equal(obs, df)
self.assertEqual(obs.columns.tolist(), ['z', 'a', 'ch'])
def test_missing_data(self):
# Different missing data representations should be normalized to np.nan
index = pd.Index(['None', 'nan', 'NA', 'id1'], name='id')
df = pd.DataFrame(collections.OrderedDict([
('col1', [42.5, np.nan, float('nan'), 3]),
('NA', [np.nan, 'foo', float('nan'), None]),
('col3', ['null', 'N/A', np.nan, 'NA']),
('col4', np.array([np.nan, np.nan, np.nan, np.nan],
dtype=object))]),
index=index)
md = Metadata(df)
obs = md.to_dataframe()
exp = pd.DataFrame(collections.OrderedDict([
('col1', [42.5, np.nan, np.nan, 3.0]),
('NA', [np.nan, 'foo', np.nan, np.nan]),
('col3', ['null', 'N/A', np.nan, 'NA']),
('col4', np.array([np.nan, np.nan, np.nan, np.nan],
dtype=object))]),
index=index)
pd.testing.assert_frame_equal(obs, exp)
self.assertEqual(obs.dtypes.to_dict(),
{'col1': np.float64, 'NA': object, 'col3': object,
'col4': object})
self.assertTrue(np.isnan(obs['col1']['NA']))
self.assertTrue(np.isnan(obs['NA']['NA']))
self.assertTrue(np.isnan(obs['NA']['id1']))
def test_dtype_int_normalized_to_dtype_float(self):
index = pd.Index(['id1', 'id2', 'id3'], name='id')
df = pd.DataFrame({'col1': [42, -43, 0],
'col2': [42.0, -43.0, 0.0],
'col3': [42, np.nan, 0]},
index=index)
self.assertEqual(df.dtypes.to_dict(),
{'col1': np.int64, 'col2': np.float64,
'col3': np.float64})
md = Metadata(df)
obs = md.to_dataframe()
exp = pd.DataFrame({'col1': [42.0, -43.0, 0.0],
'col2': [42.0, -43.0, 0.0],
'col3': [42.0, np.nan, 0.0]},
index=index)
pd.testing.assert_frame_equal(obs, exp)
self.assertEqual(obs.dtypes.to_dict(),
{'col1': np.float64, 'col2': np.float64,
'col3': np.float64})
class TestGetColumn(unittest.TestCase):
def setUp(self):
get_dummy_plugin()
def test_column_name_not_found(self):
df = pd.DataFrame({'col1': [42, 2.5], 'col2': ['foo', 'bar']},
index=pd.Index(['id1', 'id2'], name='id'))
md = Metadata(df)
with self.assertRaisesRegex(ValueError,
"'col3'.*not a column.*'col1', 'col2'"):
md.get_column('col3')
def test_artifacts_are_propagated(self):
A = Artifact.import_data('Mapping', {'a': '1', 'b': '3'})
md = A.view(Metadata)
obs = md.get_column('b')
exp = CategoricalMetadataColumn(
pd.Series(['3'], name='b', index=pd.Index(['0'], name='id')))
exp._add_artifacts([A])
self.assertEqual(obs, exp)
self.assertEqual(obs.artifacts, (A,))
def test_categorical_column(self):
df = pd.DataFrame({'col1': [42, 2.5], 'col2': ['foo', 'bar']},
index=pd.Index(['id1', 'id2'], name='id'))
md = Metadata(df)
obs = md.get_column('col2')
exp = CategoricalMetadataColumn(
pd.Series(['foo', 'bar'], name='col2',
index=pd.Index(['id1', 'id2'], name='id')))
self.assertEqual(obs, exp)
def test_numeric_column(self):
df = pd.DataFrame({'col1': [42, 2.5], 'col2': ['foo', 'bar']},
index=pd.Index(['id1', 'id2'], name='id'))
md = Metadata(df)
obs = md.get_column('col1')
exp = NumericMetadataColumn(
pd.Series([42, 2.5], name='col1',
index=pd.Index(['id1', 'id2'], name='id')))
self.assertEqual(obs, exp)
def test_id_header_preserved(self):
df = pd.DataFrame({'col1': [42, 2.5], 'col2': ['foo', 'bar']},
index=pd.Index(['a', 'b'], name='#OTU ID'))
md = Metadata(df)
obs = md.get_column('col1')
exp = NumericMetadataColumn(
pd.Series([42, 2.5], name='col1',
index=pd.Index(['a', 'b'], name='#OTU ID')))
self.assertEqual(obs, exp)
self.assertEqual(obs.id_header, '#OTU ID')
class TestGetIDs(unittest.TestCase):
def test_default(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'SampleType': ['gut', 'tongue', 'gut']},
index=pd.Index(['S1', 'S2', 'S3'], name='id'))
metadata = Metadata(df)
actual = metadata.get_ids()
expected = {'S1', 'S2', 'S3'}
self.assertEqual(actual, expected)
def test_incomplete_where(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'SampleType': ['gut', 'tongue', 'gut']},
index=pd.Index(['S1', 'S2', 'S3'], name='sampleid'))
metadata = Metadata(df)
where = "Subject='subject-1' AND SampleType="
with self.assertRaises(ValueError):
metadata.get_ids(where)
where = "Subject="
with self.assertRaises(ValueError):
metadata.get_ids(where)
def test_invalid_where(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'SampleType': ['gut', 'tongue', 'gut']},
index=pd.Index(['S1', 'S2', 'S3'], name='sampleid'))
metadata = Metadata(df)
where = "not-a-column-name='subject-1'"
with self.assertRaises(ValueError):
metadata.get_ids(where)
def test_empty_result(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'SampleType': ['gut', 'tongue', 'gut']},
index=pd.Index(['S1', 'S2', 'S3'], name='id'))
metadata = Metadata(df)
where = "Subject='subject-3'"
actual = metadata.get_ids(where)
expected = set()
self.assertEqual(actual, expected)
def test_simple_expression(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'SampleType': ['gut', 'tongue', 'gut']},
index=pd.Index(['S1', 'S2', 'S3'], name='id'))
metadata = Metadata(df)
where = "Subject='subject-1'"
actual = metadata.get_ids(where)
expected = {'S1', 'S2'}
self.assertEqual(actual, expected)
where = "Subject='subject-2'"
actual = metadata.get_ids(where)
expected = {'S3'}
self.assertEqual(actual, expected)
where = "Subject='subject-3'"
actual = metadata.get_ids(where)
expected = set()
self.assertEqual(actual, expected)
where = "SampleType='gut'"
actual = metadata.get_ids(where)
expected = {'S1', 'S3'}
self.assertEqual(actual, expected)
where = "SampleType='tongue'"
actual = metadata.get_ids(where)
expected = {'S2'}
self.assertEqual(actual, expected)
def test_more_complex_expressions(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'SampleType': ['gut', 'tongue', 'gut']},
index=pd.Index(['S1', 'S2', 'S3'], name='id'))
metadata = Metadata(df)
where = "Subject='subject-1' OR Subject='subject-2'"
actual = metadata.get_ids(where)
expected = {'S1', 'S2', 'S3'}
self.assertEqual(actual, expected)
where = "Subject='subject-1' AND Subject='subject-2'"
actual = metadata.get_ids(where)
expected = set()
self.assertEqual(actual, expected)
where = "Subject='subject-1' AND SampleType='gut'"
actual = metadata.get_ids(where)
expected = {'S1'}
self.assertEqual(actual, expected)
def test_query_by_id(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'SampleType': ['gut', 'tongue', 'gut']},
index=pd.Index(['S1', 'S2', 'S3'], name='id'))
metadata = Metadata(df)
actual = metadata.get_ids(where="id='S2' OR id='S1'")
expected = {'S1', 'S2'}
self.assertEqual(actual, expected)
def test_query_by_alternate_id_header(self):
metadata = Metadata(pd.DataFrame(
{}, index=pd.Index(['id1', 'id2', 'id3'], name='#OTU ID')))
obs = metadata.get_ids(where="\"#OTU ID\" IN ('id2', 'id3')")
exp = {'id2', 'id3'}
self.assertEqual(obs, exp)
def test_no_columns(self):
metadata = Metadata(
pd.DataFrame({}, index=pd.Index(['a', 'b', 'my-id'], name='id')))
obs = metadata.get_ids()
exp = {'a', 'b', 'my-id'}
self.assertEqual(obs, exp)
def test_query_mixed_column_types(self):
df = pd.DataFrame({'Name': ['Foo', 'Bar', 'Baz', 'Baaz'],
# numbers that would sort incorrectly as strings
'Age': [9, 10, 11, 101],
'Age_Str': ['9', '10', '11', '101'],
'Weight': [80.5, 85.3, np.nan, 120.0]},
index=pd.Index(['S1', 'S2', 'S3', 'S4'], name='id'))
metadata = Metadata(df)
# string pattern matching
obs = metadata.get_ids(where="Name LIKE 'Ba_'")
exp = {'S2', 'S3'}
self.assertEqual(obs, exp)
# string comparison
obs = metadata.get_ids(where="Age_Str >= 11")
exp = {'S1', 'S3'}
self.assertEqual(obs, exp)
# numeric comparison
obs = metadata.get_ids(where="Age >= 11")
exp = {'S3', 'S4'}
self.assertEqual(obs, exp)
# numeric comparison with missing data
obs = metadata.get_ids(where="Weight < 100")
exp = {'S1', 'S2'}
self.assertEqual(obs, exp)
def test_column_with_space_in_name(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'Sample Type': ['gut', 'tongue', 'gut']},
index=pd.Index(['S1', 'S2', 'S3'], name='id'))
metadata = Metadata(df)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
metadata.get_ids()
# The list of captured warnings should be empty
self.assertFalse(w)
class TestMerge(unittest.TestCase):
def setUp(self):
get_dummy_plugin()
def test_merging_nothing(self):
md = Metadata(pd.DataFrame(
{'a': [1, 2, 3], 'b': [4, 5, 6]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
with self.assertRaisesRegex(ValueError,
'At least one Metadata.*nothing to merge'):
md.merge()
def test_merging_two(self):
md1 = Metadata(pd.DataFrame(
{'a': [1, 2, 3], 'b': [4, 5, 6]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
md2 = Metadata(pd.DataFrame(
{'c': [7, 8, 9], 'd': [10, 11, 12]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
obs = md1.merge(md2)
exp = Metadata(pd.DataFrame(
{'a': [1, 2, 3], 'b': [4, 5, 6],
'c': [7, 8, 9], 'd': [10, 11, 12]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
self.assertEqual(obs, exp)
def test_merging_three(self):
md1 = Metadata(pd.DataFrame(
{'a': [1, 2, 3], 'b': [4, 5, 6]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
md2 = Metadata(pd.DataFrame(
{'c': [7, 8, 9], 'd': [10, 11, 12]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
md3 = Metadata(pd.DataFrame(
{'e': [13, 14, 15], 'f': [16, 17, 18]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
obs = md1.merge(md2, md3)
exp = Metadata(pd.DataFrame(
{'a': [1, 2, 3], 'b': [4, 5, 6],
'c': [7, 8, 9], 'd': [10, 11, 12],
'e': [13, 14, 15], 'f': [16, 17, 18]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
self.assertEqual(obs, exp)
def test_merging_unaligned_indices(self):
md1 = Metadata(pd.DataFrame(
{'a': [1, 2, 3], 'b': [4, 5, 6]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
md2 = Metadata(pd.DataFrame(
{'c': [9, 8, 7], 'd': [12, 11, 10]},
index=pd.Index(['id3', 'id2', 'id1'], name='id')))
md3 = Metadata(pd.DataFrame(
{'e': [13, 15, 14], 'f': [16, 18, 17]},
index=pd.Index(['id1', 'id3', 'id2'], name='id')))
obs = md1.merge(md2, md3)
exp = Metadata(pd.DataFrame(
{'a': [1, 2, 3], 'b': [4, 5, 6],
'c': [7, 8, 9], 'd': [10, 11, 12],
'e': [13, 14, 15], 'f': [16, 17, 18]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
self.assertEqual(obs, exp)
def test_inner_join(self):
md1 = Metadata(pd.DataFrame(
{'a': [1, 2, 3], 'b': [4, 5, 6]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
md2 = Metadata(pd.DataFrame(
{'c': [7, 8, 9], 'd': [10, 11, 12]},
index=pd.Index(['id2', 'X', 'Y'], name='id')))
md3 = Metadata(pd.DataFrame(
{'e': [13, 14, 15], 'f': [16, 17, 18]},
index=pd.Index(['X', 'id3', 'id2'], name='id')))
# Single shared ID.
obs = md1.merge(md2, md3)
exp = Metadata(pd.DataFrame(
{'a': [2], 'b': [5], 'c': [7], 'd': [10], 'e': [15], 'f': [18]},
index=pd.Index(['id2'], name='id')))
self.assertEqual(obs, exp)
# Multiple shared IDs.
obs = md1.merge(md3)
exp = Metadata(pd.DataFrame(
{'a': [2, 3], 'b': [5, 6], 'e': [15, 14], 'f': [18, 17]},
index=pd.Index(['id2', 'id3'], name='id')))
self.assertEqual(obs, exp)
def test_index_and_column_merge_order(self):
md1 = Metadata(pd.DataFrame(
[[1], [2], [3], [4]],
index=pd.Index(['id1', 'id2', 'id3', 'id4'], name='id'),
columns=['a']))
md2 = Metadata(pd.DataFrame(
[[5], [6], [7]], index=pd.Index(['id4', 'id3', 'id1'], name='id'),
columns=['b']))
md3 = Metadata(pd.DataFrame(
[[8], [9], [10]], index=pd.Index(['id1', 'id4', 'id3'], name='id'),
columns=['c']))
obs = md1.merge(md2, md3)
exp = Metadata(pd.DataFrame(
[[1, 7, 8], [3, 6, 10], [4, 5, 9]],
index=pd.Index(['id1', 'id3', 'id4'], name='id'),
columns=['a', 'b', 'c']))
self.assertEqual(obs, exp)
# Merging in different order produces different ID/column order.
obs = md2.merge(md1, md3)
exp = Metadata(pd.DataFrame(
[[5, 4, 9], [6, 3, 10], [7, 1, 8]],
index=pd.Index(['id4', 'id3', 'id1'], name='id'),
columns=['b', 'a', 'c']))
self.assertEqual(obs, exp)
def test_id_column_only(self):
md1 = Metadata(pd.DataFrame({},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
md2 = Metadata(pd.DataFrame({},
index=pd.Index(['id2', 'X', 'id1'], name='id')))
md3 = Metadata(pd.DataFrame({},
index=pd.Index(['id1', 'id3', 'id2'], name='id')))
obs = md1.merge(md2, md3)
exp = Metadata(
pd.DataFrame({}, index=pd.Index(['id1', 'id2'], name='id')))
self.assertEqual(obs, exp)
def test_merged_id_column_name(self):
md1 = Metadata(pd.DataFrame(
{'a': [1, 2]},
index=pd.Index(['id1', 'id2'], name='sample ID')))
md2 = Metadata(pd.DataFrame(
{'b': [3, 4]},
index=pd.Index(['id1', 'id2'], name='feature ID')))
obs = md1.merge(md2)
exp = Metadata(pd.DataFrame(
{'a': [1, 2], 'b': [3, 4]},
index=pd.Index(['id1', 'id2'], name='id')))
self.assertEqual(obs, exp)
def test_merging_preserves_column_types(self):
# Test that column types remain the same even if a categorical column
# *could* be reinterpreted as numeric after the merge.
md1 = Metadata(pd.DataFrame(
{'a': [1, 2, 3],
'b': [np.nan, np.nan, np.nan]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
md2 = Metadata(pd.DataFrame(
{'c': ['1', 'foo', '3'],
'd': np.array([np.nan, np.nan, np.nan], dtype=object)},
index=pd.Index(['id1', 'id4', 'id3'], name='id')))
obs = md1.merge(md2)
exp = Metadata(pd.DataFrame(
{'a': [1, 3], 'b': [np.nan, np.nan], 'c': ['1', '3'],
'd': np.array([np.nan, np.nan], dtype=object)},
index=pd.Index(['id1', 'id3'], name='id')))
self.assertEqual(obs, exp)
self.assertEqual(obs.columns['a'].type, 'numeric')
self.assertEqual(obs.columns['b'].type, 'numeric')
self.assertEqual(obs.columns['c'].type, 'categorical')
self.assertEqual(obs.columns['d'].type, 'categorical')
def test_no_artifacts(self):
md1 = Metadata(pd.DataFrame(
{'a': [1, 2]}, index=pd.Index(['id1', 'id2'], name='id')))
md2 = Metadata(pd.DataFrame(
{'b': [3, 4]}, index=pd.Index(['id1', 'id2'], name='id')))
metadata = md1.merge(md2)
self.assertEqual(metadata.artifacts, ())
def test_with_artifacts(self):
artifact1 = Artifact.import_data('Mapping', {'a': '1', 'b': '2'})
artifact2 = Artifact.import_data('Mapping', {'d': '4'})
md_from_artifact1 = artifact1.view(Metadata)
md_from_artifact2 = artifact2.view(Metadata)
md_no_artifact = Metadata(pd.DataFrame(
{'c': ['3', '42']}, index=pd.Index(['0', '1'], name='id')))
# Merge three metadata objects -- the first has an artifact, the second
# does not, and the third has an artifact.
obs_md = md_from_artifact1.merge(md_no_artifact, md_from_artifact2)
exp_df = pd.DataFrame(
{'a': '1', 'b': '2', 'c': '3', 'd': '4'},
index=pd.Index(['0'], name='id'))
exp_md = Metadata(exp_df)
exp_md._add_artifacts((artifact1, artifact2))
self.assertEqual(obs_md, exp_md)
self.assertEqual(obs_md.artifacts, (artifact1, artifact2))
def test_disjoint_indices(self):
md1 = Metadata(pd.DataFrame(
{'a': [1, 2, 3], 'b': [4, 5, 6]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
md2 = Metadata(pd.DataFrame(
{'c': [7, 8, 9], 'd': [10, 11, 12]},
index=pd.Index(['X', 'Y', 'Z'], name='id')))
with self.assertRaisesRegex(ValueError, 'no IDs shared'):
md1.merge(md2)
def test_duplicate_columns(self):
md1 = Metadata(pd.DataFrame(
{'a': [1, 2], 'b': [3, 4]},
index=pd.Index(['id1', 'id2'], name='id')))
md2 = Metadata(pd.DataFrame(
{'c': [5, 6], 'b': [7, 8]},
index=pd.Index(['id1', 'id2'], name='id')))
with self.assertRaisesRegex(ValueError, "columns overlap: 'b'"):
md1.merge(md2)
def test_duplicate_columns_self_merge(self):
md = Metadata(pd.DataFrame(
{'a': [1, 2], 'b': [3, 4]},
index=pd.Index(['id1', 'id2'], name='id')))
with self.assertRaisesRegex(ValueError, "columns overlap: 'a', 'b'"):
md.merge(md)
class TestFilterIDs(unittest.TestCase):
def setUp(self):
get_dummy_plugin()
def test_supports_iterable(self):
md = Metadata(pd.DataFrame(
{'col1': [1, 2, 3], 'col2': ['foo', 'bar', 'baz']},
index=pd.Index(['a', 'b', 'c'], name='id')))
obs = md.filter_ids(iter({'a', 'c'}))
exp = Metadata(pd.DataFrame(
{'col1': [1, 3], 'col2': ['foo', 'baz']},
index=pd.Index(['a', 'c'], name='id')))
self.assertEqual(obs, exp)
def test_keep_all(self):
md = Metadata(pd.DataFrame(
{'col1': [1, 2, 3], 'col2': ['foo', 'bar', 'baz']},
index=pd.Index(['a', 'b', 'c'], name='id')))
obs = md.filter_ids({'a', 'b', 'c'})
self.assertEqual(obs, md)
self.assertIsNot(obs, md)
def test_keep_multiple(self):
md = Metadata(pd.DataFrame(
{'col1': [1, 2, 3], 'col2': ['foo', 'bar', 'baz']},
index=pd.Index(['a', 'b', 'c'], name='id')))
obs = md.filter_ids({'a', 'c'})
exp = Metadata(pd.DataFrame(
{'col1': [1, 3], 'col2': ['foo', 'baz']},
index=pd.Index(['a', 'c'], name='id')))
self.assertEqual(obs, exp)
def test_keep_one(self):
md = Metadata(pd.DataFrame(
{'col1': [1, 2, 3], 'col2': ['foo', 'bar', 'baz']},
index=pd.Index(['a', 'b', 'c'], name='id')))
obs = md.filter_ids({'b'})
exp = Metadata(pd.DataFrame(
{'col1': [2], 'col2': ['bar']}, index=pd.Index(['b'], name='id')))
self.assertEqual(obs, exp)
def test_filtering_preserves_column_types(self):
# Test that column types remain the same even if a categorical column
# *could* be reinterpreted as numeric after the filter.
md = Metadata(pd.DataFrame(
{'a': [1, 2, 3],
'b': [np.nan, np.nan, np.nan],
'c': ['1', 'foo', '3'],
'd': np.array([np.nan, np.nan, np.nan], dtype=object)},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
obs = md.filter_ids({'id1', 'id3'})
exp = Metadata(pd.DataFrame(
{'a': [1, 3], 'b': [np.nan, np.nan], 'c': ['1', '3'],
'd': np.array([np.nan, np.nan], dtype=object)},
index=pd.Index(['id1', 'id3'], name='id')))
self.assertEqual(obs, exp)
self.assertEqual(obs.columns['a'].type, 'numeric')
self.assertEqual(obs.columns['b'].type, 'numeric')
self.assertEqual(obs.columns['c'].type, 'categorical')
self.assertEqual(obs.columns['d'].type, 'categorical')
def test_alternate_id_header(self):
md = Metadata(pd.DataFrame(
{'col1': [1, 2, 3, 4], 'col2': ['foo', 'bar', 'baz', 'bazz']},
index=pd.Index(['a', 'b', 'c', 'd'], name='#Sample ID')))
obs = md.filter_ids({'b', 'd'})
exp = Metadata(pd.DataFrame(
{'col1': [2, 4], 'col2': ['bar', 'bazz']},
index= | pd.Index(['b', 'd'], name='#Sample ID') | pandas.Index |
from pathlib import Path
import epimargin.plots as plt
import flat_table
import numpy as np
import pandas as pd
import seaborn as sns
from epimargin.estimators import analytical_MPVS
from epimargin.etl.commons import download_data
from epimargin.etl.covid19india import state_code_lookup
from epimargin.models import SIR
from epimargin.smoothing import notched_smoothing
sns.set(style = "whitegrid")
# root = Path(__file__).parent
# data = root/"data"
data = Path("./data").resolve()
CI = 0.95
window = 14
gamma = 0.2
infectious_period = 5
smooth = notched_smoothing(window)
num_sims = 50000
# load admin data on population
IN_age_structure = { # WPP2019_POP_F01_1_POPULATION_BY_AGE_BOTH_SEXES
0: 116_880,
5: 117_982 + 126_156 + 126_046,
18: 122_505 + 117_397,
30: 112_176 + 103_460,
40: 90_220 + 79_440,
50: 68_876 + 59_256 + 48_891,
65: 38_260 + 24_091,
75: 15_084 + 8_489 + 3_531 + 993 + 223 + 48,
}
# normalize
age_structure_norm = sum(IN_age_structure.values())
IN_age_ratios = np.array([v/age_structure_norm for (k, v) in IN_age_structure.items()])
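# apportion a scalar total across the 8 age bins above in proportion to population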
split_by_age = lambda v: (v * IN_age_ratios).astype(int)
# from Karnataka
COVID_age_ratios = np.array([0.01618736, 0.07107746, 0.23314877, 0.22946212, 0.18180406, 0.1882451 , 0.05852026, 0.02155489])
india_pop = pd.read_csv(data/"india_pop.csv", names = ["state", "population"], index_col = "state").to_dict()["population"]
india_pop["Odisha"] = india_pop["Orissa"]
india_pop["Puducherry"] = india_pop["Pondicherry"]
india_pop["Uttarakhand"] = india_pop["Uttaranchal"]
# load covid19 india data
download_data(data, 'timeseries.json', "https://api.covid19india.org/v3/")
with (data/'timeseries.json').open("rb") as fp:
df = flat_table.normalize(pd.read_json(fp)).fillna(0)
df.columns = df.columns.str.split('.', expand = True)
dates = np.squeeze(df["index"][None].values)
df = df.drop(columns = "index").set_index(dates).stack([1, 2]).drop("UN", axis = 1)
# load Rt data
# Rt = pd.read_csv("data/Rt_timeseries_india.csv")
# date = "2020-12-24"
# for state in set(df.loc[date, :, :].columns) - {"TT", "LA", "SK", "NL"}:
# for state in ["TN"]:
# N = india_pop[state_code_lookup[state].replace("&", "and")]
# T = df[state].loc[date, "total", "confirmed"]
# R = df[state].loc[date, "total", "recovered"]
# D = df[state].loc[date, "total", "deceased"]
# model = SIR(
# name = state,
# population = N - D,
# dT0 = np.ones(num_sims) * df[state].loc[date, "delta", "confirmed"],
# Rt0 = Rt[(Rt.state == state) & (Rt.date == date)].Rt.iloc[0],
# I0 = np.ones(num_sims) * (T - R - D),
# R0 = np.ones(num_sims) * R,
# D0 = np.ones(num_sims) * D,
# random_seed = 0
# )
# i = 0
# while np.mean(model.dT[-1]) > 0:
# model.parallel_forward_epi_step(num_sims = num_sims)
# i += 1
# print(state, i, np.mean(model.dT[-1]), np.std(model.dT[-1]))
# dT = np.array([_.mean().astype(int) for _ in model.dT])
# dTx = (dT * COVID_age_ratios[..., None]).astype(int)
# Tx = (T * COVID_age_ratios).astype(int)[..., None] + dTx.cumsum(axis = 1)
# Nx = split_by_age(N)
# lambda_x = dT/(Nx[..., None] - Tx)
# pd.DataFrame(lambda_x).to_csv(data/f"{state}_age_hazards.csv")
# pd.DataFrame(model.dT).T.to_csv(data/f"{state}_sims.csv")
######################
# sero scaling
TN_sero_breakdown = np.array([0.311, 0.311, 0.311, 0.320, 0.333, 0.320, 0.272, 0.253]) # from TN sero, assume 0-18 sero = 18-30 sero
TN_pop = india_pop["Tamil Nadu"]
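# Population-weighted seroprevalence for TN: split the state population into the WPP
# age bins, weight each bin by its measured seropositivity, then divide by total population.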
TN_seropos = split_by_age(TN_pop) @ TN_sero_breakdown/TN_pop
#KA_seropos = 0.467 # statewide from KA private survey
KA_seropos = 0.273 # statewide from KA govt survey
scaled_Rt = {
"TN": 0.9271785447646147,
# "KA": 1.1929944867195017
"KA": 0.9636985404892338
}
simulation_start = pd.Timestamp("Jan 1, 2021")
smoothing = notched_smoothing(14)
num_sims = 10000
for (state, date, seropos, sero_breakdown) in (
("TN", "October 23, 2020", TN_seropos, TN_sero_breakdown),
#("KA", "2020-07-22", KA_seropos, IN_age_ratios)
# ("KA", "2020-09-16", KA_seropos, IN_age_ratios),
):
N = india_pop[state_code_lookup[state].replace("&", "and")]
# scaling
dT_conf = df[state].loc[:, "delta", "confirmed"]
dT_conf_smooth = pd.Series(smoothing(dT_conf), index = dT_conf.index)
T_conf_smooth = dT_conf_smooth.cumsum().astype(int)
T = T_conf_smooth[date]
T_sero = (N * seropos)
T_ratio = T_sero/T
# grab time series
R = df[state].loc[simulation_start, "total", "recovered"]
D = df[state].loc[simulation_start, "total", "deceased"]
# run Rt estimation on scaled timeseries
(Rt_dates, Rt_est, *_) = analytical_MPVS(T_ratio * dT_conf_smooth, CI = CI, smoothing = lambda _:_, totals = False)
Rt = dict(zip(Rt_dates, Rt_est))
model = SIR(
name = state,
population = N,
dT0 = np.ones(num_sims) * (dT_conf_smooth[simulation_start] * T_ratio).astype(int),
Rt0 = Rt[simulation_start] * N/(N - T_sero),
I0 = np.ones(num_sims) * (T_sero - R - D),
R0 = np.ones(num_sims) * R,
D0 = np.ones(num_sims) * D,
random_seed = 0
)
i = 0
print(Rt[simulation_start], Rt[simulation_start] * N/(N - T_ratio * T_conf_smooth[simulation_start]))
while np.mean(model.dT[-1]) > 0:
model.parallel_forward_epi_step(num_sims = num_sims)
i += 1
print(state, i, np.mean(model.dT[-1]), np.std(model.dT[-1]))
# plot simulation
plt.scatter(dT_conf["April 1, 2020":simulation_start].index, dT_conf["April 1, 2020":simulation_start].values*T_ratio, label = "seroprevalence-scaled cases (pre-simulation)", color = "black", s = 5)
# plt.scatter(dT_conf[simulation_start:].index, dT_conf[simulation_start:].values*T_ratio, color = "grey", label = "seroprevalence-scaled cases (post-simulation)", s = 5)
# t = pd.Timestamp(date)
dates = pd.date_range(simulation_start, simulation_start + pd.Timedelta(len(model.dT) - 1, "days"))
# dates = pd.date_range(t, pd.Timestamp("April 1, 2021"))
n = len(dates)
plt.plot(dates, np.array([_.mean().astype(int) for _ in model.dT][:n]), label = "mean simulated daily cases", color = "rebeccapurple")
plt.fill_between(dates, [_.min().astype(int) for _ in model.dT][:n], [_.max().astype(int) for _ in model.dT][:n], label = "simulation range", alpha = 0.3, color = "rebeccapurple")
plt.vlines(pd.Timestamp(date), 1, 1e6, linestyles = "dashed", label = "date of seroprevalence study")
plt.legend(handlelength = 1, framealpha = 1)
plt.semilogy()
plt.xlim(pd.Timestamp("April 1, 2020"), dates[-1])
plt.ylim(1, 1e6)
plt.PlotDevice().xlabel("\ndate").ylabel("new daily cases\n").annotate("Daily Cases: Scaled Data & Simulation - Tamil Nadu, no vaccination")
plt.show()
# calculate hazards
dT = np.array([_.mean().astype(int) for _ in model.dT])
S = np.array([_.mean().astype(int) for _ in model.S])
dTx = (dT * sero_breakdown[..., None]).astype(int)
Sx = (S * COVID_age_ratios[..., None]).astype(int)
lambda_x = dTx/Sx
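    # lambda_x[a, t] is the age-specific daily infection hazard (new cases / susceptibles
    # in age bin a). The recursion below turns these hazards into Pr(first infection at t)
    # and the cumulative Pr(infected on or before t) for each age bin.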
Pr_covid_t = np.zeros(lambda_x.shape)
Pr_covid_pre_t = np.zeros(lambda_x.shape)
Pr_covid_t[:, 0] = lambda_x[:, 0]
Pr_covid_pre_t[:, 0] = lambda_x[:, 0]
for t in range(1, len(lambda_x[0, :])):
Pr_covid_t[:, t] = lambda_x[:, t] * (1 - Pr_covid_pre_t[:, t-1])
Pr_covid_pre_t[:, t] = Pr_covid_pre_t[:, t-1] + lambda_x[:, t] * (1 - Pr_covid_pre_t[:, t-1])
plt.figure()
for _ in range(8):
plt.plot(Pr_covid_pre_t[_, :], label = f"agecat:{_}")
plt.title(f"{state}: Pr(covid before t) - Tamil Nadu, no vaccination")
plt.legend()
plt.show()
plt.figure()
for _ in range(8):
plt.plot(Pr_covid_t[_, :], label = f"agecat:{_}")
plt.title(f"{state}: Pr(covid at t) - Tamil Nadu, no vaccination")
plt.legend()
plt.show()
# Tx = (T * sero_breakdown).astype(int)[..., None] + dTx.cumsum(axis = 1)
# Nx = split_by_age(N)
# lambda_x = dTx/(Nx[..., None] - Tx)
| pd.DataFrame(lambda_x) | pandas.DataFrame |
"""
@Author: <NAME>
@Email: <EMAIL>
"""
import matplotlib.pyplot as plt
plt.switch_backend('agg')
plt.rcParams['axes.unicode_minus'] = False
import seaborn as sns
import pandas as pd
def simple_multi_line_plot(figpath, x_list, y_list, line_names=None, x_label=None, y_label=None, title=None):
"""
Args:
x_list (list): [array-like, ...]
y_list (list): [array-like, ...]
"""
lineNum = len(x_list)
line_names = ['line'+str(i) for i in range(lineNum)] if line_names is None else line_names
x_label = 'x' if x_label is None else x_label
y_label = 'y' if y_label is None else y_label
title = 'Simple Line Plot' if title is None else title
df = pd.concat([ | pd.DataFrame({x_label: x_list[i], y_label: y_list[i], 'lines': line_names[i]}) | pandas.DataFrame |
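# Example usage (a sketch; the figure path is hypothetical and the rest of the function
# body is truncated in this snippet):
if __name__ == '__main__':
    import numpy as np
    xs = [np.arange(10), np.arange(10)]
    ys = [np.arange(10) ** 2, np.sqrt(np.arange(10))]
    simple_multi_line_plot('lines.png', xs, ys,
                           line_names=['quadratic', 'square root'],
                           x_label='x', y_label='y', title='Demo')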
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2020/3/24 15:00
Desc: Scrape commodity spot prices and the corresponding basis data from the SunSirs (100ppi) website; the data run from 20110104 to the present.
Note: basis (spot-futures spread) = spot price - futures price (the futures price used here is the settlement price).
Gold is quoted in yuan/gram, silver in yuan/kg, glass spot in yuan/square metre, egg spot in yuan/kg, egg futures in yuan/500 kg, and everything else in yuan/ton.
The coke spot specification is grade-1 metallurgical coke, while the coke futures specification sits between grade 1 and grade 2, so the coke basis is for reference only.
The iron ore spot price is per wet ton; the iron ore futures price is per dry ton.
Web page: http://www.100ppi.com/sf/
Historical data can be fetched by editing the URL, e.g.: http://www.100ppi.com/sf/day-2017-09-12.html
Known bugs in the SunSirs source data:
1. Data for Wednesday 2018-09-12 are missing because the source data are missing for that trading day: http://www.100ppi.com/sf/day-2018-09-12.html
"""
import datetime
import re
import time
import warnings
import pandas as pd
from akshare.futures import cons
from akshare.futures.requests_fun import pandas_read_html_link
from akshare.futures.symbol_var import chinese_to_english
calendar = cons.get_calendar()
def futures_spot_price_daily(start_day=None, end_day=None, vars_list=cons.contract_symbols):
"""
    Fetch commodity spot prices and the corresponding basis over a period of time
    :param start_day: str start date, format YYYY-MM-DD or YYYYMMDD, or a datetime.date object; defaults to today
    :param end_day: str end date, format YYYY-MM-DD or YYYYMMDD, or a datetime.date object; defaults to today
    :param vars_list: list of contract symbols, e.g. [RB, AL]; defaults to all commodities
    :return: pandas.DataFrame
    roll-yield data:
        var              commodity symbol                                           string
        sp               spot price                                                 float
        near_symbol      nearby (closest-to-delivery) contract                      string
        near_price       settlement price of the nearby contract                    float
        dom_symbol       dominant contract                                          string
        dom_price        settlement price of the dominant contract                  float
        near_basis       basis of the nearby contract versus the spot price         float
        dom_basis        basis of the dominant contract versus the spot price       float
        near_basis_rate  basis rate of the nearby contract versus the spot price    float
        dom_basis_rate   basis rate of the dominant contract versus the spot price  float
        date             date                                                       string YYYYMMDD
"""
start_day = (
cons.convert_date(start_day) if start_day is not None else datetime.date.today()
)
end_day = (
cons.convert_date(end_day)
if end_day is not None
else cons.convert_date(cons.get_latest_data_date(datetime.datetime.now()))
)
df_list = []
while start_day <= end_day:
print(start_day)
temp_df = futures_spot_price(start_day, vars_list)
if temp_df is False:
return | pd.concat(df_list) | pandas.concat |
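# Sketch (not part of akshare itself): how the basis columns documented above relate to
# the raw prices, following the module header's sign convention
# (basis = spot price - futures settlement price); the basis rate is relative to spot.
def _basis_example(sp: float, settlement: float) -> tuple:
    basis = sp - settlement
    return basis, basis / sp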
# External Libraries
from datetime import date
import pandas as pd
pd.options.mode.chained_assignment = None
import os
from pathlib import Path
import logging, coloredlogs
# Internal Libraries
import dicts_and_lists as dal
import Helper
# ------ Logger ------- #
logger = logging.getLogger('get_past_datasets.py')
coloredlogs.install(level='DEBUG')
folder = 'past_data/2017_2018/'
months = ['october', 'november', 'december', 'january', 'february', 'march', 'april', 'may', 'june']
for month in months:
url = 'https://www.basketball-reference.com/leagues/NBA_2018_games-'+ month + '.html'
df_url = pd.read_html(url)[0]
df_url = df_url.rename(columns=
{
'Visitor/Neutral' : 'AwayTeam',
'Home/Neutral' : 'HomeTeam',
'PTS' : 'AwayPoints',
'PTS.1' : 'HomePoints'
}
)
df_url = df_url.drop(['Unnamed: 6', 'Unnamed: 7', 'Attend.', 'Notes'], axis=1) # Remove non interesting columns
df_url = df_url.dropna(subset=['AwayPoints', 'HomePoints']) # Remove rows containing games not yet played
my_file = Path(os.getcwd() + '/' + folder + month + '_data.csv')
    if not my_file.exists(): # If current data is not present in past_data folder, add it
        df_url.to_csv(my_file, index=False) # Save the df as .csv
        logger.info(f'An update has been made: {month}_data.csv has been created.')
    else:
        logger.info(f'{month}_data.csv is up to date.')
# Create a big dataset
october_df = pd.read_csv(folder + 'october_data.csv')
november_df = pd.read_csv(folder + 'november_data.csv')
december_df = pd.read_csv(folder + 'december_data.csv')
january_df = pd.read_csv(folder + 'january_data.csv')
february_df = | pd.read_csv(folder + 'february_data.csv') | pandas.read_csv |
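# Sketch (not in the original script, which is truncated above): stack the monthly frames
# loaded so far into the single "big dataset" the comment above refers to.
season_df = pd.concat(
    [october_df, november_df, december_df, january_df, february_df],
    ignore_index=True,
)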
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
191: pd.Timestamp("2012-11-09 00:00:00"),
192: pd.Timestamp("2012-11-10 00:00:00"),
193: pd.Timestamp("2012-11-11 00:00:00"),
194: pd.Timestamp("2012-11-12 00:00:00"),
195: pd.Timestamp("2012-11-13 00:00:00"),
196: pd.Timestamp("2012-11-14 00:00:00"),
197: pd.Timestamp("2012-11-15 00:00:00"),
198: pd.Timestamp("2012-11-16 00:00:00"),
199: pd.Timestamp("2012-11-17 00:00:00"),
200: pd.Timestamp("2012-11-18 00:00:00"),
201: pd.Timestamp("2012-11-19 00:00:00"),
202: pd.Timestamp("2012-11-20 00:00:00"),
203: pd.Timestamp("2012-11-21 00:00:00"),
204: pd.Timestamp("2012-11-22 00:00:00"),
205: pd.Timestamp("2012-11-23 00:00:00"),
206: pd.Timestamp("2012-11-24 00:00:00"),
207: pd.Timestamp("2012-11-25 00:00:00"),
208: pd.Timestamp("2012-11-26 00:00:00"),
209: pd.Timestamp("2012-11-27 00:00:00"),
210: pd.Timestamp("2012-11-28 00:00:00"),
211: pd.Timestamp("2012-11-29 00:00:00"),
212: pd.Timestamp("2012-11-30 00:00:00"),
213: pd.Timestamp("2012-12-01 00:00:00"),
214: pd.Timestamp("2012-12-02 00:00:00"),
215: pd.Timestamp("2012-12-03 00:00:00"),
216: pd.Timestamp("2012-12-04 00:00:00"),
217: pd.Timestamp("2012-12-05 00:00:00"),
218: pd.Timestamp("2012-12-06 00:00:00"),
219: pd.Timestamp("2012-12-07 00:00:00"),
220: pd.Timestamp("2012-12-08 00:00:00"),
221: pd.Timestamp("2012-12-09 00:00:00"),
222: pd.Timestamp("2012-12-10 00:00:00"),
223: pd.Timestamp("2012-12-11 00:00:00"),
224: pd.Timestamp("2012-12-12 00:00:00"),
225: pd.Timestamp("2012-12-13 00:00:00"),
226: pd.Timestamp("2012-12-14 00:00:00"),
227: pd.Timestamp("2012-12-15 00:00:00"),
228: pd.Timestamp("2012-12-16 00:00:00"),
229: pd.Timestamp("2012-12-17 00:00:00"),
230: pd.Timestamp("2012-12-18 00:00:00"),
231: pd.Timestamp("2012-12-19 00:00:00"),
232: pd.Timestamp("2012-12-20 00:00:00"),
233: pd.Timestamp("2012-12-21 00:00:00"),
234: pd.Timestamp("2012-12-22 00:00:00"),
235: pd.Timestamp("2012-12-23 00:00:00"),
236: pd.Timestamp("2012-12-24 00:00:00"),
237: pd.Timestamp("2012-12-25 00:00:00"),
238: pd.Timestamp("2012-12-26 00:00:00"),
239: pd.Timestamp("2012-12-27 00:00:00"),
240: pd.Timestamp("2012-12-28 00:00:00"),
241: pd.Timestamp("2012-12-29 00:00:00"),
242: pd.Timestamp("2012-12-30 00:00:00"),
243: pd.Timestamp("2012-12-31 00:00:00"),
244: pd.Timestamp("2013-01-01 00:00:00"),
245: pd.Timestamp("2013-01-02 00:00:00"),
246: pd.Timestamp("2013-01-03 00:00:00"),
247: pd.Timestamp("2013-01-04 00:00:00"),
248: pd.Timestamp("2013-01-05 00:00:00"),
249: pd.Timestamp("2013-01-06 00:00:00"),
250: pd.Timestamp("2013-01-07 00:00:00"),
251: pd.Timestamp("2013-01-08 00:00:00"),
252: pd.Timestamp("2013-01-09 00:00:00"),
253: pd.Timestamp("2013-01-10 00:00:00"),
254: pd.Timestamp("2013-01-11 00:00:00"),
255: pd.Timestamp("2013-01-12 00:00:00"),
256: pd.Timestamp("2013-01-13 00:00:00"),
257: pd.Timestamp("2013-01-14 00:00:00"),
258: pd.Timestamp("2013-01-15 00:00:00"),
259: pd.Timestamp("2013-01-16 00:00:00"),
260: pd.Timestamp("2013-01-17 00:00:00"),
261: pd.Timestamp("2013-01-18 00:00:00"),
262: pd.Timestamp("2013-01-19 00:00:00"),
263: pd.Timestamp("2013-01-20 00:00:00"),
264: pd.Timestamp("2013-01-21 00:00:00"),
265: pd.Timestamp("2013-01-22 00:00:00"),
266: pd.Timestamp("2013-01-23 00:00:00"),
267: pd.Timestamp("2013-01-24 00:00:00"),
268: pd.Timestamp("2013-01-25 00:00:00"),
269: pd.Timestamp("2013-01-26 00:00:00"),
270: pd.Timestamp("2013-01-27 00:00:00"),
271: pd.Timestamp("2013-01-28 00:00:00"),
272: pd.Timestamp("2013-01-29 00:00:00"),
273: pd.Timestamp("2013-01-30 00:00:00"),
274: pd.Timestamp("2013-01-31 00:00:00"),
275: pd.Timestamp("2013-02-01 00:00:00"),
276: pd.Timestamp("2013-02-02 00:00:00"),
277: pd.Timestamp("2013-02-03 00:00:00"),
278: pd.Timestamp("2013-02-04 00:00:00"),
279: pd.Timestamp("2013-02-05 00:00:00"),
280: pd.Timestamp("2013-02-06 00:00:00"),
281: pd.Timestamp("2013-02-07 00:00:00"),
282: pd.Timestamp("2013-02-08 00:00:00"),
283: pd.Timestamp("2013-02-09 00:00:00"),
284: pd.Timestamp("2013-02-10 00:00:00"),
285: pd.Timestamp("2013-02-11 00:00:00"),
286: pd.Timestamp("2013-02-12 00:00:00"),
287: pd.Timestamp("2013-02-13 00:00:00"),
288: pd.Timestamp("2013-02-14 00:00:00"),
289: pd.Timestamp("2013-02-15 00:00:00"),
290: pd.Timestamp("2013-02-16 00:00:00"),
291: pd.Timestamp("2013-02-17 00:00:00"),
292: pd.Timestamp("2013-02-18 00:00:00"),
293: pd.Timestamp("2013-02-19 00:00:00"),
294: pd.Timestamp("2013-02-20 00:00:00"),
295: pd.Timestamp("2013-02-21 00:00:00"),
296: pd.Timestamp("2013-02-22 00:00:00"),
297: pd.Timestamp("2013-02-23 00:00:00"),
298: pd.Timestamp("2013-02-24 00:00:00"),
299: pd.Timestamp("2013-02-25 00:00:00"),
300: pd.Timestamp("2013-02-26 00:00:00"),
301: pd.Timestamp("2013-02-27 00:00:00"),
302: pd.Timestamp("2013-02-28 00:00:00"),
303: pd.Timestamp("2013-03-01 00:00:00"),
304: pd.Timestamp("2013-03-02 00:00:00"),
305: pd.Timestamp("2013-03-03 00:00:00"),
306: pd.Timestamp("2013-03-04 00:00:00"),
307: pd.Timestamp("2013-03-05 00:00:00"),
308: pd.Timestamp("2013-03-06 00:00:00"),
309: pd.Timestamp("2013-03-07 00:00:00"),
310: pd.Timestamp("2013-03-08 00:00:00"),
311: pd.Timestamp("2013-03-09 00:00:00"),
312: pd.Timestamp("2013-03-10 00:00:00"),
313: pd.Timestamp("2013-03-11 00:00:00"),
314: pd.Timestamp("2013-03-12 00:00:00"),
315: pd.Timestamp("2013-03-13 00:00:00"),
316: pd.Timestamp("2013-03-14 00:00:00"),
317: pd.Timestamp("2013-03-15 00:00:00"),
318: pd.Timestamp("2013-03-16 00:00:00"),
319: pd.Timestamp("2013-03-17 00:00:00"),
320: pd.Timestamp("2013-03-18 00:00:00"),
321: pd.Timestamp("2013-03-19 00:00:00"),
322: pd.Timestamp("2013-03-20 00:00:00"),
323: pd.Timestamp("2013-03-21 00:00:00"),
324: pd.Timestamp("2013-03-22 00:00:00"),
325: pd.Timestamp("2013-03-23 00:00:00"),
326: pd.Timestamp("2013-03-24 00:00:00"),
327: pd.Timestamp("2013-03-25 00:00:00"),
328: pd.Timestamp("2013-03-26 00:00:00"),
329: pd.Timestamp("2013-03-27 00:00:00"),
330: pd.Timestamp("2013-03-28 00:00:00"),
331: pd.Timestamp("2013-03-29 00:00:00"),
332: pd.Timestamp("2013-03-30 00:00:00"),
333: pd.Timestamp("2013-03-31 00:00:00"),
334: pd.Timestamp("2013-04-01 00:00:00"),
335: pd.Timestamp("2013-04-02 00:00:00"),
336: pd.Timestamp("2013-04-03 00:00:00"),
337: pd.Timestamp("2013-04-04 00:00:00"),
338: pd.Timestamp("2013-04-05 00:00:00"),
339: pd.Timestamp("2013-04-06 00:00:00"),
340: pd.Timestamp("2013-04-07 00:00:00"),
341: pd.Timestamp("2013-04-08 00:00:00"),
342: pd.Timestamp("2013-04-09 00:00:00"),
343: pd.Timestamp("2013-04-10 00:00:00"),
344: pd.Timestamp("2013-04-11 00:00:00"),
345: pd.Timestamp("2013-04-12 00:00:00"),
346: | pd.Timestamp("2013-04-13 00:00:00") | pandas.Timestamp |
import pandas as pd
import psycopg2
import pickle
import numpy as np
# counterS = 0
# global counterS
# global valGlob
# from sqlalchemy import create_engine
# -*- coding: utf-8 -*-
import os
import sys
import copy
# fileName = '/Users/alessandro/Documents/PhD/OntoHistory/WDTaxo_October2014.csv'
# connection parameters
def get_db_params():
params = {
'database': 'wikidb',
'user': 'postgres',
'password': '<PASSWORD>',
'host': 'localhost',
'port': '5432'
}
conn = psycopg2.connect(**params)
return conn
# create table
def create_table():
###statement table query
query_table = """CREATE TABLE IF NOT EXISTS tempData AS (SELECT p.itemId, p.revId, (p.timestamp::timestamp) AS tS, t.statementId, t.statProperty, t.statvalue FROM
(SELECT itemId, revId, timestamp FROM revisionData_201710) p, (SELECT revId, statementId, statProperty, statvalue FROM statementsData_201710 WHERE statProperty = 'P279' OR statProperty = 'P31') t
WHERE p.revId = t.revId)"""
queryStatData = """CREATE TABLE IF NOT EXISTS statementDated AS (SELECT p.itemid, p.statproperty, p.statvalue, p.statementid, p.revid, t.timestamp, t.username
FROM statementsData_201710 p LEFT JOIN revisionData_201710 t ON p.revid::int = t.revid::int);"""
conn = None
try:
conn = get_db_params()
cur = conn.cursor()
cur.execute(query_table)
cur.close()
conn.commit()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
conn = None
try:
conn = get_db_params()
cur = conn.cursor()
cur.execute(queryStatData)
cur.close()
conn.commit()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
def queryexecutor():
# dictStats = {}
# conn = get_db_params()
# cur = conn.cursor()
for i in range(13, 18):
for j in range(1, 10):
date = "20" + str(i) + "-0" + str(j) + "-01"
if j == 1:
yr = i-1
datePrev = "20" + str(yr) + "-12-01"
else:
datePrev = "20" + str(i) + "-0" + str(j-1) + "-01"
print(date)
try:
queryStart = """
SELECT * INTO timetable_temp FROM revisionData_201710 WHERE (timestamp > '"""+ datePrev + """ 00:00:00' AND timestamp < '"""+ date + """ 00:00:00');
"""
conn = get_db_params()
cur = conn.cursor()
cur.execute(queryStart)
cur.close()
conn.commit()
queryBig = """
WITH revTempo AS (SELECT itemid, revid, timestamp, username FROM timetable_temp
WHERE (username NOT IN (SELECT bot_name FROM bot_list)
AND username !~ '([0-9]{1,3}[.]){3}[0-9]{1,3}|(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])[.]){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])[.]){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))'))
SELECT username, COUNT(*) AS noEdits, COUNT(DISTINCT itemid) AS itemDiv, (COUNT(*)/COUNT(DISTINCT itemid)::float) AS editRatio
FROM revTempo
GROUP BY username;
"""
#EXTRACT(EPOCH FROM ('2016-10-01 00:00:00'::timestamp - MIN(timestamp))) AS oldEdit,
# print(query)
df = pd.DataFrame()
for chunk in pd.read_sql(queryBig, con=conn, chunksize=1000):
df = df.append(chunk)
#columns: username, noEdits, itemDiv, editRatio
df['timeframe'] = date
queryOntoedit="""
SELECT username, COUNT(*) AS noOntoedit
FROM (SELECT * FROM timetable_temp WHERE itemId IN (SELECT DISTINCT statvalue FROM tempData) OR itemId IN (SELECT DISTINCT itemId FROM tempData WHERE statproperty != 'P31')) poopi
GROUP BY username;
"""
# print(query)
df_ontoedits = pd.DataFrame()
for chunk in pd.read_sql(queryOntoedit, con=conn, chunksize=1000):
df_ontoedits = df_ontoedits.append(chunk)
#columns: username, noOntoedit
df = df.merge(df_ontoedits, how='left')
queryPropedit="""
SELECT username, COUNT(*) AS noPropEdits
FROM timetable_temp WHERE itemId ~* '[P][0-9]{1,}'
GROUP BY username;
"""
# print(query)
df_Propedits = pd.DataFrame()
for chunk in pd.read_sql(queryPropedit, con=conn, chunksize=1000):
df_Propedits = df_Propedits.append(chunk)
#columns: username, noPropEdits
df = df.merge(df_Propedits, how='left')
queryCommedit="""
SELECT user_name AS username, COUNT(*) AS noCommEdits
FROM revision_pages_201710 WHERE (time_stamp > '"""+ datePrev + """ 00:00:00' AND time_stamp < '"""+ date + """ 00:00:00') AND (user_name NOT IN (SELECT bot_name FROM bot_list))
AND item_id !~* 'Property:P*'
GROUP BY user_name;
"""
# print(query)
df_Commedits = pd.DataFrame()
for chunk in pd.read_sql(queryCommedit, con=conn, chunksize=1000):
df_Commedits = df_Commedits.append(chunk)
#columns: username, noCommEdits
df = df.merge(df_Commedits, how='left')
queryTaxo = """
SELECT username, COUNT(*) AS noTaxoEdits
FROM statementDated WHERE (timestamp > '"""+ datePrev + """ 00:00:00' AND timestamp < '"""+ date + """ 00:00:00')
AND username NOT IN (SELECT bot_name FROM bot_list) AND (statProperty = 'P31' or statProperty = 'P279')
AND username !~ '([0-9]{1,3}[.]){3}[0-9]{1,3}|(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])[.]){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])[.]){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))')
GROUP BY username
"""
# print(query)
df_Taxedits = pd.DataFrame()
for chunk in pd.read_sql(queryTaxo, con=conn, chunksize=1000):
df_Taxedits = df_Taxedits.append(chunk)
#columns: username, noTaxoEdits
df = df.merge(df_Taxedits, how='left')
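                # df now holds one row per (non-bot, non-IP) user for this monthly window:
                # total edits, distinct items touched, edits-per-item ratio, the timeframe
                # label, and the left-joined ontology / property / comment / taxonomy edit counts.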
queryBatch = """
SELECT user_name AS username, COUNT(*) AS noBatchedit
FROM (SELECT * FROM revision_history_tagged WHERE automated_tool = 't'
AND (time_stamp > '"""+ datePrev + """ 00:00:00' AND time_stamp < '"""+ date + """ 00:00:00')) AS pippo
GROUP BY user_name
"""
# print(query)
df_Batchedits = | pd.DataFrame() | pandas.DataFrame |
"""
Created on 12:10 at 09/07/2021
@author: bo
"""
import numpy as np
import os
import pandas as pd
import matplotlib.pyplot as plt
import math
import time
import pickle
import seaborn as sns
from scipy.interpolate import interp1d
def get_spectrum(path):
with open(path, 'r') as f:
data = f.readlines()
data = data[10:-5]
data_array = [np.array(v.split("\n")[0].split(",")).astype("float32") for v in data]
return np.array(data_array)
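# Example (sketch; the path is hypothetical): load one raw spectrum file. The parser above
# drops the 10 header lines and 5 footer lines and returns a float array of the
# comma-separated values (typically wavenumber/intensity pairs).
if __name__ == "__main__":
    arr = get_spectrum("../rs_dataset/raw/example_spectrum.txt")
    print(arr.shape)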
def get_minerals_siamese(path):
with open(path, 'r') as f:
data = f.readlines()
data = np.array([v.split("\n")[0] for v in data])
return data
def convert_chemical(v):
c = v.replace("+^", "+}").replace("^", "^{")
cc = c.split("_")
for i, _cc in enumerate(cc):
try:
_dd = int(_cc[0])
_cc = "_{" + _cc + "}"
except ValueError:
if i != 0 and i != len(cc) - 1:
if "^" in _cc:
_cc = _cc + "}"
cc[i] = _cc
value = "".join(cc)
value = value.replace("·", '')
if "&#" in value:
value = value.split(" ")[0]
value = value.replace("-^{}", "}").replace("-^{", "}")
value = value.replace("}}", "}").replace("{{", "{") # .replace("-^{}", "}").replace("-^{", "}")
return value
def get_chemical(path):
with open(path, 'r') as f:
data = f.readlines()
ch = data[2].split("=")[1].split("\n")[0]
if ch[-1] == "_":
ch = ch[:-1]
ch = convert_chemical(ch)
return ch
def give_all_raw(path2read="../rs_dataset/", print_info=True):
siamese_minerals = get_minerals_siamese(path2read + "minerals.txt")
path_0 = path2read + "unrated_unoriented_unoriented.csv"
path_1 = path2read + "poor_unoriented_unoriented.csv"
path_2 = path2read + "fair_unoriented_unoriented.csv"
path_3 = path2read + "ignore_unoriented_unoriented.csv"
path_4 = path2read + "excellent_unoriented_unoriented_raw.csv"
for i, path in enumerate([path_0, path_1, path_2, path_3, path_4]):
_data = give_subset_of_spectrums(path, None, "raw", print_info=print_info)
if i == 0:
data = _data
else:
data = pd.concat([data, _data])
name = np.array([v.lower() for v in data.name.to_numpy()])
select_index = [iterr for iterr, v in enumerate(name) if v in siamese_minerals]
data_subset = data.iloc[select_index]
names = data_subset.name.to_numpy()
unique_name, unique_count = np.unique(names, return_counts=True)
if print_info:
print("There are %d unique minerals with %d spectra" % (len(unique_name), len(names)))
print("with the smallest number of spectra: %d and largest number of spectra %d" % (np.min(unique_count),
np.max(unique_count)))
return data_subset
def give_subset_of_spectrums(path, laser=None, raw_preprocess="raw", unrate=False, print_info=True):
"""Give a subset of spectrums
Args:
path: the csv file for all the data
        laser: int, for the low-resolution spectra either 532, 785 or 780; for the excellent-resolution spectra 532, 780, 514 or 785
        raw_preprocess: 'raw' to select the raw spectra (as passed by the callers above), otherwise the preprocessed variant
"""
data = | pd.read_csv(path) | pandas.read_csv |
import tasks
import pandas as pd
from . import base
from typing import TypeVar, Generic, Dict
from hashlib import sha1
T = TypeVar(tasks.FetchAppleReportTask)
class AppleReportProcessor(Generic[T], base.ReportProcessor[T]):
@property
def added_columns(self) -> Dict[str, any]:
return {
**super().added_columns,
'org_id': self.task.org_id,
'org_name': self.task.org_name,
'converted_currency': self.task.currency,
'product': None,
'product_id': None,
'product_name': None,
'product_platform': None,
'product_os': None,
}
def process(self):
super().process()
report = self.task.report
if 'orgId' in report:
assert len(report.orgId.unique()) == 1 and report.orgId.unique()[0] == self.task.org_id
report.drop(columns='orgId', inplace=True) # we already store this off using our 'org_id' column
# map v2 column names to v1 column names
report.rename(
columns={
'installs': 'conversions',
'latOnInstalls': 'conversionsLATon',
'latOffInstalls': 'conversionsLAToff',
'newDownloads': 'conversionsNewDownloads',
'redownloads': 'conversionsRedownloads'
},
inplace=True
)
report['app_display_name'] = report.adamId.map(self.task.app_id_display_names)
if not self.task.keep_empty_app_display_names:
report.drop(report.index[report.app_display_name.isnull()], inplace=True)
if 'countriesOrRegions' in report:
report.drop(columns='countriesOrRegions', inplace=True)
if 'countryOrRegionServingStateReasons' in report:
report.drop(columns='countryOrRegionServingStateReasons', inplace=True)
report['platform'] = 'ios'
report['product_id'] = report.adamId.apply(lambda i: str(i) if not pd.isna(i) else None)
self.add_product_canonical_columns(report=report)
class AppleCreativeSetsReportProcessor(AppleReportProcessor):
def process(self):
super().process()
if self.task.report.empty:
return
self.task.report.adGroupCreativeSetId = self.task.report.adGroupCreativeSetId.astype( | pd.Int64Dtype() | pandas.Int64Dtype |
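# Illustration (standalone sketch, not part of the processor above): the v2 -> v1 column
# mapping applied in AppleReportProcessor.process(), shown on a toy report.
import pandas as pd

toy_report = pd.DataFrame({
    "installs": [10], "latOnInstalls": [4], "latOffInstalls": [6],
    "newDownloads": [7], "redownloads": [3],
})
toy_report = toy_report.rename(columns={
    "installs": "conversions",
    "latOnInstalls": "conversionsLATon",
    "latOffInstalls": "conversionsLAToff",
    "newDownloads": "conversionsNewDownloads",
    "redownloads": "conversionsRedownloads",
})
print(list(toy_report.columns))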
import random
import pandas as pd
from tqdm import tqdm
from shared.utils import make_dirs
from shared.utils import load_from_json
import sys
class Training_Data_Generator(object):
""" Class for generating ground-truth dataset used for feature learning
:param random_seed: parameter used for reproducibility
:param num_samples: total number of negative samples
:param neg_type: negative samples type (simple or hard)
:param query_type: query type (faq or user_query)
:param loss_type: the loss type as method used for BERT Fine-tuning (softmax or triplet loss)
:param hard_filepath: the absolut path to hard negatives filepath
"""
def __init__(self, random_seed=5, num_samples=24, neg_type='simple', query_type='faq',
loss_type='triplet', hard_filepath=''):
self.random_seed = random_seed
self.num_samples = num_samples
self.hard_filepath = hard_filepath
self.neg_type = neg_type
self.query_type = query_type
self.loss_type = loss_type
self.pos_labels = []
self.neg_labels = []
self.num_pos_labels = 0
self.num_neg_labels = 0
self.id2qa = dict()
self.id2negids = dict()
self.df = pd.DataFrame()
self.seq_len_df = pd.DataFrame()
self.df_pos = pd.DataFrame()
self.df_neg = pd.DataFrame()
if self.query_type == 'faq':
self.hard_filepath = self.hard_filepath + "/hard_negatives_faq.json"
elif self.query_type == "user_query":
self.hard_filepath = self.hard_filepath + "/hard_negatives_user_query.json"
else:
raise ValueError('error, no query_type found for {}'.format(query_type))
def generate_pos_labels(self, query_answer_pairs):
""" Generate positive labels from qa pairs
        :param query_answer_pairs: list of dicts (query/answer pairs)
:return: list of positive labels
"""
qap_df = pd.DataFrame.from_records(query_answer_pairs)
qap_by_query_type = qap_df[qap_df['query_type'] == self.query_type]
pos_labels = []
for _, row in qap_by_query_type.iterrows():
id = row['id']
qa_pair = {
"id": id,
"label": 1,
"question": row['question'],
"answer": row['answer'],
"query_type": row['query_type']
}
pos_labels.append(qa_pair)
self.id2qa[id] = (qa_pair['question'], qa_pair['answer'], qa_pair['query_type'])
return pos_labels
def get_id2negids(self, id2qa):
""" Generate random negative sample ids for qa pairs
:param id2qa: dictionary (id: key, question-answer (tuple): value)
:return: dictionary (id: key, neg_ids: value)
"""
random.seed(self.random_seed)
id2negids = dict()
total_qa = len(id2qa)
ids = id2qa.keys()
for id, qa in id2qa.items():
neg_ids = random.sample([x for x in ids if x != id and x !=0], self.num_samples)
id2negids[id] = neg_ids
return id2negids
def generate_neg_labels(self, id2negids):
""" Generate negative labels from id2negids
:param id2negids: dictionary (id: key, neg_ids: value)
:return: list of negative labels as dictionaries
"""
neg_labels = []
for k, v in id2negids.items():
for id in v:
neg_label = dict()
neg_label['id'] = str(k)
neg_label['question'] = self.id2qa[k][0]
neg_answer = self.id2qa[id][1]
neg_label['answer'] = neg_answer
neg_label['label'] = 0
neg_label['query_type'] = self.id2qa[id][2]
neg_labels.append(neg_label)
return neg_labels
def get_seq_len_df(self, query_answer_pairs):
""" Get sequence length in dataframe
"""
seq_len = []
for qa in tqdm(query_answer_pairs):
qa['q_len'] = len(qa['question'])
qa['a_len'] = len(qa['answer'])
seq_len.append(qa)
seq_len_df = | pd.DataFrame(seq_len) | pandas.DataFrame |
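# Sketch (hypothetical inputs and path): how the pieces of Training_Data_Generator fit
# together — positives come straight from the query/answer pairs, negatives re-pair each
# query with randomly sampled answers from other ids.
if __name__ == "__main__":
    qa_pairs = [
        {"id": 1, "question": "How do I reset my password?", "answer": "Use the reset link.", "query_type": "faq"},
        {"id": 2, "question": "Where is my invoice?", "answer": "Under Billing > Invoices.", "query_type": "faq"},
    ]
    gen = Training_Data_Generator(num_samples=1, neg_type="simple", query_type="faq",
                                  loss_type="triplet", hard_filepath="data/hard_negatives")
    pos = gen.generate_pos_labels(qa_pairs)
    neg = gen.generate_neg_labels(gen.get_id2negids(gen.id2qa))
    print(len(pos), len(neg))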
import bs4 as bs
from urllib.request import Request, urlopen
import pandas as pd
import os
import re
import sys
website = 'https://www.thecarconnection.com'
template = 'https://images.hgmsites.net/'
def fetch(page, addition=''):
return bs.BeautifulSoup(urlopen(Request(page + addition,
headers={'User-Agent': 'Opera/9.80 (X11; Linux i686; Ub'
'untu/14.10) Presto/2.12.388 Version/12.16'})).read(),
'lxml')
def all_makes():
all_makes_list = []
for a in fetch(website, "/new-cars").find_all("a", {"class": "add-zip"}):
all_makes_list.append(a['href'])
return all_makes_list
def make_menu(listed):
make_menu_list = []
for make in listed:
for div in fetch(website, make).find_all("div", {"class": "name"}):
make_menu_list.append(div.find_all("a")[0]['href'])
return make_menu_list
def model_menu(listed):
model_menu_list = []
for make in listed:
soup = fetch(website, make)
for div in soup.find_all("a", {"class": "btn avail-now first-item"}):
model_menu_list.append(div['href'])
for div in soup.find_all("a", {"class": "btn 1"})[:8]:
model_menu_list.append(div['href'])
model_menu_list = [i.replace('overview', 'specifications') for i in model_menu_list]
return model_menu_list
def specs_and_pics(listed):
picture_tab = [i.replace('specifications', 'photos') for i in listed]
specifications_table = pd.DataFrame()
for row, pic in zip(listed, picture_tab):
soup = fetch(website, row)
specifications_df = pd.DataFrame(columns=[soup.find_all("title")[0].text[:-15]])
try:
specifications_df.loc['Make', :] = soup.find_all('a', {'id': 'a_bc_1'})[0].text.strip()
specifications_df.loc['Model', :] = soup.find_all('a', {'id': 'a_bc_2'})[0].text.strip()
specifications_df.loc['Year', :] = soup.find_all('a', {'id': 'a_bc_3'})[0].text.strip()
specifications_df.loc['MSRP', :] = soup.find_all('span', {'class': 'msrp'})[0].text
except:
print('Problem with {}.'.format(website + row))
for div in soup.find_all("div", {"class": "specs-set-item"}):
row_name = div.find_all("span")[0].text
row_value = div.find_all("span")[1].text
specifications_df.loc[row_name] = row_value
fetch_pics_url = str(fetch(website, pic))
try:
for ix, photo in enumerate(re.findall('sml.+?_s.jpg', fetch_pics_url)[:150], 1):
specifications_df.loc[f'Picture {ix}', :] = photo.replace('\\', '')
specifications_table = pd.concat([specifications_table, specifications_df], axis=1, sort=False)
except:
print('Error with {}.'.format(template + photo))
return specifications_table
def run(directory):
os.chdir(directory)
a = all_makes()
b = make_menu(a)
c = model_menu(b)
pd.DataFrame(c).to_csv('c.csv', header=None)
d = | pd.read_csv('c.csv', index_col=0, header=None) | pandas.read_csv |
import itertools
from math import sqrt
from typing import List, Sequence
import torch
import torch.nn.functional as F
# import torch should be first. Unclear issue, mentionned here: https://github.com/pytorch/pytorch/issues/2083
import numpy as np
import os
import csv
import time
import heapq
import fiona # keep this import. it sets GDAL_DATA to right value
import rasterio
from PIL import Image
import torchvision
import ttach as tta
from collections import OrderedDict, defaultdict
import pandas as pd
import geopandas as gpd
from fiona.crs import to_string
from omegaconf.errors import ConfigKeyError
from tqdm import tqdm
from rasterio import features
from shapely.geometry import Polygon
from rasterio.windows import Window
from rasterio.plot import reshape_as_image
from pathlib import Path
from omegaconf.listconfig import ListConfig
from utils.logger import dict_path
from utils.metrics import ComputePixelMetrics
from models.model_choice import net
from utils import augmentation
from utils.geoutils import vector_to_raster, clip_raster_with_gpkg
from utils.utils import load_from_checkpoint, get_device_ids, get_key_def, \
list_input_images, add_metadata_from_raster_to_sample, _window_2D, read_modalities, find_first_file
from utils.verifications import add_background_to_num_class, validate_num_classes, assert_crs_match
try:
import boto3
except ModuleNotFoundError:
pass
# Set the logging file
from utils import utils
logging = utils.get_logger(__name__)
def _pad_diff(arr, w, h, arr_shape):
""" Pads img_arr width or height < samples_size with zeros """
w_diff = arr_shape - w
h_diff = arr_shape - h
if len(arr.shape) > 2:
padded_arr = np.pad(arr, ((0, w_diff), (0, h_diff), (0, 0)), "constant", constant_values=np.nan)
else:
padded_arr = np.pad(arr, ((0, w_diff), (0, h_diff)), "constant", constant_values=np.nan)
return padded_arr
def _pad(arr, chunk_size):
""" Pads img_arr """
aug = int(round(chunk_size * (1 - 1.0 / 2.0)))
if len(arr.shape) > 2:
padded_arr = np.pad(arr, ((aug, aug), (aug, aug), (0, 0)), mode='reflect')
else:
padded_arr = np.pad(arr, ((aug, aug), (aug, aug)), mode='reflect')
    return padded_arr
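# --- Hedged illustration of the two padding helpers (not called by the pipeline) ---
# The array size and chunk_size below are arbitrary; only the resulting shapes matter.
def _example_padding():
    arr = np.zeros((100, 80, 3))
    diff_padded = _pad_diff(arr, w=100, h=80, arr_shape=128)  # pad bottom/right up to (128, 128, 3)
    reflect_padded = _pad(arr, chunk_size=128)                # reflect-pad 64 px per side -> (228, 208, 3)
    return diff_padded.shape, reflect_padded.shape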
def ras2vec(raster_file, output_path):
# Create a generic polygon schema for the output vector file
i = 0
feat_schema = {'geometry': 'Polygon',
'properties': OrderedDict([('value', 'int')])
}
class_value_domain = set()
out_features = []
print(" - Processing raster file: {}".format(raster_file))
with rasterio.open(raster_file, 'r') as src:
raster = src.read(1)
mask = raster != 0
# Vectorize the polygons
polygons = features.shapes(raster, mask, transform=src.transform)
        # Create shapely polygon features
for polygon in polygons:
feature = {'geometry': {
'type': 'Polygon',
'coordinates': None},
'properties': OrderedDict([('value', 0)])}
feature['geometry']['coordinates'] = polygon[0]['coordinates']
value = int(polygon[1]) # Pixel value of the class (layer)
class_value_domain.add(value)
feature['properties']['value'] = value
i += 1
out_features.append(feature)
print(" - Writing output vector file: {}".format(output_path))
num_layers = list(class_value_domain) # Number of unique pixel value
for num_layer in num_layers:
polygons = [feature for feature in out_features if feature['properties']['value'] == num_layer]
layer_name = 'vector_' + str(num_layer).rjust(3, '0')
print(" - Writing layer: {}".format(layer_name))
with fiona.open(output_path, 'w',
crs=to_string(src.crs),
layer=layer_name,
schema=feat_schema,
driver='GPKG') as dest:
for polygon in polygons:
dest.write(polygon)
print("")
print("Number of features written: {}".format(i))
def gen_img_samples(src, chunk_size, step, *band_order):
"""
Args:
src: input image (rasterio object)
chunk_size: image tile size
step: stride used during inference (in pixels)
*band_order: ignore
Returns: generator object
"""
for row in range(0, src.height, step):
for column in range(0, src.width, step):
window = Window.from_slices(slice(row, row + chunk_size),
slice(column, column + chunk_size))
if band_order:
window_array = reshape_as_image(src.read(band_order[0], window=window))
else:
window_array = reshape_as_image(src.read(window=window))
if window_array.shape[0] < chunk_size or window_array.shape[1] < chunk_size:
window_array = _pad_diff(window_array, window_array.shape[0], window_array.shape[1], chunk_size)
window_array = _pad(window_array, chunk_size)
yield window_array, row, column
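# --- Hedged usage sketch for gen_img_samples (illustrative only) ---
# 'example.tif' and the chunk/step values are assumptions; the real values come
# from the surrounding inference code (GPU-based chunk size, step = chunk_size / 2).
def _example_iterate_tiles(image_path='example.tif', chunk_size=512):
    step = int(chunk_size / 2)
    with rasterio.open(image_path) as src:
        for tile, row, col in gen_img_samples(src, chunk_size, step):
            print(tile.shape, row, col)  # padded tile (2 * chunk_size square) and its top-left offset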
@torch.no_grad()
def segmentation(param,
input_image,
label_arr,
num_classes: int,
gpkg_name,
model,
chunk_size: int,
device,
scale: List,
BGR_to_RGB: bool,
tp_mem,
debug=False,
):
"""
Args:
param: parameter dict
input_image: opened image (rasterio object)
label_arr: numpy array of label if available
num_classes: number of classes
gpkg_name: geo-package name if available
model: model weights
chunk_size: image tile size
device: cuda/cpu device
scale: scale range
BGR_to_RGB: True/False
tp_mem: memory temp file for saving numpy array to disk
debug: True/False
Returns:
"""
xmin, ymin, xmax, ymax = (input_image.bounds.left,
input_image.bounds.bottom,
input_image.bounds.right,
input_image.bounds.top)
xres, yres = (abs(input_image.transform.a), abs(input_image.transform.e))
mx = chunk_size * xres
my = chunk_size * yres
padded = chunk_size * 2
h = input_image.height
w = input_image.width
h_ = h + padded
w_ = w + padded
dist_samples = int(round(chunk_size * (1 - 1.0 / 2.0)))
# switch to evaluate mode
model.eval()
# initialize test time augmentation
transforms = tta.Compose([tta.HorizontalFlip(), ])
# construct window for smoothing
WINDOW_SPLINE_2D = _window_2D(window_size=padded, power=2.0)
WINDOW_SPLINE_2D = torch.as_tensor(np.moveaxis(WINDOW_SPLINE_2D, 2, 0), ).type(torch.float)
WINDOW_SPLINE_2D = WINDOW_SPLINE_2D.to(device)
fp = np.memmap(tp_mem, dtype='float16', mode='w+', shape=(h_, w_, num_classes))
sample = {'sat_img': None, 'map_img': None, 'metadata': None}
cnt = 0
subdiv = 2
step = int(chunk_size / subdiv)
total_inf_windows = int(np.ceil(input_image.height / step) * np.ceil(input_image.width / step))
img_gen = gen_img_samples(src=input_image,
chunk_size=chunk_size,
step=step)
start_seg = time.time()
print_log = True
for img in tqdm(img_gen, position=1, leave=False,
desc=f'Inferring on window slices of size {chunk_size}',
total=total_inf_windows):
row = img[1]
col = img[2]
sub_image = img[0]
image_metadata = add_metadata_from_raster_to_sample(sat_img_arr=sub_image,
raster_handle=input_image,
raster_info={})
sample['metadata'] = image_metadata
totensor_transform = augmentation.compose_transforms(param,
dataset="tst",
input_space=BGR_to_RGB,
scale=scale,
aug_type='totensor',
print_log=print_log)
sample['sat_img'] = sub_image
sample = totensor_transform(sample)
inputs = sample['sat_img'].unsqueeze_(0)
inputs = inputs.to(device)
if inputs.shape[1] == 4 and any("module.modelNIR" in s for s in model.state_dict().keys()):
############################
# Test Implementation of the NIR
############################
# Init NIR TODO: make a proper way to read the NIR channel
# and put an option to be able to give the idex of the NIR channel
# Extract the NIR channel -> [batch size, H, W] since it's only one channel
inputs_NIR = inputs[:, -1, ...]
# add a channel to get the good size -> [:, 1, :, :]
inputs_NIR.unsqueeze_(1)
# take out the NIR channel and take only the RGB for the inputs
inputs = inputs[:, :-1, ...]
# Suggestion of implementation
# inputs_NIR = data['NIR'].to(device)
inputs = [inputs, inputs_NIR]
# outputs = model(inputs, inputs_NIR)
############################
# End of the test implementation module
############################
output_lst = []
for transformer in transforms:
# augment inputs
augmented_input = transformer.augment_image(inputs)
augmented_output = model(augmented_input)
if isinstance(augmented_output, OrderedDict) and 'out' in augmented_output.keys():
augmented_output = augmented_output['out']
logging.debug(f'Shape of augmented output: {augmented_output.shape}')
# reverse augmentation for outputs
deaugmented_output = transformer.deaugment_mask(augmented_output)
deaugmented_output = F.softmax(deaugmented_output, dim=1).squeeze(dim=0)
output_lst.append(deaugmented_output)
outputs = torch.stack(output_lst)
outputs = torch.mul(outputs, WINDOW_SPLINE_2D)
outputs, _ = torch.max(outputs, dim=0)
outputs = outputs.permute(1, 2, 0)
outputs = outputs.reshape(padded, padded, num_classes).cpu().numpy().astype('float16')
outputs = outputs[dist_samples:-dist_samples, dist_samples:-dist_samples, :]
fp[row:row + chunk_size, col:col + chunk_size, :] = \
fp[row:row + chunk_size, col:col + chunk_size, :] + outputs
cnt += 1
fp.flush()
del fp
fp = np.memmap(tp_mem, dtype='float16', mode='r', shape=(h_, w_, num_classes))
pred_img = np.zeros((h_, w_), dtype=np.uint8)
for row, col in tqdm(itertools.product(range(0, input_image.height, step), range(0, input_image.width, step)),
leave=False,
total=total_inf_windows,
desc="Writing to array"):
arr1 = fp[row:row + chunk_size, col:col + chunk_size, :] / (2 ** 2)
arr1 = arr1.argmax(axis=-1).astype('uint8')
pred_img[row:row + chunk_size, col:col + chunk_size] = arr1
pred_img = pred_img[:h, :w]
end_seg = time.time() - start_seg
logging.info('Segmentation operation completed in {:.0f}m {:.0f}s'.format(end_seg // 60, end_seg % 60))
if debug:
logging.debug(f'Bin count of final output: {np.unique(pred_img, return_counts=True)}')
gdf = None
if label_arr is not None:
start_seg_ = time.time()
feature = defaultdict(list)
cnt = 0
for row in tqdm(range(0, h, chunk_size), position=2, leave=False):
for col in tqdm(range(0, w, chunk_size), position=3, leave=False):
label = label_arr[row:row + chunk_size, col:col + chunk_size]
pred = pred_img[row:row + chunk_size, col:col + chunk_size]
pixelMetrics = ComputePixelMetrics(label.flatten(), pred.flatten(), num_classes)
eval = pixelMetrics.update(pixelMetrics.iou)
feature['id_image'].append(gpkg_name)
for c_num in range(num_classes):
feature['L_count_' + str(c_num)].append(int(np.count_nonzero(label == c_num)))
feature['P_count_' + str(c_num)].append(int(np.count_nonzero(pred == c_num)))
feature['IoU_' + str(c_num)].append(eval['iou_' + str(c_num)])
feature['mIoU'].append(eval['macro_avg_iou'])
x_1, y_1 = (xmin + (col * xres)), (ymax - (row * yres))
x_2, y_2 = (xmin + ((col * xres) + mx)), y_1
x_3, y_3 = x_2, (ymax - ((row * yres) + my))
x_4, y_4 = x_1, y_3
geom = Polygon([(x_1, y_1), (x_2, y_2), (x_3, y_3), (x_4, y_4)])
feature['geometry'].append(geom)
feature['length'].append(geom.length)
feature['pointx'].append(geom.centroid.x)
feature['pointy'].append(geom.centroid.y)
feature['area'].append(geom.area)
cnt += 1
gdf = gpd.GeoDataFrame(feature, crs=input_image.crs)
end_seg_ = time.time() - start_seg_
logging.info('Benchmark operation completed in {:.0f}m {:.0f}s'.format(end_seg_ // 60, end_seg_ % 60))
input_image.close()
return pred_img, gdf
def classifier(params, img_list, model, device, working_folder):
"""
Classify images by class
:param params:
:param img_list:
:param model:
:param device:
:return:
"""
weights_file_name = params['inference']['state_dict_path']
num_classes = params['global']['num_classes']
bucket = params['global']['bucket_name']
classes_file = weights_file_name.split('/')[:-1]
if bucket:
class_csv = ''
for folder in classes_file:
class_csv = os.path.join(class_csv, folder)
bucket.download_file(os.path.join(class_csv, 'classes.csv'), 'classes.csv')
with open('classes.csv', 'rt') as file:
reader = csv.reader(file)
classes = list(reader)
else:
class_csv = ''
for c in classes_file:
class_csv = class_csv + c + '/'
with open(class_csv + 'classes.csv', 'rt') as f:
reader = csv.reader(f)
classes = list(reader)
classified_results = np.empty((0, 2 + num_classes))
for image in img_list:
img_name = os.path.basename(image['tif']) # TODO: pathlib
model.eval()
if bucket:
img = Image.open(f"Images/{img_name}").resize((299, 299), resample=Image.BILINEAR)
else:
img = Image.open(image['tif']).resize((299, 299), resample=Image.BILINEAR)
to_tensor = torchvision.transforms.ToTensor()
img = to_tensor(img)
img = img.unsqueeze(0)
with torch.no_grad():
img = img.to(device)
outputs = model(img)
_, predicted = torch.max(outputs, 1)
top5 = heapq.nlargest(5, outputs.cpu().numpy()[0])
top5_loc = []
for i in top5:
top5_loc.append(np.where(outputs.cpu().numpy()[0] == i)[0][0])
logging.info(f"Image {img_name} classified as {classes[0][predicted]}")
logging.info('Top 5 classes:')
for i in range(0, 5):
logging.info(f"\t{classes[0][top5_loc[i]]} : {top5[i]}")
classified_results = np.append(classified_results, [np.append([image['tif'], classes[0][predicted]],
outputs.cpu().numpy()[0])], axis=0)
csv_results = 'classification_results.csv'
if bucket:
np.savetxt(csv_results, classified_results, fmt='%s', delimiter=',')
bucket.upload_file(csv_results, os.path.join(working_folder, csv_results)) # TODO: pathlib
else:
np.savetxt(os.path.join(working_folder, csv_results), classified_results, fmt='%s', # TODO: pathlib
delimiter=',')
def calc_inference_chunk_size(gpu_devices_dict: dict, max_pix_per_mb_gpu: int = 200):
"""
Calculate maximum chunk_size that could fit on GPU during inference based on thumb rule with hardcoded
"pixels per MB of GPU RAM" as threshold. Threshold based on inference with a large model (Deeplabv3_resnet101)
:param gpu_devices_dict: dictionary containing info on GPU devices as returned by lst_device_ids (utils.py)
:param max_pix_per_mb_gpu: Maximum number of pixels that can fit on each MB of GPU (better to underestimate)
:return: returns a downgraded evaluation batch size if the original batch size is considered too high
"""
# get max ram for smallest gpu
smallest_gpu_ram = min(gpu_info['max_ram'] for _, gpu_info in gpu_devices_dict.items())
# rule of thumb to determine max chunk size based on approximate max pixels a gpu can handle during inference
max_chunk_size = sqrt(max_pix_per_mb_gpu * smallest_gpu_ram)
max_chunk_size_rd = int(max_chunk_size - (max_chunk_size % 256))
logging.info(f'Images will be split into chunks of {max_chunk_size_rd}')
return max_chunk_size_rd
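# --- Hedged example of the chunk-size rule of thumb (made-up GPU numbers) ---
# The dict only needs the 'max_ram' entry (in MB) that the function reads; the
# 8000/12000 MB values below are illustrative, not project defaults.
def _example_chunk_size():
    fake_gpu_devices = {0: {'max_ram': 8000}, 1: {'max_ram': 12000}}
    return calc_inference_chunk_size(fake_gpu_devices, max_pix_per_mb_gpu=50)  # -> 512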
def main(params: dict) -> None:
"""
Function to manage details about the inference on segmentation task.
1. Read the parameters from the config given.
2. Read and load the state dict from the previous training or the given one.
3. Make the inference on the data specifies in the config.
-------
:param params: (dict) Parameters found in the yaml config file.
"""
# since = time.time()
# PARAMETERS
mode = get_key_def('mode', params, expected_type=str)
task = get_key_def('task_name', params['task'], expected_type=str)
model_name = get_key_def('model_name', params['model'], expected_type=str).lower()
num_classes = len(get_key_def('classes_dict', params['dataset']).keys())
modalities = read_modalities(get_key_def('modalities', params['dataset'], expected_type=str))
BGR_to_RGB = get_key_def('BGR_to_RGB', params['dataset'], expected_type=bool)
num_bands = len(modalities)
debug = get_key_def('debug', params, default=False, expected_type=bool)
# SETTING OUTPUT DIRECTORY
try:
state_dict = Path(params['inference']['state_dict_path']).resolve(strict=True)
except FileNotFoundError:
logging.info(
f"\nThe state dict path directory '{params['inference']['state_dict_path']}' don't seem to be find," +
f"we will try to locate a state dict path in the '{params['general']['save_weights_dir']}' " +
f"specify during the training phase"
)
try:
state_dict = Path(params['general']['save_weights_dir']).resolve(strict=True)
except FileNotFoundError:
            logging.critical(
                f"\nThe state dict path directory '{params['general']['save_weights_dir']}'" +
                f" doesn't seem to exist either; please specify the path to a state dict"
            )
            raise
# TODO add more detail in the parent folder
working_folder = state_dict.parent.joinpath(f'inference_{num_bands}bands')
logging.info("\nThe state dict path directory used '{}'".format(working_folder))
Path.mkdir(working_folder, parents=True, exist_ok=True)
# LOGGING PARAMETERS TODO put option not just mlflow
experiment_name = get_key_def('project_name', params['general'], default='gdl-training')
try:
tracker_uri = get_key_def('uri', params['tracker'], default=None, expected_type=str)
Path(tracker_uri).mkdir(exist_ok=True)
run_name = get_key_def('run_name', params['tracker'], default='gdl') # TODO change for something meaningful
run_name = '{}_{}_{}'.format(run_name, mode, task)
logging.info(f'\nInference and log files will be saved to: {working_folder}')
        # TODO change to fit whatever import
from mlflow import log_params, set_tracking_uri, set_experiment, start_run, log_artifact, log_metrics
# tracking path + parameters logging
set_tracking_uri(tracker_uri)
set_experiment(experiment_name)
start_run(run_name=run_name)
log_params(dict_path(params, 'general'))
log_params(dict_path(params, 'dataset'))
log_params(dict_path(params, 'data'))
log_params(dict_path(params, 'model'))
log_params(dict_path(params, 'inference'))
    # meaning no logging tracker has been assigned or it doesn't exist in config/logging
except ConfigKeyError:
logging.info(
"\nNo logging tracker as been assigned or the yaml config doesnt exist in 'config/tracker'."
"\nNo tracker file will be save in that case."
)
# MANDATORY PARAMETERS
img_dir_or_csv = get_key_def(
'img_dir_or_csv_file', params['inference'], default=params['general']['raw_data_csv'], expected_type=str
)
if not (Path(img_dir_or_csv).is_dir() or Path(img_dir_or_csv).suffix == '.csv'):
        err_msg = f'\nCouldn\'t locate .csv file or directory "{img_dir_or_csv}" containing imagery for inference'
        logging.critical(err_msg)
        raise FileNotFoundError(err_msg)
# load the checkpoint
try:
# Sort by modification time (mtime) descending
sorted_by_mtime_descending = sorted(
[os.path.join(state_dict, x) for x in os.listdir(state_dict)], key=lambda t: -os.stat(t).st_mtime
)
last_checkpoint_save = find_first_file('checkpoint.pth.tar', sorted_by_mtime_descending)
if last_checkpoint_save is None:
raise FileNotFoundError
# change the state_dict
state_dict = last_checkpoint_save
except FileNotFoundError as e:
logging.error(f"\nNo file name 'checkpoint.pth.tar' as been found at '{state_dict}'")
raise e
task = get_key_def('task_name', params['task'], expected_type=str)
# TODO change it next version for all task
if task not in ['classification', 'segmentation']:
        logging.critical(f'\nTask should be either "classification" or "segmentation". Got {task}')
        raise ValueError(f'Task should be either "classification" or "segmentation". Got {task}')
# OPTIONAL PARAMETERS
dontcare_val = get_key_def("ignore_index", params["training"], default=-1, expected_type=int)
num_devices = get_key_def('num_gpus', params['training'], default=0, expected_type=int)
default_max_used_ram = 25
max_used_ram = get_key_def('max_used_ram', params['training'], default=default_max_used_ram, expected_type=int)
max_used_perc = get_key_def('max_used_perc', params['training'], default=25, expected_type=int)
scale = get_key_def('scale_data', params['augmentation'], default=[0, 1], expected_type=ListConfig)
raster_to_vec = get_key_def('ras2vec', params['inference'], False) # FIXME not implemented with hydra
# benchmark (ie when gkpgs are inputted along with imagery)
dontcare = get_key_def("ignore_index", params["training"], -1)
attribute_field = get_key_def('attribute_field', params['dataset'], None, expected_type=str)
attr_vals = get_key_def('attribute_values', params['dataset'], None, expected_type=Sequence)
if debug:
logging.warning(f'\nDebug mode activated. Some debug features may mobilize extra disk space and '
f'cause delays in execution.')
# Assert that all values are integers (ex.: to benchmark single-class model with multi-class labels)
if attr_vals:
for item in attr_vals:
if not isinstance(item, int):
raise ValueError(f'\nValue "{item}" in attribute_values is {type(item)}, expected int.')
logging.info(f'\nInferences will be saved to: {working_folder}\n\n')
if not (0 <= max_used_ram <= 100):
logging.warning(f'\nMax used ram parameter should be a percentage. Got {max_used_ram}. '
f'Will set default value of {default_max_used_ram} %')
max_used_ram = default_max_used_ram
# AWS
bucket = None
bucket_file_cache = []
bucket_name = get_key_def('bucket_name', params['AWS'])
# list of GPU devices that are available and unused. If no GPUs, returns empty dict
gpu_devices_dict = get_device_ids(num_devices,
max_used_ram_perc=max_used_ram,
max_used_perc=max_used_perc)
if gpu_devices_dict:
chunk_size = calc_inference_chunk_size(gpu_devices_dict=gpu_devices_dict, max_pix_per_mb_gpu=50)
logging.info(f"\nNumber of cuda devices requested: {num_devices}. "
f"\nCuda devices available: {gpu_devices_dict}. "
f"\nUsing {list(gpu_devices_dict.keys())[0]}\n\n")
device = torch.device(f'cuda:{list(range(len(gpu_devices_dict.keys())))[0]}')
else:
chunk_size = get_key_def('chunk_size', params['inference'], default=512, expected_type=int)
logging.warning(f"\nNo Cuda device available. This process will only run on CPU")
device = torch.device('cpu')
# CONFIGURE MODEL
num_classes_backgr = add_background_to_num_class(task, num_classes)
model, loaded_checkpoint, model_name = net(model_name=model_name,
num_bands=num_bands,
num_channels=num_classes_backgr,
dontcare_val=dontcare_val,
num_devices=1,
net_params=params,
inference_state_dict=state_dict)
try:
model.to(device)
except RuntimeError:
logging.info(f"\nUnable to use device. Trying device 0")
device = torch.device(f'cuda' if gpu_devices_dict else 'cpu')
model.to(device)
# CREATE LIST OF INPUT IMAGES FOR INFERENCE
try:
# check if the data folder exist
raw_data_dir = get_key_def('raw_data_dir', params['dataset'])
my_data_path = Path(raw_data_dir).resolve(strict=True)
logging.info("\nImage directory used '{}'".format(my_data_path))
data_path = Path(my_data_path)
except FileNotFoundError:
raw_data_dir = get_key_def('raw_data_dir', params['dataset'])
        logging.critical("\nImage directory '{}' doesn't exist, please change the path".format(raw_data_dir))
        raise
list_img = list_input_images(
img_dir_or_csv, bucket_name, glob_patterns=["*.tif", "*.TIF"], in_case_of_path=str(data_path)
)
# VALIDATION: anticipate problems with imagery and label (if provided) before entering main for loop
valid_gpkg_set = set()
for info in tqdm(list_img, desc='Validating imagery'):
# validate_raster(info['tif'], num_bands, meta_map)
if 'gpkg' in info.keys() and info['gpkg'] and info['gpkg'] not in valid_gpkg_set:
validate_num_classes(vector_file=info['gpkg'],
num_classes=num_classes,
attribute_name=attribute_field,
ignore_index=dontcare,
attribute_values=attr_vals)
assert_crs_match(info['tif'], info['gpkg'])
valid_gpkg_set.add(info['gpkg'])
logging.info('\nSuccessfully validated imagery')
if valid_gpkg_set:
logging.info('\nSuccessfully validated label data for benchmarking')
if task == 'classification':
classifier(params, list_img, model, device,
working_folder) # FIXME: why don't we load from checkpoint in classification?
elif task == 'segmentation':
gdf_ = []
gpkg_name_ = []
# TODO: Add verifications?
if bucket:
bucket.download_file(loaded_checkpoint, "saved_model.pth.tar") # TODO: is this still valid?
model, _ = load_from_checkpoint("saved_model.pth.tar", model)
else:
model, _ = load_from_checkpoint(loaded_checkpoint, model)
# Save tracking TODO put option not just mlflow
if 'tracker_uri' in locals() and 'run_name' in locals():
mode = get_key_def('mode', params, expected_type=str)
task = get_key_def('task_name', params['task'], expected_type=str)
run_name = '{}_{}_{}'.format(run_name, mode, task)
# tracking path + parameters logging
set_tracking_uri(tracker_uri)
set_experiment(experiment_name)
start_run(run_name=run_name)
log_params(dict_path(params, 'inference'))
log_params(dict_path(params, 'dataset'))
log_params(dict_path(params, 'model'))
# LOOP THROUGH LIST OF INPUT IMAGES
for info in tqdm(list_img, desc='Inferring from images', position=0, leave=True):
img_name = Path(info['tif']).name
local_gpkg = Path(info['gpkg']) if 'gpkg' in info.keys() and info['gpkg'] else None
gpkg_name = local_gpkg.stem if local_gpkg else None
if bucket:
local_img = f"Images/{img_name}"
bucket.download_file(info['tif'], local_img)
inference_image = f"Classified_Images/{img_name.split('.')[0]}_inference.tif"
else:
local_img = Path(info['tif'])
Path.mkdir(working_folder.joinpath(local_img.parent.name), parents=True, exist_ok=True)
inference_image = working_folder.joinpath(local_img.parent.name,
f"{img_name.split('.')[0]}_inference.tif")
temp_file = working_folder.joinpath(local_img.parent.name, f"{img_name.split('.')[0]}.dat")
raster = rasterio.open(local_img, 'r')
logging.info(f'\nReading original image: {raster.name}')
inf_meta = raster.meta
label = None
if local_gpkg:
logging.info(f'\nBurning label as raster: {local_gpkg}')
local_img = clip_raster_with_gpkg(raster, local_gpkg)
raster.close()
raster = rasterio.open(local_img, 'r')
logging.info(f'\nReading clipped image: {raster.name}')
inf_meta = raster.meta
label = vector_to_raster(vector_file=local_gpkg,
input_image=raster,
out_shape=(inf_meta['height'], inf_meta['width']),
attribute_name=attribute_field,
fill=0, # background value in rasterized vector.
attribute_values=attr_vals)
if debug:
logging.debug(f'\nUnique values in loaded label as raster: {np.unique(label)}\n'
f'Shape of label as raster: {label.shape}')
pred, gdf = segmentation(param=params,
input_image=raster,
label_arr=label,
num_classes=num_classes_backgr,
gpkg_name=gpkg_name,
model=model,
chunk_size=chunk_size,
device=device,
scale=scale,
BGR_to_RGB=BGR_to_RGB,
tp_mem=temp_file,
debug=debug)
if gdf is not None:
gdf_.append(gdf)
gpkg_name_.append(gpkg_name)
if local_gpkg and 'tracker_uri' in locals():
pixelMetrics = ComputePixelMetrics(label, pred, num_classes_backgr)
log_metrics(pixelMetrics.update(pixelMetrics.iou))
log_metrics(pixelMetrics.update(pixelMetrics.dice))
pred = pred[np.newaxis, :, :].astype(np.uint8)
inf_meta.update({"driver": "GTiff",
"height": pred.shape[1],
"width": pred.shape[2],
"count": pred.shape[0],
"dtype": 'uint8',
"compress": 'lzw'})
logging.info(f'\nSuccessfully inferred on {img_name}\nWriting to file: {inference_image}')
with rasterio.open(inference_image, 'w+', **inf_meta) as dest:
dest.write(pred)
del pred
try:
temp_file.unlink()
except OSError as e:
logging.warning(f'File Error: {temp_file, e.strerror}')
if raster_to_vec:
start_vec = time.time()
inference_vec = working_folder.joinpath(local_img.parent.name,
f"{img_name.split('.')[0]}_inference.gpkg")
ras2vec(inference_image, inference_vec)
end_vec = time.time() - start_vec
logging.info('Vectorization completed in {:.0f}m {:.0f}s'.format(end_vec // 60, end_vec % 60))
if len(gdf_) >= 1:
if not len(gdf_) == len(gpkg_name_):
            logging.critical('\nBenchmarking unable to complete: prediction and label table counts differ')
            raise ValueError('Benchmarking unable to complete: prediction and label table counts differ')
        all_gdf = pd.concat(gdf_)
import os
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Union, Optional, List, Dict
from tqdm import tqdm
from .basic_predictor import BasicPredictor
from .utils import inverse_preprocess_data
from common_utils_dev import to_parquet, to_abs_path
COMMON_CONFIG = {
"data_dir": to_abs_path(__file__, "../../../storage/dataset/dataset/v001/train"),
"exp_dir": to_abs_path(__file__, "../../../storage/experiments/v001"),
"test_data_dir": to_abs_path(
__file__, "../../../storage/dataset/dataset/v001/test"
),
}
DATA_CONFIG = {
"checkpoint_dir": "./check_point",
"generate_output_dir": "./generated_output",
"base_feature_assets": ["BTC-USDT"],
}
MODEL_CONFIG = {
"lookback_window": 120,
"batch_size": 512,
"lr": 0.0001,
"epochs": 10,
"print_epoch": 1,
"print_iter": 50,
"save_epoch": 1,
"criterion": "l2",
"criterion_params": {},
"load_strict": False,
"model_name": "BackboneV1",
"model_params": {
"in_channels": 86,
"n_blocks": 5,
"n_block_layers": 10,
"growth_rate": 12,
"dropout": 0.1,
"channel_reduction": 0.5,
"activation": "tanhexp",
"normalization": "bn",
"seblock": True,
"sablock": True,
},
}
class PredictorV1(BasicPredictor):
"""
Functions:
train(): train the model with train_data
generate(save_dir: str): generate predictions & labels with test_data
predict(X: torch.Tensor): gemerate prediction with given data
"""
def __init__(
self,
data_dir=COMMON_CONFIG["data_dir"],
test_data_dir=COMMON_CONFIG["test_data_dir"],
d_config={},
m_config={},
exp_dir=COMMON_CONFIG["exp_dir"],
device="cuda",
pin_memory=False,
num_workers=8,
mode="train",
default_d_config=DATA_CONFIG,
default_m_config=MODEL_CONFIG,
):
super().__init__(
data_dir=data_dir,
test_data_dir=test_data_dir,
d_config=d_config,
m_config=m_config,
exp_dir=exp_dir,
device=device,
pin_memory=pin_memory,
num_workers=num_workers,
mode=mode,
default_d_config=default_d_config,
default_m_config=default_m_config,
)
def _invert_to_prediction(self, pred_abs_factor, pred_sign_factor):
multiply = ((pred_sign_factor >= 0.5) * 1.0) + ((pred_sign_factor < 0.5) * -1.0)
return pred_abs_factor * multiply
def _compute_train_loss(self, train_data_dict):
# Set train mode
self.model.train()
self.model.zero_grad()
# Set loss
pred_abs_factor, pred_sign_factor = self.model(
x=train_data_dict["X"], id=train_data_dict["ID"]
)
# Y loss
loss = self.criterion(pred_abs_factor, train_data_dict["Y"].view(-1).abs()) * 10
loss += self.binary_criterion(
pred_sign_factor, (train_data_dict["Y"].view(-1) >= 0) * 1.0
)
return (
loss,
self._invert_to_prediction(
pred_abs_factor=pred_abs_factor, pred_sign_factor=pred_sign_factor
),
)
def _compute_test_loss(self, test_data_dict):
# Set eval mode
self.model.eval()
# Set loss
pred_abs_factor, pred_sign_factor = self.model(
x=test_data_dict["X"], id=test_data_dict["ID"]
)
# Y loss
loss = self.criterion(pred_abs_factor, test_data_dict["Y"].view(-1).abs()) * 10
loss += self.binary_criterion(
pred_sign_factor, (test_data_dict["Y"].view(-1) >= 0) * 1.0
)
return (
loss,
self._invert_to_prediction(
pred_abs_factor=pred_abs_factor, pred_sign_factor=pred_sign_factor
),
)
def _step(self, train_data_dict):
loss, _ = self._compute_train_loss(train_data_dict=train_data_dict)
loss.backward()
self.optimizer.step()
return loss
def _display_info(self, train_loss, test_loss, test_predictions, test_labels):
pred_norm = test_predictions[test_predictions >= 0].abs().mean()
label_norm = test_labels[test_labels >= 0].abs().mean()
# Print loss info
print(
f""" [+] train_loss: {train_loss:.2f}, test_loss: {test_loss:.2f} | [+] pred_norm: {pred_norm:.2f}, label_norm: {label_norm:.2f}"""
)
def _build_abs_bins(self, df):
abs_bins = {}
for column in df.columns:
_, abs_bins[column] = pd.qcut(
df[column].abs(), 10, labels=False, retbins=True
)
abs_bins[column] = np.concatenate([[0], abs_bins[column][1:-1], [np.inf]])
return pd.DataFrame(abs_bins)
def _build_probabilities(self, pred_sign_factor):
return ((pred_sign_factor - 0.5) * 2).abs()
def train(self):
for epoch in range(self.model_config["epochs"]):
if epoch <= self.last_epoch:
continue
for iter_ in tqdm(range(len(self.train_data_loader))):
# Optimize
train_data_dict = self._generate_train_data_dict()
train_loss = self._step(train_data_dict=train_data_dict)
# Display losses
if epoch % self.model_config["print_epoch"] == 0:
if iter_ % self.model_config["print_iter"] == 0:
test_data_dict = self._generate_test_data_dict()
test_loss, test_predictions = self._compute_test_loss(
test_data_dict=test_data_dict
)
self._display_info(
train_loss=train_loss,
test_loss=test_loss,
test_predictions=test_predictions,
test_labels=test_data_dict["Y"],
)
# Store the check-point
if (epoch % self.model_config["save_epoch"] == 0) or (
epoch == self.model_config["epochs"] - 1
):
self._save_model(model=self.model, epoch=epoch)
def generate(self, save_dir=None):
assert self.mode in ("test")
self.model.eval()
if save_dir is None:
save_dir = self.data_config["generate_output_dir"]
# Mutate 1 min to handle logic, entry: open, exit: open
index = self.test_data_loader.dataset.index
index = index.set_levels(index.levels[0] + pd.Timedelta(minutes=1), level=0)
predictions = []
labels = []
probabilities = []
for idx in tqdm(range(len(self.test_data_loader))):
test_data_dict = self._generate_test_data_dict()
pred_abs_factor, pred_sign_factor = self.model(
x=test_data_dict["X"], id=test_data_dict["ID"]
)
preds = self._invert_to_prediction(
pred_abs_factor=pred_abs_factor, pred_sign_factor=pred_sign_factor
)
predictions += preds.view(-1).cpu().tolist()
labels += test_data_dict["Y"].view(-1).cpu().tolist()
probabilities += (
self._build_probabilities(pred_sign_factor=pred_sign_factor)
.view(-1)
.cpu()
.tolist()
)
predictions = (
            pd.Series(predictions, index=index)
import math
import numpy as np
import pandas as pd
from typing import Any, Callable, Dict, List
import pydynamo_brain.analysis as pdAnalysis
from pydynamo_brain.model import FullState
# Provide the length of the branch, and the length to the last branch.
def branchLengths(fullState: FullState, branchIDList: List[str], **kwargs: Any) -> pd.DataFrame:
result = {}
for treeIdx, tree in enumerate(fullState.trees):
fullLengths, lastBranchLengths = [], []
for branchID in branchIDList:
branch = tree.getBranchByID(branchID)
if branch is None:
fullLengths.append(math.nan)
lastBranchLengths.append(math.nan)
else:
full, last = branch.worldLengths()
fullLengths.append(full)
lastBranchLengths.append(last)
result['length_%02d' % (treeIdx + 1)] = fullLengths
result['lengthToLastBranch_%02d' % (treeIdx + 1)] = lastBranchLengths
return pd.DataFrame(data=result, index=branchIDList).sort_index(axis=1)
# For each branch, calculate filo type, and add as an int.
# @see FiloTypes.py for mapping from that to meaning of the values.
def branchType(fullState: FullState, branchIDList: List[str], **kwargs: Any) -> pd.DataFrame:
nTrees = len(fullState.trees)
filoTypes, added, subtracted, transitioned, masterChanged, masterNodes = \
pdAnalysis.addedSubtractedTransitioned(fullState.trees, **kwargs)
intFiloTypes = filoTypes.astype(int)
colNames = [('branchType_%02d' % (i + 1)) for i in range(nTrees)]
    return pd.DataFrame(data=intFiloTypes.T, index=branchIDList, columns=colNames)
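# --- Hedged usage sketch (assumes a loaded pydynamo FullState and branch IDs) ---
# Simply joins the two per-branch tables defined above into one summary frame.
def exampleBranchSummary(fullState: FullState, branchIDList: List[str], **kwargs: Any) -> pd.DataFrame:
    lengths = branchLengths(fullState, branchIDList, **kwargs)
    types = branchType(fullState, branchIDList, **kwargs)
    return pd.concat([lengths, types], axis=1).sort_index(axis=1)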
from itertools import product
import pandas as pd
from pandas.testing import assert_series_equal, assert_frame_equal
import pytest
from solarforecastarbiter.validation import quality_mapping
def test_ok_user_flagged():
assert quality_mapping.DESCRIPTION_MASK_MAPPING['OK'] == 0
assert quality_mapping.DESCRIPTION_MASK_MAPPING['USER FLAGGED'] == 1
def test_description_dict_version_compatibility():
for dict_ in quality_mapping.BITMASK_DESCRIPTION_DICT.values():
assert dict_['VERSION IDENTIFIER 0'] == 1 << 1
assert dict_['VERSION IDENTIFIER 1'] == 1 << 2
assert dict_['VERSION IDENTIFIER 2'] == 1 << 3
def test_latest_version_flag():
# test valid while only identifiers 0 - 2 present
last_identifier = max(
int(vi.split(' ')[-1]) for vi in
quality_mapping.DESCRIPTION_MASK_MAPPING.keys() if
vi.startswith('VERSION IDENTIFIER'))
assert last_identifier == 2
assert (quality_mapping.LATEST_VERSION_FLAG ==
quality_mapping.LATEST_VERSION << 1)
@pytest.mark.parametrize(
'flag_val', quality_mapping.DESCRIPTION_MASK_MAPPING.items())
def test_convert_bool_flags_to_flag_mask(flag_val):
flag, mask = flag_val
mask |= quality_mapping.LATEST_VERSION_FLAG
ser = pd.Series([0, 0, 1, 0, 1])
flags = quality_mapping.convert_bool_flags_to_flag_mask(ser, flag, True)
assert_series_equal(flags, pd.Series([
mask, mask, quality_mapping.LATEST_VERSION_FLAG, mask,
quality_mapping.LATEST_VERSION_FLAG]))
@pytest.mark.parametrize('flag_invert', product(
quality_mapping.DESCRIPTION_MASK_MAPPING.keys(), [True, False]))
def test_convert_bool_flags_to_flag_mask_none(flag_invert):
assert quality_mapping.convert_bool_flags_to_flag_mask(
None, *flag_invert) is None
@pytest.mark.parametrize('flag_invert', product(
quality_mapping.DESCRIPTION_MASK_MAPPING.keys(), [True, False]))
def test_convert_bool_flags_to_flag_mask_adds_latest_version(flag_invert):
ser = pd.Series([0, 0, 0, 1, 1])
flags = quality_mapping.convert_bool_flags_to_flag_mask(
ser, *flag_invert)
assert (flags & quality_mapping.LATEST_VERSION_FLAG).all()
@pytest.fixture()
def ignore_latest_version(mocker):
mocker.patch(
'solarforecastarbiter.validation.quality_mapping.LATEST_VERSION_FLAG',
0)
@pytest.mark.parametrize(
'flag_val', quality_mapping.DESCRIPTION_MASK_MAPPING.items())
def test_convert_bool_flags_to_flag_mask_invert(flag_val,
ignore_latest_version):
flag, mask = flag_val
ser = pd.Series([0, 0, 1, 0, 1])
flags = quality_mapping.convert_bool_flags_to_flag_mask(ser, flag, True)
assert_series_equal(flags, pd.Series([mask, mask, 0, mask, 0]))
@pytest.mark.parametrize(
'flag_val', quality_mapping.DESCRIPTION_MASK_MAPPING.items())
def test_convert_bool_flags_to_flag_mask_no_invert(flag_val,
ignore_latest_version):
flag, mask = flag_val
ser = pd.Series([0, 0, 1, 0, 1])
flags = quality_mapping.convert_bool_flags_to_flag_mask(ser, flag, False)
assert_series_equal(flags, pd.Series([0, 0, mask, 0, mask]))
@pytest.mark.parametrize(
'flag_val', quality_mapping.DESCRIPTION_MASK_MAPPING.items())
def test_mask_flags(flag_val):
flag, mask = flag_val
latest = quality_mapping.LATEST_VERSION_FLAG
mask |= latest
@quality_mapping.mask_flags(flag)
def f():
return pd.Series([True, True, False, False])
out = f(_return_mask=True)
assert_series_equal(out, pd.Series([latest, latest, mask, mask]))
@pytest.mark.parametrize(
'flag_val', quality_mapping.DESCRIPTION_MASK_MAPPING.items())
def test_mask_flags_tuple(flag_val):
flag, mask = flag_val
latest = quality_mapping.LATEST_VERSION_FLAG
mask |= latest
@quality_mapping.mask_flags(flag)
def f():
return pd.Series([True, True, False, False]), None
out = f(_return_mask=True)
assert_series_equal(out[0], pd.Series([latest, latest, mask, mask]))
assert out[1] is None
@pytest.mark.parametrize(
'flag_val', quality_mapping.DESCRIPTION_MASK_MAPPING.items())
def test_mask_flags_noop(flag_val):
flag, mask = flag_val
latest = quality_mapping.LATEST_VERSION_FLAG
mask |= latest
inp = pd.Series([True, True, False, False])
@quality_mapping.mask_flags(flag)
def f():
return inp
out = f()
assert_series_equal(out, inp)
@pytest.mark.parametrize('flag,expected', [
(0b10, 1),
(0b11, 1),
(0b10010, 1),
(0b10010010, 1),
(0b100, 2),
(0b110, 3),
(0b1110001011111, 7)
])
def test_get_version(flag, expected):
assert quality_mapping.get_version(flag) == expected
def test_has_data_been_validated():
flags = pd.Series([0, 1, 2, 7])
out = quality_mapping.has_data_been_validated(flags)
assert_series_equal(out, pd.Series([False, False, True, True]))
@pytest.mark.parametrize('flag,desc,result', [
(0, 'OK', True),
(1, 'OK', False),
(2, 'OK', True),
(3, 'OK', False),
(0, 'USER FLAGGED', False),
(3, 'USER FLAGGED', True),
(0, 'CLEARSKY', False),
(16, 'OK', False),
(1, 'USER FLAGGED', True),
(16, 'NIGHTTIME', True),
(33, 'CLEARSKY', True),
(33, 'NIGHTTIME', False),
(33, ['OK', 'NIGHTTIME'], False),
(33, ('OK', 'CLEARSKY', 'USER FLAGGED'), True),
(2, ('OK', 'NIGHTTIME'), True),
(9297, 'USER FLAGGED', True)
])
def test_check_if_single_value_flagged(flag, desc, result):
flag |= quality_mapping.LATEST_VERSION_FLAG
out = quality_mapping.check_if_single_value_flagged(flag, desc)
assert out == result
@pytest.mark.parametrize('flag', [0, 1])
def test_check_if_single_value_flagged_validation_error(flag):
with pytest.raises(ValueError):
quality_mapping.check_if_single_value_flagged(flag, 'OK')
@pytest.mark.parametrize('desc', [33, b'OK', [1, 2], []])
def test_check_if_single_value_flagged_type_error(desc):
with pytest.raises(TypeError):
quality_mapping.check_if_single_value_flagged(2, desc)
@pytest.mark.parametrize('desc', ['NOPE', 'MAYBE', ['YES', 'NO']])
def test_check_if_single_value_flagged_key_error(desc):
with pytest.raises(KeyError):
quality_mapping.check_if_single_value_flagged(2, desc)
@pytest.mark.parametrize('flags,expected', [
(pd.Series([0, 1, 0]), pd.Series([False, False, False])),
(pd.Series([2, 2, 2]), pd.Series([True, True, True])),
(pd.Series([3, 2, 2]), pd.Series([False, True, True])),
(pd.Series([3, 34, 130]), pd.Series([False, False, False]))
])
def test_which_data_is_ok(flags, expected):
out = quality_mapping.which_data_is_ok(flags)
assert_series_equal(out, expected)
DESCRIPTIONS = ['USER FLAGGED', 'NIGHTTIME', 'CLEARSKY',
'SHADED', 'UNEVEN FREQUENCY', 'LIMITS EXCEEDED',
'CLEARSKY EXCEEDED', 'STALE VALUES', 'INTERPOLATED VALUES',
'CLIPPED VALUES', 'INCONSISTENT IRRADIANCE COMPONENTS',
'DAILY VALIDATION APPLIED']
DERIVED_DESCRIPTIONS = ['DAYTIME', 'DAYTIME STALE VALUES',
'DAYTIME INTERPOLATED VALUES']
@pytest.mark.parametrize('flag,expected', [
(2, pd.Series([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
index=DESCRIPTIONS, dtype=bool)),
(3, pd.Series([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
index=DESCRIPTIONS, dtype=bool)),
(35, pd.Series([1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
index=DESCRIPTIONS, dtype=bool)),
(2 | 1 << 13 | 1 << 12 | 1 << 10,
pd.Series([0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0],
index=DESCRIPTIONS, dtype=bool))
])
def test_check_for_all_descriptions(flag, expected):
out = quality_mapping.check_for_all_descriptions(flag)
assert_series_equal(out, expected)
@pytest.mark.parametrize('flag', [0, 1])
def test_check_for_all_validation_fail(flag):
with pytest.raises(ValueError):
quality_mapping.check_for_all_descriptions(flag)
def test_convert_mask_into_dataframe():
flags = (pd.Series([0, 0, 1, 1 << 12, 1 << 9 | 1 << 7 | 1 << 5]) |
quality_mapping.LATEST_VERSION_FLAG)
columns = DESCRIPTIONS + ['NOT VALIDATED'] + DERIVED_DESCRIPTIONS
expected = pd.DataFrame([[0] * 13 + [1, 0, 0],
[0] * 13 + [1, 0, 0],
[1] + [0] * 12 + [1, 0, 0],
[0] * 9 + [1, 0, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0]],
columns=columns,
dtype=bool)
out = quality_mapping.convert_mask_into_dataframe(flags)
assert_frame_equal(out, expected)
def test_convert_mask_into_dataframe_w_unvalidated():
flags = (pd.Series([0, 0, 1, 1 << 12, 1 << 9 | 1 << 7 | 1 << 5]) |
quality_mapping.LATEST_VERSION_FLAG)
flags.iloc[0] = 0
columns = DESCRIPTIONS + ['NOT VALIDATED'] + DERIVED_DESCRIPTIONS
expected = pd.DataFrame([[0] * 12 + [1, 0, 0, 0],
[0] * 13 + [1, 0, 0],
[1] + [0] * 12 + [1, 0, 0],
[0] * 9 + [1, 0, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0]],
columns=columns,
dtype=bool)
out = quality_mapping.convert_mask_into_dataframe(flags)
assert_frame_equal(out, expected, check_like=True)
def test_convert_mask_into_dataframe_all_unvalidated():
flags = pd.Series([0, 0, 1, 1, 0])
columns = ['NOT VALIDATED']
expected = pd.DataFrame([[1]] * 5,
columns=columns,
dtype=bool)
out = quality_mapping.convert_mask_into_dataframe(flags)
assert_frame_equal(out, expected, check_like=True)
def test_convert_flag_frame_to_strings():
frame = pd.DataFrame({'FIRST': [True, False, False],
'SECOND': [False, False, True],
'THIRD': [True, False, True]})
expected = pd.Series(['FIRST, THIRD', 'OK', 'SECOND, THIRD'])
out = quality_mapping.convert_flag_frame_to_strings(frame)
assert_series_equal(expected, out)
@pytest.mark.parametrize('expected,desc', [
(pd.Series([1, 0, 0, 0], dtype=bool), 'OK'),
(pd.Series([0, 1, 0, 1], dtype=bool), 'USER FLAGGED'),
(pd.Series([0, 0, 1, 0], dtype=bool), 'CLEARSKY EXCEEDED'),
(pd.Series([0, 0, 0, 1], dtype=bool), 'CLEARSKY'),
(pd.Series([0, 0, 0, 1], dtype=bool), 'CLIPPED VALUES'),
(pd.Series([0, 0, 0, 0], dtype=bool), 'STALE VALUES'),
])
def test_check_if_series_flagged(expected, desc):
    flags = pd.Series([2, 3, 2 | 1 << 9, 2 | 1 << 5 | 1 << 12 | 1])
from math import sqrt
import numpy as np
import pandas as pd
from sklearn.cross_decomposition import PLSRegression
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
import Classes.Configurations as cfg
import os
import seaborn as sns; sns.set()
import matplotlib.pyplot as plt
def partial_leasts_square_regression(x, y, train_split_percentage, file_name):
# Define X and Y matrix after cleaned by PCA and Mahalanobis distance
x_df = pd.DataFrame(x)
y_df = pd.DataFrame(y)
# split data to train and test
x_test, x_train, y_test, y_train = train_test_split(x_df, y_df, test_size=train_split_percentage, random_state=0)
# Train one PLS model for each Y parameter
parameters = len(y_df.columns)
models = []
rmsec = []
r2cal = []
rmsecv = []
r2cv = []
for i in range(parameters):
if cfg.sigma_detection:
x_sigma, y_sigma = do_sigma_pls(x_df, y_df.iloc[:, i], train_split_percentage)
if cfg.polarization_test:
x_sigma_r, y_sigma_r = polarization_reducer_by_amplitude_groups(x_sigma, y_sigma)
i_model, i_rmsec, i_r2c, i_rmsecv, i_r2cv = do_pls(x_sigma_r, y_sigma_r, train_split_percentage)
models.append(i_model)
rmsec.append(i_rmsec)
r2cal.append(i_r2c)
rmsecv.append(i_rmsecv)
r2cv.append(i_r2cv)
med_x_pred_polarized(x_sigma_r, y_sigma_r, i_rmsec, i_r2c, i_rmsecv, i_r2cv, i_model,
file_name + '\Figures\sigma_' + y_df.columns[i], i + 200, y_df.columns[i])
sigma_data_to_excel(file_name + '\SigmaReport_' + y_df.columns[i],
pd.concat([x_sigma_r, y_sigma_r], axis=1, sort=False))
else:
i_model, i_rmsec, i_r2c, i_rmsecv, i_r2cv = do_pls(x_sigma, y_sigma, train_split_percentage)
models.append(i_model)
rmsec.append(i_rmsec)
r2cal.append(i_r2c)
rmsecv.append(i_rmsecv)
r2cv.append(i_r2cv)
med_x_pred_sigma(x_sigma, y_sigma, i_rmsec, i_r2c, i_rmsecv, i_r2cv, i_model, file_name + '\Figures\sigma_' + y_df.columns[i], i + 200, y_df.columns[i])
sigma_data_to_excel(file_name + '\SigmaReport_' + y_df.columns[i], pd.concat([x_sigma, y_sigma], axis=1, sort=False))
else:
if cfg.polarization_test:
x_df_r, y_df_r = polarization_reducer_by_amplitude_groups(x_df, y_df.iloc[:, i])
i_model, i_rmsec, i_r2c, i_rmsecv, i_r2cv = do_pls(x_df_r, y_df_r, train_split_percentage)
models.append(i_model)
rmsec.append(i_rmsec)
r2cal.append(i_r2c)
rmsecv.append(i_rmsecv)
r2cv.append(i_r2cv)
med_x_pred_polarized(x_df_r, y_df_r, i_rmsec, i_r2c, i_rmsecv, i_r2cv, i_model,
file_name + '\Figures\polarized_' + y_df.columns[i], i + 200, y_df.columns[i])
else:
i_model, i_rmsec, i_r2c, i_rmsecv, i_r2cv = do_pls(x_df, y_df.iloc[:, i], train_split_percentage)
models.append(i_model)
rmsec.append(i_rmsec)
r2cal.append(i_r2c)
rmsecv.append(i_rmsecv)
r2cv.append(i_r2cv)
df_models_summary = pd.DataFrame(
pd.concat([pd.DataFrame(list(y_df.columns)), pd.DataFrame(rmsec), pd.DataFrame(r2cal), pd.DataFrame(rmsecv), pd.DataFrame(r2cv)], axis=1))
s = pd.Series(['Parameter', 'RMSEC', 'R2CAL', 'RMSECV', 'R2CV'])
df_models_summary = df_models_summary.transpose().set_index(s)
df_y_resume = pd.DataFrame(y_df.describe().dropna())
df_indexes = pd.DataFrame(pd.concat([df_y_resume, df_models_summary], axis=0))
return df_indexes, df_models_summary, df_y_resume, models, x_train, y_train, x_test, y_test
def do_pls(data_x, data_y, train_split_percentage):
latent_variables = []
x_test, x_train, y_test, y_train = train_test_split(data_x, data_y, test_size=train_split_percentage, random_state=0)
for i in range(20):
pls = PLSRegression(n_components=(i + 1), scale=True)
pls.fit(x_train, y_train)
predicted_cv_y = pls.predict(x_test)
mean_squared_error_cv = sqrt(mean_squared_error(y_test, predicted_cv_y))
latent_variables.append(mean_squared_error_cv)
best_factor = np.argmin(latent_variables)
pls2 = PLSRegression(n_components=(best_factor + 1), scale=True)
pls2.fit(x_train, y_train)
predicted_cal = pls2.predict(x_train)
rmsec = sqrt(mean_squared_error(y_train, predicted_cal))
r2c = pls2.score(x_train, y_train)
predicted_cv_y = pls2.predict(x_test)
rmsecv = sqrt(mean_squared_error(y_test, predicted_cv_y))
r2v = pls2.score(x_test, y_test)
plsfinal = PLSRegression(n_components=(best_factor + 1), scale=True)
plsfinal.fit(data_x, data_y)
return plsfinal, rmsec, r2c, rmsecv, r2v
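# --- Hedged usage sketch for do_pls on synthetic data ---
# Shapes, the random seed and the 0.3 split are illustrative; real callers pass
# spectra in data_x and reference values in data_y.
def _example_do_pls():
    rng = np.random.RandomState(0)
    x = pd.DataFrame(rng.rand(200, 25))
    y = pd.Series(x.values @ rng.rand(25))
    model, rmsec, r2c, rmsecv, r2cv = do_pls(x, y, train_split_percentage=0.3)
    return rmsec, r2cv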
def do_sigma_pls(data_x, data_y, train_split_percentage):
latent_variables = []
x_test, x_train, y_test, y_train = train_test_split(data_x, data_y, test_size=train_split_percentage, random_state=0)
for i in range(20):
pls = PLSRegression(n_components=(i + 1), scale=True)
pls.fit(x_train, y_train)
predicted_cv_y = pls.predict(x_test)
mean_squared_error_cv = sqrt(mean_squared_error(y_test, predicted_cv_y))
latent_variables.append(mean_squared_error_cv)
best_factor = np.argmin(latent_variables)
pls_sigma = PLSRegression(n_components=(best_factor + 1), scale=True)
pls_sigma.fit(data_x, data_y)
predicted_cv_y_sigma = pd.DataFrame(pls_sigma.predict(data_x))
data_labels = pd.DataFrame(data_y.index)
data_x = pd.DataFrame(data_x).reset_index(drop=True)
data_y = pd.DataFrame(data_y).reset_index(drop=True)
if cfg.sigma_percentage:
percentual_error = pd.DataFrame(abs(data_y.iloc[:, 0] - predicted_cv_y_sigma.iloc[:, 0]))
percentual_error = pd.DataFrame((percentual_error.iloc[:, 0] * 100) / data_y.iloc[:, 0])
df_x = pd.DataFrame(pd.DataFrame(pd.concat([data_x, percentual_error], axis=1)))
df_x = df_x.drop(df_x[df_x.iloc[:, -1] > cfg.sigma_confidence].index)
df_x.drop(df_x.columns[len(df_x.columns) - 1], axis=1, inplace=True)
df_y = pd.DataFrame(pd.DataFrame(pd.concat([data_y, data_labels, percentual_error], axis=1)))
df_y = df_y.drop(df_y[df_y.iloc[:, -1] > cfg.sigma_confidence].index)
df_x.set_index(df_y.iloc[:, 1], inplace=True)
df_y.set_index(df_x.index, inplace=True)
df_y.drop(df_y.columns[len(df_y.columns) - 1], axis=1, inplace=True)
return df_x, df_y
else:
abs_error = pd.DataFrame(abs(data_y.iloc[:, 0] - predicted_cv_y_sigma.iloc[:, 0]))
df_x = pd.DataFrame(pd.DataFrame(pd.concat([data_x, abs_error], axis=1)))
df_x = df_x.drop(df_x[df_x.iloc[:, -1] > cfg.sigma_confidence].index)
df_x.drop(df_x.columns[len(df_x.columns) - 1], axis=1, inplace=True)
df_y = pd.DataFrame(pd.DataFrame(pd.concat([data_y, abs_error], axis=1)))
df_y = df_y.drop(df_y[df_y.iloc[:, -1] > cfg.sigma_confidence].index)
df_x.set_index(df_y.iloc[:, 1], inplace=True)
df_y.set_index(df_x.index, inplace=True)
df_y.drop(df_y.columns[len(df_y.columns) - 1], axis=1, inplace=True)
return df_x, df_y
def run_pls(x, model):
y_hat = model.predict(x)
return y_hat
def remove_rows_with_zeros(x, y):
    df_x = pd.DataFrame(x)
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path for the configuration file where the time series data values CSV
--outFile: Path for the configuration file where the time series data values INI
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
self.fileLocation = os.path.exists(os.path.abspath(inputFileName))
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
        return (lambda x: datetime.datetime.strptime(x, formatString))  # 2020-06-09 19:14:00.000
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
analysisFrame.apply(pandas.to_numeric, errors='coerce') # Fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
            if pandas.api.types.is_string_dtype(analysisFrame[columnName]):  # Series.dtypes is never a plain str
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
            for (columnName, columnData) in analysisFrame.items():  # iterate (column name, Series) pairs
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
Calculates the mean in a multiplication method since division produces an infinity or NaN
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
def _calculateStd(self, data):
"""
        Calculates the standard deviation in a multiplication method since division produces an infinity or NaN
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
            sd = numpy.float128(0)  # sum of squared deviations starts at zero
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
            # Clean out anomaly due to random invalid inputs.
            if (columnName is not None):
                meanValue = self._calculateMean(dataAnalysisFrame[columnName])
                if numpy.isnan(meanValue):
                    meanValue = numpy.float128(1)
                sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
                if numpy.isnan(float(sigmaValue)):
                    sigmaValue = numpy.float128(1)
                multiplier = numpy.float128(multiplierSigma)  # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7
                sigmaRangeValue = (sigmaValue * multiplier)
                if numpy.isnan(float(sigmaRangeValue)):
                    sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
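# Worked example (assumed values): with mean = 100, sigma = 5 and multiplierSigma = 3.0,
# sigmaRangeValue = 15 and topValue = 115; for roughly normal data about 99.7% of samples
# fall within mean +/- 3*sigma, so topValue marks the upper edge of that band.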
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame of invalid rows for the given column (non-positive values and values beyond the mean + sigma range).
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
# Clean out anomalies due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
logicVector = (dataAnalysisFrame[columnName] >= 1)
dataAnalysisCleaned = dataAnalysisFrame[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
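# Usage sketch (hypothetical frame): rows whose 'cycles' value is below 1 or at/above
# topValue (mean + 3*sigma by default) are dropped, e.g.
#   cleaned = self._cleanZerosForColumnInFrame(dataFrame, columnName='cycles')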
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
replacementList = [pandas.NaT, numpy.Infinity, numpy.NINF, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": pandas.StringDtype(),
"nearMissDefect": pandas.StringDtype(),
"nearMissOverflow": pandas.StringDtype(),
"replayUNC": pandas.StringDtype(),
"Drive_Id": pandas.StringDtype(),
"indirectionMisses": pandas.StringDtype(),
"BitErrorsHost16": pandas.StringDtype(),
"BitErrorsHost17": pandas.StringDtype(),
"BitErrorsHost18": pandas.StringDtype(),
"BitErrorsHost19": pandas.StringDtype(),
"BitErrorsHost20": pandas.StringDtype(),
"BitErrorsHost21": pandas.StringDtype(),
"BitErrorsHost22": pandas.StringDtype(),
"BitErrorsHost23": pandas.StringDtype(),
"BitErrorsHost24": pandas.StringDtype(),
"BitErrorsHost25": pandas.StringDtype(),
"BitErrorsHost26": pandas.StringDtype(),
"BitErrorsHost27": pandas.StringDtype(),
"BitErrorsHost28": pandas.StringDtype(),
"BitErrorsHost29": pandas.StringDtype(),
"BitErrorsHost30": pandas.StringDtype(),
"BitErrorsHost31": pandas.StringDtype(),
"BitErrorsHost32": pandas.StringDtype(),
"BitErrorsHost33": pandas.StringDtype(),
"BitErrorsHost34": pandas.StringDtype(),
"BitErrorsHost35": pandas.StringDtype(),
"BitErrorsHost36": pandas.StringDtype(),
"BitErrorsHost37": pandas.StringDtype(),
"BitErrorsHost38": pandas.StringDtype(),
"BitErrorsHost39": pandas.StringDtype(),
"BitErrorsHost40": pandas.StringDtype(),
"XORRebuildSuccess": pandas.StringDtype(),
"XORRebuildFail": pandas.StringDtype(),
"BandReloForError": pandas.StringDtype(),
"mrrSuccess": pandas.StringDtype(),
"mrrFail": pandas.StringDtype(),
"mrrNudgeSuccess": pandas.StringDtype(),
"mrrNudgeHarmless": pandas.StringDtype(),
"mrrNudgeFail": pandas.StringDtype(),
"totalErases": pandas.StringDtype(),
"dieOfflineCount": pandas.StringDtype(),
"curtemp": pandas.StringDtype(),
"mintemp": pandas.StringDtype(),
"maxtemp": pandas.StringDtype(),
"oventemp": pandas.StringDtype(),
"allZeroSectors": pandas.StringDtype(),
"ctxRecoveryEvents": pandas.StringDtype(),
"ctxRecoveryErases": pandas.StringDtype(),
"NSversionMinor": pandas.StringDtype(),
"lifeMinTemp": pandas.StringDtype(),
"lifeMaxTemp": pandas.StringDtype(),
"powerCycles": pandas.StringDtype(),
"systemReads": pandas.StringDtype(),
"systemWrites": pandas.StringDtype(),
"readRetryOverflow": pandas.StringDtype(),
"unplannedPowerCycles": pandas.StringDtype(),
"unsafeShutdowns": pandas.StringDtype(),
"defragForcedReloCount": pandas.StringDtype(),
"bandReloForBDR": pandas.StringDtype(),
"bandReloForDieOffline": pandas.StringDtype(),
"bandReloForPFail": pandas.StringDtype(),
"bandReloForWL": pandas.StringDtype(),
"provisionalDefects": pandas.StringDtype(),
"uncorrectableProgErrors": pandas.StringDtype(),
"powerOnSeconds": pandas.StringDtype(),
"bandReloForChannelTimeout": pandas.StringDtype(),
"fwDowngradeCount": pandas.StringDtype(),
"dramCorrectablesTotal": pandas.StringDtype(),
"hb_id": pandas.StringDtype(),
"dramCorrectables1to1": pandas.StringDtype(),
"dramCorrectables4to1": pandas.StringDtype(),
"dramCorrectablesSram": pandas.StringDtype(),
"dramCorrectablesUnknown": pandas.StringDtype(),
"pliCapTestInterval": pandas.StringDtype(),
"pliCapTestCount": pandas.StringDtype(),
"pliCapTestResult": pandas.StringDtype(),
"pliCapTestTimeStamp": pandas.StringDtype(),
"channelHangSuccess": pandas.StringDtype(),
"channelHangFail": pandas.StringDtype(),
"BitErrorsHost41": pandas.StringDtype(),
"BitErrorsHost42": pandas.StringDtype(),
"BitErrorsHost43": pandas.StringDtype(),
"BitErrorsHost44": pandas.StringDtype(),
"BitErrorsHost45": pandas.StringDtype(),
"BitErrorsHost46": pandas.StringDtype(),
"BitErrorsHost47": pandas.StringDtype(),
"BitErrorsHost48": pandas.StringDtype(),
"BitErrorsHost49": pandas.StringDtype(),
"BitErrorsHost50": pandas.StringDtype(),
"BitErrorsHost51": pandas.StringDtype(),
"BitErrorsHost52": pandas.StringDtype(),
"BitErrorsHost53": pandas.StringDtype(),
"BitErrorsHost54": pandas.StringDtype(),
"BitErrorsHost55": pandas.StringDtype(),
"BitErrorsHost56": pandas.StringDtype(),
"mrrNearMiss": pandas.StringDtype(),
"mrrRereadAvg": pandas.StringDtype(),
"readDisturbEvictions": pandas.StringDtype(),
"L1L2ParityError": pandas.StringDtype(),
"pageDefects": pandas.StringDtype(),
"pageProvisionalTotal": pandas.StringDtype(),
"ASICTemp": pandas.StringDtype(),
"PMICTemp": pandas.StringDtype(),
"size": pandas.StringDtype(),
"lastWrite": pandas.StringDtype(),
"timesWritten": pandas.StringDtype(),
"maxNumContextBands": pandas.StringDtype(),
"blankCount": pandas.StringDtype(),
"cleanBands": pandas.StringDtype(),
"avgTprog": pandas.StringDtype(),
"avgEraseCount": pandas.StringDtype(),
"edtcHandledBandCnt": pandas.StringDtype(),
"bandReloForNLBA": pandas.StringDtype(),
"bandCrossingDuringPliCount": pandas.StringDtype(),
"bitErrBucketNum": pandas.StringDtype(),
"sramCorrectablesTotal": pandas.StringDtype(),
"l1SramCorrErrCnt": pandas.StringDtype(),
"l2SramCorrErrCnt": pandas.StringDtype(),
"parityErrorValue": pandas.StringDtype(),
"parityErrorType": pandas.StringDtype(),
"mrr_LutValidDataSize": pandas.StringDtype(),
"pageProvisionalDefects": pandas.StringDtype(),
"plisWithErasesInProgress": pandas.StringDtype(),
"lastReplayDebug": pandas.StringDtype(),
"externalPreReadFatals": pandas.StringDtype(),
"hostReadCmd": pandas.StringDtype(),
"hostWriteCmd": pandas.StringDtype(),
"trimmedSectors": pandas.StringDtype(),
"trimTokens": pandas.StringDtype(),
"mrrEventsInCodewords": pandas.StringDtype(),
"mrrEventsInSectors": pandas.StringDtype(),
"powerOnMicroseconds": pandas.StringDtype(),
"mrrInXorRecEvents": pandas.StringDtype(),
"mrrFailInXorRecEvents": pandas.StringDtype(),
"mrrUpperpageEvents": pandas.StringDtype(),
"mrrLowerpageEvents": pandas.StringDtype(),
"mrrSlcpageEvents": pandas.StringDtype(),
"mrrReReadTotal": pandas.StringDtype(),
"powerOnResets": pandas.StringDtype(),
"powerOnMinutes": pandas.StringDtype(),
"throttleOnMilliseconds": pandas.StringDtype(),
"ctxTailMagic": pandas.StringDtype(),
"contextDropCount": pandas.StringDtype(),
"lastCtxSequenceId": pandas.StringDtype(),
"currCtxSequenceId": pandas.StringDtype(),
"mbliEraseCount": pandas.StringDtype(),
"pageAverageProgramCount": pandas.StringDtype(),
"bandAverageEraseCount": pandas.StringDtype(),
"bandTotalEraseCount": pandas.StringDtype(),
"bandReloForXorRebuildFail": pandas.StringDtype(),
"defragSpeculativeMiss": pandas.StringDtype(),
"uncorrectableBackgroundScan": pandas.StringDtype(),
"BitErrorsHost57": pandas.StringDtype(),
"BitErrorsHost58": pandas.StringDtype(),
"BitErrorsHost59": pandas.StringDtype(),
"BitErrorsHost60": pandas.StringDtype(),
"BitErrorsHost61": pandas.StringDtype(),
"BitErrorsHost62": pandas.StringDtype(),
"BitErrorsHost63": pandas.StringDtype(),
"BitErrorsHost64": pandas.StringDtype(),
"BitErrorsHost65": pandas.StringDtype(),
"BitErrorsHost66": pandas.StringDtype(),
"BitErrorsHost67": pandas.StringDtype(),
"BitErrorsHost68": pandas.StringDtype(),
"BitErrorsHost69": pandas.StringDtype(),
"BitErrorsHost70": pandas.StringDtype(),
"BitErrorsHost71": pandas.StringDtype(),
"BitErrorsHost72": pandas.StringDtype(),
"BitErrorsHost73": pandas.StringDtype(),
"BitErrorsHost74": pandas.StringDtype(),
"BitErrorsHost75": pandas.StringDtype(),
"BitErrorsHost76": pandas.StringDtype(),
"BitErrorsHost77": pandas.StringDtype(),
"BitErrorsHost78": pandas.StringDtype(),
"BitErrorsHost79": pandas.StringDtype(),
"BitErrorsHost80": pandas.StringDtype(),
"bitErrBucketArray1": pandas.StringDtype(),
"bitErrBucketArray2": pandas.StringDtype(),
"bitErrBucketArray3": pandas.StringDtype(),
"bitErrBucketArray4": pandas.StringDtype(),
"bitErrBucketArray5": pandas.StringDtype(),
"bitErrBucketArray6": pandas.StringDtype(),
"bitErrBucketArray7": pandas.StringDtype(),
"bitErrBucketArray8": pandas.StringDtype(),
"bitErrBucketArray9": pandas.StringDtype(),
"bitErrBucketArray10": pandas.StringDtype(),
"bitErrBucketArray11": pandas.StringDtype(),
"bitErrBucketArray12": pandas.StringDtype(),
"bitErrBucketArray13": pandas.StringDtype(),
"bitErrBucketArray14": pandas.StringDtype(),
"bitErrBucketArray15": pandas.StringDtype(),
"bitErrBucketArray16": pandas.StringDtype(),
"bitErrBucketArray17": pandas.StringDtype(),
"bitErrBucketArray18": pandas.StringDtype(),
"bitErrBucketArray19": pandas.StringDtype(),
"bitErrBucketArray20": pandas.StringDtype(),
"bitErrBucketArray21": pandas.StringDtype(),
"bitErrBucketArray22": pandas.StringDtype(),
"bitErrBucketArray23": pandas.StringDtype(),
"bitErrBucketArray24": pandas.StringDtype(),
"bitErrBucketArray25": pandas.StringDtype(),
"bitErrBucketArray26": pandas.StringDtype(),
"bitErrBucketArray27": pandas.StringDtype(),
"bitErrBucketArray28": pandas.StringDtype(),
"bitErrBucketArray29": pandas.StringDtype(),
"bitErrBucketArray30": pandas.StringDtype(),
"bitErrBucketArray31": pandas.StringDtype(),
"bitErrBucketArray32": pandas.StringDtype(),
"bitErrBucketArray33": pandas.StringDtype(),
"bitErrBucketArray34": pandas.StringDtype(),
"bitErrBucketArray35": pandas.StringDtype(),
"bitErrBucketArray36": pandas.StringDtype(),
"bitErrBucketArray37": pandas.StringDtype(),
"bitErrBucketArray38": pandas.StringDtype(),
"bitErrBucketArray39": pandas.StringDtype(),
"bitErrBucketArray40": pandas.StringDtype(),
"bitErrBucketArray41": pandas.StringDtype(),
"bitErrBucketArray42": pandas.StringDtype(),
"bitErrBucketArray43": | pandas.StringDtype() | pandas.StringDtype |
# Python infrastructure elements
import subprocess
import sys
from typing import Dict, List, Set, Tuple, IO
from enum import IntEnum
# Command-line and database handling
import click
import fire.cli
from fire.cli import firedb
from fire.api.model import (
# Typing elements from the database API:
Koordinat,
Punkt,
PunktInformation,
PunktInformationType,
Sag,
Sagsevent,
Sagsinfo,
Srid,
)
from sqlalchemy.orm import aliased
from sqlalchemy.orm.exc import NoResultFound
# Computation
import numpy as np
import statsmodels.api as sm
from math import sqrt
from pyproj import Proj
from scipy import stats
# Data handling
import pandas as pd
import xlsxwriter
import xmltodict
from datetime import datetime
# ------------------------------------------------------------------------------
@click.group()
def mtl():
"""Motoriseret trigonometrisk nivellement (motorized trigonometric levelling): workflow, computation and analysis"""
pass
# ------------------------------------------------------------------------------
def get_observation_strings(
filinfo: List[Tuple[str, float]], verbose: bool = False
) -> List[str]:
"""Extract the observation strings from a set of raw files"""
kol = IntEnum(
"kol",
"fra til dato tid L dH journal T setups sky sol vind sigt kommentar",
start=0,
)
observationer = list()
for fil in filinfo:
filnavn = fil[0]
spredning = fil[1]
if verbose:
print("Læser " + filnavn + " med spredning ", spredning)
try:
with open(filnavn, "rt", encoding="utf-8") as obsfil:
for line in obsfil:
if "#" != line[0]:
continue
line = line.lstrip("#").strip()
# Check that the observation is in one of the known formats
tokens = line.split(" ", 13)
assert len(tokens) in (9, 13, 14), (
"Malformed input line: " + line + " in file: " + filnavn
)
# Bring the observation into canonical 14-field form.
for i in range(len(tokens), 13):
tokens.append(0)
if len(tokens) < 14:
tokens.append('""')
tokens[13] = tokens[13].lstrip('"').strip().rstrip('"')
# Fix the unwieldy date/time formats
tid = " ".join((tokens[kol.dato], tokens[kol.tid]))
try:
isotid = datetime.strptime(tid, "%d.%m.%Y %H.%M")
except ValueError:
sys.exit(
"Argh - ikke-understøttet datoformat: '"
+ tid
+ "' i fil: "
+ filnavn
)
# Reorder columns and convert numeric data from string representation to numbers
reordered = [
tokens[kol.journal],
tokens[kol.fra],
tokens[kol.til],
float(tokens[kol.dH]),
float(tokens[kol.L]),
int(tokens[kol.setups]),
spredning,
tokens[kol.kommentar],
isotid,
float(tokens[kol.T]),
int(tokens[kol.sky]),
int(tokens[kol.sol]),
int(tokens[kol.vind]),
int(tokens[kol.sigt]),
filnavn,
]
observationer.append(reordered)
except FileNotFoundError:
print("Kunne ikke læse filen '" + filnavn + "'")
return observationer
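# Illustrative sketch of the transformation above (field values are assumed, not from a real file):
# a raw "# "-prefixed line in the order
#   (fra til dato tid L dH journal T setups sky sol vind sigt kommentar)
# is reordered to
#   [journal, fra, til, dH, L, setups, spredning, kommentar, isotid, T, sky, sol, vind, sigt, filnavn],
# with numeric fields converted to numbers and the date/time parsed into a datetime object.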
# ------------------------------------------------------------------------------
def punkt_information(ident: str) -> PunktInformation:
"""Find all information for a fixed point"""
pi = aliased(PunktInformation)
pit = aliased(PunktInformationType)
try:
punktinfo = (
firedb.session.query(pi)
.filter(pit.name.startswith("IDENT:"), pi.tekst == ident)
.first()
)
except NoResultFound:
fire.cli.print(f"Error! {ident} not found!", fg="red", err=True)
sys.exit(1)
if punktinfo is not None:
fire.cli.print(f"Fandt {ident}", fg="green", err=False)
else:
fire.cli.print(f"Fandt ikke {ident}", fg="cyan", err=False)
return punktinfo
# ------------------------------------------------------------------------------
def punkt_kote(punktinfo: PunktInformation, koteid: int) -> Koordinat:
"""Find the current coordinate value for coordinate type koteid"""
if punktinfo is None:
return None
for koord in punktinfo.punkt.koordinater:
if koord.sridid != koteid:
continue
if koord.registreringtil is None:
return koord
return None
# ------------------------------------------------------------------------------
def punkt_geometri(punktinfo: PunktInformation, ident: str) -> Tuple[float, float]:
"""Find the location coordinate for a point"""
if punktinfo is None:
return (11, 56)
try:
geom = firedb.hent_geometri_objekt(punktinfo.punktid)
# Turn the string "POINT (lon lat)" into the tuple "(lon, lat)"
geo = eval(str(geom.geometri).lstrip("POINT").strip().replace(" ", ","))
# TODO: Perhaps just return (56,11) Kattegat pain instead
assert len(geo) == 2, "Bad geometry format: " + str(geom.geometri)
except NoResultFound:
fire.cli.print(f"Error! Geometry for {ident} not found!", fg="red", err=True)
sys.exit(1)
return geo
# ------------------------------------------------------------------------------
# TODO: Should probably be part of the API
# ------------------------------------------------------------------------------
def hent_sridid(db, srid: str) -> int:
srider = db.hent_srider()
for s in srider:
if s.name == srid:
return s.sridid
# TODO: throw an exception
return 0
# ------------------------------------------------------------------------------
def path_to_origin(graph: Dict[str, Set[str]], start: str, origin: str, path=[]):
"""
Microscopic backtracking network-connectivity test. Based on an essay
by GvR from https://www.python.org/doc/essays/graphs/, modernized here
from Python 1.5 to 3.7 and modified to work on dict-over-set
(the original used dict-over-list)
"""
path = path + [start]
if start == origin:
return path
if start not in graph:
return None
for node in graph[start]:
if node not in path:
newpath = path_to_origin(graph, node, origin, path)
if newpath:
return newpath
return None
# ------------------------------------------------------------------------------
# Example:
#
# graph = {
# 'A': {'B', 'C'},
# 'B': {'C', 'D'},
# 'C': {'D'},
# 'D': {'C'},
# 'E': {'F'},
# 'F': {'C'},
# 'G': {}
# }
#
# print (path_to_origin (graph, 'A', 'C'))
# print (path_to_origin (graph, 'A', 'G'))
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
def find_nyetablerede(projektnavn):
"""Build an overview of newly established points"""
print("Finder nyetablerede punkter")
try:
nyetablerede = pd.read_excel(
projektnavn + ".xlsx",
sheet_name="Nyetablerede punkter",
usecols="A:E",
dtype={
"Foreløbigt navn": object,
"Endeligt navn": object,
"φ": np.float64,
"λ": np.float64,
"Foreløbig kote": np.float64,
},
)
except:
nyetablerede = pd.DataFrame(
columns=["Foreløbigt navn", "Endeligt navn", "φ", "λ", "Foreløbig kote"],
)
assert nyetablerede.shape[0] == 0, "Forventede tom dataframe"
# Set the 'Foreløbigt navn' column as the index, so we can address
# entries as nyetablerede.at[punktnavn, elementnavn]
return nyetablerede.set_index("Foreløbigt navn")
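# Usage sketch (hypothetical point name): with 'Foreløbigt navn' as the index, a preliminary
# elevation can be read as nyetablerede.at["NYT-01", "Foreløbig kote"].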
# ------------------------------------------------------------------------------
def find_inputfiler(navn) -> List[Tuple[str, float]]:
"""Build an overview of all input file names and their associated standard deviations"""
try:
inputfiler = pd.read_excel(
navn + ".xlsx", sheet_name="Filoversigt", usecols="C:E"
)
except:
sys.exit("Kan ikke finde filoversigt i projektfil")
inputfiler = inputfiler[inputfiler["Filnavn"].notnull()]  # Remove blank lines
filnavne = inputfiler["Filnavn"]
spredning = inputfiler["σ"]
assert len(filnavne) > 0, "Ingen inputfiler anført"
return list(zip(filnavne, spredning))
# ------------------------------------------------------------------------------
def importer_observationer(navn):
"""Build a dataframe of observations imported from raw data files"""
print("Importerer observationer")
observationer = pd.DataFrame(
get_observation_strings(find_inputfiler(navn)),
columns=[
"journal",
"fra",
"til",
"dH",
"L",
"opst",
"σ",
"kommentar",
"hvornår",
"T",
"sky",
"sol",
"vind",
"sigt",
"kilde",
],
)
return observationer.sort_values(by="journal").set_index("journal").reset_index()
# ------------------------------------------------------------------------------
def find_observationer(navn):
"""Build a dataframe of already-imported observations"""
print("Læser observationer")
try:
observationer = pd.read_excel(
navn + ".xlsx", sheet_name="Observationer", usecols="A:P"
)
except:
observationer = importer_observationer(navn)
return observationer
# ------------------------------------------------------------------------------
def opbyg_punktoversigt(navn, nyetablerede, alle_punkter, nye_punkter):
# Read the preliminary point overview to see whether the database needs to be queried
try:
punktoversigt = pd.read_excel(
navn + ".xlsx", sheet_name="Punktoversigt", usecols="A:L"
)
except:
punktoversigt = pd.DataFrame(
columns=[
"punkt",
"fix",
"upub",
"år",
"kote",
"σ",
"ny",
"ny σ",
"Δ",
"kommentar",
"φ",
"λ",
]
)
assert punktoversigt.shape[0] == 0, "Forventede tom dataframe"
print("Opbygger punktoversigt")
# Find and add the points that are missing from the point overview.
manglende_punkter = set(alle_punkter) - set(punktoversigt["punkt"])
pkt = list(punktoversigt["punkt"]) + list(manglende_punkter)
# Extend the point overview so there is room for all points
punktoversigt = punktoversigt.reindex(range(len(pkt)))
punktoversigt["punkt"] = pkt
# Reinstate the 'punkt' column as the index column
punktoversigt = punktoversigt.set_index("punkt")
# Fetch the elevation and position from the database if we do not already have them
print("Checker for manglende kote og placering")
koteid = np.nan
for punkt in alle_punkter:
if not pd.isna(punktoversigt.at[punkt, "kote"]):
continue
if punkt in nye_punkter:
continue
# We avoid querying the database when we already have all elevations
# by only fetching koteid once we know we actually need it
if np.isnan(koteid):
koteid = hent_sridid(firedb, "EPSG:5799")
# TODO: Handle this with try:..except instead
assert koteid != 0, "DVR90 (EPSG:5799) ikke fundet i srid-tabel"
info = punkt_information(punkt)
kote = punkt_kote(info, koteid)
if kote is not None:
punktoversigt.at[punkt, "kote"] = kote.z
punktoversigt.at[punkt, "σ"] = kote.sz
punktoversigt.at[punkt, "år"] = kote.registreringfra.year
geom = punkt_geometri(info, punkt)
if pd.isna(punktoversigt.at[punkt, "φ"]):
punktoversigt.at[punkt, "φ"] = geom[1]
punktoversigt.at[punkt, "λ"] = geom[0]
# Newly established points are not in the database, so fetch any missing
# elevations and position coordinates from the 'Nyetablerede punkter' sheet
for punkt in nye_punkter:
if pd.isna(punktoversigt.at[punkt, "kote"]):
punktoversigt.at[punkt, "kote"] = nyetablerede.at[punkt, "Foreløbig kote"]
if | pd.isna(punktoversigt.at[punkt, "φ"]) | pandas.isna |
import pandas as pd
import praw
import os
import sqlite3
r_cid = os.getenv('reddit_api')
r_csec = os.getenv('reddit_api_sec')
r_uag = os.getenv('reddit_user')
def get_reddit(cid= r_cid, csec= r_csec, uag= r_uag, subreddit='wallstreetbets'):
#connect to sqlite database
conn = sqlite3.connect('stocks.sqlite')
#connect to reddit
reddit = praw.Reddit(client_id= cid, client_secret= csec, user_agent= uag)
#get the new reddit posts
posts = reddit.subreddit(subreddit).top('day', limit=100)
#load the posts into a pandas dataframe
p = []
for post in posts:
if post.selftext != "":
p.append([post.title, post.score, post.selftext])
else:
p.append([post.title, post.score, post.url])
#create dataframe from list
posts_df = | pd.DataFrame(p,columns=['title', 'score', 'post']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 7 10:15:21 2019
@author: Hamid.t
"""
# importing libraries for calculations
import pandas as pd
import numpy as np
# importing libraries for plots
import matplotlib.pyplot as plt
import seaborn as sns
#importing libraries for loading the dataset
from sklearn.datasets import load_breast_cancer
#importing libraries to calculate ROC
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
import os
# preparing directories for saving plots
os.makedirs('./plots/pairs', exist_ok=True)
os.makedirs('./plots/comparative scatter', exist_ok=True)
# loading data
cancer = load_breast_cancer()
y = cancer.target
X = pd.DataFrame(cancer.data)
# setting colum names
X.columns= cancer.feature_names
y_plot=pd.DataFrame(y)
y_plot.columns=['diagnosis']
X_plot=X
# in sklearn's load_breast_cancer, target 0 is malignant and 1 is benign
y_plot['diagnosis'].replace(0,'M',inplace=True)
y_plot['diagnosis'].replace(1,'B',inplace=True)
# dropping empty rows before plotting
X_plot.dropna(inplace=True)
y_plot.dropna(inplace=True)
frames=[X_plot,y_plot]
data_com= | pd.concat(frames,axis=1) | pandas.concat |
"""
Author: <NAME>
Last Updated: 21 jan 22
Purpose: Create heatmaps from google location history data.
"""
##Libraries
import json
import pandas as pd
import shapely.geometry as sg
import datetime as dt
import folium
from folium.plugins import HeatMap, heat_map
import os
from dateutil import parser
##Setup
pd.set_option('display.max_colwidth', None)
os.chdir(r"C:\Users\gabee\OneDrive\Documents\Programming\Python\Google Location History")
##Variables
dataURI = "C:/Users/gabee/OneDrive/Documents/Data/Google/Takeout/Location History/Records.json"
worldBox = sg.box(-140, -20, 140, 70)
##Helper functions
def pPrint(dictObj):
print(json.dumps(dictObj, indent=4, sort_keys=True))
def extract_activity(record):
try:
return record["activity"][0]["activity"][0]["type"]
except:
return "MISSING"
def parseData(json):
#https://github.com/gboeing/data-visualization/blob/main/location-history/google-location-history-simple.ipynb
#Datetime format
df = pd.read_json(json)
# parse lat, lon, and timestamp from the dict inside the locations column
df['lat'] = df['locations'].map(lambda x: x['latitudeE7'] / 1e7)  # E7 fields are degrees * 10^7
df['lon'] = df['locations'].map(lambda x: x['longitudeE7'] / 1e7)
df['timestamp'] = df['locations'].map(lambda x: x['timestamp'])
df['timestamp'] = | pd.to_datetime(df['timestamp']) | pandas.to_datetime |
from __future__ import division
import numpy as np
import pandas as pd
import pickle
import os
from math import ceil
import matplotlib
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
from sklearn.metrics import r2_score
warnings.simplefilter("ignore")
# colors = ["#3366cc", "#dc3912", "#109618", "#990099", "#ff9900"]
colors = sns.color_palette('muted')
labels = ['Remaining', 'First','Last']
def density_plot(df, Accuracy_base, Accuracy_LSTM, Accuracy_NG, save_fig, Out_put_name,model_name_list, Mean_or_median):
model = model_name_list
sns.set(font_scale=1.5)
sns.set_style("white", {"legend.frameon": True})
plt.figure(figsize=(14, 7))
ax1 = plt.subplot(1, 2, 1)
cols1 = [df, Accuracy_base, Accuracy_LSTM, Accuracy_NG]
for i in range(len(cols1)):
data = cols1[i]['first']
sns.kdeplot(data, ax=ax1, shade=True, color=colors[i], label=model[i],linewidth=2)
if Mean_or_median == 'Mean':
med = data.mean()
else:
med = data.median()
plt.axvline(med, color=colors[i], linestyle='dashed', linewidth=2)
if i == 1:
if 'duration' in Out_put_name:
plt.text(med - 0.02, 3.0, '{}'.format(round(med, 3)),
horizontalalignment='right', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med - 0.02, 3.0, '{}%'.format(round(med * 100, 1)),
horizontalalignment='right', verticalalignment='center',
fontsize=18, color=colors[i])
elif i== 2:
if 'duration' in Out_put_name:
plt.text(med + 0.02, 3.1, '{}'.format(round(med, 3)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med + 0.02, 3.1, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
elif i == 0:
if 'duration' in Out_put_name:
plt.text(med + 0.02, 3.3, '{}'.format(round(med, 3)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med + 0.02, 3.3, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
elif i == 3:
if 'duration' in Out_put_name:
plt.text(med - 0.02, 3.0, '{}'.format(round(med, 3)),
horizontalalignment='right', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med - 0.02, 3.3, '{}%'.format(round(med * 100, 1)),
horizontalalignment='right', verticalalignment='center',
fontsize=18, color=colors[i])
plt.xlim(0, 1.0)
plt.ylim(0, 3.5)
if 'location' in Out_put_name:
ax1.set_xticklabels([str(i) + '%' for i in range(0, 101, 20)])
plt.xlabel('Prediction accuracy', fontsize=20)
else:
plt.xlabel('R'+r'$^2$', fontsize=20)
plt.ylabel('Density', fontsize=20)
plt.yticks(fontsize=20)
plt.xticks(fontsize=20)
if 'location' in Out_put_name:
plt.legend(fontsize=18) #, loc='upper right'
else:
plt.legend(fontsize=18) #
plt.title('First activities',fontsize=20)
# plt.text(-0.1, 1.05, '(a)', fontdict={'size': 20, 'weight': 'bold'},
# transform=ax1.transAxes)
ax2 = plt.subplot(1, 2, 2)
for i in range(len(cols1)):
data = cols1[i]['Remaining']
sns.kdeplot(data, ax=ax2, shade=True, color=colors[i], label=model[i], linewidth=2)
if Mean_or_median == 'Mean':
med = data.mean()
else:
med = data.median()
plt.axvline(med, color=colors[i], linestyle='dashed', linewidth=2, alpha = 1)
if i == 1:
if 'duration' in Out_put_name:
plt.text(med - 0.01, 3.3, '{}'.format(round(med, 3)),
horizontalalignment='right', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med + 0.01, 3.3, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
elif i== 2:
if 'duration' in Out_put_name:
plt.text(med + 0.023, 3.0, '{}'.format(round(med, 3)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med - 0.01, 3.0, '{}%'.format(round(med * 100, 1)),
horizontalalignment='right', verticalalignment='center',
fontsize=18, color=colors[i])
elif i == 0:
if 'duration' in Out_put_name:
plt.text(med + 0.01, 3.0, '{}'.format(round(med, 3)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med + 0.01, 3.0, '{}%'.format(round(med* 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
elif i == 3:
if 'duration' in Out_put_name:
plt.text(med + 0.01, 3.3, '{}'.format(round(med, 3)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med + 0.01, 3.3, '{}%'.format(round(med* 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
plt.xlim(0, 1.0)
plt.ylim(0, 3.5)
if 'location' in Out_put_name:
ax2.set_xticklabels([str(i) + '%' for i in range(0, 101, 20)])
plt.xlabel('Prediction accuracy', fontsize=20)
else:
plt.xlabel('R'+r'$^2$', fontsize=20)
plt.yticks(fontsize=20)
plt.xticks(fontsize=20)
plt.ylabel('Density', fontsize=20)
if 'location' in Out_put_name:
plt.legend(fontsize=18) #, loc='upper right'
else:
plt.legend(fontsize=18) #
plt.title('Remaining activities',fontsize=20)
# plt.text(-0.1, 1.05, '(b)', fontdict={'size': 20, 'weight': 'bold'},
# transform=ax2.transAxes)
plt.tight_layout()
if save_fig == 0:
plt.show()
else:
plt.savefig('img/' + Out_put_name, dpi=200)
def density_plot_duration_error(df, Accuracy_base, Accuracy_LSTM, save_fig, Out_put_name, model_name_list, Mean_or_median):
model = model_name_list
sns.set(font_scale=1.5)
sns.set_style("white", {"legend.frameon": True})
plt.figure(figsize=(14, 7))
ax1 = plt.subplot(1, 2, 1)
cols1 = [df, Accuracy_base, Accuracy_LSTM]
for i in range(len(cols1)):
data = cols1[i]['first']
sns.kdeplot(data, ax=ax1, shade=True, color=colors[i], label=model[i])
if Mean_or_median == 'Mean':
med = data.mean()
else:
med = data.median()
plt.axvline(med, color=colors[i], linestyle='dashed', linewidth=2)
if i == 0:
plt.text(med + 0.02, 3.3, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
elif i == 2:
if 'duration' in Out_put_name:
plt.text(med + 0.02, 3.0, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med + 0.02, 3.0, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med - 0.01, 3.3, '{}%'.format(round(med * 100, 1)),
horizontalalignment='right', verticalalignment='center',
fontsize=18, color=colors[i])
# plt.xlim(0, 1.0)
# plt.ylim(0, 3.5)
# ax1.set_xticklabels([str(i) + '%' for i in range(0, 101, 20)])
# plt.xlabel('Prediction Accuracy', fontsize=20)
plt.ylabel('Density (Users)', fontsize=20)
plt.yticks(fontsize=20)
plt.xticks(fontsize=20)
if 'location' in Out_put_name:
plt.legend(fontsize=18) # , loc='upper right'
else:
plt.legend(fontsize=18, loc='center right') #
plt.title('First Activities', fontsize=20)
# plt.text(-0.1, 1.05, '(a)', fontdict={'size': 20, 'weight': 'bold'},
# transform=ax1.transAxes)
ax2 = plt.subplot(1, 2, 2)
for i in range(len(cols1)):
data = cols1[i]['Remaining']
sns.kdeplot(data, ax=ax2, shade=True, color=colors[i], label=model[i])
if Mean_or_median == 'Mean':
med = data.mean()
else:
med = data.median()
plt.axvline(med, color=colors[i], linestyle='dashed', linewidth=2)
if i == 1:
if 'duration' in Out_put_name:
plt.text(med - 0.01, 3.3, '{}%'.format(round(med * 100, 1)),
horizontalalignment='right', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med + 0.01, 3.3, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
elif i == 2:
if 'duration' in Out_put_name:
plt.text(med + 0.023, 3.0, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med - 0.01, 3.0, '{}%'.format(round(med * 100, 1)),
horizontalalignment='right', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med + 0.01, 3.3, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
# plt.xlim(0, 1.0)
# plt.ylim(0, 3.5)
# ax2.set_xticklabels([str(i) + '%' for i in range(0, 101, 20)])
plt.yticks(fontsize=20)
plt.xticks(fontsize=20)
# plt.xlabel('Prediction Accuracy', fontsize=20)
plt.ylabel('Density (User-level)', fontsize=20)
if 'location' in Out_put_name:
plt.legend(fontsize=18) # , loc='upper right'
else:
plt.legend(fontsize=18, loc='center right') #
plt.title('Remaining Activities', fontsize=20)
# plt.text(-0.1, 1.05, '(b)', fontdict={'size': 20, 'weight': 'bold'},
# transform=ax2.transAxes)
plt.tight_layout()
if save_fig == 0:
plt.show()
else:
plt.savefig('img/' + Out_put_name, dpi=200)
def density_plot_not_seperate_mid_first(df, Accuracy_base, Accuracy_LSTM, save_fig, Out_put_name, model_name_list):
model = model_name_list
sns.set(font_scale=1.5)
sns.set_style("white", {"legend.frameon": True})
plt.figure(figsize=(7, 7))
ax1 = plt.subplot(1, 1, 1)
cols1 = [df, Accuracy_base, Accuracy_LSTM]
for i in range(len(cols1)):
data = cols1[i]['all']
sns.kdeplot(data, ax=ax1, shade=True, color=colors[i], label=model[i])
med = data.mean()
plt.axvline(med, color=colors[i], linestyle='dashed', linewidth=2)
if i == 0:
plt.text(med + 0.02, 3.3, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
elif i == 2:
if 'duration' in Out_put_name:
plt.text(med + 0.02, 3.0, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med + 0.02, 3.0, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med - 0.01, 3.3, '{}%'.format(round(med * 100, 1)),
horizontalalignment='right', verticalalignment='center',
fontsize=18, color=colors[i])
plt.xlim(0, 1.0)
plt.ylim(0, 3.5)
ax1.set_xticklabels([str(i) + '%' for i in range(0, 101, 20)])
plt.xlabel('Prediction Accuracy', fontsize=20)
plt.ylabel('Density (Users)', fontsize=20)
plt.yticks(fontsize=20)
plt.xticks(fontsize=20)
if 'location' in Out_put_name:
plt.legend(fontsize=18) # , loc='upper right'
else:
plt.legend(fontsize=18) #
plt.tight_layout()
if save_fig == 0:
plt.show()
else:
plt.savefig('img/' + Out_put_name, dpi=200)
def data_process_continuous(data):
error_first_temp = (data['Predict1'].loc[data['activity_index']==0] - data['Ground_truth'].loc[data['activity_index']==0])/3600
Accuracy_first_temp = sum(np.array(data['Correct'].loc[data['activity_index']==0]))/data['Correct'].loc[data['activity_index']==0].count()
data_temp = data.loc[data['activity_index']!=0]
# data_temp = data
error_Remaining_temp = (data_temp['Predict1'] - data_temp['Ground_truth'])/3600
Accuracy_temp = sum(np.array(data_temp['Correct']))/data_temp['Correct'].count()
accuracy_all = sum(np.array(data['Correct']))/data['Correct'].count()
return error_first_temp, Accuracy_first_temp, error_Remaining_temp, Accuracy_temp,accuracy_all
def calculate_error(result_df):
# correct error data
result_df.loc[result_df['Predict_duration'] > 86400, 'Predict_duration'] = 86400
result_df.loc[result_df['Predict_duration'] <= 0, 'Predict_duration'] = 1
######
result_df['error_sq'] = (result_df['Predict_duration'] - result_df['Ground_truth_duration']) ** 2
result_df['error_abs'] = np.abs(result_df['Predict_duration'] - result_df['Ground_truth_duration'])
RMSE = np.sqrt(np.mean(result_df['error_sq']))
MAPE = np.mean(result_df['error_abs'] / result_df['Ground_truth_duration'])
MAE = np.mean(result_df['error_abs'])
if len(result_df) > 0:
R_sq = r2_score(result_df['Ground_truth_duration'], result_df['Predict_duration'])
else:
R_sq = None
return RMSE, MAPE, MAE, R_sq
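# Worked toy example (assumed numbers): with Ground_truth_duration = [100, 200] and
# Predict_duration = [110, 180], the absolute errors are 10 and 20, so
# RMSE = sqrt((100 + 400) / 2) ~= 15.8, MAE = 15, and MAPE = (10/100 + 20/200) / 2 = 0.10.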
def r_sq_for_two_parts(data,y_mean):
data['RES'] = (data['Ground_truth_duration'] - data['Predict_duration'])**2
data['TOT'] = (data['Ground_truth_duration'] - y_mean)**2
R_sq = 1 - sum(data['RES'])/sum(data['TOT'])
return R_sq
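# The quantity above is the coefficient of determination computed against a shared mean:
#   R^2 = 1 - sum((y - y_hat)^2) / sum((y - y_mean)^2)
# where y_mean comes from the full sample, so the first-activity and remaining-activity
# subsets are scored against a common baseline.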
def data_process_continuous_R_sq(data):
_, _, _, R_sq_all = calculate_error(data)
data_first = data.loc[data['activity_index']==0].copy()
data_Remaining = data.loc[data['activity_index']!=0].copy()
mean_y = np.mean(data['Ground_truth_duration'])
R_sq_first = r_sq_for_two_parts(data_first, mean_y)
if len(data_Remaining)>0:
R_sq_Remaining = r_sq_for_two_parts(data_Remaining, mean_y)
else:
R_sq_Remaining = None
return R_sq_first, R_sq_Remaining, R_sq_all
def data_process_continuous_RMSE(data):
RMSE_all, _, _, _ = calculate_error(data)
data_first = data.loc[data['activity_index']==0].copy()
data_Remaining = data.loc[data['activity_index']!=0].copy()
RMSE_first, _, _, R_sq_first = calculate_error(data_first)
RMSE_Remaining, _, _, R_sq_Remaining = calculate_error(data_Remaining)
return RMSE_first, RMSE_Remaining, RMSE_all
def data_process_continuous_MAPE(data):
_, MAPE_all, _, _ = calculate_error(data)
data_first = data.loc[data['activity_index']==0].copy()
data_Remaining = data.loc[data['activity_index']!=0].copy()
_, MAPE_first, _, R_sq_first = calculate_error(data_first)
_, MAPE_Remaining, _, R_sq_Remaining = calculate_error(data_Remaining)
return MAPE_first, MAPE_Remaining, MAPE_all
def data_process_discrete(data):
error_first_temp = (data['Predict1'].loc[data['activity_index']==0] - data['Ground_truth'].loc[data['activity_index']==0])
Accuracy_first_temp = sum(np.array(data['Correct'].loc[data['activity_index']==0]))/data['Correct'].loc[data['activity_index']==0].count()
data_temp = data.loc[data['activity_index']!=0]
# data_temp = data
error_Remaining_temp = (data_temp['Predict1'] - data_temp['Ground_truth'])
Accuracy_temp = sum(np.array(data_temp['Correct']))/data_temp['Correct'].count()
accuracy_all = sum(np.array(data['Correct'])) / data['Correct'].count()
return error_first_temp, Accuracy_first_temp, error_Remaining_temp, Accuracy_temp, accuracy_all
def generate_accuracy_file(individual_ID_list, output_fig, duration_error):
error_list=[]
total=0
error_Remaining = pd.DataFrame({'Remaining':[]})
error_first = pd.DataFrame({'first':[]})
error_Remaining_base = pd.DataFrame({'Remaining':[]})
error_first_base = pd.DataFrame({'first':[]})
Accuracy = {'Card_ID':[], 'Remaining':[],'first':[],'all':[]}
Accuracy_base = {'Card_ID':[], 'Remaining':[],'first':[],'all':[]}
Accuracy_LSTM = {'Card_ID': [], 'Remaining': [], 'first': [], 'all': []}
Accuracy_NG = {'Card_ID': [], 'Remaining': [], 'first': [], 'all': []}
# data
Card_ID_used = []
# individual_ID_list = individual_ID_list[0:80]
#############IOHMM
for Card_ID in individual_ID_list:
# if output_fig == 'duration':
# file_name = data_path + 'results/result_' + str(Card_ID) + 'test' + '.csv'
# else:
# file_name = data_path + 'results/result_Location_' + str(Card_ID) + 'test' + '.csv'
file_name = data_path + 'results/result_con_dur+loc_' + str(Card_ID) + 'test' + '.csv'
if os.path.exists(file_name) == False:
print(Card_ID,'does not exist for IOHMM')
continue
else:
Card_ID_used.append(Card_ID)
data = pd.read_csv(file_name)
if output_fig == 'duration':
if duration_error == 'RMSE':
R_sq_first, R_sq_Remaining, R_sq_all = data_process_continuous_RMSE(data)
elif duration_error == 'MAPE':
R_sq_first, R_sq_Remaining, R_sq_all = data_process_continuous_MAPE(data)
else:
R_sq_first, R_sq_Remaining, R_sq_all = data_process_continuous_R_sq(data)
Accuracy['first'].append(R_sq_first)
Accuracy['Remaining'].append(R_sq_Remaining)
Accuracy['all'].append(R_sq_all)
Accuracy['Card_ID'].append(Card_ID)
else:
error_first_temp, Accuracy_first_temp, error_Remaining_temp, Accuracy_temp, accuracy_all = data_process_discrete(data)
#print (error_first_temp)
error_first = pd.concat([error_first, error_first_temp], axis = 0)
error_Remaining = pd.concat([error_Remaining, error_Remaining_temp], axis = 0)
Accuracy['first'].append(Accuracy_first_temp)
Accuracy['Remaining'].append(Accuracy_temp)
Accuracy['all'].append(accuracy_all)
Accuracy['Card_ID'].append(Card_ID)
# data
############## LSTM
Card_ID_used_for_base = list(set(Card_ID_used))
for Card_ID in Card_ID_used_for_base:
if output_fig == 'duration':
# file_name = data_path + 'results/result_LSTM' + str(Card_ID) + 'test' + '.csv'
file_name = data_path + 'results/result_LSTM_con_dur' + str(Card_ID) + 'test' + '.csv'
#file_name = data_path + 'results/result_NGRAM_con_dur_' + str(Card_ID) + '.csv'
else:
file_name = data_path + 'results/result_Location_LSTM' + str(Card_ID) + 'test' + '.csv'
#file_name = data_path + 'results/result_NGRAM_location_' + str(Card_ID) + '.csv'
if os.path.exists(file_name) == False:
print(Card_ID,'does not exist for LSTM')
continue
data = pd.read_csv(file_name)
if output_fig == 'duration':
if duration_error == 'RMSE':
R_sq_first, R_sq_Remaining, R_sq_all = data_process_continuous_RMSE(data)
elif duration_error == 'MAPE':
R_sq_first, R_sq_Remaining, R_sq_all = data_process_continuous_MAPE(data)
else:
R_sq_first, R_sq_Remaining, R_sq_all = data_process_continuous_R_sq(data)
Accuracy_LSTM['first'].append(R_sq_first)
Accuracy_LSTM['Remaining'].append(R_sq_Remaining)
Accuracy_LSTM['all'].append(R_sq_all)
Accuracy_LSTM['Card_ID'].append(Card_ID)
else:
error_first_temp, Accuracy_first_temp, error_Remaining_temp, Accuracy_temp, accuracy_all = data_process_discrete(data)
#print (error_first_temp)
error_first = pd.concat([error_first, error_first_temp], axis = 0)
error_Remaining = pd.concat([error_Remaining, error_Remaining_temp], axis = 0)
Accuracy_LSTM['first'].append(Accuracy_first_temp)
Accuracy_LSTM['Remaining'].append(Accuracy_temp)
Accuracy_LSTM['all'].append(accuracy_all)
Accuracy_LSTM['Card_ID'].append(Card_ID)
############## NG
Card_ID_used_for_base = list(set(Card_ID_used))
for Card_ID in Card_ID_used_for_base:
if output_fig == 'duration':
# file_name = data_path + 'results/result_LSTM' + str(Card_ID) + 'test' + '.csv'
#file_name = data_path + 'results/result_LSTM_con_dur' + str(Card_ID) + 'test' + '.csv'
file_name = data_path + 'results/result_NGRAM_con_dur_' + str(Card_ID) + '.csv'
else:
#file_name = data_path + 'results/result_Location_LSTM' + str(Card_ID) + 'test' + '.csv'
file_name = data_path + 'results/result_NGRAM_location_' + str(Card_ID) + '.csv'
if os.path.exists(file_name) == False:
print(Card_ID,'does not exist for NG')
continue
data = pd.read_csv(file_name)
if output_fig == 'duration':
if duration_error == 'RMSE':
R_sq_first, R_sq_Remaining, R_sq_all = data_process_continuous_RMSE(data)
elif duration_error == 'MAPE':
R_sq_first, R_sq_Remaining, R_sq_all = data_process_continuous_MAPE(data)
else:
R_sq_first, R_sq_Remaining, R_sq_all = data_process_continuous_R_sq(data)
Accuracy_NG['first'].append(R_sq_first)
Accuracy_NG['Remaining'].append(R_sq_Remaining)
Accuracy_NG['all'].append(R_sq_all)
Accuracy_NG['Card_ID'].append(Card_ID)
else:
error_first_temp, Accuracy_first_temp, error_Remaining_temp, Accuracy_temp, accuracy_all = data_process_discrete(data)
#print (error_first_temp)
error_first = pd.concat([error_first, error_first_temp], axis = 0)
error_Remaining = pd.concat([error_Remaining, error_Remaining_temp], axis = 0)
Accuracy_NG['first'].append(Accuracy_first_temp)
Accuracy_NG['Remaining'].append(Accuracy_temp)
Accuracy_NG['all'].append(accuracy_all)
Accuracy_NG['Card_ID'].append(Card_ID)
############## MC
for Card_ID in Card_ID_used_for_base:
if output_fig == 'duration':
# file_name = data_path + 'results/result_MC' + str(Card_ID) + '.csv'
file_name = data_path + 'results/result_LR' + str(Card_ID) + 'test.csv'
else:
file_name = data_path + 'results/result_Location_MC' + str(Card_ID) + '.csv'
if not os.path.exists(file_name):
print(Card_ID, 'does not exist for Base')
continue
data = pd.read_csv(file_name)
if output_fig == 'duration':
if duration_error == 'RMSE':
R_sq_first, R_sq_Remaining, R_sq_all = data_process_continuous_RMSE(data)
elif duration_error == 'MAPE':
R_sq_first, R_sq_Remaining, R_sq_all = data_process_continuous_MAPE(data)
else:
R_sq_first, R_sq_Remaining, R_sq_all = data_process_continuous_R_sq(data)
Accuracy_base['first'].append(R_sq_first)
Accuracy_base['Remaining'].append(R_sq_Remaining)
Accuracy_base['all'].append(R_sq_all)
Accuracy_base['Card_ID'].append(Card_ID)
else:
error_first_temp, Accuracy_first_temp, error_Remaining_temp, Accuracy_temp, accuracy_all = data_process_discrete(data)
# print (error_first_temp)
error_first_base = pd.concat([error_first_base, error_first_temp], axis=0)
error_Remaining_base = | pd.concat([error_Remaining_base, error_Remaining_temp], axis=0) | pandas.concat |
#%%
# ---------------------------------
# Preliminary setup (data, etc.)
# ----------------------------------
import numpy as np
import pandas as pd
# train_x is the training data, train_y is the target variable, test_x is the test data
# pandas DataFrame and Series are used (numpy arrays are sometimes used as well)
train = | pd.read_csv('../input/sample-data/train.csv') | pandas.read_csv |
import os
import random
import numpy as np
import pandas as pd
import scanpy as sc
from tqdm import tqdm
from multiprocessing import Pool, RLock
class Sampling:
def sample(self, ncells=10000, pop_fp=None, sim_fp=None, cache=True, return_data=False):
print (f"Simulation: {self.network_name} Sampling Cells...", flush=True)
cells_meta, gene_expr = self.sampling_cells(ncells, sim_fp)
print (f"Simulation: {self.network_name} Sampling Molecules...", flush=True)
lib_sizes = self.sampling_molecules(gene_expr, pop_fp)
cells_meta = self.clean_cells_metadata(cells_meta, lib_sizes)
cells_meta = cells_meta.reset_index(drop=True)
if cache:
print (f"Simulation: {self.network_name} Caching....", flush=True)
cells_meta.to_csv(os.path.join(self.metadata_dir,
'cells_metadata.csv.gz'),
compression='gzip', index=False)
if return_data:
fp = os.path.join(self.metadata_dir, 'gene_expression.csv.gz')
gene_expr = pd.read_csv(fp, dtype=np.int16)
return cells_meta, gene_expr
else:
return None, None
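# Minimal usage sketch (assumes an object that provides the attributes Sampling relies on,
# e.g. network_name, results_dir, metadata_dir and pop_fp -- construction is not shown here):
#   cells_meta, gene_expr = sim.sample(ncells=5000, cache=True, return_data=True)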
def sampling_cells(self, ncells, sim_fp):
if sim_fp is None:
sim_fp = os.path.join(self.results_dir, 'simulated_counts.csv.gz')
self.cell_sim_meta = pd.read_csv(f'{self.results_dir}/cell_metadata.csv.gz')
self.cell_sim_meta = self.cell_sim_meta.reset_index().rename(columns={'index': 'cell_i'})
if ncells > self.cell_sim_meta.shape[0]:
raise Exception(f"Simulation: {self.network_name} Number of cells requested is greater than the number of cells simulated. Sample fewer cells...")
cells_meta = self.get_cells_meta()
cells = self.sample_cells_per_grna(cells_meta, ncells)
cells_meta = cells_meta.iloc[cells]
gene_expr = self.load_cells(cells, sim_fp)
return cells_meta, gene_expr
def sampling_molecules(self, gene_expr, pop_fp=None):
if pop_fp is None:
pop = sc.read_loom(self.pop_fp)
else:
pop = sc.read_loom(pop_fp)
realcounts = pop.X.toarray()
cell_umi = pop.obs.total_counts.values
lib_size = self.calc_library_size(cell_umi, gene_expr)
self.downsampling(realcounts, gene_expr, lib_size)
return lib_size
def clean_cells_metadata(self, meta, lib_sizes):
meta['lib_size'] = lib_sizes
meta['grna'] = meta['sim_label'].apply(lambda x: "_".join(x.split('_')[0:2]))
meta['target_gene'] = meta['sim_label'].apply(lambda x: x.split('-grna')[0])
if self.crispr_type == 'knockout':
meta['is_cell_perturbed'] = meta['sim_label'].apply(lambda x: x.split('_')[-1])
meta.loc[meta.target_gene == self.ctrl_label, 'is_cell_perturbed'] = self.ctrl_label
else:
meta['is_cell_perturbed'] = 'PRT'
meta.loc[meta.target_gene == self.ctrl_label, 'is_cell_perturbed'] = self.ctrl_label
meta = meta.reset_index(drop=True)
return meta
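# Worked example of the label parsing above (the sim_label format is assumed, e.g. 'GENE1-grna_2_PRT'):
# grna joins the first two '_'-separated fields -> 'GENE1-grna_2',
# target_gene is everything before '-grna' -> 'GENE1',
# and for knockouts is_cell_perturbed is the last field -> 'PRT' (or the ctrl_label for controls).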
def load_cells(self, sampled_cells, sim_fp):
df = | pd.read_csv(sim_fp, dtype=np.int16) | pandas.read_csv |
from finrl_meta.data_processors.processor_alpaca import AlpacaProcessor as Alpaca
from finrl_meta.data_processors.processor_wrds import WrdsProcessor as Wrds
from finrl_meta.data_processors.processor_yahoofinance import YahooFinanceProcessor as YahooFinance
from finrl_meta.data_processors.processor_binance import BinanceProcessor as Binance
from finrl_meta.data_processors.processor_ricequant import RiceQuantProcessor as RiceQuant
from finrl_meta.data_processors.processor_joinquant import JoinquantProcessor
from finrl_meta.data_processors.processor_tusharepro import TushareProProcessor as Tusharepro
import pandas as pd
import numpy as np
import os
class DataProcessor():
def __init__(self, data_source, **kwargs):
self.data_source = data_source
self.dataframe = pd.DataFrame()
if self.data_source == 'alpaca':
try:
# users should input values: kwargs['API_KEY'], kwargs['API_SECRET'], kwargs['APCA_API_BASE_URL'], kwargs['API']
self.processor = Alpaca(data_source, **kwargs)
print('Alpaca successfully connected')
except:
raise ValueError('Please input correct account info for alpaca!')
elif self.data_source == "joinquant":
try:
# users should input values: kwargs['username'], kwargs['password']
self.processor = JoinquantProcessor(data_source, **kwargs)
print('Joinquant successfully connected')
except:
raise ValueError('Please input correct account info for joinquant!')
elif self.data_source == 'ricequant':
try:
# users should input values: kwargs['username'], kwargs['password']
self.processor = RiceQuant(data_source, **kwargs)
print('Ricequant successfully connected')
except:
raise ValueError('Please input correct account info for ricequant!')
elif self.data_source == 'wrds':
try:
# users should input values: kwargs['if_offline']
self.processor = Wrds(data_source, **kwargs)
print('Wrds successfully connected')
except:
raise ValueError('Please input correct account info for wrds!')
elif self.data_source == 'yahoofinance':
try:
self.processor = YahooFinance(data_source, **kwargs)
print('Yahoofinance successfully connected')
except:
raise ValueError('Please input correct account info for yahoofinance!')
elif self.data_source == 'binance':
try:
self.processor = Binance(data_source, **kwargs)
print('Binance successfully connected')
except:
raise ValueError('Please input correct account info for binance!')
elif self.data_source == "tusharepro":
try:
# users should input values: kwargs['token'], choose to input values: kwargs['adj']
self.processor = Tusharepro(data_source, **kwargs)
print('tusharepro successfully connected')
except:
raise ValueError('Please input correct account info for tusharepro!')
else:
raise ValueError('Data source input is NOT supported yet.')
def download_data(self, ticker_list, start_date, end_date, time_interval):
self.processor.download_data(ticker_list=ticker_list,
start_date=start_date,
end_date=end_date,
time_interval=time_interval)
self.dataframe = self.processor.dataframe
def clean_data(self):
self.processor.dataframe = self.dataframe
self.processor.clean_data()
self.dataframe = self.processor.dataframe
def add_technical_indicator(self, tech_indicator_list):
self.tech_indicator_list = tech_indicator_list
self.processor.add_technical_indicator(tech_indicator_list)
self.dataframe = self.processor.dataframe
def add_turbulence(self):
self.processor.add_turbulence()
self.dataframe = self.processor.dataframe
def add_vix(self):
self.processor.add_vix()
self.dataframe = self.processor.dataframe
def df_to_array(self, if_vix) -> np.array:
price_array, tech_array, turbulence_array = self.processor.df_to_array(self.tech_indicator_list, if_vix)
# fill nan with 0 for technical indicators
tech_nan_positions = np.isnan(tech_array)
tech_array[tech_nan_positions] = 0
return price_array, tech_array, turbulence_array
def run(self, ticker_list, start_date, end_date, time_interval,
technical_indicator_list, if_vix, cache=False):
if time_interval == "1s" and self.data_source != "binance":
raise ValueError("Currently 1s interval data is only supported with 'binance' as data source")
cache_csv = '_'.join(ticker_list + [self.data_source, start_date, end_date, time_interval]) + '.csv'
cache_dir = './cache'
cache_path = os.path.join(cache_dir, cache_csv)
if cache and os.path.isfile(cache_path):
print('Using cached file {}'.format(cache_path))
self.tech_indicator_list = technical_indicator_list
self.dataframe = | pd.read_csv(cache_path) | pandas.read_csv |
"""
Tasks
-------
Search and transform jsonable structures, specifically to make it 'easy' to make tabular/csv output for other consumers.
Example
~~~~~~~~~~~~~
*give me a list of all the fields called 'id' in this stupid, gnarly
thing*
>>> Q('id',gnarly_data)
['id1','id2','id3']
Observations:
---------------------
1) 'simple data structures' exist and are common. They are tedious
to search.
2) The DOM is another nested / treeish structure, and jQuery selector is
a good tool for that.
3a) R, Numpy, Excel and other analysis tools want 'tabular' data. These
analyses are valuable and worth doing.
3b) Dot/Graphviz, NetworkX, and some other analyses *like* treeish/dicty
things, and those analyses are also worth doing!
3c) Some analyses are best done using 'one-off' and custom code in C, Python,
or another 'real' programming language.
4) Arbitrary transforms are tedious and error prone. SQL is one solution,
XSLT is another,
5) the XPATH/XML/XSLT family is.... not universally loved :) They are
very complete, and the completeness can make simple cases... gross.
6) For really complicated data structures, we can write one-off code. Getting
80% of the way is mostly okay. There will always have to be programmers
in the loop.
7) Re-inventing SQL is probably a failure mode. So is reinventing XPATH, XSLT
and the like. Be wary of mission creep! Re-use when possible (e.g., can
we put the thing into a DOM using
8) If the interface is good, people can improve performance later.
Simplifying
---------------
1) Assuming 'jsonable' structures
2) keys are strings or stringlike. Python allows any hashable to be a key.
for now, we pretend that doesn't happen.
3) assumes most dicts are 'well behaved'. DAG, no cycles!
4) assume that if people want really specialized transforms, they can do it
themselves.
"""
from __future__ import print_function
from collections import namedtuple
import csv
import itertools
from itertools import product
from operator import attrgetter as aget, itemgetter as iget
import operator
import sys
from pandas.compat import map, u, callable, Counter
import pandas.compat as compat
## note 'url' appears multiple places and not all extensions have same struct
ex1 = {
'name': 'Gregg',
'extensions': [
{'id':'hello',
'url':'url1'},
{'id':'gbye',
'url':'url2',
'more': dict(url='url3')},
]
}
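# Illustrative sketch only (not this module's actual implementation): a minimal
# recursive search that returns every value stored under a given key, matching the
# behaviour promised by the `Q('id', gnarly_data)` example in the docstring above.
# The helper name `_q_sketch` is hypothetical.
def _q_sketch(key, obj):
    """Collect all values stored under `key` anywhere inside a jsonable structure."""
    found = []
    if isinstance(obj, dict):
        for k, v in obj.items():
            if k == key:
                found.append(v)
            found.extend(_q_sketch(key, v))
    elif isinstance(obj, (list, tuple)):
        for item in obj:
            found.extend(_q_sketch(key, item))
    return found

# _q_sketch('id', ex1) -> ['hello', 'gbye']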
## much longer example
ex2 = {u('metadata'): {u('accessibilities'): [{u('name'): u('accessibility.tabfocus'),
u('value'): 7},
{u('name'): u('accessibility.mouse_focuses_formcontrol'), u('value'): False},
{u('name'): u('accessibility.browsewithcaret'), u('value'): False},
{u('name'): u('accessibility.win32.force_disabled'), u('value'): False},
{u('name'): u('accessibility.typeaheadfind.startlinksonly'), u('value'): False},
{u('name'): u('accessibility.usebrailledisplay'), u('value'): u('')},
{u('name'): u('accessibility.typeaheadfind.timeout'), u('value'): 5000},
{u('name'): u('accessibility.typeaheadfind.enabletimeout'), u('value'): True},
{u('name'): u('accessibility.tabfocus_applies_to_xul'), u('value'): False},
{u('name'): u('accessibility.typeaheadfind.flashBar'), u('value'): 1},
{u('name'): u('accessibility.typeaheadfind.autostart'), u('value'): True},
{u('name'): u('accessibility.blockautorefresh'), u('value'): False},
{u('name'): u('accessibility.browsewithcaret_shortcut.enabled'),
u('value'): True},
{u('name'): u('accessibility.typeaheadfind.enablesound'), u('value'): True},
{u('name'): u('accessibility.typeaheadfind.prefillwithselection'),
u('value'): True},
{u('name'): u('accessibility.typeaheadfind.soundURL'), u('value'): u('beep')},
{u('name'): u('accessibility.typeaheadfind'), u('value'): False},
{u('name'): u('accessibility.typeaheadfind.casesensitive'), u('value'): 0},
{u('name'): u('accessibility.warn_on_browsewithcaret'), u('value'): True},
{u('name'): u('accessibility.usetexttospeech'), u('value'): u('')},
{u('name'): u('accessibility.accesskeycausesactivation'), u('value'): True},
{u('name'): u('accessibility.typeaheadfind.linksonly'), u('value'): False},
{u('name'): u('isInstantiated'), u('value'): True}],
u('extensions'): [{u('id'): u('216ee7f7f4a5b8175374cd62150664efe2433a31'),
u('isEnabled'): True},
{u('id'): u('1aa53d3b720800c43c4ced5740a6e82bb0b3813e'), u('isEnabled'): False},
{u('id'): u('01ecfac5a7bd8c9e27b7c5499e71c2d285084b37'), u('isEnabled'): True},
{u('id'): u('1c01f5b22371b70b312ace94785f7b0b87c3dfb2'), u('isEnabled'): True},
{u('id'): u('fb723781a2385055f7d024788b75e959ad8ea8c3'), u('isEnabled'): True}],
u('fxVersion'): u('9.0'),
u('location'): u('zh-CN'),
u('operatingSystem'): u('WINNT Windows NT 5.1'),
u('surveyAnswers'): u(''),
u('task_guid'): u('d69fbd15-2517-45b5-8a17-bb7354122a75'),
u('tpVersion'): | u('1.2') | pandas.compat.u |
import datetime
import matplotlib.pyplot as plt
import mpl_finance
import numpy as np
import pandas as pd
import tushare as ts
from lib.token import TOKEN
from setting import split_span
# setting pro api, please use your own token, back for work test
ts.set_token(TOKEN)
PRO = ts.pro_api()
def date_split(start_date, end_date, span=split_span):
"""
utility func to split dates into a list of time spans
Parameters
----------
start_date: str
start date with format yyyymmdd
end_date: str
end date with format yyyymmdd
span:int
days of time span, must > 1 or there will be bug
Returns
-------
list of start/end date tuples ("yyyymmdd", "yyyymmdd")
"""
start_date = datetime.datetime.strptime(start_date, "%Y%m%d")
end_date = datetime.datetime.strptime(end_date, "%Y%m%d")
time_spans = []
while (end_date - start_date).days > span:
s = datetime.datetime.strftime(start_date, "%Y%m%d")
start_date = start_date + datetime.timedelta(span)
e = datetime.datetime.strftime(start_date - datetime.timedelta(1), "%Y%m%d")
time_spans.append((s, e))
s = datetime.datetime.strftime(start_date, "%Y%m%d")
e = datetime.datetime.strftime(end_date, "%Y%m%d")
time_spans.append((s, e))
return time_spans
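# Illustrative example (dates assumed, not from a real run): with span=30,
# date_split("20200101", "20200301", span=30) returns
# [("20200101", "20200130"), ("20200131", "20200301")] -- fixed-length windows
# followed by a final window that covers whatever remains up to end_date.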
def hist_data_down(stock_id, start_date, end_date):
"""
utility func to download historical trade data
Parameters
----------
stock_id: str
stock id code
start_date: str
start date with format yyyymmdd
end_date: str
end date with format yyyymmdd
Returns
-------
df of stock trade history data
"""
time_spans = date_split(start_date, end_date)
dfs = []
for start, end in reversed(time_spans):
try:
d = PRO.daily(ts_code=stock_id, start_date=start, end_date=end)
dfs.append(d)
except:
raise RuntimeError("{} to {} download error".format(start, end))
df = | pd.concat(dfs, axis=0) | pandas.concat |
import requests
import pandas as pd
import pickle
import datetime
import guithread
import numpy as np
import concurrent.futures
import time
from os import makedirs
from config import text_width, max_thread_count
class Acquisition(guithread.GUIThread):
def __init__(self, filename='default.csv', brain_region='All', species='All', cell_type='All'):
self.filename = filename
self.brain_region, self.species, self.cell_type = brain_region, species, cell_type
self.session = requests.Session()
self.params_widg = {}
if brain_region != 'All':
self.params_widg['brain_region'] = 'brain_region:' + brain_region
if species != 'All':
self.params_widg['species'] = 'species:' + species
if cell_type != 'All':
self.params_widg['cell_type'] = 'cell_type:' + cell_type
self.params = {}
self.params['page'] = 0
self.params['size'] = 500
fq = []
first = 0
for key, value in self.params_widg.items():
if first == 0:
first = 1
self.params['q'] = value
else:
fq.append(value)
self.params['fq'] = fq
if brain_region == 'All' and species == 'All' and cell_type == 'All':
self.url = 'http://neuromorpho.org/api/neuron'
else:
self.url = 'http://neuromorpho.org/api/neuron/select'
super().__init__()
def get_first_page(self):
brain_region, species, cell_type = self.brain_region, self.species, self.cell_type
s = self.session
first_page_response = s.get(self.url, params=self.params)
print(first_page_response.json())
if first_page_response.status_code == 404 or first_page_response.status_code == 500:
self.print_to_textbox("Unable to get CSV! Status code: " + str(first_page_response.status_code))
            return 0, 0
elif 'status' in first_page_response.json() and first_page_response.json()['status'] == 500:
self.print_to_textbox("Unable to get CSV! Status code: " + str(first_page_response.json()['status']))
            return 0, 0
print(str(first_page_response.request.url))
print(first_page_response.status_code)
return first_page_response.json()['page']['totalPages'], first_page_response.json()['page']['totalElements']
def get_morphometry(self, np_array):
morphometry = []
for i in np_array:
url = "http://neuromorpho.org/api/morphometry/id/" + str(i)
response = self.session.get(url)
response.raise_for_status()
json_data = response.json()
morphometry.append(json_data)
if response.status_code == 200:
text_status_code = '\u2705'
else:
text_status_code = '\u274C'
self.print_to_textbox('Querying cells {} -> status code: {} {}'.format(
str(i), response.status_code, text_status_code)
)
return morphometry
def run(self):
file_name = ""
try:
brain_region, species, cell_type = self.brain_region, self.species, self.cell_type
s = self.session
starttime = datetime.datetime.now()
self.set_progress(0)
self.print_to_textbox(brain_region + '\n' + species + '\n' + cell_type + '\n')
            totalPages, totalElements = self.get_first_page()
            if not totalPages:
                # get_first_page() returns (0, 0) when the request failed
                return 0
            self.print_to_textbox("Getting Neurons - total elements:" + str(totalElements) +
                                  "\nDo you want to continue?")
timer = 10
while self.is_paused and not self.is_killed:
time.sleep(1)
timer -= 1
self.print_to_textbox("Will continue in: " + str(timer) + " seconds")
if timer == 0:
break
if self.is_killed:
self.print_to_textbox("CANCELLED!!!")
self.print_to_textbox("\n" + "#" * text_width + "\n")
self.set_progress(0)
return 0
self.print_to_textbox("Continuing...")
df_dict = {
'NeuronID': list(),
'Neuron Name': list(),
'Archive': list(),
'Note': list(),
'Age Scale': list(),
'Gender': list(),
'Age Classification': list(),
'Brain Region': list(),
'Cell Type': list(),
'Species': list(),
'Strain': list(),
'Scientific Name': list(),
'Stain': list(),
'Experiment Condition': list(),
'Protocol': list(),
'Slicing Direction': list(),
'Reconstruction Software': list(),
'Objective Type': list(),
'Original Format': list(),
'Domain': list(),
'Attributes': list(),
'Magnification': list(),
'Upload Date': list(),
'Deposition Date': list(),
'Shrinkage Reported': list(),
'Shrinkage Corrected': list(),
'Reported Value': list(),
'Reported XY': list(),
'Reported Z': list(),
'Corrected Value': list(),
'Corrected XY': list(),
'Corrected Z': list(),
'Slicing Thickness': list(),
'Min Age': list(),
'Max Age': list(),
'Min Weight': list(),
'Max Weight': list(),
'Png URL': list(),
'Reference PMID': list(),
'Reference DOI': list(),
'Physical Integrity': list()}
self.print_to_textbox("Getting Neurons - total pages:" + str(totalPages))
progress_step = 20.0/totalPages
for pageNum in range(totalPages):
self.params['page'] = pageNum
response = s.get(self.url, params=self.params)
if response.status_code == 200:
text_status_code = '\u2705'
else:
text_status_code = '\u274C'
self.print_to_textbox('Querying page {} -> status code: {} {}'.format(
pageNum, response.status_code, text_status_code))
if response.status_code == 200: # only parse successful requests
data = response.json()
for row in data['_embedded']['neuronResources']:
df_dict['NeuronID'].append(str(row['neuron_id']))
df_dict['Neuron Name'].append(str(row['neuron_name']))
df_dict['Archive'].append(str(row['archive']))
df_dict['Note'].append(str(row['note']))
df_dict['Age Scale'].append(str(row['age_scale']))
df_dict['Gender'].append(str(row['gender']))
df_dict['Age Classification'].append(str(row['age_classification']))
df_dict['Brain Region'].append(str(row['brain_region']))
df_dict['Cell Type'].append(str(row['cell_type']))
df_dict['Species'].append(str(row['species']))
df_dict['Strain'].append(str(row['strain']))
df_dict['Scientific Name'].append(str(row['scientific_name']))
df_dict['Stain'].append(str(row['stain']))
df_dict['Experiment Condition'].append(str(row['experiment_condition']))
df_dict['Protocol'].append(str(row['protocol']))
df_dict['Slicing Direction'].append(str(row['slicing_direction']))
df_dict['Reconstruction Software'].append(str(row['reconstruction_software']))
df_dict['Objective Type'].append(str(row['objective_type']))
df_dict['Original Format'].append(str(row['original_format']))
df_dict['Domain'].append(str(row['domain']))
df_dict['Attributes'].append(str(row['attributes']))
df_dict['Magnification'].append(str(row['magnification']))
df_dict['Upload Date'].append(str(row['upload_date']))
df_dict['Deposition Date'].append(str(row['deposition_date']))
df_dict['Shrinkage Reported'].append(str(row['shrinkage_reported']))
df_dict['Shrinkage Corrected'].append(str(row['shrinkage_corrected']))
df_dict['Reported Value'].append(str(row['reported_value']))
df_dict['Reported XY'].append(str(row['reported_xy']))
df_dict['Reported Z'].append(str(row['reported_z']))
df_dict['Corrected Value'].append(str(row['corrected_value']))
df_dict['Corrected XY'].append(str(row['corrected_xy']))
df_dict['Corrected Z'].append(str(row['corrected_z']))
df_dict['Slicing Thickness'].append(str(row['slicing_thickness']))
df_dict['Min Age'].append(str(row['min_age']))
df_dict['Max Age'].append(str(row['max_age']))
df_dict['Min Weight'].append(str(row['min_weight']))
df_dict['Max Weight'].append(str(row['max_weight']))
df_dict['Png URL'].append(str(row['png_url']))
df_dict['Reference PMID'].append(str(row['reference_pmid']))
df_dict['Reference DOI'].append(str(row['reference_doi']))
df_dict['Physical Integrity'].append(str(row['physical_Integrity']))
self.set_progress(pageNum * progress_step)
self.set_progress(20)
self.print_to_textbox("Creating neuron Data Frame")
neurons_df = pd.DataFrame(df_dict)
self.set_progress(25)
self.print_to_textbox("Pickling neurons")
makedirs("./output", exist_ok=True)
neurons_df.to_pickle("./output/neurons.pkl")
self.set_progress(30)
# the ID number of previously obtained neurons is used to obtain their morphometric details
np_array = neurons_df['NeuronID'].to_numpy()
np_arrays = np.array_split(np_array, max_thread_count)
self.print_to_textbox("Getting morphometry")
morphometry = []
progress_step = 40.0 / np_array.size
progress_value = 0.0
with concurrent.futures.ThreadPoolExecutor(max_workers=max_thread_count) as executor:
futures = []
for n in np_arrays:
futures.append(executor.submit(self.get_morphometry, np_array=n))
for future in concurrent.futures.as_completed(futures):
morphometry.extend(future.result())
print(morphometry)
self.set_progress(70)
self.print_to_textbox("Creating morphometry Data Frame")
df_dict = {}
df_dict['NeuronID'] = []
df_dict['Surface'] = []
df_dict['Volume'] = []
df_dict['Soma surface'] = []
df_dict['Number of stems'] = []
df_dict['Number of bifurcations'] = []
df_dict['Number of branches'] = []
df_dict['Width'] = []
df_dict['Height'] = []
df_dict['Depth'] = []
df_dict['Diameter'] = []
df_dict['Euclidian distance'] = []
df_dict['Path distance'] = []
df_dict['Branch order'] = []
df_dict['Contraction'] = []
df_dict['Fragmentation'] = []
df_dict['Partition asymmetry'] = []
df_dict['Pk classic'] = []
df_dict['Bifurcation angle local'] = []
df_dict['Fractal dimension'] = []
df_dict['Bifurcation angle remote'] = []
df_dict['Length'] = []
for row in morphometry:
df_dict['NeuronID'].append(str(row['neuron_id']))
df_dict['Surface'].append(str(row['surface']))
df_dict['Volume'].append(str(row['volume']))
df_dict['Soma surface'].append(str(row['soma_Surface']))
df_dict['Number of stems'].append(str(row['n_stems']))
df_dict['Number of bifurcations'].append(str(row['n_bifs']))
df_dict['Number of branches'].append(str(row['n_branch']))
df_dict['Width'].append(str(row['width']))
df_dict['Height'].append(str(row['height']))
df_dict['Depth'].append(str(row['depth']))
df_dict['Diameter'].append(str(row['diameter']))
df_dict['Euclidian distance'].append(str(row['eucDistance']))
df_dict['Path distance'].append(str(row['pathDistance']))
df_dict['Branch order'].append(str(row['branch_Order']))
df_dict['Contraction'].append(str(row['contraction']))
df_dict['Fragmentation'].append(str(row['fragmentation']))
df_dict['Partition asymmetry'].append(str(row['partition_asymmetry']))
df_dict['Pk classic'].append(str(row['pk_classic']))
df_dict['Bifurcation angle local'].append(str(row['bif_ampl_local']))
df_dict['Fractal dimension'].append(str(row['fractal_Dim']))
df_dict['Bifurcation angle remote'].append(str(row['bif_ampl_remote']))
df_dict['Length'].append(str(row['length']))
morphometry_df = pd.DataFrame(df_dict)
self.set_progress(75)
self.print_to_textbox("Pickling morphometry")
morphometry_df.to_pickle("./output/morphometry.pkl")
# the following is a list of steps used to currate the morphometric data
# and merge the two obtained dataframes (general neuron parameters and morphometric data)
# this results in the creation of final .pkl and .csv files at the end of the notebook
neurons = open("./output/morphometry.pkl", "rb")
neurons_df = pickle.load(neurons)
neurons.close()
self.set_progress(80)
self.print_to_textbox(neurons_df)
neurons_df = neurons_df.replace({'Soma surface': {'None': ''}}, regex=True)
neurons_df["Surface"] = pd.to_numeric(neurons_df["Surface"], downcast="float")
neurons_df["Volume"] = pd.to_numeric(neurons_df["Volume"], downcast="float")
neurons_df["Soma surface"] = pd.to_numeric(neurons_df["Soma surface"], downcast="float")
neurons_df["Number of stems"] = pd.to_numeric(neurons_df["Number of stems"], downcast="float")
neurons_df["Number of bifurcations"] = pd.to_numeric(neurons_df["Number of bifurcations"], downcast="float")
neurons_df["Number of branches"] = pd.to_numeric(neurons_df["Number of branches"], downcast="float")
neurons_df["Width"] = pd.to_numeric(neurons_df["Width"], downcast="float")
neurons_df["Height"] = pd.to_numeric(neurons_df["Height"], downcast="float")
neurons_df["Depth"] = pd.to_numeric(neurons_df["Depth"], downcast="float")
neurons_df["Diameter"] = pd.to_numeric(neurons_df["Diameter"], downcast="float")
neurons_df["Euclidian distance"] = pd.to_numeric(neurons_df["Euclidian distance"], downcast="float")
neurons_df["Path distance"] = pd.to_numeric(neurons_df["Path distance"], downcast="float")
neurons_df["Branch order"] = pd.to_numeric(neurons_df["Branch order"], downcast="float")
neurons_df["Contraction"] = pd.to_numeric(neurons_df["Contraction"], downcast="float")
neurons_df["Fragmentation"] = pd.to_numeric(neurons_df["Fragmentation"], downcast="float")
neurons_df["Partition asymmetry"] = pd.to_numeric(neurons_df["Partition asymmetry"], downcast="float")
neurons_df["Pk classic"] = pd.to_numeric(neurons_df["Pk classic"], downcast="float")
neurons_df["Bifurcation angle local"] = pd.to_numeric(neurons_df["Bifurcation angle local"], downcast="float")
neurons_df["Fractal dimension"] = pd.to_numeric(neurons_df["Fractal dimension"], downcast="float")
neurons_df["Number of branches"] = pd.to_numeric(neurons_df["Number of branches"], downcast="float")
neurons_df["Bifurcation angle remote"] = pd.to_numeric(neurons_df["Bifurcation angle remote"], downcast="float")
neurons_df["Length"] = | pd.to_numeric(neurons_df["Length"], downcast="float") | pandas.to_numeric |
#!/usr/bin/env python
# coding: utf-8
# In[6]:
import cx_Oracle
import time
from datetime import date
import pandas as pd
import os
import numpy
import omdtfn as odt
#conn= MySQLdb.connect("localhost","root","admin","omdb")
#df_mysql = pd.read_sql("select * from sitedb",conn)
omdb = os.getcwd() + "\\" + "OMDB.csv"
pntxt = os.getcwd() + "\\" + "Periodic_Notification.txt"
pth = os.getcwd() + "\\" + "WRT1.csv"
pth2 = os.getcwd() + "\\" + "WRT2.csv"
#lambda <args> : <return Value> if <condition > ( <return value > if <condition> else <return value>)
TS = lambda x : '2G' if ('2G SITE DOWN' in x) else ('3G' if ('3G SITE DOWN' in x) else ('4G' if ('4G SITE DOWN' in x) else ('MF' if ('MAIN' in x) else ('DC' if ('VOLTAGE' in x) else ('TM' if ('TEMPERATURE' in x) else ('SM' if ('SMOKE' in x) else ('GN' if ('GEN' in x) else ('GN' if ('GENSET' in x) else ('TH' if ('THEFT' in x) else ('2_CELL' if ('2G CELL DOWN' in x) else ('3_CELL' if ('3G CELL DOWN' in x) else ('4_CELL' if ('4G CELL DOWN' in x) else "NA"))))))))))))
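# Example of how the TS classifier above maps alarm summaries to category codes
# (summary strings are made up for illustration):
#   TS('2G SITE DOWN - ABC123')  -> '2G'
#   TS('MAINS FAIL')             -> 'MF'
#   TS('LOW VOLTAGE ALARM')      -> 'DC'
#   TS('UNKNOWN ALARM TEXT')     -> 'NA'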
def write2txt(flname,txt):
fo = open(flname,"w+")
txt = fo.write(txt)
fo.close()
class omdf:
def __init__(self,dic):
self.df = pd.DataFrame(dic)
self.arr = self.df.to_numpy()
self.lst = list(self.df.columns.values)
self.aList = []
def df_addcol_lamda(self):
self.df['cat'] = self.df.apply(lambda row: TS(row.Summary), axis = 1)
return self.df.to_dict()
def df_addcol_fdic(self,d,newcolname):
self.df[newcolname] = self.df['scode'].map(d)
return self.df.to_dict()
def df_apply_on_col(self,newcolname):
self.df[newcolname] = self.df.apply(lambda x : x.CustomAttr15[0:5], axis = 1)
return self.df.to_dict()
def df_remove_col_by_list(self,lis):
ndf = self.df[lis]
return ndf.to_dict()
def PNPW(dic,lis):
ndf = pd.DataFrame(dic)
ar = ndf.to_numpy()
lcol = (ar).shape[1]
j = 0
G2T = 0
G3T = 0
G4T = 0
heap = ""
for i in lis:
g2 = ndf[ndf['cat'].str.contains('MF') & ndf['Zone'].str.contains(lis[j])]
g3 = ndf[ndf['cat'].str.contains('DL') & ndf['Zone'].str.contains(lis[j])]
G2T = g2.shape[0] + G2T
G3T = g3.shape[0] + G3T
hd = str(lis[j]) + ": " + str(g2.shape[0]) + "/" + str(g3.shape[0])
if j == 0:
heap = hd
else:
heap = heap + '\n' + hd
j = j + 1
reg = 'Region: ' + 'MF/DL'
Nat = 'National: ' + str(G2T) + '/' + str(G3T)
heaps = reg + '\n' + Nat + '\n' + '\n' + heap
return heaps
def ByCat(dic,lis,strval):
ndf = pd.DataFrame(dic)
ar = ndf.to_numpy()
lcol = (ar).shape[1]
j = 0
G2T = 0
heap = ""
for i in lis:
g2 = ndf[ndf['cat'].str.contains(strval) & ndf['Zone'].str.contains(lis[j])]
G2T = g2.shape[0] + G2T
hd = str(lis[j]) + ": " + str(g2.shape[0])
if j == 0:
heap = hd
else:
heap = heap + '\n' + hd
j = j + 1
heaps = "National: " + str(G2T) + '\n' + '\n' + heap
return heaps
def PN_Format(dic,lis):
ndf = pd.DataFrame(dic)
ar = ndf.to_numpy()
lcol = (ar).shape[1]
j = 0
G2T = 0
G3T = 0
G4T = 0
heap = ""
for i in lis:
g2 = ndf[ndf['cat'].str.contains('2G') & ndf['Zone'].str.contains(lis[j])]
g3 = ndf[ndf['cat'].str.contains('3G') & ndf['Zone'].str.contains(lis[j])]
g4 = ndf[ndf['cat'].str.contains('4G') & ndf['Zone'].str.contains(lis[j])]
G2T = g2.shape[0] + G2T
G3T = g3.shape[0] + G3T
G4T = g4.shape[0] + G4T
hd = str(lis[j]) + ": " + str(g2.shape[0]) + "/" + str(g3.shape[0]) + "/" + str(g4.shape[0])
if j == 0:
heap = hd
else:
heap = heap + '\n' + hd
j = j + 1
hd = "Update of Site Down at " + odt.hrmin() + ' On ' + odt.dtmnyr()
reg = 'Region: ' + '2G/3G/4G'
Nat = 'National: ' + str(G2T) + '/' + str(G3T) + '/' + str(G4T)
heaps = hd + '\n' + '\n' + reg + '\n' + Nat + '\n' + '\n' + heap
return heaps
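# Example of the text block PN_Format assembles (numbers are illustrative and the
# timestamp format depends on the omdtfn helpers):
#   Update of Site Down at 10:30 On 01Jan22
#
#   Region: 2G/3G/4G
#   National: 12/8/5
#
#   DHK_S: 1/0/2
#   DHK_N: 0/1/0
#   ...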
def PN(dicc):
ls1 = ['CustomAttr15','Resource','Summary','LastOccurrence','BCCH']
ls2 = ['Code','Zone']
dfsingle = pd.DataFrame(dicc)
dfomdb = pd.read_csv(omdb)
dfs = dfsingle[ls1]
dfdb = dfomdb[ls2]
x1 = omdf(dfs)
dfs1 = x1.df_addcol_lamda()
dfx = pd.DataFrame(dfs1)
dfx.to_csv(pth)
x2 = omdf(dfs1)
dfs2 = pd.DataFrame(x2.df_apply_on_col('Code'))
mergedDf = dfs2.merge(dfdb, on='Code')
#dff = mergedDf[mergedDf['BCCH'].str.contains('YES')]
mergedDf.to_csv(pth2)
ls3 = ['DHK_S','DHK_N','DHK_M','CTG_S','CTG_N','CTG_M','COM','NOA','SYL','MYM','BAR','KHL','KUS','RAJ','RANG']
#print(ByCat(mergedDf.to_dict(),ls3,"4G"))
txt = PN_Format(mergedDf.to_dict(),ls3)
txtpw = PNPW(mergedDf.to_dict(),ls3)
#print(txtpw)
#write2txt(pntxt,txt)
return txt
def semqry1(tbl,usr, pas, selcol):
conn = cx_Oracle.connect(usr, pas, 'ossam-cluster-scan.robi.com.bd:1721/RBPB.robi.com.bd')
print(conn.version)
tim = time.localtime()
tdy = date.today()
foldr = os.getcwd() + "\\download\\" + tdy.strftime('%m%d%y') + time.strftime("%H%M", tim) + '_' + tbl + '.csv'
dy_p = odt.day_minus(1)
dy_f = odt.day_plus(1)
Q1 = "FROM " + tbl + " WHERE TYPE=1 AND SUMMARY LIKE 'ERI-RRU THEFT' "
Q2 = "AND (LASTOCCURRENCE BETWEEN TO_DATE('" + dy_p + "','DD-MM-RRRR') AND TO_DATE('" + dy_f + "','DD-MM-RRRR'))"
QF = "SELECT" + selcol + Q1 + Q2
print(QF)
print('----------------')
df = | pd.read_sql(QF, con=conn) | pandas.read_sql |
"""Parser utils
This script provides functions used by seqQscorer to parse input files.
Methods
-------
get_RAW_features(feature_file_path)
parses the RAW features from the FastQC tool
parse_BowtieSE(lines)
parses the MAP features from Bowtie2 from single-end sequencing samples
parse_BowtiePE(lines)
parses the MAP features from Bowtie2 from paired-end sequencing samples
get_MAP_features(feature_file_path)
directly used by seqQscorer, parses Bowtie2 input and defines the run type
get_LOC_features(feature_file_path)
parses the LOC features from ChIPseeker
get_TSS_features(feature_file_path)
parses the TSS features from ChIPpeakAnno
generate_input_data(indir, feature_sets, run_type, medians, noVerbose=True, restrict=None)
given the input directory this function reads in the feature sets for
all samples provided by the user
date: 2020-11-02
author: <NAME>
"""
import os
import numpy as np
import pandas as pd
global FastQC_value_map
FastQC_value_map = {'FAIL': 0, 'WARN': 1, 'PASS': 2}
def get_RAW_features(feature_file_path):
features = {}
with open(feature_file_path, 'r') as feature_file:
for line in feature_file:
line = line.strip().split('\t')
feature_name = line[1].replace(' ', '_')
value = FastQC_value_map.get(line[0], np.nan)
features['FastQC_'+feature_name] = value
return features
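# The parser above expects FastQC's tab-separated summary.txt layout, e.g.
# (illustrative lines):
#   PASS    Basic Statistics    sample.fastq
#   WARN    Per base sequence content    sample.fastq
# Column 1 is mapped through FastQC_value_map and column 2 becomes the feature name.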
def parse_BowtieSE(lines):
lines = lines.split('\n')
features = {}
features['BowtieSE_no_mapping'] = float(lines[2].split('(')[1].split('%')[0])
features['BowtieSE_uniquely'] = float(lines[3].split('(')[1].split('%')[0])
features['BowtieSE_multiple'] = float(lines[4].split('(')[1].split('%')[0])
features['BowtieSE_overall'] = float(lines[5].split('%')[0])
# for mixed Bowtie
features['BowtieMI_no_mapping'] = features['BowtieSE_no_mapping']
features['BowtieMI_uniquely'] = features['BowtieSE_uniquely']
features['BowtieMI_multiple'] = features['BowtieSE_multiple']
features['BowtieMI_overall'] = features['BowtieSE_overall']
return features
def parse_BowtiePE(lines):
lines = lines.split('\n')
features = {}
features['BowtiePE_con_no_mapping'] = float(lines[2].split('(')[1].split('%')[0])
features['BowtiePE_con_uniquely'] = float(lines[3].split('(')[1].split('%')[0])
features['BowtiePE_con_multiple'] = float(lines[4].split('(')[1].split('%')[0])
features['BowtiePE_dis_uniquely'] = float(lines[7].split('(')[1].split('%')[0])
features['BowtiePE_cod_no_mapping'] = float(lines[11].split('(')[1].split('%')[0])
features['BowtiePE_cod_uniquely'] = float(lines[12].split('(')[1].split('%')[0])
features['BowtiePE_cod_multiple'] = float(lines[13].split('(')[1].split('%')[0])
features['BowtiePE_overall'] = float(lines[14].split('%')[0])
# for mixed Bowtie
features['BowtieMI_no_mapping'] = features['BowtiePE_con_no_mapping']
features['BowtieMI_uniquely'] = features['BowtiePE_con_uniquely']
features['BowtieMI_multiple'] = features['BowtiePE_con_multiple']
features['BowtieMI_overall'] = features['BowtiePE_overall']
# for SE Bowtie
features['BowtieSE_no_mapping'] = features['BowtiePE_con_no_mapping']
features['BowtieSE_uniquely'] = features['BowtiePE_con_uniquely']
features['BowtieSE_multiple'] = features['BowtiePE_con_multiple']
features['BowtieSE_overall'] = features['BowtiePE_overall']
return features
def get_MAP_features(feature_file_path):
lines = open(feature_file_path, 'r').read()
if 'concordantly' in lines and 'discordantly' in lines:
return parse_BowtiePE(lines)
else:
return parse_BowtieSE(lines)
def get_LOC_features(feature_file_path):
features = {}
with open(feature_file_path, 'r') as f:
f.readline()
for line in f:
line = line.strip().split('\t')
feature_name = line[1]
feature_name = feature_name.replace('"', '')
feature_name = feature_name.replace("'", '')
feature_name = feature_name.replace(' (<=300)', '')
feature_name = feature_name.replace(' ', '_')
feature_name = 'readsAnno_'+feature_name
features[feature_name] = float(line[2])
return features
def get_TSS_features(feature_file_path):
tss = pd.read_csv(feature_file_path, sep='\t')
tss_dist = list(map(str,tss['tss_dist']))
feature_names = ['TSS_'+name if name[0] == '-' else 'TSS_+'+name for name in tss_dist]
feature_values = list(tss['perc'])
return dict(zip(feature_names, feature_values))
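# get_TSS_features assumes a tab-separated table with at least the columns
# 'tss_dist' and 'perc', e.g. (illustrative values):
#   tss_dist    perc
#   -1000       0.12
#   1000        0.34
# which become features named 'TSS_-1000' and 'TSS_+1000'.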
def generate_input_data(indir, feature_sets, run_type, medians, noVerbose=True, restrict=None):
print('Parsing input data...')
# initialize the specific parser functions
parsers = {}
parsers['RAW'] = get_RAW_features
parsers['MAP'] = get_MAP_features
parsers['LOC'] = get_LOC_features
parsers['TSS'] = get_TSS_features
# parse input data and create an input data frame
if indir[-1] != '/':
indir += '/'
parsed_input = {}
for subdir, dirs, files in os.walk(indir):
for feature_file in files:
file_path = indir + feature_file
sample_ID = feature_file[:-4]
if restrict != None:
if restrict != sample_ID:
continue
feature_set = feature_file[-3:]
if os.path.exists(file_path):
if feature_file[-4:] in [ '.'+fs for fs in feature_sets ]:
if not sample_ID in parsed_input:
parsed_input[sample_ID] = {}
features = parsers[feature_set](file_path)
parsed_input[sample_ID].update(features)
feature_prefix = {'RAW': 'FastQC', 'MAP': 'BowtieMI',
'LOC': 'readsAnno', 'TSS': 'TSS'}
if run_type != 'generic':
feature_prefix['MAP'] = 'BowtieSE' if run_type == 'single-end' else 'BowtiePE'
feature_cols = []
for abbr in feature_sets:
prefix = feature_prefix[abbr]
col_names = list(filter(lambda x: x[:len(prefix)] == prefix, medians.keys()))
feature_cols += col_names
feature_cols = sorted(feature_cols)
missing = False
input_data = dict( (col, []) for col in ['sampleID'] + feature_cols )
for sample in parsed_input:
input_data['sampleID'].append(sample)
for col in feature_cols:
if col in parsed_input[sample]:
input_data[col].append(parsed_input[sample][col])
else:
missing = True
input_data[col].append(np.nan)
if col in feature_cols and not noVerbose:
print('\nWarning! The feature "%s" is missing for %s'%(col, sample))
if missing and not noVerbose:
print('\nMissing values will be imputed by median.')
print('However, you might check your input data.\n')
input_data = | pd.DataFrame(input_data) | pandas.DataFrame |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Methods for running multiquantiles experiments and plotting the results."""
import enum
import functools
import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from dp_multiq import base
from dp_multiq import csmooth
from dp_multiq import ind_exp
from dp_multiq import joint_exp
from dp_multiq import smooth
from dp_multiq import tree
class ErrorMetric(enum.Enum):
MISCLASSIFIED_POINTS = 1
DISTANCE = 2
_ERROR_FUNCS = {
ErrorMetric.MISCLASSIFIED_POINTS:
base.misclassified_points_error,
ErrorMetric.DISTANCE:
lambda _, true_qs, est_qs: base.distance_error(true_qs, est_qs)
}
_ERROR_LABELS = {
ErrorMetric.MISCLASSIFIED_POINTS: "avg # misclassified points",
ErrorMetric.DISTANCE: "avg distance"
}
class QuantilesEstimationMethod(enum.Enum):
JOINT_EXP = 1
IND_EXP = 2
APP_IND_EXP = 3
SMOOTH = 4
CSMOOTH = 5
LAP_TREE = 6
GAUSS_TREE = 7
_PARTIAL_METHODS = {
QuantilesEstimationMethod.JOINT_EXP: joint_exp.joint_exp,
QuantilesEstimationMethod.IND_EXP: ind_exp.ind_exp,
QuantilesEstimationMethod.APP_IND_EXP: ind_exp.ind_exp,
QuantilesEstimationMethod.SMOOTH: smooth.smooth,
QuantilesEstimationMethod.CSMOOTH: csmooth.csmooth,
QuantilesEstimationMethod.LAP_TREE: tree.tree,
QuantilesEstimationMethod.GAUSS_TREE: tree.tree
}
_PLOT_LABELS = {
QuantilesEstimationMethod.JOINT_EXP: "JointExp",
QuantilesEstimationMethod.IND_EXP: "IndExp",
QuantilesEstimationMethod.APP_IND_EXP: "AppIndExp",
QuantilesEstimationMethod.SMOOTH: "Smooth",
QuantilesEstimationMethod.CSMOOTH: "CSmooth",
QuantilesEstimationMethod.LAP_TREE: "LapTree",
QuantilesEstimationMethod.GAUSS_TREE: "GaussTree"
}
_PLOT_LINESTYLES = {
QuantilesEstimationMethod.JOINT_EXP: "-",
QuantilesEstimationMethod.IND_EXP: "--",
QuantilesEstimationMethod.APP_IND_EXP: "--",
QuantilesEstimationMethod.SMOOTH: "-.",
QuantilesEstimationMethod.CSMOOTH: "-.",
QuantilesEstimationMethod.LAP_TREE: ":",
QuantilesEstimationMethod.GAUSS_TREE: ":"
}
_PLOT_COLORS = {
QuantilesEstimationMethod.JOINT_EXP: "lightseagreen",
QuantilesEstimationMethod.IND_EXP: "mediumpurple",
QuantilesEstimationMethod.APP_IND_EXP: "darkorange",
QuantilesEstimationMethod.SMOOTH: "cornflowerblue",
QuantilesEstimationMethod.CSMOOTH: "violet",
QuantilesEstimationMethod.LAP_TREE: "firebrick",
QuantilesEstimationMethod.GAUSS_TREE: "peru"
}
def synthetic_comparison(methods, error_func, data_type, num_samples, data_low,
data_high, num_trials, num_quantiles_range, eps, delta,
swap, ts_matrix):
"""Returns errors and times from running experients on synthetic data.
Args:
methods: Array of private quantiles algorithms to test.
error_func: Function for computing quantile estimation error.
data_type: Type of synthetic data to use, either uniform or gaussian.
num_samples: Number of samples to use in each trial.
data_low: Lower bound for data, used by private quantiles algorithms.
data_high: Upper bound for data, used by private quantiles algorithms.
num_trials: Number of trials to average over.
num_quantiles_range: Array of numbers of quantiles to estimate.
eps: Privacy parameter epsilon.
delta: Privacy parameter delta, used only by smooth.
swap: If true, uses swap privacy definition. Otherwise uses add-remove.
ts_matrix: Matrix of smooth sensitivity parameters passed to CSmooth, where
ts_matrix[i,j] corresponds to quantile j+1 of num_quantiles_range[i]
quantiles.
Returns:
Arrays errors and times storing, respectively, average number of
misclassified points and time in seconds for each of the five methods and
each num_quantiles in num_quantiles_range, for the specified synthetic data.
"""
max_num_quantiles = len(num_quantiles_range)
num_methods = len(methods)
errors = np.zeros((num_methods, max_num_quantiles))
times = np.zeros((num_methods, max_num_quantiles))
for num_quantiles_idx in range(max_num_quantiles):
num_quantiles = num_quantiles_range[num_quantiles_idx]
qs = np.linspace(0, 1, num_quantiles + 2)[1:-1]
ts = ts_matrix[num_quantiles_idx]
errors[:, num_quantiles_idx], times[:, num_quantiles_idx] = comparison(
methods, error_func, np.empty(0), data_type, num_samples, data_low,
data_high, num_trials, qs, eps, delta, swap, ts)
print("Finished num_quantiles = " + str(num_quantiles))
return errors, times
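# Minimal usage sketch (all values below are assumptions for illustration only):
#   methods = [QuantilesEstimationMethod.JOINT_EXP, QuantilesEstimationMethod.IND_EXP]
#   qr = [1, 2, 4]
#   ts_matrix = [np.full(k, 0.05) for k in qr]  # one smooth-sensitivity row per k
#   errors, times = synthetic_comparison(
#       methods, _ERROR_FUNCS[ErrorMetric.DISTANCE], "gaussian", 1000, -5.0, 5.0,
#       10, qr, 1.0, 1e-6, True, ts_matrix)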
def real_comparison(methods, error_func, data_type, num_samples, data_low,
data_high, num_trials, num_quantiles_range, eps, delta,
swap, ts_matrix):
"""Returns errors and times from running experiments on real data.
Args:
methods: Array of private quantiles algorithms to test.
error_func: Function for computing quantile estimation error.
data_type: Type of real data to use, either ratings or pages.
num_samples: Number of samples to use in each trial.
data_low: Lower bound for data, used by private quantiles algorithms.
data_high: Upper bound for data, used by private quantiles algorithms.
num_trials: Number of trials to average over.
num_quantiles_range: Array of number of quantiles to estimate.
eps: Privacy parameter epsilon.
delta: Privacy parameter delta, used only by Smooth.
swap: If true, uses swap privacy definition. Otherwise uses add-remove.
ts_matrix: Matrix of smooth sensitivity parameters passed to CSmooth, where
ts_matrix[i,j] corresponds to quantile j+1 of num_quantiles_range[i]
quantiles.
Returns:
Arrays errors and times storing, respectively, average number of
misclassified points and time in seconds for each of the five methods and
each num_quantiles in num_quantiles_range, for the specified real data.
"""
max_num_quantiles = len(num_quantiles_range)
num_methods = len(methods)
errors = np.zeros((num_methods, max_num_quantiles))
times = np.zeros((num_methods, max_num_quantiles))
if data_type == "ratings":
data = pd.read_csv("books.csv", usecols=["average_rating"])
data = pd.to_numeric(data["average_rating"], errors="coerce").to_numpy()
data = data[~np.isnan(data)]
else:
data = | pd.read_csv("books.csv", usecols=[" num_pages"]) | pandas.read_csv |
#!/usr/bin/env python3
from asyncore import loop
import math
import datetime
import argparse
from pkgutil import get_data
import shutil
from webbrowser import get
from numpy import fft
import urllib3
import requests
import re
import os
import pandas as pd
from alive_progress import alive_bar
from bs4 import BeautifulSoup
from colorama import Fore, Style
from datetime import date
from geographiclib.geodesic import Geodesic
work_dir = os.getcwd()
# pandas init
dfColumns = ['name', 'callsign', 'frequency', 'boundary', 'upper_fl', 'lower_fl', 'class']
df_fir = | pd.DataFrame(columns=dfColumns) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 9 20:45:27 2021
@author: HaoLI
"""
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
from numpy import mean
from numpy import std
from sklearn.datasets import make_classification
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import roc_curve, auc, roc_auc_score ###计算roc和auc
from sklearn.model_selection import train_test_split
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
from xgboost import Booster
from xgboost import DMatrix
import datetime
import time
from sklearn.preprocessing import StandardScaler
# plot feature importance manually
from numpy import loadtxt
from matplotlib import pyplot
from imblearn.over_sampling import RandomOverSampler
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
import lightgbm as lgb
# check and set the working directory
os.getcwd()
#os.chdir('/Users/HaoLI/Dropbox/FinTech/raw_data')
os.chdir('/Users/HaoLI/Stata/credit/data')
df = pd.read_csv('data1210rename_use.csv')
col_names = list(df.columns.values[3:30])
col_names.remove('default_geq_1') # X must not contain the target variable y
col_names.remove('default_geq_2')
col_names.remove('default_geq_3')
base_col_names = col_names[0:13] # baseline model: only bank data plus time-of-day features, no consumption data
df_fillna = df.fillna(0) # fill NA with 0: missing consumption is counted as 0
X = df_fillna[col_names]
y = df_fillna.default_geq_1 # Target variable
X_base = df_fillna[base_col_names]
y_base = df_fillna.default_geq_1 # Target variable
#Specifying the parameter
n_estimators=100
learning_rate=0.1
max_depth=6
num_leaves=16
feature_fraction=1
bagging_fraction=1
verbosity=20
num_boost_round=20000
verbose_eval=1000
early_stopping_rounds=200
reg_alpha=2
reg_lambda=15
reduction_rate=[]
for random_state in range(0,15):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.30, random_state = random_state)
    # If random_state = None (the default), a random seed is chosen, so every run produces a different split.
    # Setting the same random_state value makes the split reproducible when the code is re-run.
X_base_train, X_base_test, y_base_train, y_base_test = train_test_split(X_base, y_base, test_size = 0.30, random_state = random_state)
ros = RandomOverSampler(random_state=0)
X_train, y_train = ros.fit_resample(X_train, y_train)
X_base_train, y_base_train = ros.fit_resample(X_base_train, y_base_train)
#min_max_scaler = MinMaxScaler()
#X_train = min_max_scaler.fit_transform(X_train)
#X_test = min_max_scaler.fit_transform(X_test)
#sc = StandardScaler()
#X_train = sc.fit_transform(X_train)
#X_test = sc.fit_transform(X_test)
#converting the dataset into proper LGB format
train_matrix=lgb.Dataset(X_train, label=y_train)
valid_matrix= lgb.Dataset(X_test, label=y_test)
train_matrix_base=lgb.Dataset(X_base_train, label=y_base_train)
valid_matrix_base= lgb.Dataset(X_base_test, label=y_base_test)
params = {
'boosting_type': 'gbdt',
'objective': 'binary',
'metric': 'auc',
"n_estimators":n_estimators,
        'learning_rate': learning_rate,  # smaller learning rate paired with more trees
        'max_depth': max_depth,  # maximum tree depth, to limit overfitting
        'num_leaves': num_leaves,
        'feature_fraction': feature_fraction,  # use all features when training each tree
'bagging_fraction': bagging_fraction,
}
classifier=lgb.train(params, train_set=train_matrix, valid_sets=valid_matrix, num_boost_round=num_boost_round, verbose_eval=verbose_eval, early_stopping_rounds=early_stopping_rounds)
y_train_pred = classifier.predict(X_train)
    y_test_pred = classifier.predict(X_test)  # optionally a weight of 0.5 could be applied here
fullmodelperc = np.percentile(y_test_pred,[95,90,80,70,60,50] )
full_rej_perc_5 = fullmodelperc[0]
full_rej_perc_10 = fullmodelperc[1]
full_rej_perc_20 = fullmodelperc[2]
full_rej_perc_30 = fullmodelperc[3]
full_rej_perc_40 = fullmodelperc[4]
full_rej_perc_50 = fullmodelperc[5]
classifier=lgb.train(params, train_set=train_matrix_base, valid_sets=valid_matrix_base, num_boost_round=num_boost_round, verbose_eval=verbose_eval, early_stopping_rounds=early_stopping_rounds)
y_base_train_pred = classifier.predict(X_base_train)
    y_base_test_pred = classifier.predict(X_base_test)  # optionally a weight of 0.5 could be applied here
basemodelperc = np.percentile(y_base_test_pred,[95,90,80,70,60,50] )
base_rej_perc_5 = basemodelperc[0]
base_rej_perc_10 = basemodelperc[1]
base_rej_perc_20 = basemodelperc[2]
base_rej_perc_30 = basemodelperc[3]
base_rej_perc_40 = basemodelperc[4]
base_rej_perc_50 = basemodelperc[5]
print("full model rejection rate[5,10,20,30,40,50]: %s"%fullmodelperc )# get percentile of array y_test_pred
print("baseline model rejection rate[5,10,20,30,40,50]: %s"%basemodelperc )# get percentile of array y_test_pred
    # record the base model's default rates in this iteration at rejection rates of 5%, 10%, 20%, 30%, 40% and 50%
df_base = np.vstack((y_test,y_base_test_pred))
df_base = pd.DataFrame(df_base)
df_base = df_base.transpose()
df_base.columns = ["label", "pred_prob"]
    def_rate_5_base = df_base[df_base["pred_prob"]<=base_rej_perc_5]['label'].sum()/(df_base.shape[0]*0.95) # test-set default rate at a 5% rejection rate
    def_rate_10_base = df_base[df_base["pred_prob"]<=base_rej_perc_10]['label'].sum()/(df_base.shape[0]*0.9) # test-set default rate at a 10% rejection rate
    def_rate_20_base = df_base[df_base["pred_prob"]<=base_rej_perc_20]['label'].sum()/(df_base.shape[0]*0.8) # test-set default rate at a 20% rejection rate
    def_rate_30_base = df_base[df_base["pred_prob"]<=base_rej_perc_30]['label'].sum()/(df_base.shape[0]*0.7) # test-set default rate at a 30% rejection rate
    def_rate_40_base = df_base[df_base["pred_prob"]<=base_rej_perc_40]['label'].sum()/(df_base.shape[0]*0.6) # test-set default rate at a 40% rejection rate
    def_rate_50_base = df_base[df_base["pred_prob"]<=base_rej_perc_50]['label'].sum()/(df_base.shape[0]*0.5) # test-set default rate at a 50% rejection rate
    # record the full model's default rates in this iteration at rejection rates of 5%, 10%, 20%, 30%, 40% and 50%
df_full = np.vstack((y_test,y_test_pred))
df_full = | pd.DataFrame(df_full) | pandas.DataFrame |
import math
import io
import json
import os
import string
import configargparse
import numpy as np
import pandas as pd
import spacy
from sklearn.feature_extraction.text import TfidfVectorizer
# --------------------------------------------------------------------------------
# Classes
# https://github.com/taki0112/Vector_Similarity
class TS_SS:
def Cosine(self, vec1: np.ndarray, vec2: np.ndarray):
return np.dot(vec1, vec2.T)/(np.linalg.norm(vec1) * np.linalg.norm(vec2))
def VectorSize(self, vec: np.ndarray):
return np.linalg.norm(vec)
def Euclidean(self, vec1: np.ndarray, vec2: np.ndarray):
return np.linalg.norm(vec1-vec2)
def Theta(self, vec1: np.ndarray, vec2: np.ndarray):
return np.arccos(self.Cosine(vec1, vec2)) + np.radians(10)
def Triangle(self, vec1: np.ndarray, vec2: np.ndarray):
theta = np.radians(self.Theta(vec1, vec2))
return (self.VectorSize(vec1) * self.VectorSize(vec2) * np.sin(theta))/2
def Magnitude_Difference(self, vec1: np.ndarray, vec2: np.ndarray):
return abs(self.VectorSize(vec1) - self.VectorSize(vec2))
def Sector(self, vec1: np.ndarray, vec2: np.ndarray):
ED = self.Euclidean(vec1, vec2)
MD = self.Magnitude_Difference(vec1, vec2)
theta = self.Theta(vec1, vec2)
return math.pi * (ED + MD)**2 * theta/360
def __call__(self, vec1: np.ndarray, vec2: np.ndarray):
try:
m = self.Triangle(vec1, vec2) * self.Sector(vec1, vec2)
v = m.item((0, 0))
return 0 if np.isnan(v) else v
except:
pass
return 0
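# Illustrative usage of the TS_SS measure above (vectors are assumed to be 2-D row
# vectors, e.g. a single row of a TF-IDF matrix obtained with .todense()):
#   similarity = TS_SS()
#   v1 = np.array([[1.0, 2.0, 0.5]])
#   v2 = np.array([[0.9, 2.1, 0.4]])
#   score = similarity(v1, v2)  # smaller scores mean the vectors are more alike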
# --------------------------------------------------------------------------------
# Methods
def is_interactive():
return __name__ == '__main__'
def corpus_dir():
return os.path.join(os.path.dirname(__file__), 'corpus')
def load_corpus(task):
corpus = [] # The text of the file goes here
labels = [] # The name of the file goes here
task_dir = os.path.join(corpus_dir(), task)
for currentDir, _, files in os.walk(task_dir):
# Get the absolute path of the currentDir parameter
currentDir = os.path.abspath(currentDir)
# Traverse through all files
for fileName in files:
fullPath = os.path.join(currentDir, fileName)
with io.open(fullPath, 'r', encoding='utf-8', errors='ignore') as f:
contents = f.read()
if 'orig' not in fileName:
corpus.append(contents)
labels.append(fileName)
else:
# The original is the first entry
corpus.insert(0, contents)
labels.insert(0, fileName)
return corpus, labels
def load_metadata():
fileName = os.path.join(corpus_dir(), 'corpus-final09.xls')
df = pd.read_excel(fileName, index_col=0, sheet_name='File list')
return df
def filter_tokens(doc):
for token in doc:
if (
not token.is_punct and
not token.is_space
):
yield token
# --------------------------------------------------------------------------------
# Lambda entry
def run(options):
# Load the metadata
mdf = load_metadata()
# Load the sources
corpus, labels = load_corpus(options.task)
# Have spaCy process the documents - with just basic tokenizing
nlp = spacy.load("en_core_web_sm", exclude=['ner', 'lemmatizer', 'textcat'])
docs = [
' '.join([t.norm_ for t in filter_tokens(doc)])
for doc in nlp.pipe(corpus)
]
# Prepare a TF-IDF vectorizer that reads sing words and two-word pairs
tfidf_vectorizer = TfidfVectorizer(ngram_range=(1, 2), sublinear_tf=True)
tfidf_vectors = tfidf_vectorizer.fit_transform(docs)
# Peek at the raw values
# df = pd.DataFrame(tfidf_vectors.T.todense(),
# index=tfidf_vectorizer.get_feature_names())
# print(df)
# Score similarity
similarity = TS_SS()
input_vector = tfidf_vectors[0].todense()
_scores = [
similarity(input_vector, tfidf_vectors[i].todense())
for i in range(1, len(docs))
]
# Create a Pandas series from the scores
ts_ss_scores = | pd.Series(_scores, index=labels[1:], name='difference') | pandas.Series |
import logging
import os
import numpy as np
import pandas as pd
from tardis.plasma.properties.base import (PreviousIterationProperty,
ProcessingPlasmaProperty)
from tardis.plasma.properties import PhiSahaNebular, PhiSahaLTE
__all__ = ['PreviousElectronDensities', 'PreviousBetaSobolev',
'HeliumNLTE', 'HeliumNumericalNLTE']
logger = logging.getLogger(__name__)
class PreviousElectronDensities(PreviousIterationProperty):
outputs = ('previous_electron_densities',)
def set_initial_value(self, kwargs):
initial_value = np.ones(len(kwargs['abundance'].columns))*1000000.0
self._set_initial_value(initial_value)
class PreviousBetaSobolev(PreviousIterationProperty):
outputs = ('previous_beta_sobolev',)
def set_initial_value(self, kwargs):
try:
lines = len(kwargs['atomic_data'].lines)
except:
lines = len(kwargs['atomic_data']._lines)
initial_value = np.ones((lines,
len(kwargs['abundance'].columns)))
self._set_initial_value(initial_value)
class HeliumNLTE(ProcessingPlasmaProperty):
outputs = ('helium_population',)
def calculate(self, level_boltzmann_factor, electron_densities,
ionization_data, beta_rad, g, g_electron, w, t_rad, t_electrons,
delta, zeta_data, number_density, partition_function):
helium_population = level_boltzmann_factor.ix[2].copy()
# He I excited states
he_one_population = self.calculate_helium_one(g_electron, beta_rad,
partition_function, ionization_data, level_boltzmann_factor,
electron_densities, g, w, t_rad, t_electrons)
helium_population.ix[0].update(he_one_population)
#He I metastable states
helium_population.ix[0].ix[1] *= (1 / w)
helium_population.ix[0].ix[2] *= (1 / w)
#He I ground state
helium_population.ix[0].ix[0] = 0.0
#He II excited states
he_two_population = level_boltzmann_factor.ix[2,1].mul(
(g.ix[2,1].ix[0]**(-1)))
helium_population.ix[1].update(he_two_population)
#He II ground state
helium_population.ix[1].ix[0] = 1.0
#He III states
helium_population.ix[2].ix[0] = self.calculate_helium_three(t_rad, w,
zeta_data, t_electrons, delta, g_electron, beta_rad,
partition_function, ionization_data, electron_densities)
unnormalised = helium_population.sum()
normalised = helium_population.mul(number_density.ix[2] / unnormalised)
helium_population.update(normalised)
return helium_population
@staticmethod
def calculate_helium_one(g_electron, beta_rad, partition_function,
ionization_data, level_boltzmann_factor, electron_densities, g,
w, t_rad, t_electron):
(partition_function_index, ionization_data_index, partition_function,
ionization_data) = HeliumNLTE.filter_with_helium_index(2, 1,
partition_function, ionization_data)
phis = (1 / PhiSahaLTE.calculate(g_electron, beta_rad,
partition_function, ionization_data)) * electron_densities * \
(1.0/g.ix[2,1,0]) * (1/w) * (t_rad/t_electron)**(0.5)
return level_boltzmann_factor.ix[2].ix[0].mul(
| pd.DataFrame(phis.ix[2].ix[1].values) | pandas.DataFrame |
########################################
### IMPORT MODULES ###
########################################
from geopy.distance import great_circle
from collections import defaultdict
import sys
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from random import uniform
from scipy.interpolate import griddata
########################################
### LOAD DATA ###
########################################
# Prepare Transit Data Tables
df_transit = | pd.read_csv('dataset/full_routes.csv') | pandas.read_csv |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from functools import partial
from threading import Thread
from typing import Callable
from joblib import Parallel, delayed
from joblib._parallel_backends import MultiprocessingBackend
import pandas as pd
from queue import Queue
class ParallelExt(Parallel):
def __init__(self, *args, **kwargs):
maxtasksperchild = kwargs.pop("maxtasksperchild", None)
super(ParallelExt, self).__init__(*args, **kwargs)
if isinstance(self._backend, MultiprocessingBackend):
self._backend_args["maxtasksperchild"] = maxtasksperchild
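# Illustrative usage (worker counts are assumptions): ParallelExt behaves like
# joblib.Parallel but forwards `maxtasksperchild` to the multiprocessing backend,
# so worker processes are recycled after the given number of tasks:
#   results = ParallelExt(n_jobs=4, maxtasksperchild=10)(
#       delayed(pow)(i, 2) for i in range(100))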
def datetime_groupby_apply(df, apply_func, axis=0, level="datetime", resample_rule="M", n_jobs=-1, skip_group=False):
"""datetime_groupby_apply
This function will apply the `apply_func` on the datetime level index.
Parameters
----------
df :
DataFrame for processing
apply_func :
apply_func for processing the data
axis :
which axis is the datetime level located
level :
which level is the datetime level
resample_rule :
        How to resample the data into chunks that are processed in parallel
n_jobs :
n_jobs for joblib
Returns:
pd.DataFrame
"""
def _naive_group_apply(df):
return df.groupby(axis=axis, level=level).apply(apply_func)
if n_jobs != 1:
dfs = ParallelExt(n_jobs=n_jobs)(
delayed(_naive_group_apply)(sub_df) for idx, sub_df in df.resample(resample_rule, axis=axis, level=level)
)
return | pd.concat(dfs, axis=axis) | pandas.concat |
from __future__ import annotations
import sys
from collections import namedtuple
from datetime import datetime
from textwrap import TextWrapper
from typing import List, Dict, Any, Union, Optional, Type
from colorama import Fore, Style, Back
from pandas import DataFrame, Series
from formulacli.banners import Banner, DESCRIPTION
from formulacli.drivers import fetch_drivers, fetch_driver
from formulacli.exceptions import ExitException
from formulacli.img_converter import convert_image
from formulacli.news import fetch_top_stories
from formulacli.result_tables import fetch_results
if sys.platform in ['linux', 'linux2', 'darwin']:
from getch import getch as read_key
elif sys.platform == 'win32':
from msvcrt import getch as read_key
Command = namedtuple("Command", ['cmd', 'label'])
Option = namedtuple("Option", ['opt', 'label'])
Message = namedtuple("Message", ['msg', 'type'])
BANNER = Banner()
class Context:
history: List[Any] = []
messages: List[Message] = []
block_render: bool = True
def __init__(self) -> None:
self.state: Dict[str, Any] = {
'name': "context",
'next_ctx': self,
'next_ctx_args': {},
'command': '',
'custom_commands': [],
'menu_options': [],
'show_banner': False,
'string_input': False
}
def __str__(self):
return self.state['name']
def render(self) -> None:
if self.state['show_banner']:
print(self.banner)
self.show_options()
self.event()
print()
self.show_messages()
print("Press h for help.")
self.state['command'] = cmd = self.get_commands()
if cmd.lower() in ['q', 'quit', 'exit']:
raise ExitException
if cmd.lower() in ['m', 'menu']:
self.state['next_ctx'] = MainContext
self.state['next_ctx_args'] = {}
return
if cmd.lower() in ['b', 'back']:
try:
Context.history.pop()
self.state['next_ctx'] = Context.history[-1]
self.state['next_ctx_args'] = {}
except IndexError:
self.state['next_ctx'] = MainContext
self.state['next_ctx_args'] = {}
return
if cmd.lower() in ['?', 'h', 'help']:
self.show_help()
self.state['next_ctx'] = Context.history[-1]
return
if cmd.lower() == '\'':
self.state['string_input'] = True
self.action_handler()
def action_handler(self) -> None:
pass
def event(self) -> None:
pass
def add_to_history(self) -> None:
Context.history.append(self)
def show_options(self) -> None:
template = "[{opt}] {label}"
for option in self.state['menu_options']:
self._pprint(template.format(opt=option.opt, label=option.label), margin=2)
print()
def show_messages(self) -> None:
messages: List[Message] = Context.messages
while messages:
message: Message = messages.pop()
if message.type == 'error':
self._pprint(f"{Fore.RED}{message.msg}{Style.RESET_ALL}", margin=10)
elif message.type == 'success':
self._pprint(f"{Fore.LIGHTGREEN_EX}{message.msg}{Style.RESET_ALL}", margin=10)
elif message.type == 'debug':
self._pprint(f"{Fore.LIGHTCYAN_EX}{message.msg}{Style.RESET_ALL}", margin=10)
def show_help(self) -> None:
commands: List[Command] = []
commands += [Command(cmd='\'', label="Write command")]
commands += self.state['custom_commands']
if len(Context.history) > 1:
commands.append(Command(cmd='b', label="Back"))
commands += [
Command(cmd='h', label="Show commands"),
Command(cmd='m', label="Menu"),
Command(cmd='q', label="Quit")
]
commands_df = | DataFrame(commands, columns=["Commands", "?"]) | pandas.DataFrame |
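# --- Hedged sketch (assumption, not part of the original source) -------------
# A minimal Context subclass showing how the state dict drives a screen:
# menu_options feed show_options(), custom_commands appear in show_help(),
# and action_handler() picks the next context. The name "example" and the
# option/command labels are invented for illustration.
class _ExampleContext(Context):
    def __init__(self) -> None:
        super().__init__()
        self.state.update({
            'name': "example",
            'show_banner': True,
            'menu_options': [Option(opt=1, label="Drivers"),
                             Option(opt=2, label="News")],
            'custom_commands': [Command(cmd='r', label="Refresh")],
        })

    def action_handler(self) -> None:
        if self.state['command'] == '1':
            self.state['next_ctx'] = MainContext  # hand off to a known context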
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 5 20:03:05 2018
@author: gtrancourt
"""
import numpy as np
import os
from pandas import DataFrame
from skimage import transform, img_as_bool, img_as_int, img_as_ubyte, img_as_float32
import skimage.io as io
from skimage.measure import label, marching_cubes_lewiner, mesh_surface_area, regionprops
import zipfile
#import cv2
def Trim_Individual_Stack(large_stack, small_stack):
print("***trimming stack***")
    dims = np.array(large_stack.shape, dtype='float') / np.array(small_stack.shape, dtype='float')  # use the arguments, not the globals
if np.all(dims <= 2):
return large_stack
else:
if dims[1] > 2:
if (large_stack.shape[1]-1)/2 == small_stack.shape[1]:
large_stack = np.delete(large_stack, large_stack.shape[1]-1, axis=1)
else:
if (large_stack.shape[1]-2)/2 == small_stack.shape[1]:
large_stack = np.delete(large_stack, np.arange(large_stack.shape[1]-2, large_stack.shape[1]), axis=1)
if dims[2] > 2:
if (large_stack.shape[2]-1)/2 == small_stack.shape[2]:
large_stack = np.delete(large_stack, large_stack.shape[2]-1, axis=2)
else:
if (large_stack.shape[2]-2)/2 == small_stack.shape[2]:
large_stack = np.delete(large_stack, np.arange(large_stack.shape[2]-2, large_stack.shape[2]), axis=2)
return large_stack
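# --- Hedged example (assumption, not part of the original script) ------------
# Illustrates what Trim_Individual_Stack corrects: the full-resolution stack is
# roughly 2x the ML prediction in y and z, and odd dimensions leave a 1-2 pixel
# remainder that is trimmed so large.shape == 2 * small.shape on those axes.
# The shapes below are made up.
def _example_trim():
    small = np.zeros((10, 200, 300), dtype='uint8')
    large = np.zeros((10, 401, 602), dtype='bool')  # odd leftovers in y and z
    trimmed = Trim_Individual_Stack(large, small)
    assert trimmed.shape == (10, 400, 600)
    return trimmed.shape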
#%%
#Pixel dimmension
px_edge = 0.1625 #µm
vx_volume = px_edge**3
#Define the values of the different tissues
epid_value = 51 #69
bg_value = 204 #177
spongy_value = 0
palisade_value = 0
if spongy_value == palisade_value:
mesophyll_value = spongy_value
else:
mesophyll_value = [spongy_value, palisade_value]
ias_value = 255
vein_value = 102 # 147
bs_value = 153
#%%
#Load segmented image
base_folder_name = '/run/media/gtrancourt/GTR_Touro/Vitis_Shade_Drought/DONE_Klara/'
sample_name = 'C_I_12_Strip2_'
folder_name = 'MLresults/'
binary_filename = sample_name + 'BINARY-8bit.tif'
raw_ML_prediction_name = sample_name + 'fullstack_prediction.tif'
filepath = base_folder_name + sample_name + '/'
#%%
# Check if the file has already been processed -- Just in case!
if os.path.isfile(filepath + sample_name + 'RESULTS.txt'):
print('This file has already been processed!')
assert False
#%%
# Load the ML segmented stack
raw_pred_stack = io.imread(filepath + folder_name + raw_ML_prediction_name)
print((np.unique(raw_pred_stack[100])))
io.imshow(raw_pred_stack[100])
# Trim at the edges -- The ML does a bad job there
# Here I remove 50 slices at the beginning and the end,
# and 40 pixels at the left and right edges
trim_slices = 80
trim_column = 40
raw_pred_stack = raw_pred_stack[trim_slices:-trim_slices,:,trim_column:-trim_column]
io.imshow(raw_pred_stack[100])
#%%
# Sometimes, the output values are different than what is written above.
# This is a way to assign values. But you have to double-check that the other is right.
#mesophyll_value, bg_value, epid_value, vein_value, ias_value = np.split(np.unique(raw_pred_stack[100]), len(np.unique(raw_pred_stack[100])))
#Define the values of the different tissues
epid_value = 64
bg_value = 191
mesophyll_value = 0
ias_value = 255
vein_value = 128
#%%
###################
## EPIDERMIS
###################
# Label all of the epidermis regions
unique_epidermis_volumes = label(raw_pred_stack == epid_value, connectivity=1)
props_of_unique_epidermis = regionprops(unique_epidermis_volumes)
io.imshow(unique_epidermis_volumes[100])
#%%
# Find the size and properties of the epidermis regions
epidermis_area = np.zeros(len(props_of_unique_epidermis))
epidermis_label = np.zeros(len(props_of_unique_epidermis))
epidermis_centroid = np.zeros([len(props_of_unique_epidermis),3])
for regions in np.arange(len(props_of_unique_epidermis)):
epidermis_area[regions] = props_of_unique_epidermis[regions].area
epidermis_label[regions] = props_of_unique_epidermis[regions].label
epidermis_centroid[regions] = props_of_unique_epidermis[regions].centroid
# Find the two largest epidermis
ordered_epidermis = np.argsort(epidermis_area)
print('The two largest values below should be in the same order of magnitude')
print((epidermis_area[ordered_epidermis[-4:]]))
print('The center of the epidermis should be more or less the same on the 1st and 3rd columns')
print((epidermis_centroid[ordered_epidermis[-4:]]))
two_largest_epidermis = (unique_epidermis_volumes == ordered_epidermis[-1]+1) | (unique_epidermis_volumes == ordered_epidermis[-2]+1)
#Check if it's correct
#io.imsave(filepath + folder_name + 'test_epidermis.tif',
# img_as_ubyte(two_largest_epidermis))
io.imshow(two_largest_epidermis[100])
#%%
# Get the values again: makes it cleaner
unique_epidermis_volumes = label(two_largest_epidermis, connectivity=1)
props_of_unique_epidermis = regionprops(unique_epidermis_volumes)
epidermis_area = np.zeros(len(props_of_unique_epidermis))
epidermis_label = np.zeros(len(props_of_unique_epidermis))
epidermis_centroid = np.zeros([len(props_of_unique_epidermis),3])
for regions in np.arange(len(props_of_unique_epidermis)):
epidermis_area[regions] = props_of_unique_epidermis[regions].area
epidermis_label[regions] = props_of_unique_epidermis[regions].label
epidermis_centroid[regions] = props_of_unique_epidermis[regions].centroid
#io.imshow(unique_epidermis_volumes[100])
# Transform the array to 8-bit: no need for the extra precision as there are only 3 values
unique_epidermis_volumes = np.array(unique_epidermis_volumes, dtype='uint8')
# Find the values of each epidermis: assumes adaxial epidermis is at the top of the image
adaxial_epidermis_value = unique_epidermis_volumes[100,:,100][(unique_epidermis_volumes[100,:,100] != 0).argmax()]
abaxial_epidermis_value = int(np.arange(start=1,stop=3)[np.arange(start=1,stop=3) != adaxial_epidermis_value])
# Compute volume
epidermis_adaxial_volume = epidermis_area[adaxial_epidermis_value - 1] * (px_edge * (px_edge*2)**2)
epidermis_abaxial_volume = epidermis_area[abaxial_epidermis_value - 1] * (px_edge * (px_edge*2)**2)
# Thickness returns a 2D array, i.e. the thickness of each column
epidermis_abaxial_thickness = np.sum((unique_epidermis_volumes == abaxial_epidermis_value), axis=1) * (px_edge*2)
epidermis_adaxial_thickness = np.sum((unique_epidermis_volumes == adaxial_epidermis_value), axis=1) * (px_edge*2)
#%%
###################
## VEINS
###################
# Get the veins volumes
unique_vein_volumes = label(raw_pred_stack == vein_value, connectivity=1)
props_of_unique_veins = regionprops(unique_vein_volumes)
io.imshow(unique_vein_volumes[100])
#%%
veins_area = np.zeros(len(props_of_unique_veins))
veins_label = np.zeros(len(props_of_unique_veins))
veins_centroid = np.zeros([len(props_of_unique_veins),3])
for regions in np.arange(len(props_of_unique_veins)):
veins_area[regions] = props_of_unique_veins[regions].area
veins_label[regions] = props_of_unique_veins[regions].label
veins_centroid[regions] = props_of_unique_veins[regions].centroid
# Find the largest veins
ordered_veins = np.argsort(veins_area)
#veins_area[ordered_veins[-80:]]
#veins_area[ordered_veins[:1000]]
#veins_centroid[ordered_veins[-4:]]
#print(np.sum(veins_area <= 1000))
# I found that for my images, a threshold of 100000 (1e5) pixel^3 removed
# the noise left by the segmentation method and kept only the largest veins.
# This should be adjusted depending on the species/images/magnification.
large_veins_ids = veins_label[veins_area > 100000]
largest_veins = np.in1d(unique_vein_volumes, large_veins_ids).reshape(raw_pred_stack.shape)
# Get the values again
vein_volume = np.sum(largest_veins) * (px_edge * (px_edge*2)**2)
#Check if it's correct
#io.imsave(base_folder_name + sample_name + '/' + folder_name + 'test_veins.tif',
# img_as_ubyte(largest_veins))
io.imshow(largest_veins[100])
#%%
###################
## AIRSPACE
###################
#########################################
## CREATE THE FULLSIZE SEGMENTED STACK ##
#########################################
# My segmentation procedure used a reduced size stack, since my original
# images are too big to be handled. I do want to use my original images for
# their quality and details, so I use the binary image and add on top of it
# the background, epidermis, and veins that have been segmented. That way, I
# keep the detail I want at the airspace-cell interface, while still having a
# good background, epidermis, and vein segmentation to remove the tissues that
# are not needed for some traits.
##############################
## LOADING THE BINARY STACK ##
## IN ORIGINAL SIZE ##
##############################
# I've started compressing my files. The code below extracts the file,
# loads it into memory, and then deletes the file (it's still in memory).
# The commented code at the end loads the uncompressed image.
#Load the compressed binary stack in the original dimensions
binary_zip = zipfile.ZipFile(filepath + binary_filename + '.zip', 'r')
binary_zip.extractall(filepath)
#binary_raw = binary_zip.open(filepath + sample_name + '/' + binary_filename)
# Open the image
binary_stack = img_as_bool(io.imread(filepath + sample_name + '/' + binary_filename))
# Trim the edges
binary_stack = binary_stack[trim_slices:-trim_slices,:,:]
binary_stack = binary_stack[:,:,(trim_column*2):(-trim_column*2)]
# Delete the uncompressed file
os.remove(base_folder_name + sample_name + '/' + sample_name + '/' + binary_filename)
os.rmdir(base_folder_name + sample_name + '/' + sample_name)
io.imshow(binary_stack[100])
#binary_stack = img_as_bool(io.imread(base_folder_name + sample_name + '/' + binary_filename))
#%%
#Check and trim the binary stack if necessary
# This is to match the dimensions between all images
# Basically, it trims odd-numbered dimensions so they can be divided/multiplied by 2.
binary_stack = Trim_Individual_Stack(binary_stack, raw_pred_stack)
# TO MANUALLY DELETE SOME SLICES
#binary_stack = np.delete(binary_stack, 910, axis=1)
#binary_stack = np.delete(binary_stack, 482, axis=0)
#binary_stack = np.delete(binary_stack, np.arange(0, 160*2), axis=2)
#%%
# This cell creates an empty array filled with the background color (177), then
# adds all of the leaf to it. Looping over each slice (this is more memory
# efficient than working on the whole stack), it takes the ML segmented image,
# resize the slice, and adds it to the empty array.
bg_value_new = 177
vein_value_new = 147
ias_value_new = 255
large_segmented_stack = np.full(shape=binary_stack.shape, fill_value=bg_value_new, dtype='uint8') # Assign an array filled with the background value 177.
for idx in np.arange(large_segmented_stack.shape[0]):
# Creates a boolean 2D array of the veins (from the largest veins id'ed earlier)
temp_veins = img_as_bool(transform.resize(largest_veins[idx],
[binary_stack.shape[1], binary_stack.shape[2]],
anti_aliasing=False, order=0))
    # Creates a 2D array with the epidermis being assigned values 30 or 60
temp_epid = transform.resize(unique_epidermis_volumes[idx],
[binary_stack.shape[1], binary_stack.shape[2]],
anti_aliasing=False, preserve_range=True, order=0) * 30
    # Creates a 2D mask of only the leaf to remove the background from the
# original sized binary image.
leaf_mask = img_as_bool(transform.resize(raw_pred_stack[idx] != bg_value,
[binary_stack.shape[1], binary_stack.shape[2]],
anti_aliasing=False, order=0))
large_segmented_stack[idx][leaf_mask] = binary_stack[idx][leaf_mask] * ias_value_new #binary_stack is a boolean, so you need to multiply it.
large_segmented_stack[idx][temp_veins] = vein_value_new #vein_value
large_segmented_stack[idx][temp_epid != 0] = temp_epid[temp_epid != 0]
io.imshow(large_segmented_stack[100])
print('### Validate the values in the stack ###')
print((np.unique(large_segmented_stack[100])))
io.imsave(base_folder_name + sample_name + '/' + sample_name +'SEGMENTED.tif', large_segmented_stack, imagej=True)
#%%
################################################
## COMPUTE TRAITS ON THE ORIGINAL SIZED STACK ##
################################################
# Load the large segmented stack to re-run the calculations if needed
#large_segmented_stack = io.imread(base_folder_name + sample_name + '/' + sample_name +'SEGMENTED.tif')
#
#io.imshow(large_segmented_stack[100])
#print(np.unique(large_segmented_stack[100]))
#large_segmented_stack = np.delete(large_segmented_stack, np.arange(0,500), axis=0)
#%%
# Redefine the values for the different tissues as used in the segmented image.
# The epidermis will be defined later.
bg_value = 177
spongy_value = 0
palisade_value = 0
if spongy_value == palisade_value:
mesophyll_value = spongy_value
else:
mesophyll_value = [spongy_value, palisade_value]
ias_value = 255
vein_value = 147
# Find the values of each epidermis: assumes adaxial epidermis is at the top of the image
adaxial_epidermis_value = large_segmented_stack[100,:,100][(large_segmented_stack[100,:,100] != bg_value).argmax()]
if adaxial_epidermis_value == 30:
abaxial_epidermis_value = 60
else:
if adaxial_epidermis_value == 60:
abaxial_epidermis_value = 30
#Measure the different volumes
leaf_volume = np.sum(large_segmented_stack != bg_value) * vx_volume
mesophyll_volume = np.sum((large_segmented_stack != bg_value) & (large_segmented_stack != adaxial_epidermis_value) & (large_segmented_stack != abaxial_epidermis_value)) * vx_volume
cell_volume = np.sum(large_segmented_stack == mesophyll_value) * vx_volume
air_volume = np.sum(large_segmented_stack == ias_value) * vx_volume
epidermis_abaxial_volume = np.sum(large_segmented_stack == abaxial_epidermis_value) * vx_volume
epidermis_adaxial_volume = np.sum(large_segmented_stack == adaxial_epidermis_value) * vx_volume
vein_volume = np.sum(large_segmented_stack == vein_value) * vx_volume
print(leaf_volume)
print((cell_volume + air_volume + epidermis_abaxial_volume + epidermis_adaxial_volume + vein_volume))
#Measure the thickness of the leaf, the epidermis, and the mesophyll
leaf_thickness = np.sum(np.array(large_segmented_stack != bg_value, dtype='bool'), axis=1) * px_edge
mesophyll_thickness = np.sum((large_segmented_stack != bg_value) & (large_segmented_stack != adaxial_epidermis_value) & (large_segmented_stack != abaxial_epidermis_value), axis=1) * px_edge
epidermis_abaxial_thickness = np.sum(large_segmented_stack == abaxial_epidermis_value, axis=1) * px_edge
epidermis_adaxial_thickness = np.sum(large_segmented_stack == adaxial_epidermis_value, axis=1) * px_edge
print((np.median(leaf_thickness),leaf_thickness.mean(),leaf_thickness.std()))
print((np.median(mesophyll_thickness),mesophyll_thickness.mean(),mesophyll_thickness.std()))
print((np.median(epidermis_adaxial_thickness),epidermis_adaxial_thickness.mean(),epidermis_adaxial_thickness.std()))
print((np.median(epidermis_abaxial_thickness),epidermis_abaxial_thickness.mean(),epidermis_abaxial_thickness.std()))
#%%
# Leaf area
# I was lazy here as I assume the leaf is parallel to the border of the image.
leaf_area = large_segmented_stack.shape[0] * large_segmented_stack.shape[2] * (px_edge**2)
#Calculate Surface Area (adapted from <NAME>' code)
# This take quite a lot of RAM
# This gives 1% less surface than from BoneJ's results,
# but way way faster (even if it's not that fast)!!!
ias_vert_faces = marching_cubes_lewiner(large_segmented_stack == ias_value)
ias_SA = mesh_surface_area(ias_vert_faces[0],ias_vert_faces[1])
true_ias_SA = ias_SA * (px_edge**2)
print(('IAS surface area: '+str(true_ias_SA)+' µm**2'))
print(('or '+str(float(true_ias_SA/1000000))+' mm**2'))
# end Matt's code
# NOTE ON SA CODE ABOVE
# The same procedure as for epidermises and veins could be done, i.e. using the
# label() function to identify all of the un-connected airspace volumes and
# compute the surface area on each of them. That way we can get the surface
# area of the largest airspace and the connectivity term presented in Earles
# et al. (2018) (Beyond porosity - Bromeliad paper).
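# A hedged sketch of that idea (kept commented out here, since it is not part
# of the original analysis): label the airspace, isolate the largest connected
# volume, and compare its surface area to the total IAS surface area as a
# connectivity-style term.
#unique_airspace = label(large_segmented_stack == ias_value, connectivity=1)
#airspace_props = regionprops(unique_airspace)
#largest_airspace = max(airspace_props, key=lambda p: p.area)
#largest_verts_faces = marching_cubes_lewiner(unique_airspace == largest_airspace.label)
#largest_ias_SA = mesh_surface_area(largest_verts_faces[0], largest_verts_faces[1]) * (px_edge**2)
#ias_connectivity = largest_ias_SA / true_ias_SA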
print(('Sm: '+str(true_ias_SA/leaf_area)))
print(('Ames/Vmes: '+str(true_ias_SA/(mesophyll_volume-vein_volume))))
# Write the data into a data frame
data_out = {'LeafArea':leaf_area,
'LeafThickness':leaf_thickness.mean(),
'LeafThickness_SD':leaf_thickness.std(),
'MesophyllThickness':mesophyll_thickness.mean(),
'MesophyllThickness_SD':mesophyll_thickness.std(),
'ADEpidermisThickness':epidermis_adaxial_thickness.mean(),
'ADEpidermisThickness_SD':epidermis_adaxial_thickness.std(),
'ABEpidermisThickness':epidermis_abaxial_thickness.mean(),
'ABEpidermisThickness_SD':epidermis_abaxial_thickness.std(),
'LeafVolume':leaf_volume,
'MesophyllVolume':mesophyll_volume,
'ADEpidermisVolume':epidermis_adaxial_volume,
'ABEpidermisVolume':epidermis_abaxial_volume,
'VeinVolume':vein_volume,
'CellVolume':cell_volume,
'IASVolume':air_volume,
'IASSurfaceArea':true_ias_SA,
'_SLICEStrimmed':trim_slices,
'_X_VALUEStrimme':trim_column*2}
results_out = | DataFrame(data_out, index={sample_name}) | pandas.DataFrame |
import os
import numpy as np
import pandas as pd
from tqdm import tqdm
from pathlib import Path
from glob import glob
from datetime import datetime
import libs.dirs as dirs
import libs.commons as commons
# from libs.index import IndexManager
import libs.utils as utils
class IterInfo:
def __init__(self, datasetFolder, indexPath, iterFolder):
self.datasetFolder = datasetFolder
self.iterFolder = iterFolder
self.indexPath = indexPath
self.iteration = 0
self.completed_iter = False
class IterationManager:
'''
Iteration loop organizer class.
Manages a iteration loop folder, containing a iter_info.pickle file that saves a IterInfo
object that has details about the current loop state.
Constructor arguments:
unlabeledFolder:
Folder containing the unlabeled images.
unlabeledIndexPath:
Path to the csv Index of the unlabeled images in unlabeledFolder
loopFolder:
Folder where the iteration folders and info will be saved
'''
def __init__(self, unlabeledFolder, unlabeledIndexPath, loopFolder=dirs.iter_folder):
self.unlabeledFolder = unlabeledFolder
self.unlabeledIndexPath = unlabeledIndexPath
self.loopFolder = Path(loopFolder)
self.iterInfoPath = self.loopFolder / "iter_info.pickle"
self.load_info()
def load_info(self):
if self.iterInfoPath.is_file():
self.iterInfo = utils.load_pickle(self.iterInfoPath)
else:
self.iterInfo = IterInfo(self.unlabeledFolder, self.unlabeledIndexPath, self.loopFolder)
dirs.create_folder(self.loopFolder)
utils.save_pickle(self.iterInfo, self.iterInfoPath)
return self.iterInfo
def new_iteration(self):
'''
create new iteration folder v
sample new images v
update iter_info v
label images
merge new labels (manual) to annotated dataset
train model
set boundaries
automatic annotation
merge new labels (automatic) to annotated dataset
update iter_info, iteration complete
Executes the following operations:
Check if it is the first iteration;
Load base index, create folders and iter_info;
Sample images
'''
if self.iterInfo.completed_iter == False and self.iterInfo.iteration != 0:
raise ValueError("Current iteration has not finished. Resolve it and try again.")
self.iterInfo.iteration += 1
self.iterInfo.completed_iter = False
print("Starting iteration {}.".format(self.iterInfo.iteration))
self.iterInfo.currentIterFolder = self.loopFolder / "iteration_{}".format(self.iterInfo.iteration)
dirs.create_folder(self.iterInfo.currentIterFolder)
print("Iteration setup finished.\nCall sample_images method for next step: sample and label images.")
def sample_images(self, seed=None):
'''
Sample a percentage (1%) of the unlabeled images for labeling.
Saves sampled images to 'iteration_#/sampled_images/'.
Sampled images index is saved to 'iteration_#/sampled_images.csv'.
'''
self.samplesIndexPath = self.iterInfo.currentIterFolder / \
"sampled_images_iteration_{}.csv".format(self.iterInfo.iteration)
# Check if samples index already exists: probably means sample_images was
# already executed this iteration
if self.samplesIndexPath.is_file():
raise FileExistsError(
"Sampled index already exists.\nHas sampling been already performed this iteration?\n \
To perform new sampling, delete sampled_images folder and index and run sample_images\
method again.")
# TODO: REMEMBER to Remove fixed seed when using sampler outside of testing
self.sampler = SampleImages(self.unlabeledIndexPath,
self.iterInfo.currentIterFolder, seed=seed)
self.sampler.sample(percentage=0.01)
self.sampler.save_to_index(self.samplesIndexPath)
def merge_labeled_dataset(self):
pass
def train_model(self):
pass
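# --- Hedged usage sketch (assumption, not part of the original module) -------
# Wires together the documented steps of one labeling-loop pass; the paths are
# placeholders (the real ones normally come from libs.dirs).
def _example_iteration_loop():
    manager = IterationManager(
        unlabeledFolder=Path("data/unlabeled_images"),
        unlabeledIndexPath=Path("data/unlabeled_index.csv"),
        loopFolder=Path("data/loop"),
    )
    manager.new_iteration()          # creates iteration_<n>/ and updates iter_info
    manager.sample_images(seed=42)   # copies ~1% of images and writes the csv index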
class SampleImages:
'''
Sample images from a folder or an index determined by source.
Sampled images are copied to destFolder / 'sampled_images'/.
'''
def __init__(self, source, destFolder, seed=None, verbose=True):
self.date = datetime.now()
self.source = Path(source)
self.destFolder = Path(destFolder)
self.imageFolder = self.destFolder / "sampled_images"
self.percentage = None
self.seed = seed
self.verbose = verbose
self.index = None
np.random.seed(self.seed)
dirs.create_folder(self.destFolder)
dirs.create_folder(self.imageFolder)
def sample(self, percentage=0.01, sample_min=None):
self.percentage = percentage
self.sample_min = sample_min
if self.source.is_dir():
# Sample files from directory
self._sample_from_folder()
elif self.source.suffix == ".csv":
# Sample files from entries in a csv index
self._sample_from_index()
else:
raise ValueError("Source must be a folder or csv index path.")
if self.verbose:
print("{}/{} images copied to \"{}\".".format(self.numSuccess, self.numSamples, self.destFolder))
if self.numSuccess != self.numSamples:
print("{} image paths did not exist and were not copied.".format(self.numSamples-self.numSuccess))
def _sample_routine(self):
self.numImages = len(self.imageList)
# Choose a number of images to sample
self.numSamples = int(self.numImages*self.percentage)
if self.sample_min is not None:
self.numSamples = np.clip(self.numSamples, self.sample_min, None)
# Uniform sampling of a percentage of total images
self.sampleIndexes = np.random.choice(self.numImages, size=self.numSamples, replace=False)
# Copy images to dest path
print("Copying images...")
self.imageSourcePaths = np.array(self.imageList)[self.sampleIndexes]
self.imageDestPaths = []
self.numSuccess = 0
for i in tqdm(range(self.numSamples)):
imagePath = self.imageSourcePaths[i]
destPath = self.get_image_dest_path(imagePath)
success = utils.copy_files(imagePath, destPath)
self.numSuccess += success
self.imageDestPaths.append(destPath)
print("\nImage copying finished.")
def _sample_from_folder(self):
self.index = None
# Get video paths in dataset folder (all videos)
self.imageList = glob(str(self.source) + "/**" + "/*.jpg", recursive=True)
self.imageList = list(map(utils.func_strip, self.imageList))
self._sample_routine()
return self.imageSourcePaths
def _sample_from_index(self):
self.index = | pd.read_csv(self.source, dtype=str) | pandas.read_csv |
import os
import sys
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.ticker as ticker
from matplotlib.dates import date2num as d2n
from collections.abc import Iterable
import numpy as np
month_dict = {"01": "Jan", "02": "Feb", "03": "Mar", "04": "Apr", "05": "May", "06": "Jun",
"07": "Jul", "08": "Aug", "09": "Sep", "10": "Oct", "11": "Nov", "12": "Dec"}
from datetime import datetime, timedelta
def get_xticks_and_xlabels(dfs, col="Date"):
date_df = | pd.concat([df["Date"] for df in dfs]) | pandas.concat |
import datetime
import re
import pandas as pd
import numpy as np
def mix_freq(lf_data, hf_data, xlag, ylag, horizon, start_date=None, end_date=None):
"""
Set up data for mixed-frequency regression
Args:
lf_data (Series): Low-frequency time series
hf_data (Series): High-frequency time series
xlag (int or str): Number of high frequency lags
ylag (int or str): Number of low-frequency lags
horizon (int):
start_date (date): Date on which to start estimation
end_date (date); Date on which to end estimation
Returns:
"""
ylag = calculate_lags(ylag, lf_data)
xlag = calculate_lags(xlag, hf_data)
min_date_y = lf_data.index[ylag]
min_date_x = hf_data.index[xlag + horizon]
if min_date_y < min_date_x:
min_date_y = next(d for d in list(lf_data.index) if d > min_date_x)
if (start_date is None) or (start_date < min_date_y):
start_date = min_date_y
if end_date is None:
end_date = lf_data.index[-2]
max_date = lf_data.index[-1]
if max_date > hf_data.index[-1]:
max_date = next(d for d in reversed(list(lf_data.index)) if d < hf_data.index[-1])
if end_date > max_date:
end_date = max_date
forecast_start_date = lf_data.index[lf_data.index.get_loc(end_date) + 1]
ylags = None
if ylag > 0:
# N.B. ylags will be a dataframe because there can be more than 1 lag
ylags = pd.concat([lf_data.shift(l) for l in range(1, ylag + 1)], axis=1)
x_rows = []
for lfdate in lf_data.loc[start_date:max_date].index:
start_hf = hf_data.index.get_loc(lfdate, method='bfill') # @todo Find a more efficient way
x_rows.append(hf_data.iloc[start_hf - horizon: start_hf - xlag - horizon: -1].values)
x = pd.DataFrame(data=x_rows, index=lf_data.loc[start_date:max_date].index)
return (lf_data.loc[start_date:end_date],
ylags.loc[start_date:end_date] if ylag > 0 else None,
x.loc[start_date:end_date],
lf_data[forecast_start_date:max_date],
ylags[forecast_start_date:max_date] if ylag > 0 else None,
x.loc[forecast_start_date:])
def calculate_lags(lag, time_series):
if isinstance(lag, str):
return parse_lag_string(lag, data_freq(time_series)[0])
else:
return lag
def data_freq(time_series):
"""
Determine frequency of given time series
Args:
time_series (Series): Series with datetime index
Returns:
string: frequency specifier
"""
try:
freq = time_series.index.freq
return freq.freqstr or pd.infer_freq(time_series.index)
except AttributeError:
return pd.infer_freq(time_series.index)
def parse_lag_string(lag_string, freq):
"""
Determine number of lags from lag string
Args:
lag_string: String indicating number of lags (eg, "3M", "2Q")
freq (string): Frequency of series to be lagged
Returns:
"""
freq_map = {
'd': {'m': 30, 'd': 1},
'b': {'m': 22, 'b': 1},
'm': {'q': 3, 'm': 1},
'q': {'y': 4},
'a': {'y': 1}
}
    m = re.match(r'(\d+)(\w)', lag_string)
duration = int(m.group(1))
period = m.group(2).lower()
return duration * freq_map[freq.lower()][period]
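# Hedged worked example (not in the original module): with daily data, "3m"
# resolves through freq_map['d']['m'] = 30, i.e. 3 * 30 = 90 high-frequency
# lags, while "1q" against monthly data gives 1 * 3 = 3 lags.
def _example_lag_arithmetic():
    assert parse_lag_string("3m", "d") == 90
    assert parse_lag_string("1q", "m") == 3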
if __name__ == '__main__':
lf_data = | pd.read_csv('./tests/data/gdp.csv', parse_dates=['DATE']) | pandas.read_csv |
from functools import partial
from itertools import product
from string import ascii_letters
import numpy as np
from pandas import (
Categorical,
DataFrame,
MultiIndex,
Series,
Timestamp,
date_range,
period_range,
)
from .pandas_vb_common import tm
method_blocklist = {
"object": {
"median",
"prod",
"sem",
"cumsum",
"sum",
"cummin",
"mean",
"max",
"skew",
"cumprod",
"cummax",
"pct_change",
"min",
"var",
"mad",
"describe",
"std",
"quantile",
},
"datetime": {
"median",
"prod",
"sem",
"cumsum",
"sum",
"mean",
"skew",
"cumprod",
"cummax",
"pct_change",
"var",
"mad",
"describe",
"std",
},
}
class ApplyDictReturn:
def setup(self):
self.labels = np.arange(1000).repeat(10)
self.data = Series(np.random.randn(len(self.labels)))
def time_groupby_apply_dict_return(self):
self.data.groupby(self.labels).apply(
lambda x: {"first": x.values[0], "last": x.values[-1]}
)
class Apply:
param_names = ["factor"]
params = [4, 5]
def setup(self, factor):
N = 10 ** factor
# two cases:
# - small groups: small data (N**4) + many labels (2000) -> average group
# size of 5 (-> larger overhead of slicing method)
# - larger groups: larger data (N**5) + fewer labels (20) -> average group
# size of 5000
labels = np.random.randint(0, 2000 if factor == 4 else 20, size=N)
labels2 = np.random.randint(0, 3, size=N)
df = DataFrame(
{
"key": labels,
"key2": labels2,
"value1": np.random.randn(N),
"value2": ["foo", "bar", "baz", "qux"] * (N // 4),
}
)
self.df = df
def time_scalar_function_multi_col(self, factor):
self.df.groupby(["key", "key2"]).apply(lambda x: 1)
def time_scalar_function_single_col(self, factor):
self.df.groupby("key").apply(lambda x: 1)
@staticmethod
def df_copy_function(g):
# ensure that the group name is available (see GH #15062)
g.name
return g.copy()
def time_copy_function_multi_col(self, factor):
self.df.groupby(["key", "key2"]).apply(self.df_copy_function)
def time_copy_overhead_single_col(self, factor):
self.df.groupby("key").apply(self.df_copy_function)
class Groups:
param_names = ["key"]
params = ["int64_small", "int64_large", "object_small", "object_large"]
def setup_cache(self):
size = 10 ** 6
data = {
"int64_small": Series(np.random.randint(0, 100, size=size)),
"int64_large": Series(np.random.randint(0, 10000, size=size)),
"object_small": Series(
tm.makeStringIndex(100).take(np.random.randint(0, 100, size=size))
),
"object_large": Series(
tm.makeStringIndex(10000).take(np.random.randint(0, 10000, size=size))
),
}
return data
def setup(self, data, key):
self.ser = data[key]
def time_series_groups(self, data, key):
self.ser.groupby(self.ser).groups
def time_series_indices(self, data, key):
self.ser.groupby(self.ser).indices
class GroupManyLabels:
params = [1, 1000]
param_names = ["ncols"]
def setup(self, ncols):
N = 1000
data = np.random.randn(N, ncols)
self.labels = np.random.randint(0, 100, size=N)
self.df = DataFrame(data)
def time_sum(self, ncols):
self.df.groupby(self.labels).sum()
class Nth:
param_names = ["dtype"]
params = ["float32", "float64", "datetime", "object"]
def setup(self, dtype):
N = 10 ** 5
# with datetimes (GH7555)
if dtype == "datetime":
values = date_range("1/1/2011", periods=N, freq="s")
elif dtype == "object":
values = ["foo"] * N
else:
values = np.arange(N).astype(dtype)
key = np.arange(N)
self.df = DataFrame({"key": key, "values": values})
self.df.iloc[1, 1] = np.nan # insert missing data
def time_frame_nth_any(self, dtype):
self.df.groupby("key").nth(0, dropna="any")
def time_groupby_nth_all(self, dtype):
self.df.groupby("key").nth(0, dropna="all")
def time_frame_nth(self, dtype):
self.df.groupby("key").nth(0)
def time_series_nth_any(self, dtype):
self.df["values"].groupby(self.df["key"]).nth(0, dropna="any")
def time_series_nth_all(self, dtype):
self.df["values"].groupby(self.df["key"]).nth(0, dropna="all")
def time_series_nth(self, dtype):
self.df["values"].groupby(self.df["key"]).nth(0)
class DateAttributes:
def setup(self):
rng = date_range("1/1/2000", "12/31/2005", freq="H")
self.year, self.month, self.day = rng.year, rng.month, rng.day
self.ts = Series(np.random.randn(len(rng)), index=rng)
def time_len_groupby_object(self):
len(self.ts.groupby([self.year, self.month, self.day]))
class Int64:
def setup(self):
arr = np.random.randint(-1 << 12, 1 << 12, (1 << 17, 5))
i = np.random.choice(len(arr), len(arr) * 5)
arr = np.vstack((arr, arr[i]))
i = np.random.permutation(len(arr))
arr = arr[i]
self.cols = list("abcde")
self.df = DataFrame(arr, columns=self.cols)
self.df["jim"], self.df["joe"] = np.random.randn(2, len(self.df)) * 10
def time_overflow(self):
self.df.groupby(self.cols).max()
class CountMultiDtype:
def setup_cache(self):
n = 10000
offsets = np.random.randint(n, size=n).astype("timedelta64[ns]")
dates = np.datetime64("now") + offsets
dates[np.random.rand(n) > 0.5] = np.datetime64("nat")
offsets[np.random.rand(n) > 0.5] = np.timedelta64("nat")
value2 = np.random.randn(n)
value2[np.random.rand(n) > 0.5] = np.nan
obj = np.random.choice(list("ab"), size=n).astype(object)
obj[np.random.randn(n) > 0.5] = np.nan
df = DataFrame(
{
"key1": np.random.randint(0, 500, size=n),
"key2": np.random.randint(0, 100, size=n),
"dates": dates,
"value2": value2,
"value3": np.random.randn(n),
"ints": np.random.randint(0, 1000, size=n),
"obj": obj,
"offsets": offsets,
}
)
return df
def time_multi_count(self, df):
df.groupby(["key1", "key2"]).count()
class CountMultiInt:
def setup_cache(self):
n = 10000
df = DataFrame(
{
"key1": np.random.randint(0, 500, size=n),
"key2": np.random.randint(0, 100, size=n),
"ints": np.random.randint(0, 1000, size=n),
"ints2": np.random.randint(0, 1000, size=n),
}
)
return df
def time_multi_int_count(self, df):
df.groupby(["key1", "key2"]).count()
def time_multi_int_nunique(self, df):
df.groupby(["key1", "key2"]).nunique()
class AggFunctions:
def setup_cache(self):
N = 10 ** 5
fac1 = np.array(["A", "B", "C"], dtype="O")
fac2 = np.array(["one", "two"], dtype="O")
df = DataFrame(
{
"key1": fac1.take(np.random.randint(0, 3, size=N)),
"key2": fac2.take(np.random.randint(0, 2, size=N)),
"value1": np.random.randn(N),
"value2": np.random.randn(N),
"value3": np.random.randn(N),
}
)
return df
def time_different_str_functions(self, df):
df.groupby(["key1", "key2"]).agg(
{"value1": "mean", "value2": "var", "value3": "sum"}
)
def time_different_numpy_functions(self, df):
df.groupby(["key1", "key2"]).agg(
{"value1": np.mean, "value2": np.var, "value3": np.sum}
)
def time_different_python_functions_multicol(self, df):
df.groupby(["key1", "key2"]).agg([sum, min, max])
def time_different_python_functions_singlecol(self, df):
df.groupby("key1").agg([sum, min, max])
class GroupStrings:
def setup(self):
n = 2 * 10 ** 5
alpha = list(map("".join, product(ascii_letters, repeat=4)))
data = np.random.choice(alpha, (n // 5, 4), replace=False)
data = np.repeat(data, 5, axis=0)
self.df = DataFrame(data, columns=list("abcd"))
self.df["joe"] = (np.random.randn(len(self.df)) * 10).round(3)
self.df = self.df.sample(frac=1).reset_index(drop=True)
def time_multi_columns(self):
self.df.groupby(list("abcd")).max()
class MultiColumn:
def setup_cache(self):
N = 10 ** 5
key1 = np.tile(np.arange(100, dtype=object), 1000)
key2 = key1.copy()
np.random.shuffle(key1)
np.random.shuffle(key2)
df = DataFrame(
{
"key1": key1,
"key2": key2,
"data1": np.random.randn(N),
"data2": np.random.randn(N),
}
)
return df
def time_lambda_sum(self, df):
df.groupby(["key1", "key2"]).agg(lambda x: x.values.sum())
def time_cython_sum(self, df):
df.groupby(["key1", "key2"]).sum()
def time_col_select_lambda_sum(self, df):
df.groupby(["key1", "key2"])["data1"].agg(lambda x: x.values.sum())
def time_col_select_numpy_sum(self, df):
df.groupby(["key1", "key2"])["data1"].agg(np.sum)
class Size:
def setup(self):
n = 10 ** 5
offsets = np.random.randint(n, size=n).astype("timedelta64[ns]")
dates = np.datetime64("now") + offsets
self.df = DataFrame(
{
"key1": np.random.randint(0, 500, size=n),
"key2": np.random.randint(0, 100, size=n),
"value1": np.random.randn(n),
"value2": np.random.randn(n),
"value3": np.random.randn(n),
"dates": dates,
}
)
self.draws = Series(np.random.randn(n))
labels = Series(["foo", "bar", "baz", "qux"] * (n // 4))
self.cats = labels.astype("category")
def time_multi_size(self):
self.df.groupby(["key1", "key2"]).size()
def time_category_size(self):
self.draws.groupby(self.cats).size()
class Shift:
def setup(self):
N = 18
self.df = DataFrame({"g": ["a", "b"] * 9, "v": list(range(N))})
def time_defaults(self):
self.df.groupby("g").shift()
def time_fill_value(self):
self.df.groupby("g").shift(fill_value=99)
class FillNA:
def setup(self):
N = 100
self.df = DataFrame(
{"group": [1] * N + [2] * N, "value": [np.nan, 1.0] * N}
).set_index("group")
def time_df_ffill(self):
self.df.groupby("group").fillna(method="ffill")
def time_df_bfill(self):
self.df.groupby("group").fillna(method="bfill")
def time_srs_ffill(self):
self.df.groupby("group")["value"].fillna(method="ffill")
def time_srs_bfill(self):
self.df.groupby("group")["value"].fillna(method="bfill")
class GroupByMethods:
param_names = ["dtype", "method", "application", "ncols"]
params = [
["int", "float", "object", "datetime", "uint"],
[
"all",
"any",
"bfill",
"count",
"cumcount",
"cummax",
"cummin",
"cumprod",
"cumsum",
"describe",
"ffill",
"first",
"head",
"last",
"mad",
"max",
"min",
"median",
"mean",
"nunique",
"pct_change",
"prod",
"quantile",
"rank",
"sem",
"shift",
"size",
"skew",
"std",
"sum",
"tail",
"unique",
"value_counts",
"var",
],
["direct", "transformation"],
[1, 2, 5, 10],
]
def setup(self, dtype, method, application, ncols):
if method in method_blocklist.get(dtype, {}):
raise NotImplementedError # skip benchmark
if ncols != 1 and method in ["value_counts", "unique"]:
# DataFrameGroupBy doesn't have these methods
raise NotImplementedError
if application == "transformation" and method in [
"head",
"tail",
"unique",
"value_counts",
"size",
]:
# DataFrameGroupBy doesn't have these methods
raise NotImplementedError
ngroups = 1000
size = ngroups * 2
rng = np.arange(ngroups).reshape(-1, 1)
rng = np.broadcast_to(rng, (len(rng), ncols))
taker = np.random.randint(0, ngroups, size=size)
values = rng.take(taker, axis=0)
if dtype == "int":
key = np.random.randint(0, size, size=size)
elif dtype == "uint":
key = np.random.randint(0, size, size=size, dtype=dtype)
elif dtype == "float":
key = np.concatenate(
[np.random.random(ngroups) * 0.1, np.random.random(ngroups) * 10.0]
)
elif dtype == "object":
key = ["foo"] * size
elif dtype == "datetime":
key = date_range("1/1/2011", periods=size, freq="s")
cols = [f"values{n}" for n in range(ncols)]
df = DataFrame(values, columns=cols)
df["key"] = key
if len(cols) == 1:
cols = cols[0]
if application == "transformation":
if method == "describe":
raise NotImplementedError
self.as_group_method = lambda: df.groupby("key")[cols].transform(method)
self.as_field_method = lambda: df.groupby(cols)["key"].transform(method)
else:
self.as_group_method = getattr(df.groupby("key")[cols], method)
self.as_field_method = getattr(df.groupby(cols)["key"], method)
def time_dtype_as_group(self, dtype, method, application, ncols):
self.as_group_method()
def time_dtype_as_field(self, dtype, method, application, ncols):
self.as_field_method()
class GroupByCythonAgg:
"""
    Benchmarks specifically targeting our cython aggregation algorithms
(using a big enough dataframe with simple key, so a large part of the
time is actually spent in the grouped aggregation).
"""
param_names = ["dtype", "method"]
params = [
["float64"],
[
"sum",
"prod",
"min",
"max",
"mean",
"median",
"var",
"first",
"last",
"any",
"all",
],
]
def setup(self, dtype, method):
N = 1_000_000
df = DataFrame(np.random.randn(N, 10), columns=list("abcdefghij"))
df["key"] = np.random.randint(0, 100, size=N)
self.df = df
def time_frame_agg(self, dtype, method):
self.df.groupby("key").agg(method)
class Cumulative:
param_names = ["dtype", "method"]
params = [
["float64", "int64", "Float64", "Int64"],
["cummin", "cummax", "cumsum"],
]
def setup(self, dtype, method):
N = 500_000
vals = np.random.randint(-10, 10, (N, 5))
null_vals = vals.astype(float, copy=True)
null_vals[::2, :] = np.nan
null_vals[::3, :] = np.nan
df = DataFrame(vals, columns=list("abcde"), dtype=dtype)
null_df = DataFrame(null_vals, columns=list("abcde"), dtype=dtype)
keys = np.random.randint(0, 100, size=N)
df["key"] = keys
null_df["key"] = keys
self.df = df
self.null_df = null_df
def time_frame_transform(self, dtype, method):
self.df.groupby("key").transform(method)
def time_frame_transform_many_nulls(self, dtype, method):
self.null_df.groupby("key").transform(method)
class RankWithTies:
# GH 21237
param_names = ["dtype", "tie_method"]
params = [
["float64", "float32", "int64", "datetime64"],
["first", "average", "dense", "min", "max"],
]
def setup(self, dtype, tie_method):
N = 10 ** 4
if dtype == "datetime64":
data = np.array([Timestamp("2011/01/01")] * N, dtype=dtype)
else:
data = np.array([1] * N, dtype=dtype)
self.df = DataFrame({"values": data, "key": ["foo"] * N})
def time_rank_ties(self, dtype, tie_method):
self.df.groupby("key").rank(method=tie_method)
class Float32:
# GH 13335
def setup(self):
tmp1 = (np.random.random(10000) * 0.1).astype(np.float32)
tmp2 = (np.random.random(10000) * 10.0).astype(np.float32)
tmp = np.concatenate((tmp1, tmp2))
arr = np.repeat(tmp, 10)
self.df = DataFrame({"a": arr, "b": arr})
def time_sum(self):
self.df.groupby(["a"])["b"].sum()
class String:
# GH#41596
param_names = ["dtype", "method"]
params = [
["str", "string[python]"],
[
"sum",
"prod",
"min",
"max",
"mean",
"median",
"var",
"first",
"last",
"any",
"all",
],
]
def setup(self, dtype, method):
cols = list("abcdefghjkl")
self.df = DataFrame(
np.random.randint(0, 100, size=(1_000_000, len(cols))),
columns=cols,
dtype=dtype,
)
def time_str_func(self, dtype, method):
self.df.groupby("a")[self.df.columns[1:]].agg(method)
class Categories:
def setup(self):
N = 10 ** 5
arr = np.random.random(N)
data = {"a": Categorical(np.random.randint(10000, size=N)), "b": arr}
self.df = | DataFrame(data) | pandas.DataFrame |
import nose
import os
import string
from distutils.version import LooseVersion
from datetime import datetime, date, timedelta
from pandas import Series, DataFrame, MultiIndex, PeriodIndex, date_range
from pandas.compat import range, lrange, StringIO, lmap, lzip, u, zip
import pandas.util.testing as tm
from pandas.util.testing import ensure_clean
from pandas.core.config import set_option
import numpy as np
from numpy import random
from numpy.random import randn
from numpy.testing import assert_array_equal
from numpy.testing.decorators import slow
import pandas.tools.plotting as plotting
def _skip_if_no_scipy():
try:
import scipy
except ImportError:
raise nose.SkipTest("no scipy")
@tm.mplskip
class TestSeriesPlots(tm.TestCase):
def setUp(self):
import matplotlib as mpl
self.mpl_le_1_2_1 = str(mpl.__version__) <= LooseVersion('1.2.1')
self.ts = tm.makeTimeSeries()
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
self.iseries = tm.makePeriodSeries()
self.iseries.name = 'iseries'
def tearDown(self):
tm.close()
@slow
def test_plot(self):
_check_plot_works(self.ts.plot, label='foo')
_check_plot_works(self.ts.plot, use_index=False)
_check_plot_works(self.ts.plot, rot=0)
_check_plot_works(self.ts.plot, style='.', logy=True)
_check_plot_works(self.ts.plot, style='.', logx=True)
_check_plot_works(self.ts.plot, style='.', loglog=True)
_check_plot_works(self.ts[:10].plot, kind='bar')
_check_plot_works(self.iseries.plot)
_check_plot_works(self.series[:5].plot, kind='bar')
_check_plot_works(self.series[:5].plot, kind='line')
_check_plot_works(self.series[:5].plot, kind='barh')
_check_plot_works(self.series[:10].plot, kind='barh')
_check_plot_works(Series(randn(10)).plot, kind='bar', color='black')
@slow
def test_plot_figsize_and_title(self):
# figsize and title
import matplotlib.pyplot as plt
ax = self.series.plot(title='Test', figsize=(16, 8))
self.assertEqual(ax.title.get_text(), 'Test')
assert_array_equal(np.round(ax.figure.get_size_inches()),
np.array((16., 8.)))
@slow
def test_bar_colors(self):
import matplotlib.pyplot as plt
import matplotlib.colors as colors
default_colors = plt.rcParams.get('axes.color_cycle')
custom_colors = 'rgcby'
df = DataFrame(randn(5, 5))
ax = df.plot(kind='bar')
rects = ax.patches
conv = colors.colorConverter
for i, rect in enumerate(rects[::5]):
xp = conv.to_rgba(default_colors[i % len(default_colors)])
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
ax = df.plot(kind='bar', color=custom_colors)
rects = ax.patches
conv = colors.colorConverter
for i, rect in enumerate(rects[::5]):
xp = conv.to_rgba(custom_colors[i])
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
from matplotlib import cm
# Test str -> colormap functionality
ax = df.plot(kind='bar', colormap='jet')
rects = ax.patches
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
for i, rect in enumerate(rects[::5]):
xp = rgba_colors[i]
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
# Test colormap functionality
ax = df.plot(kind='bar', colormap=cm.jet)
rects = ax.patches
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
for i, rect in enumerate(rects[::5]):
xp = rgba_colors[i]
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
df.ix[:, [0]].plot(kind='bar', color='DodgerBlue')
@slow
def test_bar_linewidth(self):
df = DataFrame(randn(5, 5))
# regular
ax = df.plot(kind='bar', linewidth=2)
for r in ax.patches:
self.assertEqual(r.get_linewidth(), 2)
# stacked
ax = df.plot(kind='bar', stacked=True, linewidth=2)
for r in ax.patches:
self.assertEqual(r.get_linewidth(), 2)
# subplots
axes = df.plot(kind='bar', linewidth=2, subplots=True)
for ax in axes:
for r in ax.patches:
self.assertEqual(r.get_linewidth(), 2)
@slow
def test_bar_log(self):
expected = np.array([1., 10., 100., 1000.])
if not self.mpl_le_1_2_1:
expected = np.hstack((.1, expected, 1e4))
ax = Series([200, 500]).plot(log=True, kind='bar')
assert_array_equal(ax.yaxis.get_ticklocs(), expected)
def test_rotation(self):
df = DataFrame(randn(5, 5))
ax = df.plot(rot=30)
for l in ax.get_xticklabels():
self.assertEqual(l.get_rotation(), 30)
def test_irregular_datetime(self):
rng = date_range('1/1/2000', '3/1/2000')
rng = rng[[0, 1, 2, 3, 5, 9, 10, 11, 12]]
ser = Series(randn(len(rng)), rng)
ax = ser.plot()
xp = datetime(1999, 1, 1).toordinal()
ax.set_xlim('1/1/1999', '1/1/2001')
self.assertEqual(xp, ax.get_xlim()[0])
@slow
def test_hist(self):
_check_plot_works(self.ts.hist)
_check_plot_works(self.ts.hist, grid=False)
_check_plot_works(self.ts.hist, figsize=(8, 10))
_check_plot_works(self.ts.hist, by=self.ts.index.month)
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 1)
_check_plot_works(self.ts.hist, ax=ax)
_check_plot_works(self.ts.hist, ax=ax, figure=fig)
_check_plot_works(self.ts.hist, figure=fig)
tm.close()
fig, (ax1, ax2) = plt.subplots(1, 2)
_check_plot_works(self.ts.hist, figure=fig, ax=ax1)
_check_plot_works(self.ts.hist, figure=fig, ax=ax2)
with tm.assertRaises(ValueError):
self.ts.hist(by=self.ts.index, figure=fig)
@slow
def test_hist_layout(self):
n = 10
gender = tm.choice(['Male', 'Female'], size=n)
df = DataFrame({'gender': gender,
'height': random.normal(66, 4, size=n), 'weight':
random.normal(161, 32, size=n)})
with tm.assertRaises(ValueError):
df.height.hist(layout=(1, 1))
with tm.assertRaises(ValueError):
df.height.hist(layout=[1, 1])
@slow
def test_hist_layout_with_by(self):
import matplotlib.pyplot as plt
n = 10
gender = tm.choice(['Male', 'Female'], size=n)
df = DataFrame({'gender': gender,
'height': random.normal(66, 4, size=n), 'weight':
random.normal(161, 32, size=n),
'category': random.randint(4, size=n)})
_check_plot_works(df.height.hist, by=df.gender, layout=(2, 1))
tm.close()
_check_plot_works(df.height.hist, by=df.gender, layout=(1, 2))
tm.close()
_check_plot_works(df.weight.hist, by=df.category, layout=(1, 4))
tm.close()
_check_plot_works(df.weight.hist, by=df.category, layout=(4, 1))
tm.close()
@slow
def test_hist_no_overlap(self):
from matplotlib.pyplot import subplot, gcf, close
x = Series(randn(2))
y = Series(randn(2))
subplot(121)
x.hist()
subplot(122)
y.hist()
fig = gcf()
axes = fig.get_axes()
self.assertEqual(len(axes), 2)
@slow
def test_plot_fails_with_dupe_color_and_style(self):
x = Series(randn(2))
with tm.assertRaises(ValueError):
x.plot(style='k--', color='k')
@slow
def test_hist_by_no_extra_plots(self):
import matplotlib.pyplot as plt
n = 10
df = DataFrame({'gender': tm.choice(['Male', 'Female'], size=n),
'height': random.normal(66, 4, size=n)})
axes = df.height.hist(by=df.gender)
self.assertEqual(len(plt.get_fignums()), 1)
def test_plot_fails_when_ax_differs_from_figure(self):
from pylab import figure, close
fig1 = figure()
fig2 = figure()
ax1 = fig1.add_subplot(111)
with tm.assertRaises(AssertionError):
self.ts.hist(ax=ax1, figure=fig2)
@slow
def test_kde(self):
_skip_if_no_scipy()
_check_plot_works(self.ts.plot, kind='kde')
_check_plot_works(self.ts.plot, kind='density')
ax = self.ts.plot(kind='kde', logy=True)
self.assertEqual(ax.get_yscale(), 'log')
@slow
def test_kde_kwargs(self):
_skip_if_no_scipy()
from numpy import linspace
_check_plot_works(self.ts.plot, kind='kde', bw_method=.5, ind=linspace(-100,100,20))
_check_plot_works(self.ts.plot, kind='density', bw_method=.5, ind=linspace(-100,100,20))
ax = self.ts.plot(kind='kde', logy=True, bw_method=.5, ind=linspace(-100,100,20))
self.assertEqual(ax.get_yscale(), 'log')
@slow
def test_kde_color(self):
_skip_if_no_scipy()
ax = self.ts.plot(kind='kde', logy=True, color='r')
lines = ax.get_lines()
self.assertEqual(len(lines), 1)
self.assertEqual(lines[0].get_color(), 'r')
@slow
def test_autocorrelation_plot(self):
from pandas.tools.plotting import autocorrelation_plot
_check_plot_works(autocorrelation_plot, self.ts)
_check_plot_works(autocorrelation_plot, self.ts.values)
@slow
def test_lag_plot(self):
from pandas.tools.plotting import lag_plot
_check_plot_works(lag_plot, self.ts)
_check_plot_works(lag_plot, self.ts, lag=5)
@slow
def test_bootstrap_plot(self):
from pandas.tools.plotting import bootstrap_plot
_check_plot_works(bootstrap_plot, self.ts, size=10)
def test_invalid_plot_data(self):
s = Series(list('abcd'))
kinds = 'line', 'bar', 'barh', 'kde', 'density'
for kind in kinds:
with tm.assertRaises(TypeError):
s.plot(kind=kind)
@slow
def test_valid_object_plot(self):
s = Series(lrange(10), dtype=object)
kinds = 'line', 'bar', 'barh', 'kde', 'density'
for kind in kinds:
_check_plot_works(s.plot, kind=kind)
def test_partially_invalid_plot_data(self):
s = Series(['a', 'b', 1.0, 2])
kinds = 'line', 'bar', 'barh', 'kde', 'density'
for kind in kinds:
with tm.assertRaises(TypeError):
s.plot(kind=kind)
def test_invalid_kind(self):
s = Series([1, 2])
with tm.assertRaises(ValueError):
s.plot(kind='aasdf')
@slow
def test_dup_datetime_index_plot(self):
dr1 = date_range('1/1/2009', periods=4)
dr2 = date_range('1/2/2009', periods=4)
index = dr1.append(dr2)
values = randn(index.size)
s = Series(values, index=index)
_check_plot_works(s.plot)
@tm.mplskip
class TestDataFramePlots(tm.TestCase):
def setUp(self):
import matplotlib as mpl
self.mpl_le_1_2_1 = str(mpl.__version__) <= LooseVersion('1.2.1')
def tearDown(self):
tm.close()
@slow
def test_plot(self):
df = tm.makeTimeDataFrame()
_check_plot_works(df.plot, grid=False)
_check_plot_works(df.plot, subplots=True)
_check_plot_works(df.plot, subplots=True, use_index=False)
df = DataFrame({'x': [1, 2], 'y': [3, 4]})
self._check_plot_fails(df.plot, kind='line', blarg=True)
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
_check_plot_works(df.plot, use_index=True)
_check_plot_works(df.plot, sort_columns=False)
_check_plot_works(df.plot, yticks=[1, 5, 10])
_check_plot_works(df.plot, xticks=[1, 5, 10])
_check_plot_works(df.plot, ylim=(-100, 100), xlim=(-100, 100))
_check_plot_works(df.plot, subplots=True, title='blah')
_check_plot_works(df.plot, title='blah')
tuples = lzip(string.ascii_letters[:10], range(10))
df = DataFrame(np.random.rand(10, 3),
index=MultiIndex.from_tuples(tuples))
_check_plot_works(df.plot, use_index=True)
# unicode
index = MultiIndex.from_tuples([(u('\u03b1'), 0),
(u('\u03b1'), 1),
(u('\u03b2'), 2),
(u('\u03b2'), 3),
(u('\u03b3'), 4),
(u('\u03b3'), 5),
(u('\u03b4'), 6),
(u('\u03b4'), 7)], names=['i0', 'i1'])
columns = MultiIndex.from_tuples([('bar', u('\u0394')),
('bar', u('\u0395'))], names=['c0',
'c1'])
df = DataFrame(np.random.randint(0, 10, (8, 2)),
columns=columns,
index=index)
_check_plot_works(df.plot, title=u('\u03A3'))
def test_nonnumeric_exclude(self):
import matplotlib.pyplot as plt
df = DataFrame({'A': ["x", "y", "z"], 'B': [1, 2, 3]})
ax = df.plot()
self.assertEqual(len(ax.get_lines()), 1) # B was plotted
@slow
def test_implicit_label(self):
df = DataFrame(randn(10, 3), columns=['a', 'b', 'c'])
ax = df.plot(x='a', y='b')
self.assertEqual(ax.xaxis.get_label().get_text(), 'a')
@slow
def test_explicit_label(self):
df = DataFrame(randn(10, 3), columns=['a', 'b', 'c'])
ax = df.plot(x='a', y='b', label='LABEL')
self.assertEqual(ax.xaxis.get_label().get_text(), 'LABEL')
@slow
def test_plot_xy(self):
import matplotlib.pyplot as plt
# columns.inferred_type == 'string'
df = tm.makeTimeDataFrame()
self._check_data(df.plot(x=0, y=1),
df.set_index('A')['B'].plot())
self._check_data(df.plot(x=0), df.set_index('A').plot())
self._check_data(df.plot(y=0), df.B.plot())
self._check_data(df.plot(x='A', y='B'),
df.set_index('A').B.plot())
self._check_data(df.plot(x='A'), df.set_index('A').plot())
self._check_data(df.plot(y='B'), df.B.plot())
# columns.inferred_type == 'integer'
df.columns = lrange(1, len(df.columns) + 1)
self._check_data(df.plot(x=1, y=2),
df.set_index(1)[2].plot())
self._check_data(df.plot(x=1), df.set_index(1).plot())
self._check_data(df.plot(y=1), df[1].plot())
# figsize and title
ax = df.plot(x=1, y=2, title='Test', figsize=(16, 8))
self.assertEqual(ax.title.get_text(), 'Test')
assert_array_equal(np.round(ax.figure.get_size_inches()),
np.array((16., 8.)))
# columns.inferred_type == 'mixed'
# TODO add MultiIndex test
@slow
def test_xcompat(self):
import pandas as pd
import matplotlib.pyplot as plt
df = tm.makeTimeDataFrame()
ax = df.plot(x_compat=True)
lines = ax.get_lines()
self.assert_(not isinstance(lines[0].get_xdata(), PeriodIndex))
tm.close()
pd.plot_params['xaxis.compat'] = True
ax = df.plot()
lines = ax.get_lines()
self.assert_(not isinstance(lines[0].get_xdata(), PeriodIndex))
| tm.close() | pandas.util.testing.close |
# -*- coding: utf-8 -*-
# @Author : BoPo
# @Time : 2021/10/11 17:28
# @Function:
import json
import httpx
import pandas as pd
from tenacity import stop_after_attempt, wait_fixed, retry
from mootdx.consts import return_last_value
@retry(wait=wait_fixed(2), retry_error_callback=return_last_value, stop=stop_after_attempt(5))
def fq_factor(method: str, symbol: str) -> pd.DataFrame:
zh_sina_a_stock_hfq_url = "https://finance.sina.com.cn/realstock/company/{}/hfq.js"
zh_sina_a_stock_qfq_url = "https://finance.sina.com.cn/realstock/company/{}/qfq.js"
client = httpx.Client(verify=False)
if method == "hfq":
res = client.get(zh_sina_a_stock_hfq_url.format(symbol))
hfq_factor_df = pd.DataFrame(json.loads(res.text.split("=")[1].split("\n")[0])["data"])
if hfq_factor_df.shape[0] == 0:
raise ValueError("sina hfq factor not available")
hfq_factor_df.columns = ["date", "hfq_factor"]
hfq_factor_df.index = pd.to_datetime(hfq_factor_df.date)
del hfq_factor_df["date"]
hfq_factor_df.reset_index(inplace=True)
# hfq_factor_df = hfq_factor_df.set_index('date')
return hfq_factor_df
else:
res = client.get(zh_sina_a_stock_qfq_url.format(symbol))
qfq_factor_df = pd.DataFrame(json.loads(res.text.split("=")[1].split("\n")[0])["data"])
if qfq_factor_df.shape[0] == 0:
            raise ValueError("sina qfq factor not available")
qfq_factor_df.columns = ["date", "qfq_factor"]
qfq_factor_df.index = | pd.to_datetime(qfq_factor_df.date) | pandas.to_datetime |
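# --- Hedged usage sketch (assumption, needs network access) ------------------
# A typical way to use fq_factor: join the per-date factor onto an unadjusted
# daily frame and scale the close column. `daily` (a DataFrame indexed by date
# with a "close" column) and the symbol are placeholders, not part of mootdx.
def _example_apply_hfq(daily: pd.DataFrame, symbol: str = "sh600519") -> pd.DataFrame:
    factor = fq_factor("hfq", symbol).set_index("date")
    out = daily.join(factor, how="left")
    out["hfq_factor"] = out["hfq_factor"].ffill().astype(float)
    out["close_hfq"] = out["close"] * out["hfq_factor"]
    return out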
import sys
import os
import pandas as pd
class UserError(Exception):
"""Errors regarding operations on user field of credentials."""
pass
class IAM:
"""Identity and Access Manager"""
# credentials storage path
_path: str = "credentials.csv"
# local timezone
_tz_local: str = 'utc'
# needed columns in credentials dataframe
_columns: list = ["user", "role", "password", "creation"]
def __init__(self, path: str = "credentials.csv", tz_local = "utc"):
"""Parameters:
- path: path of a csv file containing credentials with columns user, role, password, creation;
- tz_local: local timezone used for timestamps."""
self._path = path
self._tz_local = tz_local
def _df_is_valid(self, /, df: pd.DataFrame) -> bool:
for col in self._columns:
if col not in df.columns.tolist():
# missing column
return False
# df is valid
return True
def _read_df(self, check: bool = True) -> pd.DataFrame:
# read df
if os.path.exists(self._path):
df = pd.read_csv(self._path, usecols=self._columns)
else:
df = pd.DataFrame(columns=self._columns)
# check df validity
if check and not self._df_is_valid(df):
raise RuntimeError(f"credentials dataframe stored in {self._path} is not valid")
return df
def _write_df(self, /, df: pd.DataFrame) -> None:
df.loc[:, self._columns].to_csv(self._path, index=False)
def is_registered(self, user: str) -> bool:
"""Check if user is already registered."""
if user not in self._read_df(check=True).user.tolist():
return False
return True
def _registration_is_valid(self, user: str, errors: str = "strict", invert: bool = False) -> bool:
"""Check registration validity.
A registration is considered valid when `invert` is True and `user` is not registered
or `invert` is False and `user` is registered.
If `errors` is strict an exception is raised on invalid registration, if it is `ignore`
only a False boolean value is returned."""
if invert == self.is_registered(user):
if errors == "strict":
raise UserError(f"user \"{user}\" is {'already' if invert else 'not'} registered")
elif errors == "ignore":
return False
else:
raise ValueError("invalid error mode")
return True
def _get_column(self, column: str, user: str, errors: str = "strict"):
"""Get a value from a specific column and user."""
if not self._registration_is_valid(user, errors):
# return here only if errors is not strict, otherwise _registration_is_valid
# throws an exception on invalid registration
return ""
return self._read_df(check=True).loc[lambda x: x.user == user, column].values[0]
def add_user(self, user: str, password: str, role: str = "", errors: str = "strict") -> None:
"""Add a new user to credential store.
If user is already registered a UserError is raised when `errors`
is "strict" or nothing is done if `errors` is \"ignore\"."""
if not self._registration_is_valid(user, errors, invert=True):
# return here only if errors is not strict, otherwise _registration_is_valid
# throws an exception on invalid registration
return
# append new user
df = self._read_df(check=True).append({"user": user,
"role": role,
"password": password,
"creation": | pd.Timestamp.utcnow() | pandas.Timestamp.utcnow |
# OWN FORMULATION
# LIBRARIES
import numpy as np
from mpmath import mp  # for higher decimal precision
mp.dps = 50
import pandas as pd
import matplotlib.pyplot as plt
from scipy.sparse import csc_matrix, coo_matrix
from scipy.sparse import lil_matrix, diags, hstack, vstack
from scipy.sparse.linalg import spsolve, factorized
np.set_printoptions(linewidth=2000, edgeitems=1000, suppress=True)
| pd.set_option('display.max_rows', 5000) | pandas.set_option |
import pandas as pd
if __name__ == '__main__':
tennet_delta_df = | pd.read_csv('../data/tennet_balans_delta/tennet_balans_delta_2021.csv') | pandas.read_csv |
from typing import List, Optional, Union
from geopandas.array import points_from_xy
import numpy as np
import netCDF4
import pandas as pd
import geopandas as gpd
import shapely.vectorized
from scipy.spatial import cKDTree
from pathlib import Path
# from joblib import Parallel, delayed
import typer
import shapely
from shapely.geometry import Point
# from pyproj import crs
# import pyproj
from powergenome.params import DATA_PATHS, IPM_SHAPEFILE_PATH, IPM_GEOJSON_PATH
from powergenome.transmission import haversine
from powergenome.nrelatb import investment_cost_calculator, fetch_atb_costs
from powergenome.util import reverse_dict_of_lists, init_pudl_connection, find_centroid
from powergenome.price_adjustment import inflation_price_adjustment
import math
CWD = Path.cwd()
VCE_DATA_PATH = Path("/Volumes/Extreme SSD/princeton_data")
VCE_WIND_PATH = VCE_DATA_PATH / "PRINCETON-Wind-Data-2012"
VCE_SOLAR_PATH = VCE_DATA_PATH / "PRINCETON-Solar-Data-2012"
ATB_USD_YEAR = 2018
ATB_DATA_YEAR = 2020
pudl_engine, pudl_out = init_pudl_connection()
cost_multiplier_region_map = {
"TRE": ["ERC_PHDL", "ERC_REST", "ERC_WEST"],
"FRCC": ["FRCC"],
"MISW": ["MIS_WUMS", "MIS_MNWI", "MIS_IA"],
"MISE": ["MIS_LMI"],
"PJMC": ["PJM_COMD"],
"MISC": ["MIS_IL", "MIS_MO", "S_D_AECI", "MIS_INKY"],
"SPPN": ["MIS_MAPP", "SPP_WAUE", "SPP_NEBR", "MIS_MIDA"],
"SPPC": ["SPP_N"],
"SPPS": ["SPP_WEST", "SPP_SPS"],
"MISS": ["MIS_AMSO", "MIS_WOTA", "MIS_LA", "MIS_AR", "MIS_D_MS"],
"SRSE": ["S_SOU"],
"SRCA": ["S_VACA"],
"PJMD": ["PJM_Dom"],
"PJMW": ["PJM_West", "PJM_AP", "PJM_ATSI"],
"PJME": ["PJM_WMAC", "PJM_EMAC", "PJM_SMAC", "PJM_PENE", "PJM_NJLand"],
"SRCE": ["S_C_TVA", "S_C_KY"],
"NYUP": [
"NY_Z_A",
"NY_Z_B",
"NY_Z_C&E",
"NY_Z_D",
"NY_Z_F",
"NY_Z_G-I",
],
"NYCW": ["NY_Z_J", "NY_Z_K"],
"ISNE": ["NENG_ME", "NENGREST", "NENG_CT"],
"RMRG": ["WECC_CO"],
"BASN": ["WECC_ID", "WECC_WY", "WECC_UT", "WECC_NNV"],
"NWPP": ["WECC_PNW", "WECC_MT"],
"CANO": ["WEC_CALN", "WEC_BANC"],
"CASO": ["WECC_IID", "WECC_SCE", "WEC_LADW", "WEC_SDGE"],
"SRSG": ["WECC_AZ", "WECC_NM", "WECC_SNV"],
}
rev_cost_mult_region_map = reverse_dict_of_lists(cost_multiplier_region_map)
tx_capex_region_map = {
"wecc": [
"WECC_AZ",
"WECC_CO",
"WECC_ID",
"WECC_MT",
"WECC_NM",
"WECC_NNV",
"WECC_PNW",
"WECC_SNV",
"WECC_UT",
"WECC_WY",
],
"ca": [
"WEC_BANC",
"WEC_CALN",
"WEC_LADW",
"WEC_SDGE",
"WECC_IID",
"WECC_SCE",
],
"tx": [
"ERC_PHDL",
"ERC_REST",
"ERC_WEST",
],
"upper_midwest": [
"MIS_MAPP",
"SPP_WAUE",
"MIS_MNWI",
"MIS_MIDA",
"MIS_IA",
"MIS_IL",
"MIS_INKY",
],
"lower_midwest": [
"SPP_N",
"SPP_WEST",
"SPP_SPS",
"SPP_NEBR",
],
"miso_s": [
"MIS_LA",
"MIS_WOTA",
"MIS_AMSO",
"MIS_AR",
"MIS_MO",
"S_D_AECI",
"MIS_D_MS",
],
"great_lakes": [
"MIS_WUMS",
"MIS_LMI",
],
"pjm_s": [
"PJM_AP",
"PJM_ATSI",
"PJM_COMD",
"PJM_Dom",
"PJM_West",
"S_C_KY",
],
"pj_pa": [
"PJM_PENE",
"PJM_WMAC",
],
"pjm_md_nj": ["PJM_EMAC", "PJM_SMAC", "PJM_NJLand"],
"ny": [
"NY_Z_A",
"NY_Z_B",
"NY_Z_C&E",
"NY_Z_D",
"NY_Z_F",
"NY_Z_G-I",
"NY_Z_J",
],
"tva": [
"S_C_TVA",
],
"south": [
"S_SOU",
],
"fl": ["FRCC"],
"vaca": ["S_VACA"],
"ne": [
"NY_Z_K",
"NENG_CT",
"NENG_ME",
"NENGREST",
],
}
rev_region_mapping = reverse_dict_of_lists(tx_capex_region_map)
spur_costs_2013 = {
"wecc": 3900,
"ca": 3900 * 2.25, # According to Reeds docs, CA is 2.25x the rest of WECC
"tx": 3900,
"upper_midwest": 3900,
"lower_midwest": 3800,
"miso_s": 3900 * 2.25,
"great_lakes": 4100,
"pjm_s": 3900 * 2.25,
"pj_pa": 3900 * 2.25,
"pjm_md_nj": 3900 * 2.25,
"ny": 3900 * 2.25,
"tva": 3800,
"south": 4950,
"fl": 4100,
"vaca": 3800,
"ne": 3900 * 2.25,
}
spur_costs_2017 = {
region: inflation_price_adjustment(cost, 2013, ATB_USD_YEAR)
for region, cost in spur_costs_2013.items()
}
tx_costs_2013 = {
"wecc": 1350,
"ca": 1350 * 2.25, # According to Reeds docs, CA is 2.25x the rest of WECC
"tx": 1350,
"upper_midwest": 900,
"lower_midwest": 900,
"miso_s": 1750,
"great_lakes": 1050,
"pjm_s": 1350,
"pj_pa": 1750,
"pjm_md_nj": 4250, # Bins are $1500 wide - assume max bin is $750 above max
"ny": 2750,
"tva": 1050,
"south": 1350,
"fl": 1350,
"vaca": 900,
"ne": 4250, # Bins are $1500 wide - assume max bin is $750 above max
}
tx_costs_2017 = {
region: inflation_price_adjustment(cost, 2013, ATB_USD_YEAR)
for region, cost in tx_costs_2013.items()
}
spur_line_wacc = 0.069
spur_line_investment_years = 60
def load_atb_capex_wacc():
settings = {
"atb_cap_recovery_years": 20,
"atb_financial_case": "Market",
"atb_cost_case": "Mid",
"atb_usd_year": 2017,
"target_usd_year": ATB_USD_YEAR,
"pv_ac_dc_ratio": 1.34,
"cost_multiplier_region_map": cost_multiplier_region_map,
"atb_data_year": ATB_DATA_YEAR,
"atb_new_gen": [
["UtilityPV", "LosAngeles", "Mid", 1],
["LandbasedWind", "LTRG4", "Mid", 1],
],
}
atb_costs = fetch_atb_costs(pudl_engine, settings)
solarpv_2030_capex = atb_costs.query(
"technology=='UtilityPV' & cost_case=='Mid'"
# " & financial_case=='Market'
"& basis_year==2030 & tech_detail=='LosAngeles'"
)["capex_mw"].values[0]
wind_2030_capex = atb_costs.query(
"technology=='LandbasedWind' & cost_case=='Mid'"
# " & financial_case=='Market'
"& basis_year==2030"
# & tech_detail=='LTRG1'"
)["capex_mw"].values[0]
solarpv_2030_wacc = atb_costs.query(
"technology=='UtilityPV' & cost_case=='Mid'"
# " & financial_case=='Market'
"& basis_year==2030 & tech_detail=='LosAngeles'"
)["wacc_nominal"].values[0]
wind_2030_wacc = atb_costs.query(
"technology=='LandbasedWind' & cost_case=='Mid'"
# " & financial_case=='Market'
"& basis_year==2030"
# & tech_detail=='LTRG1'"
)["wacc_nominal"].values[0]
solarpv_2030_fom = atb_costs.query(
"technology=='UtilityPV' & cost_case=='Mid'"
# " & financial_case=='Market'
"& basis_year==2030 & tech_detail=='LosAngeles'"
)["fixed_o_m_mw"].values[0]
wind_2030_fom = atb_costs.query(
"technology=='LandbasedWind' & cost_case=='Mid'"
# " & financial_case=='Market'
"& basis_year==2030"
# & tech_detail=='LTRG1'"
)["fixed_o_m_mw"].values[0]
financials_dict = {
"capex_mw": {"wind": wind_2030_capex, "solarpv": solarpv_2030_capex},
"wacc": {"wind": wind_2030_wacc, "solarpv": solarpv_2030_wacc},
"fom_mw": {"wind": wind_2030_fom, "solarpv": solarpv_2030_fom},
}
return financials_dict
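# Shape of the dict returned above (illustrative only; the values come from the
# ATB database queried in load_atb_capex_wacc):
#   financials = load_atb_capex_wacc()
#   financials["capex_mw"]["wind"]     # 2030 land-based wind capex per MW
#   financials["wacc"]["solarpv"]      # 2030 utility PV nominal WACC
#   financials["fom_mw"]["solarpv"]    # 2030 utility PV fixed O&M per MW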
def load_regional_cost_multipliers():
regional_cost_multipliers = pd.read_csv(
"AEO_2020_regional_cost_corrections.csv", index_col=0
)
regional_cost_multipliers = regional_cost_multipliers.fillna(1)
return regional_cost_multipliers
def load_site_locations(folder=Path.cwd(), as_gdf=True):
site_locations = pd.read_csv(folder / "RUC_LatLonSites.csv", dtype={"Site": str})
site_locations["Site"] = site_locations["Site"].str.zfill(6)
if as_gdf:
site_locations = gpd.GeoDataFrame(
site_locations,
crs="EPSG:4326",
geometry=gpd.points_from_xy(
site_locations.Longitude,
site_locations.Latitude,
),
)
return site_locations
def fix_geometries(gdf):
region_polys = {}
fixed_regions = {}
for region in gdf.index:
region_polys[region] = []
try:
for i in range(len(gdf.loc[region, "geometry"])):
region_polys[region].append(
shapely.geometry.Polygon(gdf.loc[region, "geometry"][i].exterior)
)
except TypeError:
region_polys[region].append(
shapely.geometry.Polygon(gdf.loc[region, "geometry"].exterior)
)
fixed_regions[region] = shapely.geometry.MultiPolygon(region_polys[region])
gdf.geometry = [x for x in fixed_regions.values()]
return gdf
def load_substations(min_kv=161):
substation_gdf = gpd.read_file(
CWD / "Electric_Substations" / "Electric_Substations.shp"
)
# substation_gdf = substation_gdf.to_crs(epsg=4326)
substation_gdf = substation_gdf.loc[
(substation_gdf["TYPE"] == "SUBSTATION")
& (substation_gdf["STATUS"].isin(["IN SERVICE", "UNDER CONST"]))
& (substation_gdf["MAX_VOLT"] >= min_kv),
["ID", "MAX_VOLT", "MIN_VOLT", "geometry", "STATE"],
]
substation_gdf = substation_gdf.rename(
columns={"ID": "substation_id", "STATE": "substation_state"}
)
substation_gdf["latitude"] = substation_gdf.geometry.y
substation_gdf["longitude"] = substation_gdf.geometry.x
return substation_gdf
def load_ipm_shapefile(filetype="geojson"):
"""Load the IPM shapefile or geojson file.
Parameters
----------
filetype : str, optional
        Either "shp" or "geojson", by default "geojson"
Returns
-------
GeoDataFrame
IPM_Region (region names) and geometry columns
"""
print("loading IPM shapefile")
if filetype.lower() == "shp":
file_path = IPM_SHAPEFILE_PATH
elif filetype.lower() == "geojson":
file_path = IPM_GEOJSON_PATH
else:
raise ValueError(
f"Parameter 'filetype' must be 'shp' or 'geojson', not {filetype}"
)
ipm_regions = gpd.read_file(file_path)
ipm_regions = ipm_regions.to_crs(epsg=4326)
ipm_regions = fix_geometries(ipm_regions)
return ipm_regions
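# Example call (illustrative; the underlying file paths come from powergenome.params):
#   ipm_regions = load_ipm_shapefile("geojson")
#   ipm_regions.plot()  # quick visual check of the repaired region geometries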
def load_metro_areas_shapefile():
shpfile_path = (
CWD
/ "USA_Core_Based_Statistical_Area" # / "USA_Core_Based_Statistical_Area.shp"
)
metro_areas = gpd.read_file(shpfile_path)
metro_areas = metro_areas.to_crs(epsg=4326)
corrected_metro_centroids = pd.read_csv(
CWD.parent / "bin" / "msa_urban_centroids.csv"
)
corrected_metro_centroids["CBSA_ID"] = corrected_metro_centroids["CBSA_ID"].astype(
"str"
)
corrected_metro_centroids = corrected_metro_centroids.set_index("CBSA_ID")
corrected_metro_centroids = gpd.GeoDataFrame(
corrected_metro_centroids,
geometry=points_from_xy(
corrected_metro_centroids["msa_longitude"],
corrected_metro_centroids["msa_latitude"],
),
crs="EPSG:4326",
)
metro_areas["center"] = find_centroid(metro_areas)
metro_areas["corrected_center"] = metro_areas["CBSA_ID"].map(
corrected_metro_centroids["geometry"]
)
metro_areas["msa_center"] = metro_areas["center"]
metro_areas.loc[~metro_areas["corrected_center"].isna(), "center"] = metro_areas[
"corrected_center"
]
keep_cols = [
"CBSA_ID",
"NAME",
"CBSA_TYPE",
"POPULATION",
"center",
"msa_center",
"geometry",
]
# metro_areas["geometry"] = metro_areas["center"]
metro_areas = metro_areas.loc[:, keep_cols]
metro_areas["metro_id"] = metro_areas["CBSA_ID"]
metro_areas.columns = metro_areas.columns.str.lower()
metro_areas["state"] = metro_areas["name"].str.split(", ").str[-1]
metro_areas = metro_areas.loc[~metro_areas.state.isin(["AK", "HI", "PR"]), :]
NY_Z_J_lon_lat = (-73.930488, 40.695448)
NY_Z_K_lon_lat = (-73.008906, 40.840391)
extra_metros = pd.DataFrame(
[["NY_Z_J", 1e6], ["NY_Z_K", 1e6]], columns=["metro_id", "population"]
)
extra_metros = gpd.GeoDataFrame(
extra_metros,
geometry=points_from_xy(*zip(NY_Z_J_lon_lat, NY_Z_K_lon_lat)),
crs="EPSG:4326",
)
extra_metros["center"] = extra_metros["geometry"]
metro_areas = pd.concat([metro_areas, extra_metros], ignore_index=True, sort=False)
return metro_areas
def load_us_states_gdf():
us_states = gpd.read_file(
"https://eric.clst.org/assets/wiki/uploads/Stuff/gz_2010_us_040_00_5m.json"
)
drop_states = ["Puerto Rico", "Alaska", "Hawaii"]
us_states = us_states.loc[~(us_states["NAME"].isin(drop_states)), :]
us_states = us_states.reset_index(drop=True)
return us_states
def load_cpa_gdf(sheet, target_crs, slope_filter=None, layer=None):
# if layer is not None:
# cpa_gdf = gpd.read_file(filepath, layer=layer)
# else:
# cpa_gdf = gpd.read_file(filepath)
cpa_gdf = pd.read_excel("NZA_CandidateProjectArea_Base_PG.xlsx", sheet_name=sheet)
if slope_filter:
cpa_gdf = cpa_gdf.loc[cpa_gdf["m_slope"] <= slope_filter, :]
cpa_gdf = cpa_gdf.reset_index(drop=True)
cpa_gdf = gpd.GeoDataFrame(
cpa_gdf,
geometry=gpd.points_from_xy(cpa_gdf.CENTROID_X, cpa_gdf.CENTROID_Y),
crs="EPSG:4326",
)
cpa_gdf = cpa_gdf.to_crs(target_crs)
# centroid = find_centroid(cpa_gdf)
cpa_gdf["Latitude"] = cpa_gdf.CENTROID_Y
cpa_gdf["Longitude"] = cpa_gdf.CENTROID_X
cpa_gdf["cpa_id"] = cpa_gdf.index
return cpa_gdf
def load_gen_profiles(site_list, resource, variable):
if resource.lower() == "wind":
resource = "Wind"
resource_path = VCE_WIND_PATH
elif resource.lower() == "solarpv":
resource = "SolarPV"
resource_path = VCE_SOLAR_PATH
site_profiles = {}
for s in site_list:
fpath = f"Site_{s}_{resource}.nc4"
site_data = netCDF4.Dataset(resource_path / fpath)
gen_profile = np.array(site_data[variable])
site_profiles[s] = gen_profile
df = | pd.DataFrame(site_profiles) | pandas.DataFrame |
"""Analyze trades"""
import sqlite3
from contextlib import closing
from datetime import datetime
import pandas as pd
select_sql = """
SELECT * FROM trades
WHERE time >= ? AND time <= ?
"""
def load_trades(db_file, start_time, end_time):
"""Load trades from db_file in given time range."""
conn = sqlite3.connect(db_file)
with closing(conn) as db:
df = pd.read_sql(select_sql, db, params=(start_time, end_time))
# We can't use detect_types=sqlite3.PARSE_DECLTYPES here since Go is
# inserting time zone and Python's sqlite3 doesn't handle it.
# See https://bugs.python.org/issue29099
df["time"] = | pd.to_datetime(df["time"]) | pandas.to_datetime |
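# --- Added usage sketch (illustrative, not part of the original module) --------
# load_trades() is truncated by this excerpt just after parsing the "time"
# column; it presumably returns df. The database path and date range below are
# placeholders, and passing datetime objects relies on sqlite3's default adapters.
if __name__ == "__main__":
    start = datetime(2021, 1, 1)
    end = start + timedelta(days=7)
    trades = load_trades("trades.db", start, end)
    print(trades.head())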
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2016--, Biota Technology.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import division
from unittest import TestCase, main
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sourcetracker._sourcetracker import (intersect_and_sort_samples,
collapse_source_data,
subsample_dataframe,
validate_gibbs_input,
validate_gibbs_parameters,
collate_gibbs_results,
get_samples,
generate_environment_assignments,
cumulative_proportions,
single_sink_feature_table,
ConditionalProbability,
gibbs_sampler, gibbs)
from sourcetracker._plot import plot_heatmap
class TestValidateGibbsInput(TestCase):
def setUp(self):
self.index = ['s%s' % i for i in range(5)]
self.columns = ['f%s' % i for i in range(4)]
def test_no_errors_(self):
# A table where nothing is wrong, no changes expected.
data = np.random.randint(0, 10, size=20).reshape(5, 4)
sources = pd.DataFrame(data.astype(np.int32), index=self.index,
columns=self.columns)
exp_sources = pd.DataFrame(data.astype(np.int32), index=self.index,
columns=self.columns)
obs = validate_gibbs_input(sources)
pd.util.testing.assert_frame_equal(obs, sources)
# Sources and sinks.
sinks = pd.DataFrame(data, index=self.index, columns=self.columns)
exp_sinks = pd.DataFrame(data.astype(np.int32), index=self.index,
columns=self.columns)
obs_sources, obs_sinks = validate_gibbs_input(sources, sinks)
pd.util.testing.assert_frame_equal(obs_sources, exp_sources)
pd.util.testing.assert_frame_equal(obs_sinks, exp_sinks)
def test_float_data(self):
# Data is float, expect rounding.
data = np.random.uniform(0, 1, size=20).reshape(5, 4)
sources = pd.DataFrame(data, index=self.index, columns=self.columns)
exp_sources = pd.DataFrame(np.zeros(20).reshape(5, 4).astype(np.int32),
index=self.index, columns=self.columns)
obs_sources = validate_gibbs_input(sources)
pd.util.testing.assert_frame_equal(obs_sources, exp_sources)
data = np.random.uniform(0, 1, size=20).reshape(5, 4) + 1.
sources = pd.DataFrame(data, index=self.index, columns=self.columns)
exp_sources = pd.DataFrame(np.ones(20).reshape(5, 4).astype(np.int32),
index=self.index, columns=self.columns)
obs_sources = validate_gibbs_input(sources)
pd.util.testing.assert_frame_equal(obs_sources, exp_sources)
# Sources and sinks.
data = np.random.uniform(0, 1, size=20).reshape(5, 4) + 5
sinks = pd.DataFrame(data,
index=self.index,
columns=self.columns)
exp_sinks = \
pd.DataFrame(5 * np.ones(20).reshape(5, 4).astype(np.int32),
index=self.index,
columns=self.columns)
obs_sources, obs_sinks = validate_gibbs_input(sources, sinks)
pd.util.testing.assert_frame_equal(obs_sources, exp_sources)
pd.util.testing.assert_frame_equal(obs_sinks, exp_sinks)
def test_negative_data(self):
# Values less than 0, expect errors.
data = np.random.uniform(0, 1, size=20).reshape(5, 4) - 1.
sources = pd.DataFrame(data,
index=self.index,
columns=self.columns)
self.assertRaises(ValueError, validate_gibbs_input, sources)
data = -1 * np.random.randint(0, 20, size=20).reshape(5, 4)
sources = pd.DataFrame(data,
index=self.index,
columns=self.columns)
self.assertRaises(ValueError, validate_gibbs_input, sources)
# Sources and sinks.
data = np.random.randint(0, 10, size=20).reshape(5, 4) + 1
sources = pd.DataFrame(data.astype(np.int32),
index=self.index,
columns=self.columns)
sinks = pd.DataFrame(-10 * data,
index=self.index,
columns=self.columns)
self.assertRaises(ValueError, validate_gibbs_input, sources, sinks)
def test_nan_data(self):
# nans, expect errors.
data = np.random.uniform(0, 1, size=20).reshape(5, 4)
data[3, 2] = np.nan
sources = pd.DataFrame(data,
index=self.index,
columns=self.columns)
self.assertRaises(ValueError, validate_gibbs_input, sources)
# Sources and sinks.
data = np.random.randint(0, 10, size=20).reshape(5, 4) + 1.
sources = pd.DataFrame(data,
index=self.index,
columns=self.columns)
data[1, 3] = np.nan
sinks = pd.DataFrame(data,
index=self.index,
columns=self.columns)
self.assertRaises(ValueError, validate_gibbs_input, sources, sinks)
def test_non_numeric_data(self):
# data contains at least some non-numeric columns, expect errors.
data = np.random.randint(0, 10, size=20).reshape(5, 4)
sources = pd.DataFrame(data.astype(np.int32),
index=self.index,
columns=self.columns)
sources.iloc[2, 2] = '3.a'
self.assertRaises(ValueError, validate_gibbs_input, sources)
# Sources and sinks.
data = np.random.randint(0, 10, size=20).reshape(5, 4)
sources = pd.DataFrame(data.astype(np.int32),
index=self.index,
columns=self.columns)
sinks = pd.DataFrame(data.astype(np.int32),
index=self.index,
columns=self.columns)
sinks.iloc[2, 2] = '3'
self.assertRaises(ValueError, validate_gibbs_input, sources, sinks)
def test_columns_identical(self):
# Columns are identical, no error expected.
data = np.random.randint(0, 10, size=20).reshape(5, 4)
sources = pd.DataFrame(data.astype(np.int32),
index=self.index,
columns=self.columns)
data = np.random.randint(0, 10, size=200).reshape(50, 4)
sinks = pd.DataFrame(data.astype(np.int32),
index=['s%s' % i for i in range(50)],
columns=self.columns)
obs_sources, obs_sinks = validate_gibbs_input(sources, sinks)
pd.util.testing.assert_frame_equal(obs_sources, sources)
pd.util.testing.assert_frame_equal(obs_sinks, sinks)
def test_columns_non_identical(self):
# Columns are not identical, error expected.
data = np.random.randint(0, 10, size=20).reshape(5, 4)
sources = pd.DataFrame(data.astype(np.int32),
index=self.index,
columns=self.columns)
data = np.random.randint(0, 10, size=200).reshape(50, 4)
sinks = pd.DataFrame(data.astype(np.int32),
index=['s%s' % i for i in range(50)],
columns=['feature%s' % i for i in range(4)])
self.assertRaises(ValueError, validate_gibbs_input, sources, sinks)
class TestValidateGibbsParams(TestCase):
def test_acceptable_inputs(self):
# All values acceptable, expect no errors.
alpha1 = .001
alpha2 = .1
beta = 10
restarts = 10
draws_per_restart = 1
burnin = 100
delay = 1
self.assertTrue(validate_gibbs_parameters(alpha1, alpha2, beta,
restarts, draws_per_restart, burnin, delay))
alpha1 = alpha2 = beta = 0
self.assertTrue(validate_gibbs_parameters(alpha1, alpha2, beta,
restarts, draws_per_restart, burnin, delay))
def test_not_acceptable_inputs(self):
# One of the float params is negative.
alpha1 = -.001
alpha2 = .1
beta = 10
restarts = 10
draws_per_restart = 1
burnin = 100
delay = 1
self.assertFalse(validate_gibbs_parameters(alpha1, alpha2, beta,
restarts, draws_per_restart, burnin, delay))
# One of the int params is 0.
alpha1 = .001
restarts = 0
self.assertFalse(validate_gibbs_parameters(alpha1, alpha2, beta,
restarts, draws_per_restart, burnin, delay))
# One of the int params is a float.
restarts = 1.34
self.assertFalse(validate_gibbs_parameters(alpha1, alpha2, beta,
restarts, draws_per_restart, burnin, delay))
# A param is a string.
restarts = '3.2232'
self.assertFalse(validate_gibbs_parameters(alpha1, alpha2, beta,
restarts, draws_per_restart, burnin, delay))
# A param is a nan.
restarts = 3
alpha1 = np.nan
self.assertFalse(validate_gibbs_parameters(alpha1, alpha2, beta,
restarts, draws_per_restart, burnin, delay))
class TestIntersectAndSortSamples(TestCase):
def test_partially_overlapping_tables(self):
# Test an example where there are unshared samples present in both
# feature and sample tables. Notice that order is different between
# the samples that are shared between both tables. The order of samples
# in the returned tables is set by the ordering done in np.intersect1d.
sdata_c1 = [3.1, 'red', 5]
sdata_c2 = [3.6, 'yellow', 7]
sdata_c3 = [3.9, 'yellow', -2]
sdata_c4 = [2.5, 'red', 5]
sdata_c5 = [6.7, 'blue', 10]
samples = ['s1', 's4', 's2', 's3', 'sX']
headers = ['pH', 'color', 'day']
stable = pd.DataFrame([sdata_c1, sdata_c4, sdata_c2, sdata_c3,
sdata_c5], index=samples, columns=headers)
fdata = np.arange(90).reshape(9, 10)
samples = ['s%i' % i for i in range(3, 12)]
columns = ['o%i' % i for i in range(1, 11)]
ftable = pd.DataFrame(fdata, index=samples, columns=columns)
exp_ftable = pd.DataFrame(fdata[[1, 0], :], index=['s4', 's3'],
columns=columns)
exp_stable = pd.DataFrame([sdata_c4, sdata_c3], index=['s4', 's3'],
columns=headers)
obs_stable, obs_ftable = intersect_and_sort_samples(stable, ftable)
pd.util.testing.assert_frame_equal(obs_stable, exp_stable)
pd.util.testing.assert_frame_equal(obs_ftable, exp_ftable)
# No shared samples, expect a ValueError.
ftable.index = ['ss%i' % i for i in range(9)]
self.assertRaises(ValueError, intersect_and_sort_samples, stable,
ftable)
# All samples shared, expect no changes.
fdata = np.arange(50).reshape(5, 10)
samples = ['s1', 's4', 's2', 's3', 'sX']
columns = ['o%i' % i for i in range(10)]
ftable = pd.DataFrame(fdata, index=samples, columns=columns)
exp_ftable = ftable.loc[stable.index, :]
exp_stable = stable
obs_stable, obs_ftable = intersect_and_sort_samples(stable, ftable)
pd.util.testing.assert_frame_equal(obs_stable, exp_stable)
pd.util.testing.assert_frame_equal(obs_ftable, exp_ftable)
class TestGetSamples(TestCase):
def tests(self):
# Make a dataframe which contains mixed data to test.
col0 = ['a', 'a', 'a', 'a', 'b']
col1 = [3, 2, 3, 1, 3]
col2 = ['red', 'red', 'blue', 255, 255]
headers = ['sample_location', 'num_reps', 'color']
samples = ['s1', 's2', 's3', 's4', 's5']
sample_metadata = \
pd.DataFrame.from_dict({k: v for k, v in zip(headers,
[col0, col1, col2])})
sample_metadata.index = samples
obs = get_samples(sample_metadata, 'sample_location', 'b')
exp = pd.Index(['s5'], dtype='object')
pd.util.testing.assert_index_equal(obs, exp)
obs = get_samples(sample_metadata, 'sample_location', 'a')
exp = pd.Index(['s1', 's2', 's3', 's4'], dtype='object')
pd.util.testing.assert_index_equal(obs, exp)
obs = get_samples(sample_metadata, 'color', 255)
exp = pd.Index(['s4', 's5'], dtype='object')
pd.util.testing.assert_index_equal(obs, exp)
obs = get_samples(sample_metadata, 'num_reps', 3)
exp = pd.Index(['s1', 's3', 's5'], dtype='object')
pd.util.testing.assert_index_equal(obs, exp)
class TestCollapseSourceData(TestCase):
def test_example1(self):
# Simple example with 'sum' as collapse mode.
samples = ['sample1', 'sample2', 'sample3', 'sample4']
category = 'pH'
values = [3.0, 0.4, 3.0, 3.0]
stable = pd.DataFrame(values, index=samples, columns=[category])
fdata = np.array([[10, 50, 10, 70],
[0, 25, 10, 5],
[0, 25, 10, 5],
[100, 0, 10, 5]])
ftable = pd.DataFrame(fdata, index=stable.index,
columns=map(str, np.arange(4)))
source_samples = ['sample1', 'sample2', 'sample3']
method = 'sum'
obs = collapse_source_data(stable, ftable, source_samples, category,
method)
exp_data = np.vstack((fdata[1, :], fdata[0, :] + fdata[2, :]))
exp_index = [0.4, 3.0]
exp = pd.DataFrame(exp_data.astype(np.int32), index=exp_index,
columns=map(str, np.arange(4)))
exp.index.name = 'collapse_col'
pd.util.testing.assert_frame_equal(obs, exp)
# Example with collapse mode 'mean'. This will cause non-integer values
# to be present, which the validate_gibbs_input should catch.
source_samples = ['sample1', 'sample2', 'sample3', 'sample4']
method = 'mean'
obs = collapse_source_data(stable, ftable, source_samples, category,
method)
exp_data = np.vstack((fdata[1, :],
fdata[[0, 2, 3], :].mean(0))).astype(np.int32)
exp_index = [0.4, 3.0]
exp = pd.DataFrame(exp_data.astype(np.int32), index=exp_index,
columns=map(str, np.arange(4)))
exp.index.name = 'collapse_col'
pd.util.testing.assert_frame_equal(obs, exp)
def test_example2(self):
# Test on another arbitrary example.
data = np.arange(200).reshape(20, 10)
oids = ['o%s' % i for i in range(20)]
sids = ['s%s' % i for i in range(10)]
ftable = pd.DataFrame(data.T, index=sids, columns=oids)
_stable = \
{'s4': {'cat1': '2', 'cat2': 'x', 'cat3': 'A', 'cat4': 'D'},
's0': {'cat1': '1', 'cat2': 'y', 'cat3': 'z', 'cat4': 'D'},
's1': {'cat1': '1', 'cat2': 'x', 'cat3': 'A', 'cat4': 'C'},
's3': {'cat1': '2', 'cat2': 'y', 'cat3': 'z', 'cat4': 'A'},
's2': {'cat1': '2', 'cat2': 'x', 'cat3': 'A', 'cat4': 'D'},
's6': {'cat1': '1', 'cat2': 'y', 'cat3': 'z', 'cat4': 'R'},
's5': {'cat1': '2', 'cat2': 'x', 'cat3': 'z', 'cat4': '0'},
's7': {'cat1': '2', 'cat2': 'x', 'cat3': 'z', 'cat4': '0'},
's9': {'cat1': '2', 'cat2': 'x', 'cat3': 'z', 'cat4': '0'},
's8': {'cat1': '2', 'cat2': 'x', 'cat3': 'z', 'cat4': '0'}}
stable = pd.DataFrame(_stable).T
category = 'cat4'
source_samples = ['s4', 's9', 's0', 's2']
method = 'sum'
obs = collapse_source_data(stable, ftable, source_samples, category,
method)
exp_index = np.array(['0', 'D'])
exp_data = np.array([[9, 19, 29, 39, 49, 59, 69, 79, 89, 99, 109, 119,
129, 139, 149, 159, 169, 179, 189, 199],
[6, 36, 66, 96, 126, 156, 186, 216, 246, 276, 306,
336, 366, 396, 426, 456, 486, 516, 546, 576]],
dtype=np.int32)
exp = pd.DataFrame(exp_data, index=exp_index, columns=oids)
exp.index.name = 'collapse_col'
pd.util.testing.assert_frame_equal(obs, exp)
class TestSubsampleDataframe(TestCase):
def test_no_errors_expected(self):
# Testing this function deterministically is hard because cython is
# generating the PRNG calls. We'll settle for ensuring that the sums
# are correct.
fdata = np.array([[10, 50, 10, 70],
[0, 25, 10, 5],
[0, 25, 10, 5],
[100, 0, 10, 5]])
ftable = pd.DataFrame(fdata, index=['s1', 's2', 's3', 's4'],
columns=map(str, np.arange(4)))
n = 30
obs = subsample_dataframe(ftable, n)
self.assertTrue((obs.sum(axis=1) == n).all())
def test_subsample_with_replacement(self):
# Testing this function deterministically is hard because cython is
# generating the PRNG calls. We'll settle for ensuring that the sums
# are correct.
fdata = np.array([[10, 50, 10, 70],
[0, 25, 10, 5],
[0, 25, 10, 5],
[100, 0, 10, 5]])
ftable = pd.DataFrame(fdata, index=['s1', 's2', 's3', 's4'],
columns=map(str, np.arange(4)))
n = 30
obs = subsample_dataframe(ftable, n, replace=True)
self.assertTrue((obs.sum(axis=1) == n).all())
def test_shape_doesnt_change(self):
# Test that when features are removed by subsampling, the shape of the
        # table does not change. Although rarefaction is stochastic, the
        # probability that the table below does not lose at least one feature
        # during rarefaction (and thus fails to exercise the condition we are
        # interested in testing) is nearly 0.
fdata = np.array([[0, 0, 0, 1e4],
[0, 0, 1, 1e4],
[0, 1, 0, 1e4],
[1, 0, 0, 1e4]]).astype(int)
ftable = pd.DataFrame(fdata, index=['s1', 's2', 's3', 's4'],
columns=map(str, np.arange(4)))
n = 10
obs = subsample_dataframe(ftable, n)
self.assertTrue((obs.sum(axis=1) == n).all())
self.assertEqual(obs.shape, ftable.shape)
class TestDataAggregationFunctions(TestCase):
'''Test that returned data is collated and written correctly.'''
def test_cumulative_proportions(self):
# 4 draws, 4 sources + unknown, 3 sinks
sink1_envcounts = np.array([[10, 100, 15, 0, 25],
[150, 0, 0, 0, 0],
[30, 30, 30, 30, 30],
[0, 11, 7, 35, 97]])
sink2_envcounts = np.array([[100, 10, 15, 0, 25],
[100, 0, 50, 0, 0],
[0, 60, 30, 30, 30],
[7, 11, 0, 35, 97]])
sink3_envcounts = np.array([[100, 10, 10, 5, 25],
[70, 20, 30, 30, 0],
[10, 30, 50, 30, 30],
[0, 27, 100, 20, 3]])
all_envcounts = [sink1_envcounts, sink2_envcounts, sink3_envcounts]
sink_ids = np.array(['sink1', 'sink2', 'sink3'])
source_ids = np.array(['source1', 'source2', 'source3', 'source4'])
cols = list(source_ids) + ['Unknown']
prp_r1 = np.array([190, 141, 52, 65, 152]) / 600.
prp_r2 = np.array([207, 81, 95, 65, 152]) / 600.
prp_r3 = np.array([180, 87, 190, 85, 58]) / 600.
prp_data = np.vstack([prp_r1, prp_r2, prp_r3])
prp_std_data = np.zeros((3, 5), dtype=np.float64)
prp_std_data[0, 0] = (np.array([10, 150, 30, 0]) / 600.).std()
prp_std_data[0, 1] = (np.array([100, 0, 30, 11]) / 600.).std()
prp_std_data[0, 2] = (np.array([15, 0, 30, 7]) / 600.).std()
prp_std_data[0, 3] = (np.array([0, 0, 30, 35]) / 600.).std()
prp_std_data[0, 4] = (np.array([25, 0, 30, 97]) / 600.).std()
prp_std_data[1, 0] = (np.array([100, 100, 0, 7]) / 600.).std()
prp_std_data[1, 1] = (np.array([10, 0, 60, 11]) / 600.).std()
prp_std_data[1, 2] = (np.array([15, 50, 30, 0]) / 600.).std()
prp_std_data[1, 3] = (np.array([0, 0, 30, 35]) / 600.).std()
prp_std_data[1, 4] = (np.array([25, 0, 30, 97]) / 600.).std()
prp_std_data[2, 0] = (np.array([100, 70, 10, 0]) / 600.).std()
prp_std_data[2, 1] = (np.array([10, 20, 30, 27]) / 600.).std()
prp_std_data[2, 2] = (np.array([10, 30, 50, 100]) / 600.).std()
prp_std_data[2, 3] = (np.array([5, 30, 30, 20]) / 600.).std()
prp_std_data[2, 4] = (np.array([25, 0, 30, 3]) / 600.).std()
exp_prp = pd.DataFrame(prp_data, index=sink_ids, columns=cols)
exp_prp_std = pd.DataFrame(prp_std_data, index=sink_ids, columns=cols)
obs_prp, obs_prp_std = cumulative_proportions(all_envcounts, sink_ids,
source_ids)
pd.util.testing.assert_frame_equal(obs_prp, exp_prp)
pd.util.testing.assert_frame_equal(obs_prp_std, exp_prp_std)
def test_single_sink_feature_table(self):
# 4 draws, depth of sink = 10, 5 sources + Unknown.
final_env_assignments = np.array([[5, 0, 0, 0, 2, 0, 1, 0, 3, 1],
[1, 1, 3, 3, 2, 2, 1, 1, 1, 1],
[4, 1, 4, 4, 4, 4, 1, 1, 3, 2],
[2, 1, 0, 5, 5, 5, 5, 1, 0, 2]])
# notice that each row is the same - they are determined by
# `generate_taxon_sequence` before the `gibbs_sampler` runs.
final_taxon_assignments = \
np.array([[0, 3, 3, 227, 550, 550, 550, 999, 999, 1100],
[0, 3, 3, 227, 550, 550, 550, 999, 999, 1100],
[0, 3, 3, 227, 550, 550, 550, 999, 999, 1100],
[0, 3, 3, 227, 550, 550, 550, 999, 999, 1100],
[0, 3, 3, 227, 550, 550, 550, 999, 999, 1100]])
# we are allowing more taxa than we have found in this sample, i.e. the
# largest value in `final_taxon_assignments` will be smaller than the
# largest index in the columns of the final table.
nfeatures = 1250
nsources = 5
data = np.zeros((nsources + 1, nfeatures), dtype=np.int32)
        # for the purpose of this test code, I'll increment data taxon by taxon.
data[np.array([5, 1, 4, 2]), 0] += 1
data[0, 3] += 3
data[1, 3] += 3
data[3, 3] += 1
data[4, 3] += 1
data[np.array([0, 3, 4, 5]), 227] += 1
data[0, 550] += 1
data[1, 550] += 3
data[2, 550] += 3
data[4, 550] += 2
data[5, 550] += 3
data[0, 999] += 2
data[1, 999] += 4
data[3, 999] += 2
data[1, 1100] += 2
data[2, 1100] += 2
exp_sources = ['source%s' % i for i in range(nsources)] + ['Unknown']
feature_ids = ['f%s' % i for i in range(1250)]
exp = | pd.DataFrame(data, index=exp_sources, columns=feature_ids) | pandas.DataFrame |
import pandas as pd
import numpy as np
import scipy.io
from netCDF4 import Dataset
# create tmp2m for each year
for year in range(1979, 2021):
print('preprocessing year:', year)
lat = np.arange(-90, 91)
lon = np.arange(0, 360)
times = pd.date_range('{}-01-01'.format(year), '{}-12-31'.format(year))
t_regrid = scipy.io.loadmat('regrid_{}.mat'.format(year))['regrid']
m = np.swapaxes(t_regrid, 0, 1)
lat, lon, dates = np.meshgrid(lat, lon, times, indexing='ij')
dates = dates.flatten()
lat = lat.flatten()
lon = lon.flatten()
arrays = [lat, lon, dates]
tuples = list(zip(*arrays))
indexnames = ['lat', 'lon', 'start_date']
index = pd.MultiIndex.from_tuples(tuples, names=indexnames)
s = pd.Series(m.flatten(), index=index)
s = s.to_frame()
s.columns = ['tmp2m']
s = s.dropna()
s.to_hdf('tmp2m.{}.verif.h5'.format(year), key='data')
# create tmp2m covering western us
# combine all the western us data and save it into one pandas dataframe
w_us = | pd.read_hdf('western_us_mask.h5') | pandas.read_hdf |
import subprocess
from io import StringIO
from datetime import datetime, timedelta
import os
import logging
import math
import humanize
import pandas as pd
import numpy as np
from params import SQUEUE_USER, UPLOAD_DIR
# Data sources are:
# SLURM (on ACCRE at Vanderbilt)
# Local Filesystem (as configured for DAX upload queue)
#
# Note that this app does not access XNAT or REDCap or any external source
#
# Data is cached in a pickle file named results.pkl. This data is written when the
# app first starts and then any time the user clicks Refresh Data. It is read
# any time the user changes the data filtering.
logging.basicConfig(
format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.DEBUG, datefmt='%Y-%m-%d %H:%M:%S')
SQUEUE_CMD = 'squeue -u '+','.join(SQUEUE_USER)+' --format="%all"'
DFORMAT = '%Y-%m-%d %H:%M:%S'
# we concat diskq status and squeue status to make a single status
# squeue states: CG,F, PR, S, ST
# diskq statuses: JOB_RUNNING, JOB_FAILED, NEED_TO_RUN, COMPLETE,
# UPLOADING, READY_TO_COMPLETE, READY_TO_UPLOAD
STATUS_MAP = {
'COMPLETENONE': 'COMPLETE',
'JOB_FAILEDNONE': 'FAILED',
'JOB_RUNNINGCD': 'RUNNING',
'JOB_RUNNINGCG': 'RUNNING',
'JOB_RUNNINGF': 'RUNNING',
'JOB_RUNNINGR': 'RUNNING',
'JOB_RUNNINGNONE': 'RUNNING',
'JOB_RUNNINGPD': 'PENDING',
'NONENONE': 'WAITING',
'READY_TO_COMPLETENONE': 'COMPLETE',
'READY_TO_UPLOADNONE': 'COMPLETE'}
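# Illustration of the lookup convention above: the diskq status and the squeue
# state are concatenated (with 'NONE' standing in for a missing side) and mapped
# to one display status, e.g.
#   STATUS_MAP.get('JOB_RUNNING' + 'PD')  # -> 'PENDING'
#   STATUS_MAP.get('NONE' + 'NONE')       # -> 'WAITING'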
JOB_TAB_COLS = [
'LABEL', 'PROJECT', 'STATUS', 'PROCTYPE', 'USER',
'JOBID', 'TIME', 'WALLTIME', 'LASTMOD']
SQUEUE_COLS = [
'NAME', 'ST', 'STATE', 'PRIORITY', 'JOBID', 'MIN_MEMORY',
'TIME', 'SUBMIT_TIME', 'START_TIME', 'TIME_LIMIT', 'TIME_LEFT', 'USER']
# Get fresh data from slurm and disk
def get_job_data():
# TODO: run each load in separate threads
# Load tasks in diskq
logging.debug('loading diskq')
diskq_df = load_diskq_queue()
# load squeue
logging.debug('loading squeue')
squeue_df = load_slurm_queue()
# TODO: load xnat if we want to identify lost jobs in a separate tab
# merge squeue data into task queue
logging.debug('merging data')
if diskq_df.empty and squeue_df.empty:
logging.debug('both empty')
df = pd.DataFrame(columns=diskq_df.columns.union(squeue_df.columns))
elif diskq_df.empty:
logging.debug('diskq empty')
df = squeue_df.reindex(squeue_df.columns.union(diskq_df.columns), axis=1)
elif squeue_df.empty:
logging.debug('squeue empty')
df = diskq_df.reindex(diskq_df.columns.union(squeue_df.columns), axis=1)
else:
df = | pd.merge(diskq_df, squeue_df, how='outer', on=['LABEL', 'USER']) | pandas.merge |
# multivariate multihead multistep
from keras.layers import Flatten
from keras.layers import ConvLSTM2D
from keras.layers.merge import concatenate
from numpy import array
from numpy import hstack
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
from keras.layers import Reshape
import numpy as np
import pandas as pd
import re
import matplotlib.pyplot as plt
import os
import gc
import joblib
import functions as func
from sklearn.model_selection import RandomizedSearchCV
from keras.utils import to_categorical
from sklearn.pipeline import Pipeline
from sklearn.metrics import make_scorer
from sklearn.preprocessing import StandardScaler
import time
from sklearn.metrics import fbeta_score
from sklearn.metrics import r2_score
from keras.models import Sequential
from keras.layers import LSTM
from keras.layers import RepeatVector
from keras.layers import TimeDistributed
from keras.wrappers.scikit_learn import KerasClassifier
from keras.wrappers.scikit_learn import KerasRegressor
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.optimizers import Adam
from keras.constraints import maxnorm
from keras.layers import Dropout
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVR
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.ensemble import RandomForestRegressor
from sklearn.multioutput import MultiOutputRegressor
import pickle
import tensorflow as tf
def R2_measure(y_true, y_pred):
return r2_score(y_true, y_pred)
def f2_measure(y_true, y_pred):
return fbeta_score(y_true, y_pred, labels=[1, 2], beta=2, average='micro')
def split_sequences(data, n_steps, n_step_out):
data = data.values
X, y = list(), list()
for i in range(len(data)):
end_ix = i + n_steps*6
if end_ix > len(data):
break
Kx = np.empty((1, 12))
for index in np.arange(i, i+(n_steps*6), step=6, dtype=int):
eachhour = index + 6
if eachhour > len(data) or i+(n_steps*6) > len(data):
break
a = data[index: eachhour, : (-1*n_step_out)]
hourlymean_x = np.round(np.mean(a, axis=0), decimals=2)
hourlymean_y = data[eachhour-1, (-1*n_step_out):]
hourlymean_x = hourlymean_x.reshape((1, hourlymean_x.shape[0]))
if index != i:
Kx = np.append(Kx, hourlymean_x, axis=0)
else:
Kx = hourlymean_x
X.append(Kx)
y.append(hourlymean_y)
# print(np.array(X).shape)
return np.array(X), np.array(y)
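# Illustrative call (added, not in the original): split_sequences() averages each
# block of six 10-minute rows into an hourly feature row and pairs n_steps such
# rows with the final row's last n_step_out columns as the target. Assuming the
# dataframe is laid out features-first / targets-last as the slicing implies:
#   X, y = split_sequences(df, n_steps=12, n_step_out=9)
#   # X.shape -> (samples, 12, n_feature_cols), y.shape -> (samples, 9)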
def temporal_horizon(df, pd_steps, target):
for pd_steps in [1, 3, 6, 12, 24, 36, 48, 60, 72]:
pd_steps = pd_steps * 6
target_values = df[[target]]
target_values = target_values.drop(
target_values.index[0: pd_steps], axis=0)
target_values.index = np.arange(0, len(target_values[target]))
df['Target_'+target+'_t'+str(pd_steps)] = target_values
df = df.drop(df.index[len(df.index)-(72*6): len(df.index)], axis=0)
return df
def create_reg_LSTM_model(input_dim, n_steps_in, n_features, n_steps_out, neurons=1, learn_rate=0.01, dropout_rate=0.0, weight_constraint=0, activation='sigmoid'):
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Reshape(target_shape=(
n_steps_in*totalsets, n_features), input_shape=(n_steps_in*n_features*totalsets,)))
model.add(tf.keras.layers.LSTM(neurons, activation=activation, return_sequences=True,
kernel_constraint=tf.keras.constraints.MaxNorm(weight_constraint)))
model.add(tf.keras.layers.Dropout(dropout_rate))
# , return_sequences=True))
model.add(tf.keras.layers.LSTM(neurons, activation=activation))
# model.add(Dense(neurons, activation=activation)) # Adding new layer
model.add(tf.keras.layers.Dense(n_steps_out))
opt = tf.keras.optimizers.Adam(lr=learn_rate)
model.compile(loss='mse', optimizer=opt)
# model.save(save_weights_only=True, best_model_only=True)
print('model: ' + str(model))
return model
def create_reg_NN_model(input_dim, n_steps_in, n_features, n_steps_out, neurons=1, learn_rate=0.01, dropout_rate=0.0, weight_constraint=0, activation='sigmoid'):
model = Sequential()
model.add(Dense(neurons, activation=activation,
kernel_constraint=maxnorm(weight_constraint), input_shape=(n_steps_in*n_features*totalsets,)))
model.add(Dropout(dropout_rate))
model.add(Dense(neurons, activation=activation))
model.add(Dense(neurons, activation=activation)) # adding new layer
model.add(Dense(n_steps_out))
opt = Adam(lr=learn_rate)
model.compile(loss='mse', optimizer=opt)
print('model: ' + str(model))
return model
def create_reg_endecodeLSTM_model(input_dim, n_steps_in, n_features, n_steps_out, neurons=1, learn_rate=0.01, activation='sigmoid'):
model = Sequential()
model.add(Reshape(target_shape=(
n_steps_in*totalsets, n_features), input_shape=(n_steps_in*n_features*totalsets,)))
model.add(LSTM(neurons, activation=activation,
input_shape=(n_steps_in*totalsets, n_features)))
model.add(RepeatVector(1))
model.add(LSTM(neurons, activation=activation, return_sequences=True))
model.add(TimeDistributed(Dense(n_steps_out)))
opt = Adam(lr=learn_rate)
model.compile(loss='mse', optimizer=opt)
return model
def create_reg_CNNenLSTM_model(input_dim, n_steps_in, n_features, n_steps_out, neurons=1, learn_rate=0.01, activation='sigmoid'):
model = Sequential()
model.add(Reshape(target_shape=(
n_steps_in*totalsets, n_features), input_shape=(n_steps_in*n_features*totalsets,)))
model.add(Conv1D(64, 1, activation=activation,
input_shape=(n_steps_in*totalsets, n_features)))
model.add(MaxPooling1D())
model.add(Flatten())
model.add(RepeatVector(1))
model.add(LSTM(neurons, activation=activation, return_sequences=True))
model.add(TimeDistributed(Dense(100, activation=activation)))
model.add(TimeDistributed(Dense(n_steps_out)))
opt = Adam(lr=learn_rate)
model.compile(loss='mse', optimizer=opt)
return model
def create_reg_ConvLSTM_model(input_dim, n_steps_in, n_features, n_steps_out, neurons=1, learn_rate=0.01, activation='sigmoid'):
# reshape from [samples, timesteps] into [samples, timesteps, rows, columns, features(channels)]
model = Sequential()
model.add(Reshape(target_shape=(
n_steps_in, totalsets, n_features, 1), input_shape=(n_steps_in*n_features*totalsets,)))
model.add(ConvLSTM2D(64, (1, 3), activation=activation,
input_shape=(n_steps_in, totalsets, n_features, 1)))
model.add(Flatten())
model.add(RepeatVector(1))
model.add(LSTM(neurons, activation=activation, return_sequences=True))
model.add(TimeDistributed(Dense(100, activation=activation)))
model.add(TimeDistributed(Dense(n_steps_out)))
opt = Adam(lr=learn_rate)
model.compile(loss='mse', optimizer=opt)
return model
def algofind(model_name, input_dim, cat, n_features, n_steps_out, params, n_jobs):
if cat == 0:
if model_name == 'endecodeLSTM':
activation, neurons, batch_size, epochs, learn_rate, n_steps_in = params
model = KerasRegressor(build_fn=create_reg_endecodeLSTM_model, input_dim=input_dim, activation=activation,
epochs=epochs, batch_size=batch_size, neurons=neurons, n_steps_in=int(n_steps_in), learn_rate=learn_rate, n_features=int(n_features), n_steps_out=int(n_steps_out), verbose=0)
elif model_name == 'CNNLSTM':
activation, neurons, batch_size, epochs, learn_rate, n_steps_in = params
model = KerasRegressor(build_fn=create_reg_CNNenLSTM_model, input_dim=input_dim, activation=activation,
epochs=epochs, batch_size=batch_size, neurons=neurons, n_steps_in=int(n_steps_in), learn_rate=learn_rate, n_features=int(n_features), n_steps_out=int(n_steps_out), verbose=0)
elif model_name == 'ConvEnLSTM':
# activation, neuron, bach size, epoch, learning rate, n_steps_in
activation, neurons, batch_size, epochs, learn_rate, n_steps_in = params
model = KerasRegressor(build_fn=create_reg_ConvLSTM_model, input_dim=input_dim, activation=activation,
epochs=epochs, batch_size=batch_size, neurons=neurons, n_steps_in=int(n_steps_in), learn_rate=learn_rate, n_features=int(n_features), n_steps_out=int(n_steps_out), verbose=0)
elif model_name == 'NN':
activation, neurons, batch_size, epochs, learn_rate, dropout_rate, weight_constraint, n_steps_in = params
model = KerasRegressor(build_fn=create_reg_NN_model, epochs=epochs, batch_size=batch_size,
input_dim=input_dim, n_steps_in=int(n_steps_in), n_features=int(n_features), n_steps_out=int(n_steps_out), neurons=neurons, learn_rate=learn_rate,
dropout_rate=dropout_rate, weight_constraint=weight_constraint, activation=activation, verbose=0)
elif model_name == 'LSTM': # _oneSonde
activation, neurons, batch_size, epochs, learn_rate, dropout_rate, weight_constraint, n_steps_in = params
model = KerasRegressor(build_fn=create_reg_LSTM_model, epochs=epochs, batch_size=batch_size,
input_dim=input_dim, n_steps_in=int(n_steps_in), n_features=int(n_features), n_steps_out=int(n_steps_out), neurons=neurons, learn_rate=learn_rate,
dropout_rate=dropout_rate, weight_constraint=weight_constraint, activation=activation, verbose=0)
elif model_name == 'DT':
min_samples_split, max_features, criterion, max_depth, min_samples_leaf, n_steps = params
model = MultiOutputRegressor(DecisionTreeRegressor(max_depth=max_depth,
criterion=criterion,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
max_features=max_features))
elif model_name == 'RF':
max_features, n_estimators, max_depth, min_samples_leaf, min_samples_split, bootstrap, n_steps = params
model = MultiOutputRegressor(RandomForestRegressor(max_depth=max_depth,
bootstrap=bootstrap,
n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
max_features=max_features))
elif model_name == 'DT_onereg':
# split, maxfeatures, criterion, minsampleleaf, maxdepth, lag
min_samples_split, max_features, criterion, max_depth, min_samples_leaf, n_steps = params
model = DecisionTreeRegressor(max_depth=max_depth,
criterion=criterion,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
max_features=max_features, n_jobs=n_jobs)
elif model_name == 'RF_onereg':
# maxfeature, n_estimator, maxdepth, minsampleleaf, min_samplesplit, bootstrap, lag
max_features, n_estimators, max_depth, min_samples_leaf, min_samples_split, bootstrap, n_steps = params
model = RandomForestRegressor(max_depth=max_depth,
bootstrap=bootstrap,
n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
max_features=max_features, n_jobs=n_jobs)
elif model_name == 'SVC':
epsilon, gamma, C, n_steps = params
model = MultiOutputRegressor(
SVR(epsilon=epsilon, gamma=gamma, C=C))
return model
totalsets = 2
def main():
# models = ['endecodeLSTM', 'CNNLSTM', 'ConvEnLSTM',
# 'NN', 'SVC', 'RF_onereg', 'DT_onereg']
models = ['LSTM'] # save the models later
# 'DOcategory', 'pHcategory','ph', 'dissolved_oxygen',
targets = ['dissolved_oxygen', 'ph']
path = 'Sondes_data/train_Summer/'
# files = [f for f in os.listdir(path) if f.endswith(
# ".csv") and f.startswith('leavon')] # leavon
files = ['osugi.csv', 'utlcp.csv',
'leoc_1.csv', 'leavon.csv']
n_job = -1
PrH_index = 0
for model_name in models:
print(model_name)
for target in targets:
print(target)
if target.find('category') > 0:
cat = 1
directory = 'Results/bookThree/2sondes/output_Cat_' + \
model_name+'/final_models/'
data = {'target_names': 'target_names', 'method_names': 'method_names', 'window_nuggets': 'window_nuggets', 'temporalhorizons': 'temporalhorizons', 'CV': 'CV',
'file_names': 'file_names', 'F1_0': 'F1_0', 'F1_1': 'F1_1', 'P_0': 'P_0', 'P_1': 'P_1', 'R_0': 'R_0', 'R_1': 'R_1', 'acc0_1': 'acc0_1', 'F1_0_1': 'F1_0_1', 'F1_all': 'F1_all', 'fbeta': 'fbeta'}
else:
cat = 0
directory = 'Results/bookThree/2sondes/output_Reg_' + \
model_name+'/final_models/'
data = {'target_names': 'target_names', 'method_names': 'method_names', 'window_nuggets': 'window_nuggets', 'temporalhorizons': 'temporalhorizons', 'CV': 'CV',
'file_names': 'file_names', 'mape': 'mape', 'me': 'me', 'mae': 'mae', 'mse': 'mse', 'rmse': 'rmse', 'R2': 'R2'}
if not os.path.exists(directory):
os.makedirs(directory)
print(directory)
directoryresult = directory + 'Results/'
if not os.path.exists(directoryresult):
os.makedirs(directoryresult)
# resultFileName = 'results_'+target+str(time.time())+'.csv'
for file in files:
method = 'OrgData'
params = func.trained_param_grid[
'param_grid_'+model_name+str(cat)]
n_steps_in = func.getlags_window(
model_name, params['param_'+target+'_'+str(PrH_index)], cat)
print(n_steps_in)
dataset = | pd.read_csv(path+file) | pandas.read_csv |
#!/usr/bin/env python3
import argparse
import glob
import numpy as np
import os
import pandas as pd
import quaternion
import sys
import trimesh
import json
from tqdm import tqdm
from scipy.spatial.distance import cdist
import warnings
warnings.filterwarnings("ignore")
__dir__ = os.path.normpath(
os.path.join(
os.path.dirname(os.path.realpath(__file__)), '..')
)
sys.path[1:1] = [__dir__]
top_classes = {
"03211117": "display", "04379243": "table",
"02747177": "trashbin", "03001627": "chair",
# "04256520": "sofa", "02808440": "bathtub",
"02933112": "cabinet", "02871439": "bookshelf"
}
from shapefit.utils.utils import get_validation_appearance, get_symmetries, get_gt_dir, \
get_scannet, get_shapenet, make_M_from_tqs, make_tqs_from_M
# helper function to calculate difference between two quaternions
def calc_rotation_diff(q, q00):
rotation_dot = np.dot(quaternion.as_float_array(q00), quaternion.as_float_array(q))
rotation_dot_abs = np.abs(rotation_dot)
try:
error_rotation_rad = 2 * np.arccos(rotation_dot_abs)
except:
return 0.0
error_rotation = np.rad2deg(error_rotation_rad)
return error_rotation
def rotation_error(row):
q = quaternion.quaternion(*row[:4])
q_gt = quaternion.quaternion(*row[4:8])
sym = row[-1]
if sym == "__SYM_ROTATE_UP_2":
m = 2
tmp = [
calc_rotation_diff(q, q_gt * quaternion.from_rotation_vector([0, (i * 2.0 / m) * np.pi, 0]))
for i in range(m)]
return np.min(tmp)
elif sym == "__SYM_ROTATE_UP_4":
m = 4
tmp = [
calc_rotation_diff(q, q_gt * quaternion.from_rotation_vector([0, (i * 2.0 / m) * np.pi, 0]))
for i in range(m)]
return np.min(tmp)
elif sym == "__SYM_ROTATE_UP_INF":
m = 36
tmp = [
calc_rotation_diff(q, q_gt * quaternion.from_rotation_vector([0, (i * 2.0 / m) * np.pi, 0]))
for i in range(m)]
return np.min(tmp)
else:
return calc_rotation_diff(q, q_gt)
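# Illustrative use (added): rotation_error() expects a flat row ordered as
# [qw, qx, qy, qz] for the prediction, [qw, qx, qy, qz] for the ground truth,
# then the ShapeNet symmetry tag, so it can be applied row-wise:
#   df["rot_err_deg"] = df[["qw", "qx", "qy", "qz",
#                           "qw_gt", "qx_gt", "qy_gt", "qz_gt",
#                           "sym"]].apply(rotation_error, axis=1)
# The column names are assumptions; only the positional ordering matters here.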
def print_to_(verbose, log_file, string):
if verbose:
print(string)
sys.stdout.flush()
with open(log_file, 'a+') as f:
f.write(string + '\n')
def get_init_mesh(scan_id, key):
path = glob.glob(os.path.join(
'/home/ishvlad/workspace/Scan2CAD/MeshDeformation/ARAP/',
'arap_output_GT', scan_id, key + '*', 'init.obj'
))
if len(path) == 0:
return None
return trimesh.load_mesh(path[0], process=False)
def DAME(mesh_1, mesh_2, k=0.59213):
def dihedral(mesh):
unique_faces, _ = np.unique(np.sort(mesh.faces, axis=1), axis=0, return_index=True)
parts_bitriangles_map = []
bitriangles = {}
for face in unique_faces:
edge_1 = tuple(sorted([face[0], face[1]]))
if edge_1 not in bitriangles:
bitriangles[edge_1] = set([face[0], face[1], face[2]])
else:
bitriangles[edge_1].add(face[2])
edge_2 = tuple(sorted([face[1], face[2]]))
if edge_2 not in bitriangles:
bitriangles[edge_2] = set([face[0], face[1], face[2]])
else:
bitriangles[edge_2].add(face[0])
edge_3 = tuple(sorted([face[0], face[2]]))
if edge_3 not in bitriangles:
bitriangles[edge_3] = set([face[0], face[1], face[2]])
else:
bitriangles[edge_3].add(face[1])
bitriangles_aligned = np.empty((len(mesh.edges_unique), 4), dtype=int)
for j, edge in enumerate(mesh.edges_unique):
bitriangle = [*sorted(edge)]
bitriangle += [x for x in list(bitriangles[tuple(sorted(edge))]) if x not in bitriangle]
bitriangles_aligned[j] = bitriangle
vertices_bitriangles_aligned = mesh.vertices[bitriangles_aligned]
normals_1 = np.cross((vertices_bitriangles_aligned[:, 2] - vertices_bitriangles_aligned[:, 0]),
(vertices_bitriangles_aligned[:, 2] - vertices_bitriangles_aligned[:, 1]))
normals_1 = normals_1 / np.sqrt(np.sum(normals_1 ** 2, axis=1)[:, None])
normals_2 = np.cross((vertices_bitriangles_aligned[:, 3] - vertices_bitriangles_aligned[:, 0]),
(vertices_bitriangles_aligned[:, 3] - vertices_bitriangles_aligned[:, 1]))
normals_2 = normals_2 / np.sqrt(np.sum(normals_2 ** 2, axis=1)[:, None])
n1_n2_arccos = np.arccos(np.sum(normals_1 * normals_2, axis=1).clip(-1, 1))
n1_n2_signs = np.sign(
np.sum(normals_1 * (vertices_bitriangles_aligned[:, 3] - vertices_bitriangles_aligned[:, 1]), axis=1))
D_n1_n2 = n1_n2_arccos * n1_n2_signs
return D_n1_n2
D_mesh_1 = dihedral(mesh_1)
D_mesh_2 = dihedral(mesh_2)
mask_1 = np.exp((k * D_mesh_1) ** 2)
per_edge = np.abs(D_mesh_1 - D_mesh_2) * mask_1
result = np.sum(per_edge) / len(mesh_1.edges_unique)
return result, per_edge
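# Illustrative use (added): DAME() compares the dihedral angles of two meshes
# with identical connectivity (e.g. a deformed shape against its init mesh, cf.
# get_init_mesh above) and returns the aggregate score plus per-edge terms;
# lower means less distortion.
#   score, per_edge = DAME(init_mesh, deformed_mesh)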
def calc_ATSD(dists, border):
return np.minimum(dists.min(1), border).mean()
def calc_F1(dists, border):
return np.sum(dists.min(1) < border) / len(dists)
def calc_CD(dists, border):
return max(np.minimum(dists.min(1), border).mean(), np.minimum(dists.min(0), border).mean())
def calc_metric(scan_mesh, shape_mesh, method='all', border=0.1):
area = border * 2
# get scan bbox
bbox = np.array([shape_mesh.vertices.min(0), shape_mesh.vertices.max(0)])
bbox += [[-area], [area]]
batch = np.array([np.diag(bbox[0]), np.diag(bbox[1]), np.eye(3), -np.eye(3)])
slice_mesh = scan_mesh.copy()
# xyz
for i in range(3):
slice_mesh = slice_mesh.slice_plane(batch[0, i], batch[2, i])
slice_mesh = slice_mesh.slice_plane(batch[1, i], batch[3, i])
if len(slice_mesh.vertices) == 0:
if method == 'all':
return {'ATSD': border, 'CD': border, 'F1': 0.0}
else:
return border
scan_vertices = np.array(slice_mesh.vertices)
if len(scan_vertices) > 20000:
scan_vertices = scan_vertices[::len(scan_vertices) // 20000]
dists = cdist(np.array(shape_mesh.vertices), scan_vertices, metric='minkowski', p=1)
if method == 'ATSD':
return calc_ATSD(dists, border)
elif method == 'CD':
return calc_CD(dists, border)
elif method == 'F1':
return calc_F1(dists, border)
else:
return {
'ATSD': calc_ATSD(dists, border),
'CD': calc_CD(dists, border),
'F1': calc_F1(dists, border),
}
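# Illustrative use (added): calc_metric() crops the scan to a padded bounding box
# around the aligned shape and reports the accuracy measures used below.
#   scores = calc_metric(scan_mesh, shape_mesh, border=0.1)
#   scores["ATSD"], scores["CD"], scores["F1"]
# shape_mesh must already be transformed into the scan's coordinate frame (see
# the apply_transform() call in metric_on_deformation below).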
def metric_on_deformation(options):
output_name = options.output_name + '_' + str(options.border) + \
'_' + str(options.val_set) + '_' + str(options.metric_type)
if options.output_type == 'align':
# load needed models
appearance = get_validation_appearance(options.val_set)
# LOAD list of all aligned scenes
csv_files = glob.glob(os.path.join(options.input_dir, '*.csv'))
scenes = [x.split('/')[-1][:-4] for x in csv_files]
# Which scenes do we want to calculate?
scenes = np.intersect1d(scenes, list(appearance.keys()))
batch = []
for s in scenes:
df_scan = pd.read_csv(
os.path.join(options.input_dir, s + '.csv'),
index_col=0, dtype={'objectCategory': str}
)
# Filter: take only objects from appearance
df_scan['key'] = df_scan.objectCategory + '_' + df_scan.alignedModelId
df_scan = df_scan[np.in1d(df_scan['key'].values, list(appearance[s].keys()))]
batch.extend([{
'scan_id': s,
'key': row['key'],
'objectCategory': row['objectCategory'],
'alignedModelId': row['alignedModelId'],
                'path': 'path to original ShapeNet mesh',
'object_num': i,
'T': [row['tx'], row['ty'], row['tz']],
'Q': [row['qw'], row['qx'], row['qy'], row['qz']],
'S': [row['sx'], row['sy'], row['sz']]
} for i, row in df_scan.iterrows()])
df = pd.DataFrame(batch)
else:
# LOAD list of all aligned scenes
in_files = glob.glob(os.path.join(options.input_dir, 'scene*/*/approx.obj'))
if len(in_files) == 0:
in_files = glob.glob(os.path.join(options.input_dir, '*/scene*/*/approx.obj'))
info = []
for x in in_files:
parts = x.split('/')[-3:-1]
if len(parts[1].split('_')) == 3:
category_id, shape_id, object_num = parts[1].split('_')
else:
category_id, shape_id = parts[1].split('_')
object_num = -1
row = [
parts[0], # scan_id
category_id + '_' + shape_id, # key
category_id,
shape_id,
object_num,
x, # path
]
info.append(row)
df = pd.DataFrame(info, columns=['scan_id', 'key', 'objectCategory', 'alignedModelId', 'object_num', 'path'])
transform_files = ['/'.join(x.split('/')[:-1]) + '/transform.json' for x in in_files]
Ts, Qs, Ss = [], [], []
for f in transform_files:
if os.path.exists(f):
matrix = np.array(json.load(open(f, 'rb'))['transform'])
else:
Ts.append(None)
Qs.append(None)
Ss.append(None)
continue
t, q, s = make_tqs_from_M(matrix)
q = quaternion.as_float_array(q)
Ts.append(t)
Qs.append(q)
Ss.append(s)
df['T'] = Ts
df['Q'] = Qs
df['S'] = Ss
metrics = {}
batch = df.groupby('scan_id')
if options.verbose:
batch = tqdm(batch, desc='Scenes')
# CALCULATE METRICS
for scan_id, df_scan in batch:
scan_mesh = get_scannet(scan_id, 'mesh')
scan_batch = df_scan.iterrows()
if options.verbose:
scan_batch = tqdm(scan_batch, total=len(df_scan), desc='Shapes', leave=False)
for i, row in scan_batch:
if options.output_type == 'align':
shape_mesh = get_shapenet(row['objectCategory'], row['alignedModelId'], 'mesh')
else:
try:
shape_mesh = trimesh.load_mesh(row['path'])
except Exception:
metrics[i] = {'ATSD': np.nan, 'CD': np.nan, 'F1': np.nan}
continue
if row['T'] is None:
metrics[i] = {'ATSD': np.nan, 'CD': np.nan, 'F1': np.nan}
continue
T = make_M_from_tqs(row['T'], row['Q'], row['S'])
shape_mesh.apply_transform(T)
metrics[i] = calc_metric(scan_mesh, shape_mesh, border=options.border)
df_final = df.merge(pd.DataFrame(metrics).T, left_index=True, right_index=True)
df_final.to_csv(output_name + '.csv')
if len(df_final) == 0:
print_to_(options.verbose, output_name + '.log', 'No aligned shapes')
return
df_final = df_final[~pd.isna(df_final['ATSD'])]
# Calculate INSTANCE accuracy
acc = df_final[['ATSD', 'CD', 'F1']].mean().values
acc[-1] *= 100
print_string = '#' * 57 + '\nINSTANCE MEAN. ATSD: {:>4.2f}, CD: {:>4.2f}, F1: {:6.2f}\n'.format(
*acc) + '#' * 57
print_to_(options.verbose, output_name + '.log', print_string)
df_final['name'] = [top_classes.get(x, 'zother') for x in df_final.objectCategory]
df_class = df_final.groupby('name').mean()[['ATSD', 'CD', 'F1']]
print_string = '###' + ' ' * 7 + 'CLASS' + ' ' * 4 + '# ATSD # CD # F1 ###'
print_to_(options.verbose, output_name + '.log', print_string)
for name, row in df_class.iterrows():
print_string = '###\t{:10} # {:>4.2f} # {:>4.2f} # {:6.2f} ###'.format(
name, row['ATSD'], row['CD'], row['F1']*100
)
print_to_(options.verbose, output_name + '.log', print_string)
acc = df_class.mean().values
acc[-1] *= 100
print_string = '#' * 57 + '\n CLASS MEAN. ATSD: {:>4.2f}, CD: {:>4.2f}, F1: {:6.2f}\n'.format(
*acc) + '#' * 57
print_to_(options.verbose, output_name + '.log', print_string)
def metric_on_alignment(options):
output_name = options.output_name + '_' + str(options.border) + \
'_' + str(options.val_set) + '_' + str(options.metric_type)
    if options.output_type == 'deform':
        raise Exception('metric_on_alignment only handles aligned outputs; use metric_on_deformation for output_type="deform"')
# LOAD list of all aligned scenes
csv_files = glob.glob(os.path.join(options.input_dir, '*.csv'))
scenes = [x.split('/')[-1][:-4] for x in csv_files]
# Which scenes do we want to calculate?
appearances_cad = get_validation_appearance(options.val_set)
df_appearance = pd.DataFrame(np.concatenate([
[(k, kk, appearances_cad[k][kk]) for kk in appearances_cad[k]] for k in appearances_cad
]), columns=['scan_id', 'key', 'count'])
scenes = np.intersect1d(scenes, list(set(df_appearance.scan_id)))
# LOAD GT and target alignments
gt_dir = get_gt_dir('align')
batch = []
batch_gt = []
for s in scenes:
df = pd.read_csv(os.path.join(gt_dir, s + '.csv'), index_col=0, dtype={'objectCategory': str})
df['scan_id'] = s
# Filter: take only objects from appearance
df['key'] = df.objectCategory + '_' + df.alignedModelId
df = df[np.in1d(df['key'].values, list(appearances_cad[s].keys()))]
batch_gt.append(df)
df = pd.read_csv(os.path.join(options.input_dir, s + '.csv'), index_col=0, dtype={'objectCategory': str})
df['scan_id'] = s
# Filter: take only objects from appearance
df['key'] = df.objectCategory + '_' + df.alignedModelId
df = df[np.in1d(df['key'].values, list(appearances_cad[s].keys()))]
batch.append(df)
df_alignment = pd.concat(batch)
df_alignment.reset_index(drop=True, inplace=True)
df_alignment_gt = | pd.concat(batch_gt) | pandas.concat |
import numpy as np
from scipy import sparse
import matplotlib.pyplot as plt
import pandas as pd
from IPython import display
import time
from datetime import datetime
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.svm import SVC
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import GridSearchCV
#Obtain data from CSVs
fundementalsData = pd.read_csv("./nyse/fundamentals.csv").dropna()
pricesData = pd.read_csv("./nyse/prices.csv")
#Convert data to dataframes
fundementalsDataFrame = pd.DataFrame(fundementalsData)
pricesDataFrame = | pd.DataFrame(pricesData) | pandas.DataFrame |
# coding: utf-8
# In[1]:
# Import all packages
import os
import numpy as np
import pandas as pd
from IPython.display import display
# In[2]:
# Global parameters / values
dataAux_dir = "../data_aux/"
results_dir = "../results/"
Klaeger_filename = "Klaeger.csv"
Huang_filename = "Huang.csv"
Annes100_filename = "Annes100.csv"
Annes500_filename = "Annes500.csv"
# Name of column of official gene symbols
geneSymbolColumn = "GeneSymbol"
# Thresholds for converting kinase activity metrics to boolean values
# - use_diff: bool
# Use difference between STF1081 and another compound as value to threshold
# For Annes and Huang datasets: use the absolute difference and compare to threshold diff_percent_thresh
# For Klaeger dataset: use fold difference and compare to threshold diff_fold_thresh
# - diff_percent_thresh: int or float
# Threshold absolute difference (% control (Annes) or % activity remaining (Huang)) between a less toxic compound
# and STF1081 at which a target (kinase) is considered a potential target for the toxicity of STF1081
# - diff_fold_thresh: int or float
# Threshold fold difference (Kd_app) between a less toxic compound and STF1081 at which a target (kinase)
# is considered a potential target for the toxicity of STF1081
# - min_percent_thresh: int or float
# Threshold % control (Annes) or % activity remaining (Huang) at which a target is considered to be inhibited by STF1081
# - max_percent_thresh: int or float
# Threshold % control (Annes) or % activity remaining (Huang) at which a target is considered to be not inhibited by
# a less toxic compound
use_diff = True
diff_percent_thresh = 20
diff_fold_thresh = 20
min_percent_thresh = 25
max_percent_thresh = 75
# Compare 100nM STF1081 to 100nM STF1285 (Annes100_filename) or 500nM STF1285 (Annes500_filename)
Annes_filename = Annes100_filename
# In[3]:
## Read in data
df1 = pd.read_csv(os.path.join(dataAux_dir, Klaeger_filename))
df2 = pd.read_csv(os.path.join(dataAux_dir, Huang_filename))
df3 = pd.read_csv(os.path.join(dataAux_dir, Annes_filename))
# ## Boolean filtering
#
# Add new column `bool` to each DataFrame that indicates whether target is both inhibited by STF1081 and *not* inhibited by a less toxic compound. Find the intersection across all datasets.
# In[4]:
df1['bool'] = np.nan
df2['bool'] = np.nan
df3['bool'] = np.nan
# In[5]:
if use_diff:
df1['bool'] = df1['CC401'] / df1['STF1081'] >= diff_fold_thresh
df2['bool'] = df2['HTH01091'] - df2['STF1081'] >= diff_percent_thresh
df3['bool'] = df3['STF1285'] - df3['STF1081'] >= diff_percent_thresh
else:
df1.loc[np.isinf(df1['CC401']) & (df1['STF1081'] < np.inf), 'bool'] = True
df2.loc[(df2['HTH01091'] >= max_percent_thresh) & (df2['STF1081'] <= min_percent_thresh), 'bool'] = True
df3.loc[(df3['STF1285'] >= max_percent_thresh) & (df3['STF1081'] <= min_percent_thresh), 'bool'] = True
# In[6]:
intersect = | pd.merge(df1, df2, how='inner', on=[geneSymbolColumn, 'bool']) | pandas.merge |
import flask
import web3
from flask_cors import CORS
from flask import request
import pandas as pd
import hashlib
import base64
BUFF_SIZE = 65536
num_file = 0
app = flask.Flask(__name__)
app.config["DEBUG"] = True
CORS(app)
csv_file = "file.csv"
bdd = pd.DataFrame()
#initialise the database with sample data for testing
def init_bdd():
global bdd
bdd = pd.DataFrame({"file":["file1","file2"],"licence":[1,5],"price":[1,1]})
bdd['hash'] = bdd.apply(calcul_hash,axis=1)
#compute the hash of an item
def calcul_hash(item):
global num_file
num_file+=1
return str(num_file)
#route to add a new product
@app.route('/new_product', methods=['POST'])
def add_product():
global num_file
global bdd
num_file +=1
my_json = request.get_json()
print("\n\n",my_json,"\n\n",request.data)
public_key = my_json['public_key']
product = my_json['file']
my_b = bytes(product,'utf-8')
my_str = base64.b64decode(my_b).decode('utf-8')
my_final_b = bytes(my_str,'utf-8')
print(my_final_b)
#licence = my_json['number']
price = my_json['price']
file_name = "new_file_"+str(num_file)
new_file = open(file_name,"wb")
new_file.write(my_final_b)
new_file.close()
    #compute the hash of the uploaded product
m = hashlib.sha256()
file_r = open(file_name,"rb")
while True:
data = file_r.read(BUFF_SIZE)
if not data:
break
m.update(data)
file_r.close()
my_hash = m.hexdigest()
    #add the product to the database
bdd = bdd.append({"file":file_name,"licence":1,"price":price, "hash":my_hash},ignore_index=True)
save_bdd()
return flask.jsonify(my_hash)
#route to add a new licence tier to an existing product
@app.route('/new_licence', methods=['POST'])
def add_licence():
global bdd
my_json = request.get_json()
license = my_json["number"]
price = my_json["price"]
hash = my_json["hash"]
    #look up the file associated with this hash, then register the new unit count and its price
    file = bdd[bdd["hash"]==hash]["file"].iloc[0]
bdd = bdd.append({"file":file,"licence":license,"price":price, "hash":hash},ignore_index=True)
save_bdd()
return flask.jsonify(True)
#return an item's file content from its hash
@app.route('/piece_from_hash', methods=['POST'])
def get_piece_from_hash():
my_json = request.get_json()
item = bdd[bdd["hash"]==my_json['hash']]
if len(item)>0:
file_name = item["file"].iloc[0]
my_file = open(file_name)
data = my_file.read()
return flask.jsonify(data)
else:
return flask.jsonify(False)
#return an item's licence tiers and prices from its hash
@app.route('/price_from_hash', methods=['POST'])
def get_price_from_hash():
my_json = request.get_json()
item = bdd[bdd["hash"]==my_json['hash']]
if len(item)>0:
return flask.jsonify(item[["licence","price"]].to_dict("records"))
else:
return flask.jsonify(False)
#return all items and their prices (for the front end to display)
@app.route('/all_piece', methods=['GET'])
def get_all_piece():
return flask.jsonify(bdd.to_dict("records"))
#load the BDD
def load_bdd():
global bdd
bdd = | pd.read_csv(csv_file) | pandas.read_csv |
import numpy as np
import pandas as pd
from collections import OrderedDict
from pandas.api.types import is_numeric_dtype, is_object_dtype, is_categorical_dtype
from typing import List, Optional, Tuple, Callable
def inspect_df(df: pd.DataFrame) -> pd.DataFrame:
""" Show column types and null values in DataFrame df
"""
resdict = OrderedDict()
# Inspect nulls
null_series = df.isnull().sum()
resdict["column"] = null_series.index
resdict["null_fraction"] = np.round(null_series.values / len(df), 3)
resdict["nulls"] = null_series.values
# Inspect types
types = df.dtypes.values
type_names = [t.name for t in types]
resdict["type"] = type_names
# Is numeric?
is_numeric = []
for col in df.columns:
is_numeric.append(is_numeric_dtype(df[col]))
resdict["is_numeric"] = is_numeric
# Dataframe
resdf = pd.DataFrame(resdict)
resdf.sort_values("null_fraction", inplace=True)
resdf.reset_index(inplace=True, drop=True)
return resdf
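# Illustrative sketch (added): a minimal call showing what inspect_df reports.
# The toy DataFrame is made up purely for demonstration; the helper is never invoked at import time.
def _demo_inspect_df() -> pd.DataFrame:
    toy = pd.DataFrame({"a": [1, 2, None], "b": ["x", None, "z"]})
    # One row per column: null_fraction, nulls, dtype name and an is_numeric flag
    return inspect_df(toy)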
def summarize_df(df: pd.DataFrame) -> pd.DataFrame:
""" Show stats;
- rows:
- column types
- columns
- number of columns
- number of cols containing NaN's
"""
# Original DataFrame
(nrows, _) = df.shape
# Stats of DataFrame
stats = inspect_df(df)
data_types = np.unique(stats["type"].values)
resdict = OrderedDict()
# Column: data types
resdict["type"] = data_types
ncols_type = []
ncols_nan = []
n_nans = []
n_total = []
for dt in data_types:
# Column: number of columns with type
nc = len(stats[stats["type"] == dt])
ncols_type.append(nc)
# Column: number of columns with NaNs
nan_cols = stats[(stats["type"] == dt) & (stats["nulls"] > 0)]
ncols_nan.append(len(nan_cols))
# Column: number of NaNs
n_nans.append(nan_cols["nulls"].sum())
# Column: total number of values
n_total.append(nc * nrows)
# Prepare dict for the df
resdict["ncols"] = ncols_type
resdict["ncols_w_nans"] = ncols_nan
resdict["n_nans"] = n_nans
resdict["n_total"] = n_total
# Proportions of NaNs in each column group.
# Division by zero shouldn't occur
nan_frac = np.array(n_nans) / np.array(n_total)
resdict["nan_frac"] = np.round(nan_frac, 2)
resdf = pd.DataFrame(resdict)
resdf.sort_values("type", inplace=True)
resdf.reset_index(inplace=True, drop=True)
return resdf
def add_datefields(
df: pd.DataFrame,
column: str,
drop_original: bool = False,
inplace: bool = False,
attrs: Optional[List[str]] = None,
) -> pd.DataFrame:
""" Add attributes of the date to dataFrame df
"""
raw_date = df[column]
# Pandas datetime attributes
if attrs is None:
attributes = [
"dayofweek",
"dayofyear",
"is_month_end",
"is_month_start",
"is_quarter_end",
"is_quarter_start",
"quarter",
"week",
]
else:
attributes = attrs
# Return new?
if inplace:
resdf = df
else:
resdf = df.copy(deep=True)
# Could probably be optimized with pd.apply()
for attr in attributes:
new_column = f"{column}_{attr}"
# https://stackoverflow.com/questions/2612610/
new_vals = [getattr(d, attr) for d in raw_date]
resdf[new_column] = new_vals
if drop_original:
resdf.drop(columns=column, inplace=True)
return resdf
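# Illustrative sketch (added): deriving calendar attributes from a datetime column.
# The column name "sale_date" and the chosen attributes are made-up examples.
def _demo_add_datefields() -> pd.DataFrame:
    toy = pd.DataFrame({"sale_date": pd.to_datetime(["2020-01-31", "2020-06-15"])})
    # Adds sale_date_quarter and sale_date_is_month_end, then drops the original column
    return add_datefields(toy, "sale_date", drop_original=True, attrs=["quarter", "is_month_end"])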
def add_nan_columns(
df: pd.DataFrame, inplace: bool = False, column_list: Optional[List[str]] = None
) -> pd.DataFrame:
""" For each column containing NaNs, add a boolean
column specifying if the column is NaN. Can be used
if the data is later imputated.
"""
if column_list is not None:
nan_columns = column_list
else:
# Get names of columns containing at least one NaN
temp = df.isnull().sum() != 0
nan_columns = temp.index[temp.values]
# Return new?
if inplace:
resdf = df
else:
resdf = df.copy(deep=True)
for column in nan_columns:
new_column = f"{column}_isnull"
nans = df[column].isnull()
resdf[new_column] = nans
return resdf
def numeric_nans(df: pd.DataFrame) -> pd.DataFrame:
""" Inspect numerical NaN values of a DataFrame df
"""
stats = inspect_df(df)
nan_stats = stats.loc[stats["is_numeric"] & (stats["nulls"] > 0)].copy(deep=True)
len_uniques = []
uniques = []
for row in nan_stats["column"].values:
uniq = np.unique(df[row][df[row].notnull()].values)
len_uniques.append(len(uniq))
uniques.append(uniq)
nan_stats["num_uniques"] = len_uniques
nan_stats["uniques"] = uniques
nan_stats.reset_index(inplace=True, drop=True)
return nan_stats
def categorize_df(
df: pd.DataFrame,
columns: Optional[List[str]] = None,
inplace: bool = False,
drop_original: bool = True,
) -> Tuple[pd.DataFrame, pd.DataFrame]:
""" Categorize values in columns, and replace value with category.
If no columns are given, default to all 'object' columns
"""
if columns is not None:
cat_cols = columns
else:
cat_cols = df.columns[[dt.name == "object" for dt in df.dtypes.values]]
if inplace:
resdf = df
else:
resdf = df.copy(deep=True)
df_codes = []
df_cats = []
n_cats = []
for column in cat_cols:
new_column = f"{column}_cat"
cat_column = df[column].astype("category")
# By default, NaN is -1. We convert to zero by incrementing all.
col_codes = cat_column.cat.codes + 1
resdf[new_column] = col_codes
# DataFrame with the codes
df_codes.append(col_codes)
df_cats.append(cat_column.cat.categories)
n_cats.append(len(np.unique(col_codes)))
cat_dict = OrderedDict()
cat_dict["column"] = cat_cols
# MyPy picks up an error in the next line. Bug is where?
# Additionally, Flake8 will report the MyPy ignore as an error
cat_dict["n_categories"] = n_cats # type: ignore[assignment] # noqa: F821,F821
cat_dict["categories"] = df_cats
cat_dict["codes"] = df_codes
cat_df = pd.DataFrame(cat_dict)
if drop_original:
resdf.drop(columns=cat_cols, inplace=True)
return (resdf, cat_df)
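# Illustrative sketch (added): encoding object columns as integer category codes.
# The toy frame and its column names are hypothetical.
def _demo_categorize_df() -> Tuple[pd.DataFrame, pd.DataFrame]:
    toy = pd.DataFrame({"colour": ["red", "blue", None, "red"], "n": [1, 2, 3, 4]})
    encoded, mapping = categorize_df(toy)
    # 'colour' is replaced by 'colour_cat' (missing values coded as 0); 'mapping' keeps the
    # categories and codes so the encoding can be reversed later
    return encoded, mapping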
def replace_numeric_nulls(
df: pd.DataFrame,
columns: Optional[List[str]] = None,
function: Callable = np.median,
inplace: bool = False,
) -> pd.DataFrame:
""" Replace nulls in all numerical column with the median (default) or
another callable function that works on NumPy arrays
"""
if columns is None:
columns = [
colname for colname, column in df.items() if is_numeric_dtype(column)
]
if inplace:
resdf = df
else:
resdf = df.copy(deep=True)
fillers = OrderedDict()
for column in columns:
values = resdf[resdf[column].notnull()][column].values
fillers[column] = function(values)
resdf.fillna(value=fillers, inplace=True)
return resdf
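# Illustrative sketch (added): median-imputing only the numeric columns of a frame.
# The toy frame is hypothetical; the object column is left untouched by design.
def _demo_replace_numeric_nulls() -> pd.DataFrame:
    toy = pd.DataFrame({"x": [1.0, None, 3.0], "label": ["a", "b", None]})
    return replace_numeric_nulls(toy)  # fills the missing x with its median (2.0)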
def object_nan_to_empty(df: pd.DataFrame, inplace: bool = False) -> pd.DataFrame:
""" Replace NaN in Pandas object columns with an empty string
indicating a missing value.
"""
columns = [colname for colname, column in df.items() if | is_object_dtype(column) | pandas.api.types.is_object_dtype |
from IPython.display import display
import pandas as pd
import pyomo.environ as pe
import numpy as np
import csv
import os
import shutil
class inosys:
def __init__(self, inp_folder, ref_bus, dshed_cost = 1000000, rshed_cost = 500, phase = 3, vmin=0.85, vmax=1.15, sbase = 1, sc_fa = 1):
'''
Initialise the investment and operation problem.
        :param str inp_folder: The input directory for the data. It expects to find several CSV files detailing the system input data (Default current folder)
        :param int ref_bus: Reference node
        :param float dshed_cost: Demand Shedding Price (Default 1000000)
        :param float rshed_cost: Renewable Shedding Price (Default 500)
        :param int phase: Number of Phases (Default 3)
        :param float vmin: Minimum node voltage (Default 0.85)
        :param float vmax: Maximum node voltage (Default 1.15)
        :param float sbase: Base Apparent Power (Default 1 kW)
:param float sc_fa: Scaling Factor (Default 1)
:Example:
>>> import pyeplan
>>> sys_inv = pyeplan.inosys("wat_inv", ref_bus = 260)
'''
self.cgen = pd.read_csv(inp_folder + os.sep + 'cgen_dist.csv')
self.egen = | pd.read_csv(inp_folder + os.sep + 'egen_dist.csv') | pandas.read_csv |
import tensorflow as tf
import numpy as np
import random
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import TimeSeriesSplit
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, classification_report, confusion_matrix
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import AdaBoostClassifier
from keras.models import Sequential
from keras.layers import Dense, Activation, LSTM, Dropout
from keras.utils import to_categorical
from keras import optimizers
from keras import metrics
from keras import backend as K
from datetime import datetime, timedelta
import pandas as pd
from copy import deepcopy
## <NAME>
## <NAME>
## <NAME>
seed = 123
random.seed(seed)
np.random.seed(seed)
class BasicTemplateAlgorithm(QCAlgorithm):
'''Basic template algorithm simply initializes the date range and cash'''
def Initialize(self):
'''Initialise the data and resolution required, as well as the cash and start-end dates for your algorithm. All algorithms must initialized.'''
self.SetStartDate(2018,8,1) #Set Start Date
self.SetEndDate(2018,11,21) #Set End Date
self.SetCash(100000) #Set Strategy Cash
## start the Keras/ Tensorflow session
self.session = K.get_session()
self.graph = tf.get_default_graph()
## set the currency pair that we are trading, and the correlated currency pair
self.currency = "AUDUSD"
self.AddForex(self.currency, Resolution.Daily)
self.correl_currency = "USDCHF"
self.AddForex(self.correl_currency, Resolution.Daily)
## define a long list, short list and portfolio
self.long_list, self.short_list = [], []
# Initialise indicators
self.rsi = RelativeStrengthIndex(9)
self.bb = BollingerBands(14, 2, 2)
self.macd = MovingAverageConvergenceDivergence(12, 26, 9)
self.stochastic = Stochastic(14, 3, 3)
self.ema = ExponentialMovingAverage(9)
## Arrays to store the past indicators
prev_rsi, prev_bb, prev_macd, lower_bb, upper_bb, sd_bb, prev_stochastic, prev_ema = [],[],[],[],[],[],[],[]
## Make history calls for both currency pairs
self.currency_data = self.History([self.currency], 150, Resolution.Daily) # Drop the first 20 for indicators to warm up
self.correl_data = self.History([self.correl_currency], 150, Resolution.Daily)
## save the most recent open and close
ytd_open = self.currency_data["open"][-1]
ytd_close = self.currency_data["close"][-1]
## remove yesterday's data. We will query this onData
self.currency_data = self.currency_data[:-1]
self.correl_data = self.correl_data[:-1]
## iterate over past data to update the indicators
for tup in self.currency_data.loc[self.currency].itertuples():
# making Ibasedatabar for stochastic
bar = QuoteBar(tup.Index,
self.currency,
Bar(tup.bidclose, tup.bidhigh, tup.bidlow, tup.bidopen),
0,
Bar(tup.askclose, tup.askhigh, tup.asklow, tup.askopen),
0,
timedelta(days=1)
)
self.stochastic.Update(bar)
prev_stochastic.append(float(self.stochastic.ToString()))
self.rsi.Update(tup.Index, tup.close)
prev_rsi.append(float(self.rsi.ToString()))
self.bb.Update(tup.Index, tup.close)
prev_bb.append(float(self.bb.ToString()))
lower_bb.append(float(self.bb.LowerBand.ToString()))
upper_bb.append(float(self.bb.UpperBand.ToString()))
sd_bb.append(float(self.bb.StandardDeviation.ToString()))
self.macd.Update(tup.Index, tup.close)
prev_macd.append(float(self.macd.ToString()))
self.ema.Update(tup.Index, tup.close)
prev_ema.append(float(self.ema.ToString()))
## Forming the Indicators df
## This is common to the Price Prediction
rsi_df = pd.DataFrame(prev_rsi, columns = ["rsi"])
macd_df = pd.DataFrame(prev_macd, columns = ["macd"])
upper_bb_df = pd.DataFrame(upper_bb, columns = ["upper_bb"])
lower_bb_df = pd.DataFrame(lower_bb, columns = ["lower_bb"])
sd_bb_df = pd.DataFrame(sd_bb, columns = ["sd_bb"])
stochastic_df = pd.DataFrame(prev_stochastic, columns = ["stochastic"])
ema_df = | pd.DataFrame(prev_ema, columns=["ema"]) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[3]:
import requests
import pandas as pd
import json
from tqdm import tqdm
PATH = '../../'
PATH_STATS = "../../data/france/stats/"
# In[5]:
# Download data from Santé publique France and export it to local files
def download_data_hosp_fra_clage():
data = requests.get("https://www.data.gouv.fr/fr/datasets/r/08c18e08-6780-452d-9b8c-ae244ad529b3")
with open(PATH + 'data/france/donnees-hosp-fra-clage.csv', 'wb') as f:
f.write(data.content)
def download_data_opencovid():
data = requests.get("https://raw.githubusercontent.com/opencovid19-fr/data/master/dist/chiffres-cles.csv")
with open(PATH + 'data/france/donnees-opencovid.csv', 'wb') as f:
f.write(data.content)
def download_data_vue_ensemble():
data = requests.get("https://www.data.gouv.fr/fr/datasets/r/d3a98a30-893f-47f7-96c5-2f4bcaaa0d71")
with open(PATH + 'data/france/synthese-fra.csv', 'wb') as f:
f.write(data.content)
def download_data_variants():
data = requests.get("https://www.data.gouv.fr/fr/datasets/r/848debc4-0e42-4e3b-a176-afc285ed5401") #https://www.data.gouv.fr/fr/datasets/r/c43d7f3f-c9f5-436b-9b26-728f80e0fd52
data_reg = requests.get("https://www.data.gouv.fr/fr/datasets/r/5ff0cad6-f150-47ea-a4e0-57e354c1b2a4") #https://www.data.gouv.fr/fr/datasets/r/73e8851a-d851-43f8-89e4-6178b35b7127
with open(PATH + 'data/france/donnees-variants.csv', 'wb') as f:
f.write(data.content)
with open(PATH + 'data/france/donnees-variants-reg.csv', 'wb') as f:
        f.write(data_reg.content)
def download_data_variants_deps():
data = requests.get("https://www.data.gouv.fr/fr/datasets/r/4d3e5a8b-9649-4c41-86ec-5420eb6b530c") #https://www.data.gouv.fr/fr/datasets/r/16f4fd03-797f-4616-bca9-78ff212d06e8
with open(PATH + 'data/france/donnees-variants-deps.csv', 'wb') as f:
f.write(data.content)
def download_data_vacsi_fra():
data = requests.get("https://www.data.gouv.fr/fr/datasets/r/efe23314-67c4-45d3-89a2-3faef82fae90")
with open(PATH + 'data/france/donnees-vacsi-fra.csv', 'wb') as f:
f.write(data.content)
def download_data_vacsi_reg():
data = requests.get("https://www.data.gouv.fr/fr/datasets/r/735b0df8-51b4-4dd2-8a2d-8e46d77d60d8")
with open(PATH + 'data/france/donnees-vacsi-reg.csv', 'wb') as f:
f.write(data.content)
def download_data_vacsi_dep():
data = requests.get("https://www.data.gouv.fr/fr/datasets/r/4f39ec91-80d7-4602-befb-4b522804c0af")
with open(PATH + 'data/france/donnees-vacsi-dep.csv', 'wb') as f:
f.write(data.content)
def download_data_obepine():
data = requests.get("https://www.data.gouv.fr/fr/datasets/r/031b79a4-5ee1-4f40-a804-b8abec3e99a6") #https://www.data.gouv.fr/fr/datasets/r/ba71be57-5932-4298-81ea-aff3a12a440c
with open(PATH + 'data/france/donnees_obepine_regions.csv', 'wb') as f:
f.write(data.content)
def download_data_donnees_vaccination_par_pathologie():
data = requests.get("https://datavaccin-covid.ameli.fr/explore/dataset/donnees-vaccination-par-pathologie/download/?format=csv&timezone=Europe/Berlin&lang=fr&use_labels_for_header=true&csv_separator=%3B")
with open(PATH + 'data/france/donnees-vaccination-par-pathologie.csv', 'wb') as f:
f.write(data.content)
def import_data_donnees_vaccination_par_pathologie():
df = pd.read_csv(PATH + 'data/france/donnees-vaccination-par-pathologie.csv', sep=None)
return df
def download_donnees_vaccination_par_tranche_dage_type_de_vaccin_et_departement():
data = requests.get("https://datavaccin-covid.ameli.fr/explore/dataset/donnees-vaccination-par-tranche-dage-type-de-vaccin-et-departement/download/?format=csv&timezone=Europe/Berlin&lang=fr&use_labels_for_header=true&csv_separator=%3B")
with open(PATH + 'data/france/donnees-tranche-dage-departement.csv', 'wb') as f:
f.write(data.content)
def import_donnees_vaccination_par_tranche_dage_type_de_vaccin_et_departement():
df = pd.read_csv(PATH + 'data/france/donnees-tranche-dage-departement.csv', sep=None)
return df
def import_data_obepine():
df = pd.read_csv(PATH + 'data/france/donnees_obepine_regions.csv', sep=None)
df_reg_pop = pd.read_csv(PATH + 'data/france/population_grandes_regions.csv', sep=",")
df = df.merge(right=df_reg_pop, left_on="Code_Region", right_on="code")
return df
def import_data_metropoles():
df_metro = pd.read_csv(PATH + 'data/france/donnes-incidence-metropoles.csv', sep=",")
epci = pd.read_csv(PATH + 'data/france/metropole-epci.csv', sep=";", encoding="'windows-1252'")
df_metro = df_metro.merge(epci, left_on='epci2020', right_on='EPCI').drop(['EPCI'], axis=1)
return df_metro
def import_data_hosp_clage():
df_hosp = pd.read_csv(PATH + 'data/france/donnes-hospitalieres-clage-covid19.csv', sep=";")
df_hosp = df_hosp.groupby(["reg", "jour", "cl_age90"]).first().reset_index()
df_reg_pop = pd.read_csv(PATH + 'data/france/population_grandes_regions.csv', sep=",")
df_hosp = df_hosp.merge(df_reg_pop, left_on="reg", right_on="code")
return df_hosp
def import_data_tests_viros():
df = pd.read_csv(PATH + 'data/france/tests_viro-dep-quot.csv', sep=";")
df_reg_pop = pd.read_csv(PATH + 'data/france/population_grandes_regions.csv', sep=",")
df_dep_reg = pd.read_csv(PATH + 'data/france/departments_regions_france_2016.csv', sep=",")
df["dep"] = df["dep"].astype(str)
df["dep"] = df["dep"].astype('str').str.replace(r"^([1-9])$", lambda m: "0"+m.group(0), regex=True)
    df_dep_reg["departmentCode"] = df_dep_reg.departmentCode.astype(str)
df = df.merge(df_dep_reg, left_on="dep", right_on="departmentCode", how="left")
df = df.merge(df_reg_pop, left_on="regionCode", right_on="code", how="left")
return df
def import_data_hosp_ad_age():
df = pd.read_csv('https://www.data.gouv.fr/fr/datasets/r/dc7663c7-5da9-4765-a98b-ba4bc9de9079', sep=";")
return df
def import_data_new():
df_new = pd.read_csv(PATH + 'data/france/donnes-hospitalieres-covid19-nouveaux.csv', sep=";")
return df_new
def import_data_df():
df = pd.read_csv(PATH + 'data/france/donnes-hospitalieres-covid19.csv', sep=";")
return df
def import_data_variants():
df_variants = pd.read_csv(PATH + 'data/france/donnees-variants.csv', sep=";")
df_variants["jour"] = df_variants.semaine.apply(lambda x: x[11:])
#df_variants = df_variants[df_variants.cl_age90==0]
return df_variants
def import_data_variants_deps():
df_variants = pd.read_csv(PATH + 'data/france/donnees-variants-deps.csv', sep=";")
df_variants["jour"] = df_variants.semaine.apply(lambda x: x[11:])
#df_variants = df_variants[df_variants.cl_age90==0]
return df_variants
def import_data_variants_regs():
df_variants = pd.read_csv(PATH + 'data/france/donnees-variants-regs.csv', sep=";")
df_variants["jour"] = df_variants.semaine.apply(lambda x: x[11:])
df_variants = df_variants[df_variants.cl_age90==0]
df_reg_pop = pd.read_csv(PATH + 'data/france/population_grandes_regions.csv', sep=",")
df_variants = df_variants.merge(df_reg_pop, left_on="reg", right_on="code")
return df_variants
def import_data_tests_sexe():
df = pd.read_csv(PATH + 'data/france/tests_viro-fra-covid19.csv', sep=";")
return df
def import_data_vue_ensemble():
df = pd.read_csv(PATH + 'data/france/synthese-fra.csv', sep=",")
df = df.sort_values(["date"])
with open(PATH_STATS + 'vue-ensemble.json', 'w') as outfile:
dict_data = {"cas": int(df["total_cas_confirmes"].diff().values[-1]), "update": df.date.values[-1][-2:] + "/" + df.date.values[-1][-5:-3]}
json.dump(dict_data, outfile)
return df
def import_data_opencovid():
df = pd.read_csv(PATH + 'data/france/donnees-opencovid.csv', sep=",")
"""with open(PATH_STATS + 'opencovid.json', 'w') as outfile:
dict_data = {"cas": int(df["cas_confirmes"].values[-1]), "update": df.index.values[-1][-2:] + "/" + df.index.values[-1][-5:-3]}
json.dump(dict_data, outfile)"""
return df
def import_data_vacsi_a_fra():
df = pd.read_csv(PATH + 'data/france/donnees-vacsi-a-fra.csv', sep=";")
df = df[df.clage_vacsi != 0]
return df
def import_data_vacsi_reg():
df = pd.read_csv(PATH + 'data/france/donnees-vacsi-reg.csv', sep=";")
return df
def import_data_vacsi_dep():
df = pd.read_csv(PATH + 'data/france/donnees-vacsi-dep.csv', sep=";")
return df
def import_data_vacsi_fra():
df = pd.read_csv(PATH + 'data/france/donnees-vacsi-fra.csv', sep=";")
return df
def import_data_vacsi_a_reg():
df = pd.read_csv(PATH + 'data/france/donnees-vacsi-a-reg.csv', sep=";")
df = df[df.clage_vacsi != 0]
return df
def import_data_vacsi_a_dep():
df = pd.read_csv(PATH + 'data/france/donnees-vacsi-a-dep.csv', sep=";")
df = df[df.clage_vacsi != 0]
return df
def import_data_hosp_fra_clage():
df = pd.read_csv(PATH + 'data/france/donnees-hosp-fra-clage.csv', sep=";").groupby(["cl_age90", "jour"]).sum().reset_index()
df = df[df.cl_age90 != 0]
return df
def download_data():
pbar = tqdm(total=8)
download_data_vacsi_fra()
download_data_vacsi_reg()
download_data_vacsi_dep()
url_metadata = "https://www.data.gouv.fr/fr/organizations/sante-publique-france/datasets-resources.csv"
url_geojson = "https://raw.githubusercontent.com/gregoiredavid/france-geojson/master/departements.geojson"
url_deconf = "https://www.data.gouv.fr/fr/datasets/r/f2d0f955-f9c4-43a8-b588-a03733a38921"
url_opencovid = "https://raw.githubusercontent.com/opencovid19-fr/data/master/dist/chiffres-cles.csv"
url_vacsi_a_fra = "https://www.data.gouv.fr/fr/datasets/r/54dd5f8d-1e2e-4ccb-8fb8-eac68245befd"
url_vacsi_a_reg = "https://www.data.gouv.fr/fr/datasets/r/c3ccc72a-a945-494b-b98d-09f48aa25337"
url_vacsi_a_dep = "https://www.data.gouv.fr/fr/datasets/r/83cbbdb9-23cb-455e-8231-69fc25d58111"
pbar.update(1)
metadata = requests.get(url_metadata)
pbar.update(2)
geojson = requests.get(url_geojson)
pbar.update(3)
with open(PATH + 'data/france/metadata.csv', 'wb') as f:
f.write(metadata.content)
pbar.update(4)
with open(PATH + 'data/france/dep.geojson', 'wb') as f:
f.write(geojson.content)
pbar.update(5)
df_metadata = pd.read_csv(PATH + 'data/france/metadata.csv', sep=";")
url_data = "https://www.data.gouv.fr/fr/datasets/r/63352e38-d353-4b54-bfd1-f1b3ee1cabd7" #df_metadata[df_metadata['url'].str.contains("/donnees-hospitalieres-covid19")]["url"].values[0] #donnees-hospitalieres-classe-age-covid19-2020-10-14-19h00.csv
url_data_new = "https://www.data.gouv.fr/fr/datasets/r/6fadff46-9efd-4c53-942a-54aca783c30c" #df_metadata[df_metadata['url'].str.contains("/donnees-hospitalieres-nouveaux")]["url"].values[0]
url_tests = df_metadata[df_metadata['url'].str.contains("/donnees-tests-covid19-labo-quotidien")]["url"].values[0]
url_metropoles = "https://www.data.gouv.fr/fr/datasets/r/61533034-0f2f-4b16-9a6d-28ffabb33a02" #df_metadata[df_metadata['url'].str.contains("/sg-metro-opendata")]["url"].max()
url_incidence = df_metadata[df_metadata['url'].str.contains("/sp-pe-tb-quot")]["url"].values[0]
url_tests_viro = df_metadata[df_metadata['url'].str.contains("/sp-pos-quot-dep")]["url"].values[0]
url_sursaud = df_metadata[df_metadata['url'].str.contains("sursaud.*quot.*dep")]["url"].values[0]
url_data_clage = df_metadata[df_metadata['url'].str.contains("/donnees-hospitalieres-classe-age-covid19")]["url"].values[0]
url_data_sexe = "https://www.data.gouv.fr/fr/datasets/r/dd0de5d9-b5a5-4503-930a-7b08dc0adc7c" #df_metadata[df_metadata['url'].str.contains("/sp-pos-quot-fra")]["url"].values[0]
pbar.update(6)
data = requests.get(url_data)
data_new = requests.get(url_data_new)
data_tests = requests.get(url_tests)
data_metropoles = requests.get(url_metropoles)
data_deconf = requests.get(url_deconf)
data_sursaud = requests.get(url_sursaud)
data_incidence = requests.get(url_incidence)
data_opencovid = requests.get(url_opencovid)
data_vacsi_a_fra = requests.get(url_vacsi_a_fra)
data_vacsi_a_reg = requests.get(url_vacsi_a_reg)
data_vacsi_a_dep = requests.get(url_vacsi_a_dep)
data_tests_viro = requests.get(url_tests_viro)
data_clage = requests.get(url_data_clage)
data_sexe = requests.get(url_data_sexe)
pbar.update(7)
with open(PATH + 'data/france/donnes-hospitalieres-covid19.csv', 'wb') as f:
f.write(data.content)
with open(PATH + 'data/france/donnes-hospitalieres-covid19-nouveaux.csv', 'wb') as f:
f.write(data_new.content)
with open(PATH + 'data/france/donnes-tests-covid19-quotidien.csv', 'wb') as f:
f.write(data_tests.content)
with open(PATH + 'data/france/donnes-incidence-metropoles.csv', 'wb') as f:
f.write(data_metropoles.content)
with open(PATH + 'data/france/indicateurs-deconf.csv', 'wb') as f:
f.write(data_deconf.content)
with open(PATH + 'data/france/sursaud-covid19-departement.csv', 'wb') as f:
f.write(data_sursaud.content)
with open(PATH + 'data/france/taux-incidence-dep-quot.csv', 'wb') as f:
f.write(data_incidence.content)
with open(PATH + 'data/france/tests_viro-dep-quot.csv', 'wb') as f:
f.write(data_tests_viro.content)
with open(PATH + 'data/france/donnes-hospitalieres-clage-covid19.csv', 'wb') as f:
f.write(data_clage.content)
with open(PATH + 'data/france/tests_viro-fra-covid19.csv', 'wb') as f:
f.write(data_sexe.content)
with open(PATH + 'data/france/donnees-opencovid.csv', 'wb') as f:
f.write(data_opencovid.content)
with open(PATH + 'data/france/donnees-vacsi-a-fra.csv', 'wb') as f:
f.write(data_vacsi_a_fra.content)
with open(PATH + 'data/france/donnees-vacsi-a-reg.csv', 'wb') as f:
f.write(data_vacsi_a_reg.content)
with open(PATH + 'data/france/donnees-vacsi-a-dep.csv', 'wb') as f:
f.write(data_vacsi_a_dep.content)
pbar.update(8)
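# Illustrative helper (added): the typical call order is to refresh the raw CSVs and then rebuild
# the merged dataframes. Wrapped in a function so importing this module stays side-effect free;
# calling it triggers the downloads above and the merge logic in import_data below.
def _refresh_then_load():
    download_data()
    return import_data()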
# Import data from previously exported files to dataframes
def import_data():
pbar = tqdm(total=8)
pbar.update(1)
df = pd.read_csv(PATH + 'data/france/donnes-hospitalieres-covid19.csv', sep=";")
df.dep = df.dep.astype(str)
df_sursaud = pd.read_csv(PATH + 'data/france/sursaud-covid19-departement.csv', sep=";")
df_sursaud["dep"] = df_sursaud["dep"].astype('str').str.replace(r"^([1-9])$", lambda m: "0"+m.group(0), regex=True)
df_new = pd.read_csv(PATH + 'data/france/donnes-hospitalieres-covid19-nouveaux.csv', sep=";")
df_tests = pd.read_csv(PATH + 'data/france/donnes-tests-covid19-quotidien.csv', sep=";")
df_deconf = pd.read_csv(PATH + 'data/france/indicateurs-deconf.csv', sep=",")
df_incid = pd.read_csv(PATH + 'data/france/taux-incidence-dep-quot.csv', sep=";")
df_incid["dep"] = df_incid["dep"].astype('str')
df_incid["dep"] = df_incid["dep"].astype('str').str.replace(r"^([1-9])$", lambda m: "0"+m.group(0), regex=True)
df_tests_viro = pd.read_csv(PATH + 'data/france/tests_viro-dep-quot.csv', sep=";")
df_tests_viro["dep"] = df_tests_viro["dep"].astype('str').str.replace(r"^([1-9])$", lambda m: "0"+m.group(0), regex=True)
pbar.update(2)
df_tests_viro["dep"] = df_tests_viro["dep"].astype('str')
pop_df_incid = df_incid["pop"]
lits_reas = pd.read_csv(PATH + 'data/france/lits_rea.csv', sep=",")
df_regions = pd.read_csv(PATH + 'data/france/departments_regions_france_2016.csv', sep=",")
df_reg_pop = pd.read_csv(PATH + 'data/france/population_grandes_regions.csv', sep=",")
df_dep_pop = pd.read_csv(PATH + 'data/france/dep-pop.csv', sep=";")
###
df = df.merge(df_regions, left_on='dep', right_on='departmentCode')
df = df.merge(df_reg_pop, left_on='regionName', right_on='regionName')
df = df.merge(df_dep_pop, left_on='dep', right_on='dep')
df = df[df["sexe"] == 0]
df['hosp_nonrea'] = df['hosp'] - df['rea']
df = df.merge(lits_reas, left_on="departmentName", right_on="nom_dpt")
#df_tests_viro = df_tests_viro[df_tests_viro["cl_age90"] == 0]
df_incid = df_incid.merge(df_regions, left_on='dep', right_on='departmentCode')
if "pop" in df_tests_viro.columns:
df_incid = df_incid.merge(df_tests_viro[df_tests_viro["cl_age90"] == 0].drop("pop", axis=1).drop("P", axis=1).drop("cl_age90", axis=1), left_on=['jour', 'dep'], right_on=['jour', 'dep'])
else:
df_incid = df_incid.merge(df_tests_viro[df_tests_viro["cl_age90"] == 0].drop("P", axis=1).drop("cl_age90", axis=1), left_on=['jour', 'dep'], right_on=['jour', 'dep'])
df_new = df_new.merge(df_regions, left_on='dep', right_on='departmentCode')
df_new = df_new.merge(df_reg_pop, left_on='regionName', right_on='regionName')
df_new = df_new.merge(df_dep_pop, left_on='dep', right_on='dep')
df_new['incid_hosp_nonrea'] = df_new['incid_hosp'] - df_new['incid_rea']
df_sursaud = df_sursaud.merge(df_regions, left_on='dep', right_on='departmentCode')
df_sursaud = df_sursaud.merge(df_reg_pop, left_on='regionName', right_on='regionName')
df_sursaud = df_sursaud.merge(df_dep_pop, left_on='dep', right_on='dep')
df_sursaud = df_sursaud[df_sursaud["sursaud_cl_age_corona"] == "0"]
df_sursaud["taux_covid"] = df_sursaud["nbre_pass_corona"] / df_sursaud["nbre_pass_tot"]
pbar.update(3)
df['rea_pop'] = df['rea']/df['regionPopulation']*100000
df['rea_deppop'] = df['rea']/df['departmentPopulation']*100000
df['rad_pop'] = df['rad']/df['regionPopulation']*100000
df['dc_pop'] = df['dc']/df['regionPopulation']*100000
df['dc_deppop'] = df['dc']/df['departmentPopulation']*100000
df['hosp_pop'] = df['hosp']/df['regionPopulation']*100000
df['hosp_deppop'] = df['hosp']/df['departmentPopulation']*100000
df['hosp_nonrea_pop'] = df['hosp_nonrea']/df['regionPopulation']*100000
pbar.update(4)
df_confirmed = pd.read_csv(PATH + 'data/data_confirmed.csv')
pbar.update(5)
deps = list(dict.fromkeys(list(df['departmentCode'].values)))
for d in deps:
for col in ["dc", "rad", "rea", "hosp_nonrea", "hosp"]:
vals = df[df["dep"] == d][col].diff()
df.loc[vals.index,col+"_new"] = vals
df.loc[vals.index,col+"_new_deppop"] = vals / df.loc[vals.index,"departmentPopulation"]*100000
df_tests = df_tests.drop(['nb_test_h', 'nb_pos_h', 'nb_test_f', 'nb_pos_f'], axis=1)
df_tests = df_tests[df_tests['clage_covid'] == "0"]
pbar.update(6)
# Correction du 14/05 (pas de données)
#cols_to_change = df.select_dtypes(include=np.number).columns.tolist()
#cols_to_change = [s for s in df.columns.tolist() if "new" in s]
    df['jour'] = df['jour'].str.replace(r'(.*)/(.*)/(.*)',r'\3-\2-\1', regex=True)
dates = sorted(list(dict.fromkeys(list(df['jour'].values))))
for dep in | pd.unique(df_incid["dep"].values) | pandas.unique |
__author__ = "<NAME>"
import logging
import os
import re
import sqlite3
import pandas
import numpy
import gzip
from pyarrow import parquet as pq
from genomic_tools_lib import Logging, Utilities
from genomic_tools_lib.data_management import TextFileTools
from genomic_tools_lib.miscellaneous import matrices, PandasHelpers
from genomic_tools_lib.file_formats import Parquet
def get_file_map(args):
r = re.compile(args.parquet_genotype_pattern)
files = os.listdir(args.parquet_genotype_folder)
files = {int(r.search(f).groups()[0]):os.path.join(args.parquet_genotype_folder, f) for f in files if r.search(f)}
p = {}
for k,v in files.items():
g = pq.ParquetFile(v)
p[k] = g
return p
n_ = re.compile(r"^(\d+)$")
def run(args):
if os.path.exists(args.output):
logging.info("Output already exists, either delete it or move it")
return
logging.info("Getting parquet genotypes")
file_map = get_file_map(args)
logging.info("Getting genes")
with sqlite3.connect(args.model_db) as connection:
# Pay heed to the order. This avoids arbitrariness in sqlite3 loading of results.
extra = | pandas.read_sql("SELECT * FROM EXTRA order by gene", connection) | pandas.read_sql |
import unittest
import numpy as np
import pandas as pd
from pyvvo import cluster
class TestCluster(unittest.TestCase):
def test_euclidean_distance_sum_squared_array(self):
# All numbers are one away, so squares will be 1. Sum of squares
#
a1 = np.array((1, 2, 3))
a2 = np.array((2, 1, 4))
self.assertEqual(3, cluster.euclidean_distance_squared(a1, a2))
def test_euclidean_distance_sum_squared_series(self):
s1 = pd.Series((1, 10, 7, 5), index=['w', 'x', 'y', 'z'])
s2 = pd.Series((3, 5, -1, 10), index=['w', 'x', 'y', 'z'])
# 4+25+64+25 = 118
self.assertEqual(118, cluster.euclidean_distance_squared(s1, s2))
def test_euclidean_distance_sum_squared_dataframe(self):
d1 = pd.DataFrame({'c1': [1, 2, 3], 'c2': [1, 4, 9]})
d2 = pd.DataFrame({'c1': [2, 3, 4], 'c2': [0, -2, 7]})
# row one: 1 + 1
# row two: 1 + 36
# row three: 1 + 4
expected = pd.Series([2, 37, 5])
actual = cluster.euclidean_distance_squared(d1, d2)
self.assertTrue(expected.equals(actual))
def test_euclidean_distance_sum_squared_df_series(self):
v1 = pd.Series([1, 2, 3], index=['x', 'y', 'z'])
v2 = pd.DataFrame({'x': [1, 2, 3], 'y': [1, 2, 3], 'z': [1, 2, 3]})
# row one: 0+1+4
# row two: 1 + 0 + 1
# row three: 4 + 1 + 0
expected = pd.Series([5, 2, 5])
actual = cluster.euclidean_distance_squared(v1, v2)
actual2 = cluster.euclidean_distance_squared(v2, v1)
for s in [actual, actual2]:
with self.subTest():
self.assertTrue(expected.equals(s))
def test_find_best_cluster_1(self):
# Simple test to exactly match one row in cluster data.
cluster_data = pd.DataFrame({'x': [0.6, 0.5, 0.4, 0.3],
'y': [0.6, 0.5, 0.4, 0.3],
'z': [1.0, 1.0, 1.0, 1.0]})
# Selection data should match the second row.
selection_data = pd.Series([0.5, 0.5], index=['x', 'y'])
# Run, use four clusters (so each row becomes a cluster).
best_data, _, _ = cluster.find_best_cluster(cluster_data,
selection_data, 4, 42)
# since we're using 4 clusters, best_data should return a single
# row.
self.assertTrue(best_data.iloc[0].equals(cluster_data.iloc[1]))
def test_find_best_cluster_2(self):
# Simple test to match a pair of rows.
cluster_data = pd.DataFrame({'x': [0.51, 0.50, 0.33, 0.30],
'y': [0.49, 0.50, 0.28, 0.30],
'z': [1.00, 1.00, 1.00, 1.00]})
# Selection data should put us closest to last two rows.
selection_data = pd.Series([0.1, 0.1], index=['x', 'y'])
# Run, use two clusters.
best_data, _, _ = cluster.find_best_cluster(cluster_data,
selection_data, 2, 42)
# Using two clusters, best_data should have two rows.
self.assertTrue(best_data.equals(cluster_data.iloc[-2:]))
def test_find_best_cluster_3(self):
# Clusters influenced by last column (z).
cluster_data = pd.DataFrame({'x': [0.10, 0.11, 0.10, 0.11],
'y': [0.10, 0.11, 0.10, 0.11],
'z': [1.00, 2.00, 8.00, 9.00]})
# Just use a z to select.
selection_data = pd.Series([3], index=['z'])
# Run, use two clusters.
best_data, _, _ = cluster.find_best_cluster(cluster_data,
selection_data, 2, 42)
# Using two clusters, best_data should have two rows.
self.assertTrue(best_data.equals(cluster_data.iloc[0:2]))
def test_feature_scale_1(self):
# Simple Series
x = pd.Series([1, 2, 3, 4])
x_ref = None
expected = pd.Series([0, 1/3, 2/3, 1])
actual = cluster.feature_scale(x, x_ref)
self.assertTrue(expected.equals(actual))
def test_feature_scale_2(self):
# Simple DataFrame
x = pd.DataFrame({'one': [1, 2, 3, 4], 'two': [-1, -2, -3, -4]})
x_ref = None
expected = pd.DataFrame({'one': [0, 1/3, 2/3, 1],
'two': [1, 2/3, 1/3, 0]})
actual = cluster.feature_scale(x, x_ref)
self.assertTrue(expected.equals(actual))
def test_feature_scale_3(self):
# Scale Series given reference series.
x = pd.Series([2, 4, 6, 8])
x_ref = pd.Series([1, 3, 5, 10])
expected = pd.Series([1/9, 3/9, 5/9, 7/9])
actual = cluster.feature_scale(x, x_ref)
self.assertTrue(expected.equals(actual))
def test_feature_scale_4(self):
# DataFrame with all 0 column.
x = pd.DataFrame({'a': [0, 0, 0, 0], 'b': [10, 0, 5, 3]})
expected = pd.DataFrame({'a': [0.0, 0.0, 0.0, 0.0],
'b': [1.0, 0.0, 0.5, 0.3]})
actual = cluster.feature_scale(x, None)
self.assertTrue(expected.equals(actual))
def test_feature_scale_5(self):
# All zero reference.
x = | pd.Series([1, 2, 3, 4]) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 13 15:21:55 2019
@author: raryapratama
"""
#%%
#Step (1): Import Python libraries, set land conversion scenarios general parameters
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import quad
import seaborn as sns
import pandas as pd
#PF_PO Scenario
##Set parameters
#Parameters for primary forest
initAGB = 233 #source: van Beijma et al. (2018)
initAGB_min = 233-72
initAGB_max = 233 + 72
#parameters for oil palm plantation. Source: Khasanah et al. (2015)
tf_palmoil = 26
a_nucleus = 2.8167
b_nucleus = 6.8648
a_plasma = 2.5449
b_plasma = 5.0007
c_cont_po_nucleus = 0.5448 #carbon content in biomass
c_cont_po_plasma = 0.5454
tf = 201
a = 0.082
b = 2.53
#%%
#Step (2_1): C loss from the harvesting/clear cut
df1nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1nu')
df1pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1pl')
df3nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Enu')
df3pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Epl')
t = range(0,tf,1)
c_firewood_energy_S1nu = df1nu['Firewood_other_energy_use'].values
c_firewood_energy_S1pl = df1pl['Firewood_other_energy_use'].values
c_firewood_energy_Enu = df3nu['Firewood_other_energy_use'].values
c_firewood_energy_Epl = df3pl['Firewood_other_energy_use'].values
#%%
#Step (2_2): C loss from the harvesting/clear cut as wood pellets
dfEnu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Enu')
dfEpl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Epl')
c_pellets_Enu = dfEnu['Wood_pellets'].values
c_pellets_Epl = dfEpl['Wood_pellets'].values
#%%
#Step (3): Aboveground biomass (AGB) decomposition
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1nu')
tf = 201
t = np.arange(tf)
def decomp(t,remainAGB):
return (1-(1-np.exp(-a*t))**b)*remainAGB
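# Added sanity check (illustrative): at t = 0 the whole carbon pool remains, and with a = 0.082 and
# b = 2.53 only a few percent of the initial stock is left after 50 years.
assert decomp(0, 100.0) == 100.0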
#set zero matrix
output_decomp = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part in enumerate(df['C_remainAGB'].values):
#print(i,remain_part)
output_decomp[i:,i] = decomp(t[:len(t)-i],remain_part)
print(output_decomp[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix = np.zeros((len(t)-1,len(df['C_remainAGB'].values-1)))
i = 0
while i < tf:
subs_matrix[:,i] = np.diff(output_decomp[:,i])
i = i + 1
print(subs_matrix[:,:4])
print(len(subs_matrix))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix = subs_matrix.clip(max=0)
print(subs_matrix[:,:4])
#make the results as absolute values
subs_matrix = abs(subs_matrix)
print(subs_matrix[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix)
subs_matrix = np.vstack((zero_matrix, subs_matrix))
print(subs_matrix[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot = (tf,1)
decomp_emissions = np.zeros(matrix_tot)
i = 0
while i < tf:
decomp_emissions[:,0] = decomp_emissions[:,0] + subs_matrix[:,i]
i = i + 1
print(decomp_emissions[:,0])
#%%
#Step (4): Dynamic stock model of in-use wood materials
from dynamic_stock_model import DynamicStockModel
df1nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1nu')
df1pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1pl')
df3nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Enu')
df3pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Epl')
#product lifetime
#building materials
B = 35
TestDSM1nu = DynamicStockModel(t = df1nu['Year'].values, i = df1nu['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
TestDSM1pl = DynamicStockModel(t = df1pl['Year'].values, i = df1pl['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
TestDSM3nu = DynamicStockModel(t = df3nu['Year'].values, i = df3nu['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
TestDSM3pl = DynamicStockModel(t = df3pl['Year'].values, i = df3pl['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
CheckStr1nu, ExitFlag1nu = TestDSM1nu.dimension_check()
CheckStr1pl, ExitFlag1nu = TestDSM1pl.dimension_check()
CheckStr3nu, ExitFlag3nu = TestDSM3nu.dimension_check()
CheckStr3pl, ExitFlag3pl = TestDSM3pl.dimension_check()
Stock_by_cohort1nu, ExitFlag1nu = TestDSM1nu.compute_s_c_inflow_driven()
Stock_by_cohort1pl, ExitFlag1pl = TestDSM1pl.compute_s_c_inflow_driven()
Stock_by_cohort3nu, ExitFlag3nu = TestDSM3nu.compute_s_c_inflow_driven()
Stock_by_cohort3pl, ExitFlag3pl = TestDSM3pl.compute_s_c_inflow_driven()
S1nu, ExitFlag1nu = TestDSM1nu.compute_stock_total()
S1pl, ExitFlag1pl = TestDSM1pl.compute_stock_total()
S3nu, ExitFlag3nu = TestDSM3nu.compute_stock_total()
S3pl, ExitFlag3pl = TestDSM3pl.compute_stock_total()
O_C1nu, ExitFlag1nu = TestDSM1nu.compute_o_c_from_s_c()
O_C1pl, ExitFlag1pl = TestDSM1pl.compute_o_c_from_s_c()
O_C3nu, ExitFlag3nu = TestDSM3nu.compute_o_c_from_s_c()
O_C3pl, ExitFlag3pl = TestDSM3pl.compute_o_c_from_s_c()
O1nu, ExitFlag1nu = TestDSM1nu.compute_outflow_total()
O1pl, ExitFlag1pl = TestDSM1pl.compute_outflow_total()
O3nu, ExitFlag3nu = TestDSM3nu.compute_outflow_total()
O3pl, ExitFlag3pl = TestDSM3pl.compute_outflow_total()
DS1nu, ExitFlag1nu = TestDSM1nu.compute_stock_change()
DS1pl, ExitFlag1pl = TestDSM1pl.compute_stock_change()
DS3nu, ExitFlag3nu = TestDSM3nu.compute_stock_change()
DS3pl, ExitFlag3pl = TestDSM3pl.compute_stock_change()
Bal1nu, ExitFlag1nu = TestDSM1nu.check_stock_balance()
Bal1pl, ExitFlag1pl = TestDSM1pl.check_stock_balance()
Bal3nu, ExitFlag3nu = TestDSM3nu.check_stock_balance()
Bal3pl, ExitFlag3pl = TestDSM3pl.check_stock_balance()
#print output flow
print(TestDSM1nu.o)
print(TestDSM1pl.o)
print(TestDSM3nu.o)
print(TestDSM3pl.o)
#plt.plot(TestDSM1.s)
#plt.xlim([0, 100])
#plt.ylim([0,50])
#plt.show()
#%%
#Step (5): Biomass growth
A = range(0,tf_palmoil,1)
#calculate the biomass and carbon content of palm oil trees over time
def Y_nucleus(A):
return (44/12*1000*c_cont_po_nucleus*(a_nucleus*A + b_nucleus))
output_Y_nucleus = np.array([Y_nucleus(Ai) for Ai in A])
print(output_Y_nucleus)
def Y_plasma(A):
return (44/12*1000*c_cont_po_plasma*(a_plasma*A + b_plasma))
output_Y_plasma = np.array([Y_plasma(Ai) for Ai in A])
print(output_Y_plasma)
##8 times 25-year cycle of new AGB of oil palm, one year gap between the cycle
#nucleus
counter = range(0,8,1)
y_nucleus = []
for i in counter:
y_nucleus.append(output_Y_nucleus)
flat_list_nucleus = []
for sublist in y_nucleus:
for item in sublist:
flat_list_nucleus.append(item)
#the length of the list is now 208, so we remove the last 7 elements of the list to make the len=tf
flat_list_nucleus = flat_list_nucleus[:len(flat_list_nucleus)-7]
#plasma
y_plasma = []
for i in counter:
y_plasma.append(output_Y_plasma)
flat_list_plasma = []
for sublist in y_plasma:
for item in sublist:
flat_list_plasma.append(item)
#the length of the list is now 208, so we remove the last 7 elements of the list to make the len=tf
flat_list_plasma = flat_list_plasma[:len(flat_list_plasma)-7]
#plotting
t = range (0,tf,1)
plt.xlim([0, 200])
plt.plot(t, flat_list_nucleus)
plt.plot(t, flat_list_plasma, color='seagreen')
plt.fill_between(t, flat_list_nucleus, flat_list_plasma, color='darkseagreen', alpha=0.4)
plt.xlabel('Time (year)')
plt.ylabel('AGB (tCO2-eq/ha)')
plt.show()
###Yearly Sequestration
###Nucleus
#find the yearly sequestration by calculating the differences between elements in list 'flat_list_nucleus(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
flat_list_nucleus = [p - q for q, p in zip(flat_list_nucleus, flat_list_nucleus[1:])]
#since there is no sequestration between the replanting year (e.g., year 25 to 26), we have to replace negative numbers in 'flat_list_nuclues' with 0 values
flat_list_nucleus = [0 if i < 0 else i for i in flat_list_nucleus]
#insert 0 value to the list as the first element, because there is no sequestration in year 0
var = 0
flat_list_nucleus.insert(0,var)
#make 'flat_list_nucleus' elements negative numbers to denote sequestration
flat_list_nucleus = [ -x for x in flat_list_nucleus]
print(flat_list_nucleus)
#Plasma
#find the yearly sequestration by calculating the differences between elements in list 'flat_list_plasma(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
flat_list_plasma = [t - u for u, t in zip(flat_list_plasma, flat_list_plasma[1:])]
#since there is no sequestration between the replanting year (e.g., year 25 to 26), we have to replace negative numbers in 'flat_list_plasma' with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
flat_list_plasma = [0 if i < 0 else i for i in flat_list_plasma]
#insert 0 value to the list as the first element, because there is no sequestration in year 0
var = 0
flat_list_plasma.insert(0,var)
#make 'flat_list_plasma' elements negative numbers to denote sequestration
flat_list_plasma = [ -x for x in flat_list_plasma]
print(flat_list_plasma)
#%%
#Step(6): post-harvest processing of wood/palm oil
#post-harvest wood processing
df1nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1nu')
df1pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1pl')
dfEnu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Enu')
dfEpl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Epl')
t = range(0,tf,1)
PH_Emissions_HWP_S1nu = df1nu['PH_Emissions_HWP'].values
PH_Emissions_HWP_S1pl = df1pl['PH_Emissions_HWP'].values
PH_Emissions_HWP_Enu = dfEnu['PH_Emissions_HWP'].values
PH_Emissions_HWP_Epl = dfEpl['PH_Emissions_HWP'].values
#post-harvest palm oil processing
df1nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1nu')
df1pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1pl')
dfEnu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Enu')
dfEpl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Epl')
t = range(0,tf,1)
PH_Emissions_PO_S1nu = df1nu['PH_Emissions_PO'].values
PH_Emissions_PO_S1pl = df1pl['PH_Emissions_PO'].values
PH_Emissions_PO_Enu = dfEnu['PH_Emissions_PO'].values
PH_Emissions_PO_Epl = dfEpl['PH_Emissions_PO'].values
#%%
#Step (7_1): landfill gas decomposition (CH4)
#CH4 decomposition
hl = 20 #half-live
k = (np.log(2))/hl
#S1nu
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1nu')
tf = 201
t = np.arange(tf)
def decomp_CH4_S1nu(t,remainAGB_CH4_S1nu):
return (1-(1-np.exp(-k*t)))*remainAGB_CH4_S1nu
#set zero matrix
output_decomp_CH4_S1nu = np.zeros((len(t),len(df['Landfill_decomp_CH4'].values)))
for i,remain_part_CH4_S1nu in enumerate(df['Landfill_decomp_CH4'].values):
#print(i,remain_part)
output_decomp_CH4_S1nu[i:,i] = decomp_CH4_S1nu(t[:len(t)-i],remain_part_CH4_S1nu)
print(output_decomp_CH4_S1nu[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CH4_S1nu = np.zeros((len(t)-1,len(df['Landfill_decomp_CH4'].values-1)))
i = 0
while i < tf:
subs_matrix_CH4_S1nu[:,i] = np.diff(output_decomp_CH4_S1nu[:,i])
i = i + 1
print(subs_matrix_CH4_S1nu[:,:4])
print(len(subs_matrix_CH4_S1nu))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CH4_S1nu = subs_matrix_CH4_S1nu.clip(max=0)
print(subs_matrix_CH4_S1nu[:,:4])
#make the results as absolute values
subs_matrix_CH4_S1nu = abs(subs_matrix_CH4_S1nu)
print(subs_matrix_CH4_S1nu[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CH4_S1nu = np.zeros((len(t)-200,len(df['Landfill_decomp_CH4'].values)))
print(zero_matrix_CH4_S1nu)
subs_matrix_CH4_S1nu = np.vstack((zero_matrix_CH4_S1nu, subs_matrix_CH4_S1nu))
print(subs_matrix_CH4_S1nu[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CH4_S1nu = (tf,1)
decomp_tot_CH4_S1nu = np.zeros(matrix_tot_CH4_S1nu)
i = 0
while i < tf:
decomp_tot_CH4_S1nu[:,0] = decomp_tot_CH4_S1nu[:,0] + subs_matrix_CH4_S1nu[:,i]
i = i + 1
print(decomp_tot_CH4_S1nu[:,0])
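#The block above is repeated below for every scenario and gas; a minimal generic sketch of the
#same first-order decay bookkeeping (illustrative only). 'yearly_input' is a hypothetical 1-D
#sequence of yearly landfill inputs; note that (1-(1-np.exp(-k*t))) simplifies to np.exp(-k*t).
def landfill_decay_emissions(yearly_input, k, tf):
    t = np.arange(tf)
    remaining = np.zeros((tf, len(yearly_input)))
    for i, amount in enumerate(yearly_input):
        remaining[i:, i] = amount*np.exp(-k*t[:tf-i]) #mass left from the cohort landfilled in year i
    yearly_release = -np.diff(remaining, axis=0) #the stock only decreases, so -diff is the yearly release
    yearly_release = np.vstack((np.zeros((1, len(yearly_input))), yearly_release)) #no release in year 0
    return yearly_release.sum(axis=1) #total release per year, summed over all cohorts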
#S1pl
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1pl')
tf = 201
t = np.arange(tf)
def decomp_CH4_S1pl(t,remainAGB_CH4_S1pl):
return (1-(1-np.exp(-k*t)))*remainAGB_CH4_S1pl
#set zero matrix
output_decomp_CH4_S1pl = np.zeros((len(t),len(df['Landfill_decomp_CH4'].values)))
for i,remain_part_CH4_S1pl in enumerate(df['Landfill_decomp_CH4'].values):
#print(i,remain_part)
output_decomp_CH4_S1pl[i:,i] = decomp_CH4_S1pl(t[:len(t)-i],remain_part_CH4_S1pl)
print(output_decomp_CH4_S1pl[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CH4_S1pl = np.zeros((len(t)-1,len(df['Landfill_decomp_CH4'].values)))
i = 0
while i < tf:
subs_matrix_CH4_S1pl[:,i] = np.diff(output_decomp_CH4_S1pl[:,i])
i = i + 1
print(subs_matrix_CH4_S1pl[:,:4])
print(len(subs_matrix_CH4_S1pl))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CH4_S1pl = subs_matrix_CH4_S1pl.clip(max=0)
print(subs_matrix_CH4_S1pl[:,:4])
#make the results as absolute values
subs_matrix_CH4_S1pl= abs(subs_matrix_CH4_S1pl)
print(subs_matrix_CH4_S1pl[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CH4_S1pl = np.zeros((len(t)-200,len(df['Landfill_decomp_CH4'].values)))
print(zero_matrix_CH4_S1pl)
subs_matrix_CH4_S1pl = np.vstack((zero_matrix_CH4_S1pl, subs_matrix_CH4_S1pl))
print(subs_matrix_CH4_S1pl[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CH4_S1pl = (tf,1)
decomp_tot_CH4_S1pl = np.zeros(matrix_tot_CH4_S1pl)
i = 0
while i < tf:
decomp_tot_CH4_S1pl[:,0] = decomp_tot_CH4_S1pl[:,0] + subs_matrix_CH4_S1pl[:,i]
i = i + 1
print(decomp_tot_CH4_S1pl[:,0])
#Enu
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Enu')
tf = 201
t = np.arange(tf)
def decomp_CH4_Enu(t,remainAGB_CH4_Enu):
return (1-(1-np.exp(-k*t)))*remainAGB_CH4_Enu
#set zero matrix
output_decomp_CH4_Enu = np.zeros((len(t),len(df['Landfill_decomp_CH4'].values)))
for i,remain_part_CH4_Enu in enumerate(df['Landfill_decomp_CH4'].values):
#print(i,remain_part)
output_decomp_CH4_Enu[i:,i] = decomp_CH4_Enu(t[:len(t)-i],remain_part_CH4_Enu)
print(output_decomp_CH4_Enu[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CH4_Enu = np.zeros((len(t)-1,len(df['Landfill_decomp_CH4'].values)))
i = 0
while i < tf:
subs_matrix_CH4_Enu[:,i] = np.diff(output_decomp_CH4_Enu[:,i])
i = i + 1
print(subs_matrix_CH4_Enu[:,:4])
print(len(subs_matrix_CH4_Enu))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CH4_Enu = subs_matrix_CH4_Enu.clip(max=0)
print(subs_matrix_CH4_Enu[:,:4])
#make the results as absolute values
subs_matrix_CH4_Enu = abs(subs_matrix_CH4_Enu)
print(subs_matrix_CH4_Enu[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CH4_Enu = np.zeros((len(t)-200,len(df['Landfill_decomp_CH4'].values)))
print(zero_matrix_CH4_Enu)
subs_matrix_CH4_Enu = np.vstack((zero_matrix_CH4_Enu, subs_matrix_CH4_Enu))
print(subs_matrix_CH4_Enu[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CH4_Enu = (tf,1)
decomp_tot_CH4_Enu= np.zeros(matrix_tot_CH4_Enu)
i = 0
while i < tf:
decomp_tot_CH4_Enu[:,0] = decomp_tot_CH4_Enu[:,0] + subs_matrix_CH4_Enu[:,i]
i = i + 1
print(decomp_tot_CH4_Enu[:,0])
#Epl
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Epl')
tf = 201
t = np.arange(tf)
def decomp_CH4_Epl(t,remainAGB_CH4_Epl):
return (1-(1-np.exp(-k*t)))*remainAGB_CH4_Epl
#set zero matrix
output_decomp_CH4_Epl = np.zeros((len(t),len(df['Landfill_decomp_CH4'].values)))
for i,remain_part_CH4_Epl in enumerate(df['Landfill_decomp_CH4'].values):
#print(i,remain_part)
output_decomp_CH4_Epl[i:,i] = decomp_CH4_Epl(t[:len(t)-i],remain_part_CH4_Epl)
print(output_decomp_CH4_Epl[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CH4_Epl = np.zeros((len(t)-1,len(df['Landfill_decomp_CH4'].values)))
i = 0
while i < tf:
subs_matrix_CH4_Epl[:,i] = np.diff(output_decomp_CH4_Epl[:,i])
i = i + 1
print(subs_matrix_CH4_Epl[:,:4])
print(len(subs_matrix_CH4_Epl))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CH4_Epl = subs_matrix_CH4_Epl.clip(max=0)
print(subs_matrix_CH4_Epl[:,:4])
#make the results as absolute values
subs_matrix_CH4_Epl = abs(subs_matrix_CH4_Epl)
print(subs_matrix_CH4_Epl[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CH4_Epl = np.zeros((len(t)-200,len(df['Landfill_decomp_CH4'].values)))
print(zero_matrix_CH4_Epl)
subs_matrix_CH4_Epl = np.vstack((zero_matrix_CH4_Epl, subs_matrix_CH4_Epl))
print(subs_matrix_CH4_Epl[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CH4_Epl = (tf,1)
decomp_tot_CH4_Epl = np.zeros(matrix_tot_CH4_Epl)
i = 0
while i < tf:
decomp_tot_CH4_Epl[:,0] = decomp_tot_CH4_Epl[:,0] + subs_matrix_CH4_Epl[:,i]
i = i + 1
print(decomp_tot_CH4_Epl[:,0])
#plotting
t = np.arange(0,tf)
plt.plot(t,decomp_tot_CH4_S1nu,label='CH4_S1nu')
plt.plot(t,decomp_tot_CH4_S1pl,label='CH4_S1pl')
plt.plot(t,decomp_tot_CH4_Enu,label='CH4_Enu')
plt.plot(t,decomp_tot_CH4_Epl,label='CH4_Epl')
plt.xlim(0,200)
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
plt.show()
#%%
#Step (7_2): landfill gas decomposition (CO2)
#CO2 decomposition
hl = 20 #half-life (years)
k = (np.log(2))/hl
#S1nu
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1nu')
tf = 201
t = np.arange(tf)
def decomp_CO2_S1nu(t,remainAGB_CO2_S1nu):
return (1-(1-np.exp(-k*t)))*remainAGB_CO2_S1nu
#set zero matrix
output_decomp_CO2_S1nu = np.zeros((len(t),len(df['Landfill_decomp_CO2'].values)))
for i,remain_part_CO2_S1nu in enumerate(df['Landfill_decomp_CO2'].values):
#print(i,remain_part)
output_decomp_CO2_S1nu[i:,i] = decomp_CO2_S1nu(t[:len(t)-i],remain_part_CO2_S1nu)
print(output_decomp_CO2_S1nu[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CO2_S1nu = np.zeros((len(t)-1,len(df['Landfill_decomp_CO2'].values)))
i = 0
while i < tf:
subs_matrix_CO2_S1nu[:,i] = np.diff(output_decomp_CO2_S1nu[:,i])
i = i + 1
print(subs_matrix_CO2_S1nu[:,:4])
print(len(subs_matrix_CO2_S1nu))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CO2_S1nu = subs_matrix_CO2_S1nu.clip(max=0)
print(subs_matrix_CO2_S1nu[:,:4])
#make the results as absolute values
subs_matrix_CO2_S1nu = abs(subs_matrix_CO2_S1nu)
print(subs_matrix_CO2_S1nu[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CO2_S1nu = np.zeros((len(t)-200,len(df['Landfill_decomp_CO2'].values)))
print(zero_matrix_CO2_S1nu)
subs_matrix_CO2_S1nu = np.vstack((zero_matrix_CO2_S1nu, subs_matrix_CO2_S1nu))
print(subs_matrix_CO2_S1nu[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CO2_S1nu = (tf,1)
decomp_tot_CO2_S1nu = np.zeros(matrix_tot_CO2_S1nu)
i = 0
while i < tf:
decomp_tot_CO2_S1nu[:,0] = decomp_tot_CO2_S1nu[:,0] + subs_matrix_CO2_S1nu[:,i]
i = i + 1
print(decomp_tot_CO2_S1nu[:,0])
#S1pl
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1pl')
tf = 201
t = np.arange(tf)
def decomp_CO2_S1pl(t,remainAGB_CO2_S1pl):
return (1-(1-np.exp(-k*t)))*remainAGB_CO2_S1pl
#set zero matrix
output_decomp_CO2_S1pl = np.zeros((len(t),len(df['Landfill_decomp_CO2'].values)))
for i,remain_part_CO2_S1pl in enumerate(df['Landfill_decomp_CO2'].values):
#print(i,remain_part)
output_decomp_CO2_S1pl[i:,i] = decomp_CO2_S1pl(t[:len(t)-i],remain_part_CO2_S1pl)
print(output_decomp_CO2_S1pl[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CO2_S1pl = np.zeros((len(t)-1,len(df['Landfill_decomp_CO2'].values)))
i = 0
while i < tf:
subs_matrix_CO2_S1pl[:,i] = np.diff(output_decomp_CO2_S1pl[:,i])
i = i + 1
print(subs_matrix_CO2_S1pl[:,:4])
print(len(subs_matrix_CO2_S1pl))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CO2_S1pl = subs_matrix_CO2_S1pl.clip(max=0)
print(subs_matrix_CO2_S1pl[:,:4])
#make the results as absolute values
subs_matrix_CO2_S1pl= abs(subs_matrix_CO2_S1pl)
print(subs_matrix_CO2_S1pl[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CO2_S1pl = np.zeros((len(t)-200,len(df['Landfill_decomp_CO2'].values)))
print(zero_matrix_CO2_S1pl)
subs_matrix_CO2_S1pl = np.vstack((zero_matrix_CO2_S1pl, subs_matrix_CO2_S1pl))
print(subs_matrix_CO2_S1pl[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CO2_S1pl = (tf,1)
decomp_tot_CO2_S1pl = np.zeros(matrix_tot_CO2_S1pl)
i = 0
while i < tf:
decomp_tot_CO2_S1pl[:,0] = decomp_tot_CO2_S1pl[:,0] + subs_matrix_CO2_S1pl[:,i]
i = i + 1
print(decomp_tot_CO2_S1pl[:,0])
#Enu
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Enu')
tf = 201
t = np.arange(tf)
def decomp_CO2_Enu(t,remainAGB_CO2_Enu):
return (1-(1-np.exp(-k*t)))*remainAGB_CO2_Enu
#set zero matrix
output_decomp_CO2_Enu = np.zeros((len(t),len(df['Landfill_decomp_CO2'].values)))
for i,remain_part_CO2_Enu in enumerate(df['Landfill_decomp_CO2'].values):
#print(i,remain_part)
output_decomp_CO2_Enu[i:,i] = decomp_CO2_Enu(t[:len(t)-i],remain_part_CO2_Enu)
print(output_decomp_CO2_Enu[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CO2_Enu = np.zeros((len(t)-1,len(df['Landfill_decomp_CO2'].values)))
i = 0
while i < tf:
subs_matrix_CO2_Enu[:,i] = np.diff(output_decomp_CO2_Enu[:,i])
i = i + 1
print(subs_matrix_CO2_Enu[:,:4])
print(len(subs_matrix_CO2_Enu))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CO2_Enu = subs_matrix_CO2_Enu.clip(max=0)
print(subs_matrix_CO2_Enu[:,:4])
#make the results as absolute values
subs_matrix_CO2_Enu = abs(subs_matrix_CO2_Enu)
print(subs_matrix_CO2_Enu[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CO2_Enu = np.zeros((len(t)-200,len(df['Landfill_decomp_CO2'].values)))
print(zero_matrix_CO2_Enu)
subs_matrix_CO2_Enu = np.vstack((zero_matrix_CO2_Enu, subs_matrix_CO2_Enu))
print(subs_matrix_CO2_Enu[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CO2_Enu = (tf,1)
decomp_tot_CO2_Enu= np.zeros(matrix_tot_CO2_Enu)
i = 0
while i < tf:
decomp_tot_CO2_Enu[:,0] = decomp_tot_CO2_Enu[:,0] + subs_matrix_CO2_Enu[:,i]
i = i + 1
print(decomp_tot_CO2_Enu[:,0])
#Epl
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Epl')
tf = 201
t = np.arange(tf)
def decomp_CO2_Epl(t,remainAGB_CO2_Epl):
return (1-(1-np.exp(-k*t)))*remainAGB_CO2_Epl
#set zero matrix
output_decomp_CO2_Epl = np.zeros((len(t),len(df['Landfill_decomp_CO2'].values)))
for i,remain_part_CO2_Epl in enumerate(df['Landfill_decomp_CO2'].values):
#print(i,remain_part)
output_decomp_CO2_Epl[i:,i] = decomp_CO2_Epl(t[:len(t)-i],remain_part_CO2_Epl)
print(output_decomp_CO2_Epl[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CO2_Epl = np.zeros((len(t)-1,len(df['Landfill_decomp_CO2'].values)))
i = 0
while i < tf:
subs_matrix_CO2_Epl[:,i] = np.diff(output_decomp_CO2_Epl[:,i])
i = i + 1
print(subs_matrix_CO2_Epl[:,:4])
print(len(subs_matrix_CO2_Epl))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CO2_Epl = subs_matrix_CO2_Epl.clip(max=0)
print(subs_matrix_CO2_Epl[:,:4])
#make the results as absolute values
subs_matrix_CO2_Epl = abs(subs_matrix_CO2_Epl)
print(subs_matrix_CO2_Epl[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CO2_Epl = np.zeros((len(t)-200,len(df['Landfill_decomp_CO2'].values)))
print(zero_matrix_CO2_Epl)
subs_matrix_CO2_Epl = np.vstack((zero_matrix_CO2_Epl, subs_matrix_CO2_Epl))
print(subs_matrix_CO2_Epl[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CO2_Epl = (tf,1)
decomp_tot_CO2_Epl = np.zeros(matrix_tot_CO2_Epl)
i = 0
while i < tf:
decomp_tot_CO2_Epl[:,0] = decomp_tot_CO2_Epl[:,0] + subs_matrix_CO2_Epl[:,i]
i = i + 1
print(decomp_tot_CO2_Epl[:,0])
#plotting
t = np.arange(0,tf)
plt.plot(t,decomp_tot_CO2_S1nu,label='CO2_S1nu')
plt.plot(t,decomp_tot_CO2_S1pl,label='CO2_S1pl')
plt.plot(t,decomp_tot_CO2_Enu,label='CO2_Enu')
plt.plot(t,decomp_tot_CO2_Epl,label='CO2_Epl')
plt.xlim(0,200)
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
plt.show()
#%%
#Step (8): Sum the emissions and sequestration (net carbon balance), CO2 and CH4 are separated
#https://stackoverflow.com/questions/52703442/python-sum-values-from-multiple-lists-more-than-two
#C_loss + C_remainAGB + C_remainHWP + PH_Emissions_PO
Emissions_PF_PO_S1nu = [c_firewood_energy_S1nu, decomp_emissions[:,0], TestDSM1nu.o, PH_Emissions_PO_S1nu, PH_Emissions_HWP_S1nu, decomp_tot_CO2_S1nu[:,0]]
Emissions_PF_PO_S1pl = [c_firewood_energy_S1pl, decomp_emissions[:,0], TestDSM1pl.o, PH_Emissions_PO_S1pl, PH_Emissions_HWP_S1pl, decomp_tot_CO2_S1pl[:,0]]
Emissions_PF_PO_Enu = [c_firewood_energy_Enu, c_pellets_Enu, decomp_emissions[:,0], TestDSM3nu.o, PH_Emissions_PO_Enu, PH_Emissions_HWP_Enu, decomp_tot_CO2_Enu[:,0]]
Emissions_PF_PO_Epl = [c_firewood_energy_Epl, c_pellets_Epl, decomp_emissions[:,0], TestDSM3pl.o, PH_Emissions_PO_Epl, PH_Emissions_HWP_Epl, decomp_tot_CO2_Epl[:,0]]
Emissions_PF_PO_S1nu = [sum(x) for x in zip(*Emissions_PF_PO_S1nu)]
Emissions_PF_PO_S1pl = [sum(x) for x in zip(*Emissions_PF_PO_S1pl)]
Emissions_PF_PO_Enu = [sum(x) for x in zip(*Emissions_PF_PO_Enu)]
Emissions_PF_PO_Epl = [sum(x) for x in zip(*Emissions_PF_PO_Epl)]
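#Sanity check (illustrative): the zip-based summation above is the column-wise sum of the
#stacked component arrays, assuming every component spans the same 201-year horizon.
check_S1nu = np.vstack([c_firewood_energy_S1nu, decomp_emissions[:,0], TestDSM1nu.o,
                        PH_Emissions_PO_S1nu, PH_Emissions_HWP_S1nu,
                        decomp_tot_CO2_S1nu[:,0]]).sum(axis=0)
print(np.allclose(check_S1nu, Emissions_PF_PO_S1nu))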
#CH4_S1nu
Emissions_CH4_PF_PO_S1nu = decomp_tot_CH4_S1nu[:,0]
#CH4_S1pl
Emissions_CH4_PF_PO_S1pl = decomp_tot_CH4_S1pl[:,0]
#CH4_Enu
Emissions_CH4_PF_PO_Enu = decomp_tot_CH4_Enu[:,0]
#CH4_Epl
Emissions_CH4_PF_PO_Epl = decomp_tot_CH4_Epl[:,0]
#%%
#Step (9): Generate the excel file (emissions_seq_scenarios.xlsx) from Step (8) calculation
#print year column
year = []
for x in range (0, tf):
year.append(x)
print (year)
#print CH4 emission column
import itertools
lst = [0]
Emissions_CH4 = list(itertools.chain.from_iterable(itertools.repeat(x, tf) for x in lst))
print(Emissions_CH4)
#print emission ref
lst1 = [0]
Emission_ref = list(itertools.chain.from_iterable(itertools.repeat(x, tf) for x in lst1))
print(Emission_ref)
#replace the first element with 1 to denote the emission reference as year 0 (for dynGWP calculation)
Emission_ref[0] = 1
print(Emission_ref)
Col1 = year
Col2_S1nu = Emissions_PF_PO_S1nu
Col2_S1pl = Emissions_PF_PO_S1pl
Col2_Enu = Emissions_PF_PO_Enu
Col2_Epl = Emissions_PF_PO_Epl
Col3_S1nu = Emissions_CH4_PF_PO_S1nu
Col3_S1pl = Emissions_CH4_PF_PO_S1pl
Col3_Enu = Emissions_CH4_PF_PO_Enu
Col3_Epl = Emissions_CH4_PF_PO_Epl
Col4 = flat_list_nucleus
Col5 = Emission_ref
Col6 = flat_list_plasma
#S1
df1_nu = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_S1nu,'kg_CH4':Col3_S1nu,'kg_CO2_seq':Col4,'emission_ref':Col5})
df1_pl = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_S1pl,'kg_CH4':Col3_S1pl,'kg_CO2_seq':Col6,'emission_ref':Col5})
#E
df3_nu = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_Enu,'kg_CH4':Col3_Enu,'kg_CO2_seq':Col4,'emission_ref':Col5})
df3_pl = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_Epl,'kg_CH4':Col3_Epl,'kg_CO2_seq':Col6,'emission_ref':Col5})
writer = pd.ExcelWriter('emissions_seq_PF_PO_EC.xlsx', engine = 'xlsxwriter')
df1_nu.to_excel(writer, sheet_name = 'S1_nucleus', header=True, index=False )
df1_pl.to_excel(writer, sheet_name = 'S1_plasma', header=True, index=False)
df3_nu.to_excel(writer, sheet_name = 'E_nucleus', header=True, index=False)
df3_pl.to_excel(writer, sheet_name = 'E_plasma', header=True, index=False)
writer.save()
writer.close()
#%%
## DYNAMIC LCA - wood-based scenarios
# Step (10): Set General Parameters for Dynamic LCA calculation
# General Parameters
aCH4 = 0.129957e-12; # methane - instantaneous radiative forcing per unit mass [W/m2 /kgCH4]
TauCH4 = 12; # methane - lifetime (years)
aCO2 = 0.0018088e-12; # CO2 - instantaneous radiative forcing per unit mass [W/m2 /kgCO2]
TauCO2 = [172.9, 18.51, 1.186]; # CO2 parameters according to Bern carbon cycle-climate model
aBern = [0.259, 0.338, 0.186]; # CO2 parameters according to Bern carbon cycle-climate model
a0Bern = 0.217; # CO2 parameters according to Bern carbon cycle-climate model
tf = 202 #run to 202 so that DCF(t-i) is available up to DCF(201), i.e. the impact of an emission in year 200; DCF(0) is not used
#%%
#Step (11): Bern 2.5 CC Model, determine atmospheric load (C(t)) for GHG (CO2 and CH4)
t = range(0,tf,1)
## CO2 calculation formula
# time dependant atmospheric load for CO2, Bern model
def C_CO2(t):
return a0Bern + aBern[0]*np.exp(-t/TauCO2[0]) + aBern[1]*np.exp(-t/TauCO2[1]) + aBern[2]*np.exp(-t/TauCO2[2])
output_CO2 = np.array([C_CO2(ti) for ti in t])
print(output_CO2)
## CH4 calculation formula
# time dependant atmospheric load for non-CO2 GHGs (Methane)
def C_CH4(t):
return np.exp(-t/TauCH4)
output_CH4 = np.array([C_CH4(ti) for ti in t])
plt.xlim([0, 200])
plt.ylim([0,1.1])
plt.plot(t, output_CO2, output_CH4)
plt.xlabel('Time (year)')
plt.ylabel('Atmospheric fraction remaining')
plt.show()
output_CH4.size
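#Quick consistency check (illustrative): the Bern coefficients sum to one
#(a0Bern + sum(aBern) = 1), so the atmospheric fraction at t = 0 is 1 for both gases.
print(C_CO2(0)) #expected: 1.0
print(C_CH4(0)) #expected: 1.0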
#%%
#determine the C(t) for CO2
s = []
t = np.arange(0,tf,1)
for i in t:
s.append(quad(C_CO2,i-1,i))
res_list_CO2 = [x[0] for x in s]
len(res_list_CO2)
#%%
#determine the C(t) for CH4
s = []
for i in t:
s.append(quad(C_CH4,i-1,i))
res_list_CH4 = [p[0] for p in s]
#plot
plt.xlim([0, 200])
plt.ylim([0,1.5])
plt.plot(t, res_list_CO2, res_list_CH4)
plt.show()
#%%
#Step (12): Determine dynamic characterization factors (DCF) for CO2 and CH4
DCF_inst_CO2 = aCO2 * np.array(res_list_CO2)
print(DCF_inst_CO2)
DCF_inst_CH4 = aCH4 * np.array(res_list_CH4)
plt.xlim([0, 200])
plt.ylim([0,4e-15])
plt.plot(t, DCF_inst_CO2, DCF_inst_CH4)
plt.xlabel('Time (year)')
plt.ylabel('DCF_inst (10$^{-15}$ W/m$^2$.kg CO$_2$)')
plt.show()
len(DCF_inst_CO2)
#%%
#Step (13): import emission data from emissions_seq_scenarios.xlsx (Step (9))
##wood-based
#read S1_nucleus
df = pd.read_excel('emissions_seq_PF_PO_EC.xlsx', 'S1_nucleus') # can also index sheet by name or fetch all sheets
emission_CO2_S1nu = df['kg_CO2'].tolist()
emission_CH4_S1nu = df['kg_CH4'].tolist()
emission_CO2_seq_S1nu = df['kg_CO2_seq'].tolist()
emission_CO2_ref = df['emission_ref'].tolist()
#read S1_plasma
df = pd.read_excel('emissions_seq_PF_PO_EC.xlsx', 'S1_plasma')
emission_CO2_S1pl = df['kg_CO2'].tolist()
emission_CH4_S1pl = df['kg_CH4'].tolist()
emission_CO2_seq_S1pl = df['kg_CO2_seq'].tolist()
#read E_nucleus
df = pd.read_excel('emissions_seq_PF_PO_EC.xlsx', 'E_nucleus') # can also index sheet by name or fetch all sheets
emission_CO2_Enu = df['kg_CO2'].tolist()
emission_CH4_Enu = df['kg_CH4'].tolist()
emission_CO2_seq_Enu = df['kg_CO2_seq'].tolist()
#read E_plasma
df = pd.read_excel('emissions_seq_PF_PO_EC.xlsx', 'E_plasma')
emission_CO2_Epl = df['kg_CO2'].tolist()
emission_CH4_Epl = df['kg_CH4'].tolist()
emission_CO2_seq_Epl = df['kg_CO2_seq'].tolist()
#%%
#Step (14): import emission data from the counter-use of non-renewable materials/energy scenarios (NR)
#read S1_nucleus
df = pd.read_excel('NonRW_PF_PO_EC.xlsx', 'PF_PO_S1nu') # can also index sheet by name or fetch all sheets
emission_NonRW_S1nu = df['NonRW_emissions'].tolist()
emission_Diesel_S1nu = df['Diesel_emissions'].tolist()
emission_NonRW_seq_S1nu = df['kg_CO2_seq'].tolist()
emission_CO2_ref = df['emission_ref'].tolist()
#read S1_plasma
df = pd.read_excel('NonRW_PF_PO_EC.xlsx', 'PF_PO_S1pl')
emission_NonRW_S1pl = df['NonRW_emissions'].tolist()
emission_Diesel_S1pl = df['Diesel_emissions'].tolist()
emission_NonRW_seq_S1pl = df['kg_CO2_seq'].tolist()
#read E_nucleus
df = pd.read_excel('NonRW_PF_PO_EC.xlsx', 'PF_PO_Enu') # can also index sheet by name or fetch all sheets
emission_NonRW_Enu = df['NonRW_emissions'].tolist()
emission_Diesel_Enu = df['Diesel_emissions'].tolist()
emission_NonRW_seq_Enu = df['kg_CO2_seq'].tolist()
#read E_plasma
df = pd.read_excel('NonRW_PF_PO_EC.xlsx', 'PF_PO_Epl')
emission_NonRW_Epl = df['NonRW_emissions'].tolist()
emission_Diesel_Epl = df['Diesel_emissions'].tolist()
emission_NonRW_seq_Epl = df['kg_CO2_seq'].tolist()
#%%
#Step (15): Determine the time elapsed dynamic characterization factors, DCF(t-ti), for CO2 and CH4
#DCF(t-i) CO2
matrix = (tf-1,tf-1)
DCF_CO2_ti = np.zeros(matrix)
for t in range(0,tf-1):
i = -1
while i < t:
DCF_CO2_ti[i+1,t] = DCF_inst_CO2[t-i]
i = i + 1
print(DCF_CO2_ti)
#sns.heatmap(DCF_CO2_ti)
DCF_CO2_ti.shape
#DCF(t-i) CH4
matrix = (tf-1,tf-1)
DCF_CH4_ti = np.zeros(matrix)
for t in range(0,tf-1):
i = -1
while i < t:
DCF_CH4_ti[i+1,t] = DCF_inst_CH4[t-i]
i = i + 1
print(DCF_CH4_ti)
#sns.heatmap(DCF_CH4_ti)
DCF_CH4_ti.shape
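#The double loops above fill an upper-triangular Toeplitz structure: entry (i,t) equals
#DCF_inst(t-i+1) for i <= t and 0 otherwise. A minimal equivalent construction (illustrative
#only, assuming scipy is available, as it already is for quad above):
from scipy.linalg import toeplitz
first_col = np.zeros(tf-1)
first_col[0] = DCF_inst_CO2[1]
DCF_CO2_ti_check = toeplitz(first_col, DCF_inst_CO2[1:tf])
print(np.allclose(DCF_CO2_ti_check, DCF_CO2_ti))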
#%%
#Step (16): Calculate instantaneous global warming impact (GWI)
##wood-based
#S1_nucleus
t = np.arange(0,tf-1,1)
matrix_GWI_S1nu = (tf-1,3)
GWI_inst_S1nu = np.zeros(matrix_GWI_S1nu)
for t in range(0,tf-1):
GWI_inst_S1nu[t,0] = np.sum(np.multiply(emission_CO2_S1nu,DCF_CO2_ti[:,t]))
GWI_inst_S1nu[t,1] = np.sum(np.multiply(emission_CH4_S1nu,DCF_CH4_ti[:,t]))
GWI_inst_S1nu[t,2] = np.sum(np.multiply(emission_CO2_seq_S1nu,DCF_CO2_ti[:,t]))
matrix_GWI_tot_S1nu = (tf-1,1)
GWI_inst_tot_S1nu = np.zeros(matrix_GWI_tot_S1nu)
GWI_inst_tot_S1nu[:,0] = np.array(GWI_inst_S1nu[:,0] + GWI_inst_S1nu[:,1] + GWI_inst_S1nu[:,2])
print(GWI_inst_tot_S1nu[:,0])
t = np.arange(0,tf-1,1)
#S1_plasma
t = np.arange(0,tf-1,1)
matrix_GWI_S1pl = (tf-1,3)
GWI_inst_S1pl = np.zeros(matrix_GWI_S1pl)
for t in range(0,tf-1):
GWI_inst_S1pl[t,0] = np.sum(np.multiply(emission_CO2_S1pl,DCF_CO2_ti[:,t]))
GWI_inst_S1pl[t,1] = np.sum(np.multiply(emission_CH4_S1pl,DCF_CH4_ti[:,t]))
GWI_inst_S1pl[t,2] = np.sum(np.multiply(emission_CO2_seq_S1pl,DCF_CO2_ti[:,t]))
matrix_GWI_tot_S1pl = (tf-1,1)
GWI_inst_tot_S1pl = np.zeros(matrix_GWI_tot_S1pl)
GWI_inst_tot_S1pl[:,0] = np.array(GWI_inst_S1pl[:,0] + GWI_inst_S1pl[:,1] + GWI_inst_S1pl[:,2])
print(GWI_inst_tot_S1pl[:,0])
#E_nucleus
t = np.arange(0,tf-1,1)
matrix_GWI_Enu = (tf-1,3)
GWI_inst_Enu = np.zeros(matrix_GWI_Enu)
for t in range(0,tf-1):
GWI_inst_Enu[t,0] = np.sum(np.multiply(emission_CO2_Enu,DCF_CO2_ti[:,t]))
GWI_inst_Enu[t,1] = np.sum(np.multiply(emission_CH4_Enu,DCF_CH4_ti[:,t]))
GWI_inst_Enu[t,2] = np.sum(np.multiply(emission_CO2_seq_Enu,DCF_CO2_ti[:,t]))
matrix_GWI_tot_Enu = (tf-1,1)
GWI_inst_tot_Enu = np.zeros(matrix_GWI_tot_Enu)
GWI_inst_tot_Enu[:,0] = np.array(GWI_inst_Enu[:,0] + GWI_inst_Enu[:,1] + GWI_inst_Enu[:,2])
print(GWI_inst_tot_Enu[:,0])
#E_plasma
t = np.arange(0,tf-1,1)
matrix_GWI_Epl = (tf-1,3)
GWI_inst_Epl = np.zeros(matrix_GWI_Epl)
for t in range(0,tf-1):
GWI_inst_Epl[t,0] = np.sum(np.multiply(emission_CO2_Epl,DCF_CO2_ti[:,t]))
GWI_inst_Epl[t,1] = np.sum(np.multiply(emission_CH4_Epl,DCF_CH4_ti[:,t]))
GWI_inst_Epl[t,2] = np.sum(np.multiply(emission_CO2_seq_Epl,DCF_CO2_ti[:,t]))
matrix_GWI_tot_Epl = (tf-1,1)
GWI_inst_tot_Epl = np.zeros(matrix_GWI_tot_Epl)
GWI_inst_tot_Epl[:,0] = np.array(GWI_inst_Epl[:,0] + GWI_inst_Epl[:,1] + GWI_inst_Epl[:,2])
print(GWI_inst_tot_Epl[:,0])
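#The per-year loops above amount to a matrix-vector product between the emission vectors and
#the time-elapsed DCF matrices; a minimal vectorized check for S1_nucleus (illustrative only):
GWI_check_S1nu = (DCF_CO2_ti.T @ np.asarray(emission_CO2_S1nu)
                  + DCF_CH4_ti.T @ np.asarray(emission_CH4_S1nu)
                  + DCF_CO2_ti.T @ np.asarray(emission_CO2_seq_S1nu))
print(np.allclose(GWI_check_S1nu, GWI_inst_tot_S1nu[:,0]))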
##NonRW
#S1_nucleus
t = np.arange(0,tf-1,1)
matrix_GWI_NonRW_S1nu = (tf-1,3)
GWI_inst_NonRW_S1nu = np.zeros(matrix_GWI_NonRW_S1nu)
for t in range(0,tf-1):
GWI_inst_NonRW_S1nu[t,0] = np.sum(np.multiply(emission_NonRW_S1nu,DCF_CO2_ti[:,t]))
GWI_inst_NonRW_S1nu[t,1] = np.sum(np.multiply(emission_Diesel_S1nu,DCF_CO2_ti[:,t]))
GWI_inst_NonRW_S1nu[t,2] = np.sum(np.multiply(emission_NonRW_seq_S1nu,DCF_CO2_ti[:,t]))
matrix_GWI_tot_NonRW_S1nu = (tf-1,1)
GWI_inst_tot_NonRW_S1nu = np.zeros(matrix_GWI_tot_NonRW_S1nu)
GWI_inst_tot_NonRW_S1nu[:,0] = np.array(GWI_inst_NonRW_S1nu[:,0] + GWI_inst_NonRW_S1nu[:,1] + GWI_inst_NonRW_S1nu[:,2])
print(GWI_inst_tot_NonRW_S1nu[:,0])
t = np.arange(0,tf-1,1)
#S1_plasma
t = np.arange(0,tf-1,1)
matrix_GWI_NonRW_S1pl = (tf-1,3)
GWI_inst_NonRW_S1pl = np.zeros(matrix_GWI_NonRW_S1pl)
for t in range(0,tf-1):
GWI_inst_NonRW_S1pl[t,0] = np.sum(np.multiply(emission_NonRW_S1pl,DCF_CO2_ti[:,t]))
GWI_inst_NonRW_S1pl[t,1] = np.sum(np.multiply(emission_Diesel_S1pl,DCF_CO2_ti[:,t]))
GWI_inst_NonRW_S1pl[t,2] = np.sum(np.multiply(emission_NonRW_seq_S1pl,DCF_CO2_ti[:,t]))
matrix_GWI_tot_NonRW_S1pl = (tf-1,1)
GWI_inst_tot_NonRW_S1pl = np.zeros(matrix_GWI_tot_NonRW_S1pl)
GWI_inst_tot_NonRW_S1pl[:,0] = np.array(GWI_inst_NonRW_S1pl[:,0] + GWI_inst_NonRW_S1pl[:,1] + GWI_inst_NonRW_S1pl[:,2])
print(GWI_inst_tot_NonRW_S1pl[:,0])
#E_nucleus
t = np.arange(0,tf-1,1)
matrix_GWI_NonRW_Enu = (tf-1,3)
GWI_inst_NonRW_Enu = np.zeros(matrix_GWI_NonRW_Enu)
for t in range(0,tf-1):
GWI_inst_NonRW_Enu[t,0] = np.sum(np.multiply(emission_NonRW_Enu,DCF_CO2_ti[:,t]))
GWI_inst_NonRW_Enu[t,1] = np.sum(np.multiply(emission_Diesel_Enu,DCF_CO2_ti[:,t]))
GWI_inst_NonRW_Enu[t,2] = np.sum(np.multiply(emission_NonRW_seq_Enu,DCF_CO2_ti[:,t]))
matrix_GWI_tot_NonRW_Enu = (tf-1,1)
GWI_inst_tot_NonRW_Enu = np.zeros(matrix_GWI_tot_NonRW_Enu)
GWI_inst_tot_NonRW_Enu[:,0] = np.array(GWI_inst_NonRW_Enu[:,0] + GWI_inst_NonRW_Enu[:,1] + GWI_inst_NonRW_Enu[:,2])
print(GWI_inst_tot_NonRW_Enu[:,0])
#E_plasma
t = np.arange(0,tf-1,1)
matrix_GWI_NonRW_Epl = (tf-1,3)
GWI_inst_NonRW_Epl = np.zeros(matrix_GWI_NonRW_Epl)
for t in range(0,tf-1):
GWI_inst_NonRW_Epl[t,0] = np.sum(np.multiply(emission_NonRW_Epl,DCF_CO2_ti[:,t]))
GWI_inst_NonRW_Epl[t,1] = np.sum(np.multiply(emission_Diesel_Epl,DCF_CO2_ti[:,t]))
GWI_inst_NonRW_Epl[t,2] = np.sum(np.multiply(emission_NonRW_seq_Epl,DCF_CO2_ti[:,t]))
matrix_GWI_tot_NonRW_Epl = (tf-1,1)
GWI_inst_tot_NonRW_Epl = np.zeros(matrix_GWI_tot_NonRW_Epl)
GWI_inst_tot_NonRW_Epl[:,0] = np.array(GWI_inst_NonRW_Epl[:,0] + GWI_inst_NonRW_Epl[:,1] + GWI_inst_NonRW_Epl[:,2])
print(GWI_inst_tot_NonRW_Epl[:,0])
t = np.arange(0,tf-1,1)
#create zero list to highlight the horizontal line for 0
def zerolistmaker(n):
listofzeros = [0] * (n)
return listofzeros
#convert to flat list
GWI_inst_tot_NonRW_S1nu = np.array([item for sublist in GWI_inst_tot_NonRW_S1nu for item in sublist])
GWI_inst_tot_NonRW_S1pl = np.array([item for sublist in GWI_inst_tot_NonRW_S1pl for item in sublist])
GWI_inst_tot_NonRW_Enu = np.array([item for sublist in GWI_inst_tot_NonRW_Enu for item in sublist])
GWI_inst_tot_NonRW_Epl = np.array([item for sublist in GWI_inst_tot_NonRW_Epl for item in sublist])
GWI_inst_tot_S1nu= np.array([item for sublist in GWI_inst_tot_S1nu for item in sublist])
GWI_inst_tot_S1pl = np.array([item for sublist in GWI_inst_tot_S1pl for item in sublist])
GWI_inst_tot_Enu = np.array([item for sublist in GWI_inst_tot_Enu for item in sublist])
GWI_inst_tot_Epl = np.array([item for sublist in GWI_inst_tot_Epl for item in sublist])
plt.plot(t, GWI_inst_tot_NonRW_S1nu, color='lightcoral', label='NR_M_EC_nucleus', ls='--')
plt.plot(t, GWI_inst_tot_NonRW_S1pl, color='deeppink', label='NR_M_EC_plasma', ls='--')
plt.plot(t, GWI_inst_tot_NonRW_Enu, color='royalblue', label='NR_E_nucleus', ls='--')
plt.plot(t, GWI_inst_tot_NonRW_Epl, color='deepskyblue', label='NR_E_plasma', ls='--')
plt.plot(t, GWI_inst_tot_S1nu, color='lightcoral', label='M_EC_nucleus')
plt.plot(t, GWI_inst_tot_S1pl, color='deeppink', label='M_EC_plasma')
plt.plot(t, GWI_inst_tot_Enu, color='royalblue', label='E_nucleus')
plt.plot(t, GWI_inst_tot_Epl, color='deepskyblue', label='E_plasma')
plt.plot(t, zerolistmaker(tf-1), color='black', label='Zero line', ls='--', alpha=0.75)
#plt.fill_between(t, GWI_inst_tot_NonRW_S1pl, GWI_inst_tot_NonRW_Enu, color='lightcoral', alpha=0.3)
plt.grid(True)
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
plt.xlim(0,200)
plt.ylim(-0.5e-9,1.4e-9)
plt.title('Instantaneous GWI, PF_PO_EC')
plt.xlabel('Time (year)')
#plt.ylabel('GWI_inst (10$^{-13}$ W/m$^2$)')
plt.ylabel('GWI_inst (W/m$^2$)')
plt.savefig(r'C:\Work\Data\ID Future Scenarios\Hectare-based\Fig\GWI_inst_NonRW_PF_PO_EC', dpi=300)
plt.show()
#%%
#Step (17): Calculate cumulative global warming impact (GWI)
##Wood-based
GWI_cum_S1nu = np.cumsum(GWI_inst_tot_S1nu)
GWI_cum_S1pl = np.cumsum(GWI_inst_tot_S1pl)
GWI_cum_Enu = np.cumsum(GWI_inst_tot_Enu)
GWI_cum_Epl = np.cumsum(GWI_inst_tot_Epl)
##NonRW
GWI_cum_NonRW_S1nu = np.cumsum(GWI_inst_tot_NonRW_S1nu)
GWI_cum_NonRW_S1pl = np.cumsum(GWI_inst_tot_NonRW_S1pl)
GWI_cum_NonRW_Enu = np.cumsum(GWI_inst_tot_NonRW_Enu)
GWI_cum_NonRW_Epl = np.cumsum(GWI_inst_tot_NonRW_Epl)
plt.xlabel('Time (year)')
#plt.ylabel('GWI_cum (10$^{-11}$ W/m$^2$)')
plt.ylabel('GWI_cum (W/m$^2$)')
plt.xlim(0,200)
plt.ylim(-0.3e-7,2e-7)
plt.title('Cumulative GWI, PF_PO_EC')
plt.plot(t, GWI_cum_NonRW_S1nu , color='lightcoral', label='NR_M_EC_nucleus', ls='--')
plt.plot(t, GWI_cum_NonRW_S1pl, color='deeppink', label='NR_M_EC_plasma', ls='--')
plt.plot(t, GWI_cum_NonRW_Enu, color='royalblue', label='NR_E_nucleus', ls='--')
plt.plot(t, GWI_cum_NonRW_Epl, color='deepskyblue', label='NR_E_plasma', ls='--')
plt.plot(t, GWI_cum_S1nu, color='lightcoral', label='M_EC_nucleus')
plt.plot(t, GWI_cum_S1pl, color='deeppink', label='M_EC_plasma')
plt.plot(t, GWI_cum_Enu, color='royalblue', label='E_nucleus')
plt.plot(t, GWI_cum_Epl, color='deepskyblue', label='E_plasma')
plt.plot(t, zerolistmaker(tf-1), color='black', label='Zero line', ls='--', alpha=0.75)
plt.grid(True)
#plt.fill_between(t, GWI_cum_NonRW_S1pl, GWI_cum_NonRW_Enu, color='lightcoral', alpha=0.3)
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
plt.savefig(r'C:\Work\Data\ID Future Scenarios\Hectare-based\Fig\GWI_cum_NonRW_PF_PO_EC', dpi=300)
plt.show()
#%%
#Step (18): Determine the Instantaneous and Cumulative GWI for the emission reference (1 kg CO2 emission at time zero) before performing the dynamic GWP calculation
t = np.arange(0,tf-1,1)
matrix_GWI_ref = (tf-1,1)
GWI_inst_ref = np.zeros(matrix_GWI_ref)
for t in range(0,tf-1):
GWI_inst_ref[t,0] = np.sum(np.multiply(emission_CO2_ref,DCF_CO2_ti[:,t]))
#print(GWI_inst_ref[:,0])
len(GWI_inst_ref)
#determine the GWI cumulative for the emission reference
t = np.arange(0,tf-1,1)
GWI_cum_ref = np.cumsum(GWI_inst_ref[:,0])
#print(GWI_cum_ref)
plt.xlabel('Time (year)')
plt.ylabel('GWI_cum_ref (10$^{-13}$ W/m$^2$.kgCO$_2$)')
plt.plot(t, GWI_cum_ref)
len(GWI_cum_ref)
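#Because the reference is a single 1 kg CO2 pulse in year 0, GWI_inst_ref simply reads off the
#first row of DCF_CO2_ti (illustrative check only):
print(np.allclose(GWI_inst_ref[:,0], DCF_CO2_ti[0,:]))
print(np.allclose(GWI_cum_ref, np.cumsum(DCF_CO2_ti[0,:])))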
#%%
#Step (19): Calculate dynamic global warming potential (GWPdyn)
#divide by 1000 to express GWPdyn in t-CO2-eq rather than kg-CO2-eq
##Wood-based
GWP_dyn_cum_S1nu = [x/(y*1000) for x,y in zip(GWI_cum_S1nu, GWI_cum_ref)]
GWP_dyn_cum_S1pl = [x/(y*1000) for x,y in zip(GWI_cum_S1pl, GWI_cum_ref)]
GWP_dyn_cum_Enu = [x/(y*1000) for x,y in zip(GWI_cum_Enu, GWI_cum_ref)]
GWP_dyn_cum_Epl = [x/(y*1000) for x,y in zip(GWI_cum_Epl, GWI_cum_ref)]
##NonRW
GWP_dyn_cum_NonRW_S1nu = [x/(y*1000) for x,y in zip(GWI_cum_NonRW_S1nu, GWI_cum_ref)]
GWP_dyn_cum_NonRW_S1pl = [x/(y*1000) for x,y in zip(GWI_cum_NonRW_S1pl, GWI_cum_ref)]
GWP_dyn_cum_NonRW_Enu = [x/(y*1000) for x,y in zip(GWI_cum_NonRW_Enu, GWI_cum_ref)]
GWP_dyn_cum_NonRW_Epl = [x/(y*1000) for x,y in zip(GWI_cum_NonRW_Epl, GWI_cum_ref)]
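#Illustrative usage: the dynamic GWP at a chosen time horizon can be read directly from the
#cumulative lists, e.g. the 100-year value for the M_EC_nucleus scenario:
print(GWP_dyn_cum_S1nu[100])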
fig=plt.figure()
fig.show()
ax=fig.add_subplot(111)
ax.plot(t, GWP_dyn_cum_NonRW_S1nu, color='lightcoral', label='NR_M_EC_nucleus', ls='--')
ax.plot(t, GWP_dyn_cum_NonRW_S1pl, color='deeppink', label='NR_M_EC_plasma', ls='--')
ax.plot(t, GWP_dyn_cum_NonRW_Enu, color='royalblue', label='NR_E_nucleus', ls='--')
ax.plot(t, GWP_dyn_cum_NonRW_Epl, color='deepskyblue', label='NR_E_plasma', ls='--')
ax.plot(t, GWP_dyn_cum_S1nu, color='lightcoral', label='M_EC_nucleus')
ax.plot(t, GWP_dyn_cum_S1pl, color='deeppink', label='M_EC_plasma')
ax.plot(t, GWP_dyn_cum_Enu, color='royalblue', label='E_nucleus')
ax.plot(t, GWP_dyn_cum_Epl, color='deepskyblue', label='E_plasma')
ax.plot(t, zerolistmaker(tf-1), color='black', label='Zero line', ls='--', alpha=0.75)
#plt.fill_between(t, GWP_dyn_cum_NonRW_S1pl, GWP_dyn_cum_NonRW_Enu, color='lightcoral', alpha=0.3)
plt.grid(True)
ax.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax.set_xlabel('Time (year)')
ax.set_ylabel('GWP$_{dyn}$ (t-CO$_2$-eq)')
ax.set_xlim(0,200)
ax.set_ylim(-250,1400)
ax.set_title('Dynamic GWP, PF_PO_EC')
plt.draw()
plt.savefig(r'C:\Work\Data\ID Future Scenarios\Hectare-based\Fig\GWP_dyn_cum_NonRW_PF_PO_EC', dpi=300)
#plt.show()
#%%
#Step (20): Exporting the data behind result graphs to Excel
year = []
for x in range (0, 201):
year.append(x)
### Create Column
Col1 = year
##GWI_Inst
#GWI_inst from wood-based scenarios
Col_GI_1 = GWI_inst_tot_S1nu
Col_GI_2 = GWI_inst_tot_S1pl
Col_GI_5 = GWI_inst_tot_Enu
Col_GI_6 = GWI_inst_tot_Epl
#print(Col_GI_1)
#print(np.shape(Col_GI_1))
#GWI_inst from counter use scenarios
Col_GI_7 = GWI_inst_tot_NonRW_S1nu
Col_GI_8 = GWI_inst_tot_NonRW_S1pl
Col_GI_11 = GWI_inst_tot_NonRW_Enu
Col_GI_12 = GWI_inst_tot_NonRW_Epl
#print(Col_GI_7)
#print(np.shape(Col_GI_7))
#create column results
##GWI_cumulative
#GWI_cumulative from wood-based scenarios
Col_GC_1 = GWI_cum_S1nu
Col_GC_2 = GWI_cum_S1pl
Col_GC_5 = GWI_cum_Enu
Col_GC_6 = GWI_cum_Epl
#GWI_cumulative from counter use scenarios
Col_GC_7 = GWI_cum_NonRW_S1nu
Col_GC_8 = GWI_cum_NonRW_S1pl
Col_GC_11 = GWI_cum_NonRW_Enu
Col_GC_12 = GWI_cum_NonRW_Epl
#create column results
##GWPdyn
#GWPdyn from wood-based scenarios
Col_GWP_1 = GWP_dyn_cum_S1nu
Col_GWP_2 = GWP_dyn_cum_S1pl
Col_GWP_5 = GWP_dyn_cum_Enu
Col_GWP_6 = GWP_dyn_cum_Epl
#GWPdyn from counter use scenarios
Col_GWP_7 = GWP_dyn_cum_NonRW_S1nu
Col_GWP_8 = GWP_dyn_cum_NonRW_S1pl
Col_GWP_11 = GWP_dyn_cum_NonRW_Enu
Col_GWP_12 = GWP_dyn_cum_NonRW_Epl
#Create colum results
dfM_EC_GI = pd.DataFrame.from_dict({'Year':Col1,'M_EC_nucleus (W/m2)':Col_GI_1, 'M_EC_plasma (W/m2)':Col_GI_2,
'E_nucleus (W/m2)':Col_GI_5, 'E_plasma (W/m2)':Col_GI_6,
'NR_M_EC_nucleus (W/m2)':Col_GI_7, 'NR_M_EC_plasma (W/m2)':Col_GI_8,
'NR_E_nucleus (W/m2)':Col_GI_11, 'NR_E_plasma (W/m2)':Col_GI_12})
dfM_EC_GC = pd.DataFrame.from_dict({'Year':Col1,'M_EC_nucleus (W/m2)':Col_GC_1, 'M_EC_plasma (W/m2)':Col_GC_2,
'E_nucleus (W/m2)':Col_GC_5, 'E_plasma (W/m2)':Col_GC_6,
'NR_M_EC_nucleus (W/m2)':Col_GC_7, 'NR_M_EC_plasma (W/m2)':Col_GC_8,
'NR_E_nucleus (W/m2)':Col_GC_11, 'NR_E_plasma (W/m2)':Col_GC_12})
dfM_EC_GWP = pd.DataFrame.from_dict({'Year':Col1,'M_EC_nucleus (t-CO2-eq)':Col_GWP_1, 'M_EC_plasma (t-CO2-eq)':Col_GWP_2,
                                     'E_nucleus (t-CO2-eq)':Col_GWP_5, 'E_plasma (t-CO2-eq)':Col_GWP_6,
                                     'NR_M_EC_nucleus (t-CO2-eq)':Col_GWP_7, 'NR_M_EC_plasma (t-CO2-eq)':Col_GWP_8,
                                     'NR_E_nucleus (t-CO2-eq)':Col_GWP_11, 'NR_E_plasma (t-CO2-eq)':Col_GWP_12})
#Export to excel
writer = pd.ExcelWriter('GraphResults_PF_PO_EC.xlsx', engine = 'xlsxwriter')
#GWI_inst
dfM_EC_GI.to_excel(writer, sheet_name = 'GWI_Inst_PF_PO_EC', header=True, index=False )
#GWI cumulative
dfM_EC_GC.to_excel(writer, sheet_name = 'Cumulative GWI_PF_PO_EC', header=True, index=False )
#GWP_dyn
dfM_EC_GWP.to_excel(writer, sheet_name = 'GWPdyn_PF_PO_EC', header=True, index=False )
writer.save()
writer.close()
#%%
#Step (21): Generate the excel file for the individual carbon emission and sequestration flows
#print year column
year = []
for x in range (0, 201):
year.append(x)
print (year)
print(len(year))
division = 1000*44/12 #converts kg-CO2 to t-C (divide by 1000 for kg to t, and by 44/12 for CO2 to C)
division_CH4 = 1000*16/12 #converts kg-CH4 to t-C (divide by 1000 for kg to t, and by 16/12 for CH4 to C)
#Mnu_existing
c_firewood_energy_S1nu = [x/division for x in c_firewood_energy_S1nu]
decomp_emissions[:,0] = [x/division for x in decomp_emissions[:,0]]
TestDSM1nu.o = [x/division for x in TestDSM1nu.o]
PH_Emissions_PO_S1nu = [x/division for x in PH_Emissions_PO_S1nu]
PH_Emissions_HWP_S1nu = [x/division for x in PH_Emissions_HWP_S1nu]
#OC_storage_S1nu = [x/division for x in OC_storage_S1nu]
flat_list_nucleus = [x/division for x in flat_list_nucleus]
decomp_tot_CO2_S1nu[:,0] = [x/division for x in decomp_tot_CO2_S1nu[:,0]]
decomp_tot_CH4_S1nu[:,0] = [x/division_CH4 for x in decomp_tot_CH4_S1nu[:,0]]
#Mpl_existing
c_firewood_energy_S1pl = [x/division for x in c_firewood_energy_S1pl]
TestDSM1pl.o = [x/division for x in TestDSM1pl.o]
PH_Emissions_PO_S1pl = [x/division for x in PH_Emissions_PO_S1pl]
PH_Emissions_HWP_S1pl = [x/division for x in PH_Emissions_HWP_S1pl]
#OC_storage_S1pl = [x/division for x in OC_storage_S1pl]
flat_list_plasma = [x/division for x in flat_list_plasma]
decomp_tot_CO2_S1pl[:,0] = [x/division for x in decomp_tot_CO2_S1pl[:,0]]
decomp_tot_CH4_S1pl[:,0] = [x/division_CH4 for x in decomp_tot_CH4_S1pl[:,0]]
#Enu
c_firewood_energy_Enu = [x/division for x in c_firewood_energy_Enu]
c_pellets_Enu = [x/division for x in c_pellets_Enu]
TestDSM3nu.o = [x/division for x in TestDSM3nu.o]
PH_Emissions_PO_Enu = [x/division for x in PH_Emissions_PO_Enu]
PH_Emissions_HWP_Enu = [x/division for x in PH_Emissions_HWP_Enu]
#OC_storage_Enu = [x/division for x in OC_storage_Enu]
decomp_tot_CO2_Enu[:,0] = [x/division for x in decomp_tot_CO2_Enu[:,0]]
decomp_tot_CH4_Enu[:,0] = [x/division_CH4 for x in decomp_tot_CH4_Enu[:,0]]
#Epl
c_firewood_energy_Epl = [x/division for x in c_firewood_energy_Epl]
c_pellets_Epl = [x/division for x in c_pellets_Epl]
TestDSM3pl.o = [x/division for x in TestDSM3pl.o]
PH_Emissions_PO_Epl = [x/division for x in PH_Emissions_PO_Epl]
PH_Emissions_HWP_Epl = [x/division for x in PH_Emissions_HWP_Epl]
#OC_storage_Epl = [x/division for x in OC_storage_Epl]
decomp_tot_CO2_Epl[:,0] = [x/division for x in decomp_tot_CO2_Epl[:,0]]
decomp_tot_CH4_Epl[:,0] = [x/division_CH4 for x in decomp_tot_CH4_Epl[:,0]]
#landfill aggregate flows
Landfill_decomp_PF_PO_S1nu = decomp_tot_CH4_S1nu, decomp_tot_CO2_S1nu
Landfill_decomp_PF_PO_S1pl = decomp_tot_CH4_S1pl, decomp_tot_CO2_S1pl
Landfill_decomp_PF_PO_Enu = decomp_tot_CH4_Enu, decomp_tot_CO2_Enu
Landfill_decomp_PF_PO_Epl = decomp_tot_CH4_Epl, decomp_tot_CO2_Epl
Landfill_decomp_PF_PO_S1nu = [sum(x) for x in zip(*Landfill_decomp_PF_PO_S1nu)]
Landfill_decomp_PF_PO_S1pl = [sum(x) for x in zip(*Landfill_decomp_PF_PO_S1pl)]
Landfill_decomp_PF_PO_Enu = [sum(x) for x in zip(*Landfill_decomp_PF_PO_Enu)]
Landfill_decomp_PF_PO_Epl = [sum(x) for x in zip(*Landfill_decomp_PF_PO_Epl)]
Landfill_decomp_PF_PO_S1nu = [item for sublist in Landfill_decomp_PF_PO_S1nu for item in sublist]
Landfill_decomp_PF_PO_S1pl = [item for sublist in Landfill_decomp_PF_PO_S1pl for item in sublist]
Landfill_decomp_PF_PO_Enu = [item for sublist in Landfill_decomp_PF_PO_Enu for item in sublist]
Landfill_decomp_PF_PO_Epl = [item for sublist in Landfill_decomp_PF_PO_Epl for item in sublist]
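#Equivalent check (illustrative): the tuple/zip/flatten sequence above is just the element-wise
#sum of the CH4 and CO2 landfill columns, e.g. for M_EC_nucleus:
print(np.allclose(Landfill_decomp_PF_PO_S1nu, (decomp_tot_CH4_S1nu + decomp_tot_CO2_S1nu)[:,0]))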
#Wood processing aggregate flows
OpProcessing_PF_PO_S1nu = [x + y for x, y in zip(PH_Emissions_PO_S1nu, PH_Emissions_HWP_S1nu)]
OpProcessing_PF_PO_S1pl = [x + y for x, y in zip(PH_Emissions_PO_S1pl, PH_Emissions_HWP_S1pl)]
OpProcessing_PF_PO_Enu = [x + y for x, y in zip(PH_Emissions_PO_Enu, PH_Emissions_HWP_Enu)]
OpProcessing_PF_PO_Epl = [x + y for x, y in zip(PH_Emissions_PO_Epl, PH_Emissions_HWP_Epl)]
Column1 = year
Column2 = decomp_emissions[:,0]
Column3 = flat_list_nucleus
Column4 = flat_list_plasma
#E_nu
Column5 = c_firewood_energy_Enu
Column5_1 = c_pellets_Enu
Column6 = TestDSM3nu.o
Column7 = OpProcessing_PF_PO_Enu
#Column9_1 = OC_storage_Enu
Column9 = Landfill_decomp_PF_PO_Enu
#E_pl
Column10 = c_firewood_energy_Epl
Column10_1 = c_pellets_Epl
Column11 = TestDSM3pl.o
Column12 = OpProcessing_PF_PO_Epl
#Column14_1 = OC_storage_Epl
Column14 = Landfill_decomp_PF_PO_Epl
#M_nu_existing
Column15 = c_firewood_energy_S1nu
Column16 = TestDSM1nu.o
Column17 = OpProcessing_PF_PO_S1nu
#Column19_1 = OC_storage_S1nu
Column19 = Landfill_decomp_PF_PO_S1nu
#M_pl_existing
Column20 = c_firewood_energy_S1pl
Column21 = TestDSM1pl.o
Column22 = OpProcessing_PF_PO_S1pl
#Column24_1 = OC_storage_S1pl
Column24 = Landfill_decomp_PF_PO_S1pl
#E_existing
dfE_nu = pd.DataFrame.from_dict({'Year':Column1,'F0-1: Biomass C sequestration (t-C)':Column3,
#'9: Landfill storage (t-C)':Column9_1,
'F1-0: Residue decomposition (t-C)':Column2,
'F6-0-1: Emissions from firewood/other energy use (t-C)':Column5,
'F8-0: Operational stage/processing emissions (t-C)':Column7,
'F6-0-2: Energy use emissions from in-use stocks outflow (t-C)':Column6,
'F7-0: Landfill gas decomposition (t-C)':Column9,
'F4-0: Emissions from wood pellets use (t-C)':Column5_1})
dfE_pl = pd.DataFrame.from_dict({'Year':Column1,'F0-1: Biomass C sequestration (t-C)':Column4,
#'9: Landfill storage (t-C)':Column14_1,
'F1-0: Residue decomposition (t-C)':Column2,
'F6-0-1: Emissions from firewood/other energy use (t-C)':Column10,
'F8-0: Operational stage/processing emissions (t-C)':Column12,
'F6-0-2: Energy use emissions from in-use stocks outflow (t-C)':Column11,
'F7-0: Landfill gas decomposition (t-C)':Column14,
'F4-0: Emissions from wood pellets use (t-C)':Column10_1})
#M_existing
dfM_nu_exst= pd.DataFrame.from_dict({'Year':Column1,'F0-1: Biomass C sequestration (t-C)':Column3,
# '9: Landfill storage (t-C)':Column19_1,
'F1-0: Residue decomposition (t-C)':Column2,
'F6-0-1: Emissions from firewood/other energy use (t-C)':Column15,
'F8-0: Operational stage/processing emissions (t-C)':Column17,
'F6-0-2: Energy use emissions from in-use stocks outflow (t-C)':Column16,
'F7-0: Landfill gas decomposition (t-C)':Column19})
dfM_pl_exst = pd.DataFrame.from_dict({'Year':Column1,'F0-1: Biomass C sequestration (t-C)':Column4,
# '9: Landfill storage (t-C)':Column24_1,
'F1-0: Residue decomposition (t-C)':Column2,
'F6-0-1: Emissions from firewood/other energy use (t-C)':Column20,
'F8-0: Operational stage/processing emissions (t-C)':Column22,
'F6-0-2: Energy use emissions from in-use stocks outflow (t-C)':Column21,
'F7-0: Landfill gas decomposition (t-C)':Column24})
writer = pd.ExcelWriter('C_flows_PF_PO_EC.xlsx', engine = 'xlsxwriter')
dfM_nu_exst.to_excel(writer, sheet_name = 'PF_PO_M_EC_nu', header=True, index=False )
dfM_pl_exst.to_excel(writer, sheet_name = 'PF_PO_M_EC_pl', header=True, index=False)
dfE_nu.to_excel(writer, sheet_name = 'PF_PO_E_EC_nu', header=True, index=False)
dfE_pl.to_excel(writer, sheet_name = 'PF_PO_E_EC_pl', header=True, index=False)
writer.save()
writer.close()
#%%
#Step (22): Plot of the individual carbon emission and sequestration flows for normal and symlog-scale graphs
#PF_PO_M_EC_nu
fig=plt.figure()
fig.show()
ax1=fig.add_subplot(111)
#plot
ax1.plot(t, flat_list_nucleus, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax1.plot(t, OC_storage_S1nu, color='darkturquoise', label='9: Landfill storage')
ax1.plot(t, decomp_emissions[:,0], color='lightcoral', label='F1-0: Residue decomposition')
ax1.plot(t, c_firewood_energy_S1nu, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax1.plot(t, OpProcessing_PF_PO_S1nu, color='orange', label='F8-0: Operational stage/processing emissions')
ax1.plot(t, TestDSM1nu.o, color='royalblue', label='F6-0-2: Energy use emissions from in-use stocks outflow')
ax1.plot(t, Landfill_decomp_PF_PO_S1nu, color='yellow', label='F7-0: Landfill gas decomposition')
ax1.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax1.set_xlim(-1,200)
ax1.set_yscale('symlog')
ax1.set_xlabel('Time (year)')
ax1.set_ylabel('C flows (t-C) (symlog)')
ax1.set_title('Carbon flow, PF_PO_M_EC_nucleus (symlog-scale)')
plt.show()
#%%
#plotting the individual C flows
#PF_PO_M_EC_nu
f, (ax_a, ax_b) = plt.subplots(2, 1, sharex=True)
# plot the same data on both axes
ax_a.plot(t, flat_list_nucleus, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax_a.plot(t, OC_storage_S1nu, color='darkturquoise', label='9: Landfill storage')
ax_a.plot(t, decomp_emissions[:,0], color='lightcoral', label='F1-0: Residue decomposition')
ax_a.plot(t, c_firewood_energy_S1nu, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax_a.plot(t, OpProcessing_PF_PO_S1nu, color='orange', label='F8-0: Operational stage/processing emissions')
ax_a.plot(t, TestDSM1nu.o, color='royalblue', label='F6-0-2: Energy use emissions from in-use stocks outflow')
ax_a.plot(t, Landfill_decomp_PF_PO_S1nu, color='yellow', label='F7-0: Landfill gas decomposition')
ax_b.plot(t, c_firewood_energy_S1nu, color='mediumseagreen')
ax_b.plot(t, decomp_emissions[:,0], color='lightcoral')
ax_b.plot(t, TestDSM1nu.o, color='royalblue')
ax_b.plot(t, OpProcessing_PF_PO_S1nu, color='orange')
#ax_b.plot(t, OC_storage_S1nu, color='darkturquoise')
ax_b.plot(t, Landfill_decomp_PF_PO_S1nu, color='yellow')
ax_b.plot(t, flat_list_nucleus, color='darkkhaki')
# zoom-in / limit the view to different portions of the data
ax_a.set_xlim(-1,200)
ax_a.set_ylim(120, 150)
ax_b.set_ylim(-25, 40)
# hide the spines between ax and ax2
ax_a.spines['bottom'].set_visible(False)
ax_b.spines['top'].set_visible(False)
ax_a.xaxis.tick_top()
ax_a.tick_params(labeltop=False) # don't put tick labels at the top
ax_b.xaxis.tick_bottom()
ax_a.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
d = .012 # how big to make the diagonal lines in axes coordinates
# arguments to pass to plot, just so we don't keep repeating them
kwargs = dict(transform=ax_a.transAxes, color='k', clip_on=False)
ax_a.plot((-d, +d), (-d, +d), **kwargs) # top-left diagonal
ax_a.plot((1 - d, 1 + d), (-d, +d), **kwargs) # top-right diagonal
kwargs.update(transform=ax_b.transAxes) # switch to the bottom axes
ax_b.plot((-d, +d), (1 - d, 1 + d), **kwargs) # bottom-left diagonal
ax_b.plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs) # bottom-right diagonal
ax_b.set_xlabel('Time (year)')
ax_b.set_ylabel('C flows (t-C)')
ax_a.set_ylabel('C flows (t-C)')
ax_a.set_title('Carbon flow, PF_PO_M_EC_nucleus')
#plt.plot(t, Cflow_PF_SF_S1)
#plt.plot(t, Cflow_PF_SF_S2)
#plt.plot(t, Cflow_PF_SF_E)
#plt.xlim([0, 200])
plt.show()
#%%
#plot for the individual carbon flows - test for symlog-scale graphs
#PF_PO_M_EC_pl
fig=plt.figure()
fig.show()
ax2=fig.add_subplot(111)
#plot
ax2.plot(t, flat_list_plasma, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax2.plot(t, OC_storage_S1pl, color='darkturquoise', label='9: Landfill storage')
ax2.plot(t, decomp_emissions[:,0], color='lightcoral', label='F1-0: Residue decomposition')
ax2.plot(t, c_firewood_energy_S1pl, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax2.plot(t, OpProcessing_PF_PO_S1pl, color='orange', label='F8-0: Operational stage/processing emissions')
ax2.plot(t, TestDSM1pl.o, color='royalblue', label='F6-0-2: Energy use emissions from in-use stocks outflow')
ax2.plot(t, Landfill_decomp_PF_PO_S1pl, color='yellow', label='F7-0: Landfill gas decomposition')
ax2.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax2.set_xlim(-1,200)
ax2.set_yscale('symlog')
ax2.set_xlabel('Time (year)')
ax2.set_ylabel('C flows (t-C) (symlog)')
ax2.set_title('Carbon flow, PF_PO_M_EC_plasma (symlog-scale)')
plt.show()
#%%
#PF_PO_M_EC_pl
f, (ax_c, ax_d) = plt.subplots(2, 1, sharex=True)
# plot the same data on both axes
ax_c.plot(t, flat_list_plasma, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax_c.plot(t, OC_storage_S1pl, color='darkturquoise', label='9: Landfill storage')
ax_c.plot(t, decomp_emissions[:,0], color='lightcoral', label='F1-0: Residue decomposition')
ax_c.plot(t, c_firewood_energy_S1pl, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax_c.plot(t, OpProcessing_PF_PO_S1pl, color='orange', label='F8-0: Operational stage/processing emissions')
ax_c.plot(t, TestDSM1pl.o, color='royalblue', label='F6-0-2: Energy use emissions from in-use stocks outflow')
ax_c.plot(t, Landfill_decomp_PF_PO_S1pl, color='yellow', label='F7-0: Landfill gas decomposition')
ax_d.plot(t, c_firewood_energy_S1pl, color='mediumseagreen')
ax_d.plot(t, decomp_emissions[:,0], color='lightcoral')
ax_d.plot(t, TestDSM1pl.o, color='royalblue')
ax_d.plot(t, OpProcessing_PF_PO_S1pl, color='orange')
#ax_d.plot(t, OC_storage_S1pl, color='darkturquoise')
ax_d.plot(t, Landfill_decomp_PF_PO_S1pl, color='yellow')
ax_d.plot(t, flat_list_plasma, color='darkkhaki')
# zoom-in / limit the view to different portions of the data
ax_c.set_xlim(-1,200)
ax_c.set_ylim(120, 150)
ax_d.set_ylim(-25, 40)
# hide the spines between ax and ax2
ax_c.spines['bottom'].set_visible(False)
ax_d.spines['top'].set_visible(False)
ax_c.xaxis.tick_top()
ax_c.tick_params(labeltop=False) # don't put tick labels at the top
ax_d.xaxis.tick_bottom()
ax_c.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
d = .012 # how big to make the diagonal lines in axes coordinates
# arguments to pass to plot, just so we don't keep repeating them
kwargs = dict(transform=ax_c.transAxes, color='k', clip_on=False)
ax_c.plot((-d, +d), (-d, +d), **kwargs) # top-left diagonal
ax_c.plot((1 - d, 1 + d), (-d, +d), **kwargs) # top-right diagonal
kwargs.update(transform=ax_d.transAxes) # switch to the bottom axes
ax_d.plot((-d, +d), (1 - d, 1 + d), **kwargs) # bottom-left diagonal
ax_d.plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs) # bottom-right diagonal
ax_d.set_xlabel('Time (year)')
ax_d.set_ylabel('C flows (t-C)')
ax_c.set_ylabel('C flows (t-C)')
ax_c.set_title('Carbon flow, PF_PO_M_EC_plasma')
#plt.plot(t, Cflow_PF_SF_S1)
#plt.plot(t, Cflow_PF_SF_S2)
#plt.plot(t, Cflow_PF_SF_E)
#plt.xlim([0, 200])
plt.show()
#%%
#plot for the individual carbon flows - test for symlog-scale graphs
#PF_PO_E_EC_nu
fig=plt.figure()
fig.show()
ax3=fig.add_subplot(111)
#plot
ax3.plot(t, flat_list_nucleus, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax3.plot(t, OC_storage_Enu, color='darkturquoise', label='9: Landfill storage')
ax3.plot(t, decomp_emissions[:,0], color='lightcoral', label='F1-0: Residue decomposition')
ax3.plot(t, c_firewood_energy_Enu, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax3.plot(t, OpProcessing_PF_PO_Enu, color='orange', label='F8-0: Operational stage/processing emissions')
ax3.plot(t, Landfill_decomp_PF_PO_Enu, color='yellow', label='F7-0: Landfill gas decomposition')
ax3.plot(t, c_pellets_Enu, color='slategrey', label='F4-0: Emissions from wood pellets use')
#ax3.plot(t, TestDSM3nu.o, color='royalblue', label='in-use stock output')
ax3.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax3.set_xlim(-1,200)
ax3.set_yscale('symlog')
ax3.set_xlabel('Time (year)')
ax3.set_ylabel('C flows (t-C) (symlog)')
ax3.set_title('Carbon flow, PF_PO_E_EC_nucleus (symlog-scale)')
plt.show()
#%%
#plotting the individual C flows
#PF_PO_E_EC_nu
f, (ax_e, ax_f) = plt.subplots(2, 1, sharex=True)
# plot the same data on both axes
ax_e.plot(t, flat_list_nucleus, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax_e.plot(t, OC_storage_Enu, color='darkturquoise', label='9: Landfill storage')
ax_e.plot(t, decomp_emissions[:,0], color='lightcoral', label='F1-0: Residue decomposition')
ax_e.plot(t, c_firewood_energy_Enu, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax_e.plot(t, OpProcessing_PF_PO_Enu, color='orange', label='F8-0: Operational stage/processing emissions')
ax_e.plot(t, Landfill_decomp_PF_PO_Enu, color='yellow', label='F7-0: Landfill gas decomposition')
ax_e.plot(t, c_pellets_Enu, color='slategrey', label='F4-0: Emissions from wood pellets use')
#ax_e.plot(t, TestDSM3nu.o, color='royalblue', label='in-use stock output')
ax_f.plot(t, c_firewood_energy_Enu, color='mediumseagreen')
ax_f.plot(t, decomp_emissions[:,0], color='lightcoral')
ax_f.plot(t, c_pellets_Enu, color='slategrey')
#ax_f.plot(t, TestDSM3nu.o, color='royalblue')
#ax_f.plot(t, OC_storage_Enu, color='darkturquoise')
ax_f.plot(t, OpProcessing_PF_PO_Enu, color='orange')
ax_f.plot(t, Landfill_decomp_PF_PO_Enu, color='yellow')
ax_f.plot(t, flat_list_nucleus, color='darkkhaki')
# zoom-in / limit the view to different portions of the data
ax_e.set_xlim(-1,200)
ax_e.set_ylim(90, 110)
ax_f.set_ylim(-25, 40)
# hide the spines between ax and ax2
ax_e.spines['bottom'].set_visible(False)
ax_f.spines['top'].set_visible(False)
ax_e.xaxis.tick_top()
ax_e.tick_params(labeltop=False) # don't put tick labels at the top
ax_f.xaxis.tick_bottom()
ax_e.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
d = .012 # how big to make the diagonal lines in axes coordinates
# arguments to pass to plot, just so we don't keep repeating them
kwargs = dict(transform=ax_e.transAxes, color='k', clip_on=False)
ax_e.plot((-d, +d), (-d, +d), **kwargs) # top-left diagonal
ax_e.plot((1 - d, 1 + d), (-d, +d), **kwargs) # top-right diagonal
kwargs.update(transform=ax_f.transAxes) # switch to the bottom axes
ax_f.plot((-d, +d), (1 - d, 1 + d), **kwargs) # bottom-left diagonal
ax_f.plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs) # bottom-right diagonal
ax_f.set_xlabel('Time (year)')
ax_f.set_ylabel('C flows (t-C)')
ax_e.set_ylabel('C flows (t-C)')
ax_e.set_title('Carbon flow, PF_PO_E_EC_nucleus')
#plt.plot(t, Cflow_PF_SF_S1)
#plt.plot(t, Cflow_PF_SF_S2)
#plt.plot(t, Cflow_PF_SF_E)
#plt.xlim([0, 200])
plt.show()
#%%
#plot for the individual carbon flows - test for symlog-scale graphs
#PF_PO_E_EC_pl
fig=plt.figure()
fig.show()
ax4=fig.add_subplot(111)
#plot
ax4.plot(t, flat_list_plasma, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax4.plot(t, OC_storage_Epl, color='darkturquoise', label='9: Landfill storage')
ax4.plot(t, decomp_emissions[:,0], color='lightcoral', label='F1-0: Residue decomposition')
ax4.plot(t, c_firewood_energy_Epl, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax4.plot(t, OpProcessing_PF_PO_Epl, color='orange', label='F8-0: Operational stage/processing emissions')
ax4.plot(t, Landfill_decomp_PF_PO_Epl, color='yellow', label='F7-0: Landfill gas decomposition')
ax4.plot(t, c_pellets_Epl, color='slategrey', label='F4-0: Emissions from wood pellets use')
#ax_4.plot(t, TestDSM3pl.o, color='royalblue', label='in-use stock output')
ax4.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax4.set_xlim(-1,200)
ax4.set_yscale('symlog')
ax4.set_xlabel('Time (year)')
ax4.set_ylabel('C flows (t-C) (symlog)')
ax4.set_title('Carbon flow, PF_PO_E_EC_plasma (symlog-scale)')
plt.show()
#%%
#plotting the individual C flows
#PF_PO_E_EC_pl
f, (ax_g, ax_h) = plt.subplots(2, 1, sharex=True)
# plot the same data on both axes
ax_g.plot(t, flat_list_plasma, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax_g.plot(t, OC_storage_Epl, color='darkturquoise', label='9: Landfill storage')
ax_g.plot(t, decomp_emissions[:,0], color='lightcoral', label='F1-0: Residue decomposition')
ax_g.plot(t, c_firewood_energy_Epl, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax_g.plot(t, OpProcessing_PF_PO_Epl, color='orange', label='F8-0: Operational stage/processing emissions')
ax_g.plot(t, Landfill_decomp_PF_PO_Epl, color='yellow', label='F7-0: Landfill gas decomposition')
ax_g.plot(t, c_pellets_Epl, color='slategrey', label='F4-0: Emissions from wood pellets use')
#ax_g.plot(t, TestDSM3pl.o, color='royalblue', label='in-use stock output')
ax_h.plot(t, c_firewood_energy_Epl, color='mediumseagreen')
ax_h.plot(t, decomp_emissions[:,0], color='lightcoral')
ax_h.plot(t, c_pellets_Epl, color='slategrey')
#ax_h.plot(t, TestDSM3pl.o, color='royalblue')
ax_h.plot(t, OpProcessing_PF_PO_Epl, color='orange')
#ax_h.plot(t, OC_storage_Epl, color='darkturquoise')
ax_h.plot(t, Landfill_decomp_PF_PO_Epl, color='yellow')
ax_h.plot(t, flat_list_plasma, color='darkkhaki')
# zoom-in / limit the view to different portions of the data
ax_g.set_xlim(-1,200)
ax_g.set_ylim(90, 110)
ax_h.set_ylim(-25, 40)
# hide the spines between ax and ax2
ax_g.spines['bottom'].set_visible(False)
ax_h.spines['top'].set_visible(False)
ax_g.xaxis.tick_top()
ax_g.tick_params(labeltop=False) # don't put tick labels at the top
ax_h.xaxis.tick_bottom()
ax_g.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
d = .012 # how big to make the diagonal lines in axes coordinates
# arguments to pass to plot, just so we don't keep repeating them
kwargs = dict(transform=ax_g.transAxes, color='k', clip_on=False)
ax_g.plot((-d, +d), (-d, +d), **kwargs) # top-left diagonal
ax_g.plot((1 - d, 1 + d), (-d, +d), **kwargs) # top-right diagonal
kwargs.update(transform=ax_h.transAxes) # switch to the bottom axes
ax_h.plot((-d, +d), (1 - d, 1 + d), **kwargs) # bottom-left diagonal
ax_h.plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs) # bottom-right diagonal
ax_h.set_xlabel('Time (year)')
ax_h.set_ylabel('C flows (t-C)')
ax_g.set_ylabel('C flows (t-C)')
ax_g.set_title('Carbon flow, PF_PO_E_EC_plasma')
#plt.plot(t, Cflow_PF_SF_S1)
#plt.plot(t, Cflow_PF_SF_S2)
#plt.plot(t, Cflow_PF_SF_E)
#plt.xlim([0, 200])
plt.show()
#%%
#Step (23): Generate the excel file for the net carbon balance
Agg_Cflow_PF_PO_S1nu = [c_firewood_energy_S1nu, decomp_emissions[:,0], TestDSM1nu.o, OpProcessing_PF_PO_S1nu, Landfill_decomp_PF_PO_S1nu, flat_list_nucleus]
Agg_Cflow_PF_PO_S1pl = [c_firewood_energy_S1pl, decomp_emissions[:,0], TestDSM1pl.o, OpProcessing_PF_PO_S1pl, Landfill_decomp_PF_PO_S1pl, flat_list_plasma]
Agg_Cflow_PF_PO_Enu = [c_firewood_energy_Enu, c_pellets_Enu, decomp_emissions[:,0], TestDSM3nu.o, OpProcessing_PF_PO_Enu, Landfill_decomp_PF_PO_Enu, flat_list_nucleus]
Agg_Cflow_PF_PO_Epl = [c_firewood_energy_Epl, c_pellets_Epl, decomp_emissions[:,0], TestDSM3pl.o, OpProcessing_PF_PO_Epl, Landfill_decomp_PF_PO_Epl, flat_list_plasma]
Agg_Cflow_PF_PO_S1nu = [sum(x) for x in zip(*Agg_Cflow_PF_PO_S1nu)]
Agg_Cflow_PF_PO_S1pl = [sum(x) for x in zip(*Agg_Cflow_PF_PO_S1pl)]
Agg_Cflow_PF_PO_Enu = [sum(x) for x in zip(*Agg_Cflow_PF_PO_Enu)]
Agg_Cflow_PF_PO_Epl = [sum(x) for x in zip(*Agg_Cflow_PF_PO_Epl)]
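#The zip(*...) pattern above sums the listed flows element-wise, collapsing the per-flow
#time series into a single net C-flow series (one value per year in t) for each scenario.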
fig=plt.figure()
fig.show()
ax5=fig.add_subplot(111)
# plot
ax5.plot(t, Agg_Cflow_PF_PO_S1nu, color='orange', label='M_EC_nucleus')
ax5.plot(t, Agg_Cflow_PF_PO_S1pl, color='darkturquoise', label='M_EC_plasma')
ax5.plot(t, Agg_Cflow_PF_PO_Enu, color='lightcoral', label='E_EC_nucleus')
ax5.plot(t, Agg_Cflow_PF_PO_Epl, color='mediumseagreen', label='E_EC_plasma')
ax5.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax5.set_xlim(-1,200)
ax5.set_yscale('symlog')
ax5.set_xlabel('Time (year)')
ax5.set_ylabel('C flows (t-C) (symlog)')
ax5.set_title('Aggr. C-emissions/sequestration flow, PF_PO_EC (symlog-scale)')
plt.show()
#create column year
year = []
for x in range (0, 201):
year.append(x)
print (year)
#Create result columns
dfM_EC_PF_PO = pd.DataFrame.from_dict({'Year':year,'M_EC_nucleus (t-C)':Agg_Cflow_PF_PO_S1nu, 'M_EC_plasma (t-C)':Agg_Cflow_PF_PO_S1pl,
'E_EC_nucleus (t-C)':Agg_Cflow_PF_PO_Enu, 'E_EC_plasma (t-C)':Agg_Cflow_PF_PO_Epl})
#Export to excel
writer = pd.ExcelWriter('AggCFlow_PF_PO_EC.xlsx', engine = 'xlsxwriter')
dfM_EC_PF_PO.to_excel(writer, sheet_name = 'PF_PO_EC', header=True, index=False)
writer.save()
writer.close()
#%%
#Step (24): Plot the net carbon balance
f, (ax5a, ax5b) = plt.subplots(2, 1, sharex=True)
# plot
ax5a.plot(t, Agg_Cflow_PF_PO_S1nu, color='orange', label='M_EC_nucleus')
ax5a.plot(t, Agg_Cflow_PF_PO_S1pl, color='darkturquoise', label='M_EC_plasma')
ax5a.plot(t, Agg_Cflow_PF_PO_Enu, color='lightcoral', label='E_EC_nucleus')
ax5a.plot(t, Agg_Cflow_PF_PO_Epl, color='mediumseagreen', label='E_EC_plasma')
ax5a.plot(t, zerolistmaker(tf-1), color='black', label='Zero line', ls='--', alpha=0.75)
ax5b.plot(t, Agg_Cflow_PF_PO_S1nu, color='orange', label='M_EC_nucleus')
ax5b.plot(t, Agg_Cflow_PF_PO_S1pl, color='darkturquoise', label='M_EC_plasma')
ax5b.plot(t, Agg_Cflow_PF_PO_Enu, color='lightcoral', label='E_EC_nucleus')
ax5b.plot(t, Agg_Cflow_PF_PO_Epl, color='mediumseagreen', label='E_EC_plasma')
ax5b.plot(t, zerolistmaker(tf-1), color='black', label='Zero line', ls='--', alpha=0.75)
# zoom-in / limit the view to different portions of the data
ax5a.set_xlim(-1,200)
ax5a.set_ylim(200, 220)
ax5b.set_ylim(-5, 50)
# hide the spines between ax and ax2
ax5a.spines['bottom'].set_visible(False)
ax5b.spines['top'].set_visible(False)
ax5a.xaxis.tick_top()
ax5a.tick_params(labeltop=False) # don't put tick labels at the top
ax5b.xaxis.tick_bottom()
ax5a.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
d = .012 # how big to make the diagonal lines in axes coordinates
# arguments to pass to plot, just so we don't keep repeating them
kwargs = dict(transform=ax5a.transAxes, color='k', clip_on=False)
ax5a.plot((-d, +d), (-d, +d), **kwargs) # top-left diagonal
ax5a.plot((1 - d, 1 + d), (-d, +d), **kwargs) # top-right diagonal
kwargs.update(transform=ax5b.transAxes) # switch to the bottom axes
ax5b.plot((-d, +d), (1 - d, 1 + d), **kwargs) # bottom-left diagonal
ax5b.plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs) # bottom-right diagonal
ax5b.set_xlabel('Time (year)')
ax5b.set_ylabel('C flows (t-C)')
ax5a.set_ylabel('C flows (t-C)')
ax5a.set_title('Net carbon balance, PF_PO_EC')
plt.show()
#%%
# Step (25): Generate the excel file for documentation of individual carbon flows in the system definition (Fig. 1)
#print year column
year = []
for x in range (0, 201):
year.append(x)
print (year)
df1nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1nu')
df1pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1pl')
dfEnu = | pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Enu') | pandas.read_excel |
from collections import Counter
from itertools import repeat
from pathlib import Path
from typing import Tuple
from ortools.linear_solver import pywraplp
from concurrent.futures import ThreadPoolExecutor
import multiprocessing
import pandas as pd
import numpy as np
from ..document import Document
from ..alignment import Alignment
from ..amr import AMR
def expand_graph(graph: AMR, corpus: Document) -> None:
"""
Expand the given `graph` by adding edges between all nodes in the same sentence,
according to the provided `corpus`.
This expansion is done **in place**.
Parameters:
graph (AMR): Graph to be expanded.
corpus (Document): Corpus corresponding to the `graph`.
"""
for _, _, amr in corpus:
for node1 in amr.nodes:
for node2 in amr.nodes:
u = graph.get_label_node(amr.get_node_label(node1))
v = graph.get_label_node(amr.get_node_label(node2))
if u == v or (node1, node2) in amr.edges():
# Don't expand if the edge already exists
continue
elif (u, v) not in graph.edges():
graph.add_edge(u, v, key='expansion', count=1)
else:
try:
# Edge has already been expanded
graph.edges[(u, v, 'expansion')]['count'] += 1
except KeyError:
# The edge exists but it is not an expansion
continue
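# Illustrative usage sketch (not from the original module; the corpus path and reader come
# from this package's own `Document` API, so treat the call pattern below as indicative only):
# >>> corpus = Document.read(some_corpus_path)
# >>> merged = corpus.merge_graphs(collapse_ner=True, collapse_date=True)
# >>> expand_graph(merged, corpus)   # adds/updates 'expansion' edges in place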
def calculate_node_data(corpus: Document, alignment: Alignment) -> Tuple[Counter, dict, dict, dict]:
"""
Compute the number of occurrences of each node,
along with their depth (distance to the root) in each sentence graph they occur.
This also computes in which sentences each node occurs (their position in the given corpus).
Finally, this uses the `alignment` to get the number of words (span length) aligned to each node.
Parameters:
corpus (Document): Corpus from which to extract the data.
alignment (Alignment): Concept alignment data for the given `corpus`.
Returns:
tuple(Counter, dict, dict, dict): Number of occurrences of each node, their depths, their positions, and their span lengths.
"""
node_counts = Counter()
node_depths = dict()
node_positions = dict()
span_lengths = dict()
for i, doc in enumerate(corpus):
doc_idx = alignment.get_sentence_position(doc.snt)
doc_alignment = alignment[doc_idx] if doc_idx is not None else None
for node in doc.amr.nodes():
try:
node_label = doc.amr.nodes[node]['label']
except KeyError:
node_label = node
node_counts[node_label] += 1
if node_label not in node_depths:
node_depths[node_label] = list()
node_depths[node_label].append(doc.amr.get_node_depth(node))
if node_label not in node_positions:
node_positions[node_label] = list()
node_positions[node_label].append(i+1)
if node_label not in span_lengths:
span_lengths[node_label] = list()
# Get alignment info
if doc_alignment is not None:
if node_label.startswith('NER:'):
# Alignment for NER nodes is obtained from the first name (op1)
aligned_concept = node_label.split('.')[2]
else:
aligned_concept = node_label
try:
span_len = len(doc_alignment[aligned_concept])
span_lengths[node_label].append(span_len)
except KeyError:
# No alignment for the concept found
span_lengths[node_label].append(0)
else:
# No alignment information for the sentence
span_lengths[node_label].append(0)
return node_counts, node_depths, node_positions, span_lengths
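# Sketch of the returned structures (labels and numbers are invented for illustration):
# node_counts == Counter({'want-01': 3, 'boy': 2, ...})
# node_depths == {'want-01': [1, 2, 1], ...}     # one depth per sentence graph it occurs in
# node_positions == {'want-01': [1, 4, 7], ...}  # 1-based sentence indices in the corpus
# span_lengths == {'want-01': [2, 0, 1], ...}    # number of aligned words per occurrence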
def get_node_features(amr: AMR, data: tuple) -> pd.DataFrame:
"""
Create the local representations (features) for each node in the given graph.
Parameters:
amr (AMR): Graph from which to compute the attributes.
data (tuple): Node data tuple created from the LiuEtAl2015.calculate_node_data() function.
Returns:
pd.DataFrame: Nodes local representations (features).
"""
node_counts, node_depths, node_positions, span_lengths = data
features_names = ['concept',
'n_freq_0', 'n_freq_1', 'n_freq_2', 'n_freq_5', 'n_freq_10',
'min_depth_1', 'min_depth_2', 'min_depth_3', 'min_depth_4', 'min_depth_5',
'avg_depth_1', 'avg_depth_2', 'avg_depth_3', 'avg_depth_4', 'avg_depth_5',
'n_fmst_pos_5', 'n_fmst_pos_6', 'n_fmst_pos_7', 'n_fmst_pos_10', 'n_fmst_pos_15',
'n_avg_pos_5', 'n_avg_pos_6', 'n_avg_pos_7', 'n_avg_pos_10', 'n_avg_pos_15',
'lngst_span_0', 'lngst_span_1', 'lngst_span_2', 'lngst_span_5', 'lngst_span_10',
'avg_span_0', 'avg_span_1', 'avg_span_2', 'avg_span_5', 'avg_span_10',
'ner', 'date',
'n_bias']
features = dict()
for node in amr.nodes():
features[node] = list()
try:
# Concept
node_label = amr.nodes[node]['label']
except KeyError:
# Constant
node_label = node
# Concept feature
features[node].append(node_label)
# freq_0
freq = 1.0 if node_counts[node_label] == 0 else 0.0
features[node].append(freq)
# freq_1, freq_2, freq_5, freq_10
for t in [1, 2, 5, 10]:
freq = 1.0 if node_counts[node_label] >= t else 0.0
features[node].append(freq)
# min_depth_1, min_depth_2, min_depth_3, min_depth_4, min_depth_5
for t in [1, 2, 3, 4, 5]:
if node == amr.get_top():
# TOP node has depth 0
depth = 0.0
else:
depth = 1.0 if min(node_depths[node_label]) >= t else 0.0
features[node].append(depth)
# avg_depth_1, avg_depth_2, avg_depth_3, avg_depth_4, avg_depth_5
if node == amr.get_top():
avg_depth = 0.0
else:
avg_depth = np.mean(node_depths[node_label])
for t in [1, 2, 3, 4, 5]:
depth = 1.0 if avg_depth >= t else 0.0
features[node].append(depth)
# fmst_pos_5, fmst_pos_6, fmst_pos_7, fmst_pos_10, fmst_pos_15
for t in [5, 6, 7, 10, 15]:
if node_label in node_positions:
pos = 1.0 if min(node_positions[node_label]) >= t else 0.0
else:
# There is no information about this specific node
pos = 0.0
features[node].append(pos)
# avg_pos_5, avg_pos_6, avg_pos_7, avg_pos_10, avg_pos_15
if node_label in node_positions:
avg_pos = np.mean(node_positions[node_label])
else:
avg_pos = 0.0
for t in [5, 6, 7, 10, 15]:
pos = 1.0 if avg_pos >= t else 0.0
features[node].append(pos)
# lngst_span_0, lngst_span_1, lngst_span_2, lngst_span_5, lngst_span_10
for t in [0, 1, 2, 5, 10]:
if node_label in span_lengths:
span = 1.0 if max(span_lengths[node_label]) >= t else 0.0
else:
span = 0.0
features[node].append(span)
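# avg_span_0, avg_span_1, avg_span_2, avg_span_5, avg_span_10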
if node_label in span_lengths:
avg_span = np.mean(span_lengths[node_label])
else:
avg_span = 0.0
for t in [0, 1, 2, 5, 10]:
span = 1.0 if avg_span >= t else 0.0
features[node].append(span)
# ner
ner = 1.0 if node_label.startswith('NER:') else 0.0
features[node].append(ner)
# date
date = 1.0 if node_label.startswith('DATE:') else 0.0
features[node].append(date)
# bias
features[node].append(1.0)
return pd.DataFrame(features,
index=features_names,
dtype=np.float32).T
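# Illustrative row of the resulting frame (values invented): a concept seen 3 times with an
# average depth of 2.4 gets n_freq_1 = n_freq_2 = 1.0 but n_freq_5 = 0.0, and
# avg_depth_1 = avg_depth_2 = 1.0 but avg_depth_3 = 0.0 -- i.e. every numeric statistic is
# binarized against the fixed thresholds encoded in `features_names`.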
def calculate_edge_data(corpus: Document) -> Tuple[Counter, dict]:
"""
Compute the number of occurrences of each edge and in which sentences they occur
(their position in the given corpus).
Parameters:
corpus (Document): Corpus from which to extract the data.
Returns:
tuple(Counter, dict): Number of occurrences of each edge and their positions.
"""
edge_counts = Counter()
edge_positions = dict()
for i, doc in enumerate(corpus):
for u, v, r in doc.amr.edges:
try:
u_label = doc.amr.nodes[u]['label']
except KeyError:
u_label = u
try:
v_label = doc.amr.nodes[v]['label']
except KeyError:
v_label = v
edge_counts[u_label, v_label, r] += 1
if (u_label, v_label) not in edge_positions:
edge_positions[(u_label, v_label)] = list()
edge_positions[(u_label, v_label)].append(i+1)
return edge_counts, edge_positions
def get_edge_features(merged_graph: AMR, data: tuple, nodes_features: pd.DataFrame) -> pd.DataFrame:
"""
Create the local representations (features) for each edge in the given graph.
Parameters:
merged_graph (AMR): Graph from which to compute the attributes.
data (tuple): Edge data tuple created by the LiuEtAl2015.calculate_edge_data() function.
nodes_features (pd.DataFrame): Node local representations (features) for the given `merged_graph`.
Returns:
pd.DataFrame: Edges local representations (features).
"""
edge_counts, edge_positions = data
features_names = ['label_1_05', 'label_1_066', 'label_1_075',
'label_2_05', 'label_2_066', 'label_2_075',
'e_freq_0', 'e_freq_1', 'e_freq_2', 'e_freq_5', 'e_freq_10',
'e_fmst_pos_5', 'e_fmst_pos_6', 'e_fmst_pos_7', 'e_fmst_pos_10', 'e_fmst_pos_15',
'e_avg_pos_5', 'e_avg_pos_6', 'e_avg_pos_7', 'e_avg_pos_10', 'e_avg_pos_15']
node1_names = nodes_features.add_prefix(
'node1_').columns[nodes_features.columns != 'bias']
node2_names = nodes_features.add_prefix(
'node2_').columns[nodes_features.columns != 'bias']
features_names.extend(node1_names)
features_names.extend(node2_names)
features_names.extend(['expansion',
'exp_freq_0', 'exp_freq_1', 'exp_freq_2', 'exp_freq_5', 'exp_freq_10',
'e_bias'])
# Get all edges between each pair of nodes
edges = dict()
for u, v, r in merged_graph.edges:
if (u, v) not in edges:
edges[(u, v)] = list()
edges[(u, v)].append(r)
features = dict()
for u, v in edges:
features[(u, v)] = list()
u_label = merged_graph.get_node_label(u)
v_label = merged_graph.get_node_label(v)
# label
l_freqs = Counter({l: edge_counts[(u_label, v_label, l)]
for l in edges[(u, v)]})
frequent_labels = l_freqs.most_common(2)
# label_1_05, label_1_066, label_1_075
relative_freq = frequent_labels[0][1] * 1.0 / len(edges[(u, v)])
for t in [0.5, 0.66, 0.75]:
label = 1.0 if relative_freq >= t else 0.0
features[(u, v)].append(label)
# label_2_05, label_2_066, label_2_075
if len(frequent_labels) > 1:
relative_freq = frequent_labels[1][1] * 1.0 / len(edges[(u, v)])
for t in [0.5, 0.66, 0.75]:
label = 1.0 if relative_freq >= t else 0.0
features[(u, v)].append(label)
else:
features[(u, v)].extend(3*[0])
non_expanded_edges = [e for e in edges[(u, v)] if e != 'expansion']
# freq_0
freq = 1.0 if len(non_expanded_edges) == 0 else 0.0
features[(u, v)].append(freq)
# freq_1, freq_2, freq_5, freq_10
for t in [1, 2, 5, 10]:
freq = 1.0 if len(non_expanded_edges) >= t else 0.0
features[(u, v)].append(freq)
try:
positions = edge_positions[(u_label, v_label)]
except KeyError:
positions = [0.0]
# fmst_pos_5, fmst_pos_6, fmst_pos_7, fmst_pos_10, fmst_pos_15
fmst_pos = min(positions)
for t in [5, 6, 7, 10, 15]:
pos = 1.0 if fmst_pos >= t else 0.0
features[(u, v)].append(pos)
# avg_pos_5, avg_pos_6, avg_pos_7, avg_pos_10, avg_pos_15
avg_pos = np.mean(positions)
for t in [5, 6, 7, 10, 15]:
pos = 1.0 if avg_pos >= t else 0.0
features[(u, v)].append(pos)
# nodes features
node1_features = nodes_features.loc[u,
nodes_features.columns != 'bias']
features[(u, v)].extend(node1_features)
node2_features = nodes_features.loc[v,
nodes_features.columns != 'bias']
features[(u, v)].extend(node2_features)
# expansion
expansion = 1.0 if 'expansion' in edges[(u, v)] else 0.0
features[(u, v)].append(expansion)
# exp_freq_0
freq = 1.0 if len(edges[(u, v)]) == 0 else 0.0
features[(u, v)].append(freq)
# exp_freq_1, exp_freq_2, exp_freq_5, exp_freq_10
for t in [1, 2, 5, 10]:
freq = 1.0 if len(edges[(u, v)]) >= t else 0.0
features[(u, v)].append(freq)
# bias
features[(u, v)].append(1.0)
return pd.DataFrame(features, index=features_names, dtype=np.float32).T
def ilp_optimisation(node_features: pd.DataFrame, edge_features: pd.DataFrame,
weights: np.array, top: str, nodes_cost: np.array = 0,
edge_cost: np.array = 0) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""
Run ILP optimization to select nodes and edges according to their features and the given weights.
Parameters:
node_features (pd.DataFrame): Node local representations (features).
edge_features (pd.DataFrame): Edge local representations (features).
weights (np.array): Feature weights to calculate a score for each node/edge.
top (str): Which node (variable) to use as the root of the graph.
nodes_cost (np.array): Value to sum into the computed score for each node
(`n` positions, `n` being the number of nodes).
edge_cost (np.array): Value to sum into the computed score for each edge
(`e` positions, `e` being the number of edges).
Returns:
tuple(pd.DataFrame, pd.DataFrame): Selected nodes and selected edges.
"""
nodes_scores = np.dot(node_features, weights) + nodes_cost
edge_scores = np.dot(edge_features, weights) + edge_cost
solver = pywraplp.Solver('LiuEtAl2015',
pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING)
nodes_var = {n: solver.IntVar(0, 1, 'node[{}]'.format(n))
for n, _ in node_features.iterrows()}
edges_var = {e: solver.IntVar(0, 1, 'edge[{}]'.format(e))
for e, _ in edge_features.iterrows()}
flow_var = {(n1, n2): solver.IntVar(0, solver.Infinity(), 'flow[{}]'.format((n1, n2)))
for n1, _ in node_features.iterrows()
for n2, _ in node_features.iterrows()}
# Constraints
# If an edge is selected, both nodes have to be selected too
for s, t in edges_var:
edge_s_ct = solver.Constraint(
0, 1, 'ct_edge[{}][{}]'.format((s, t), s))
edge_s_ct.SetCoefficient(nodes_var[s], 1)
edge_s_ct.SetCoefficient(edges_var[(s, t)], -1)
if t != s:
# Loops have only one constraint
edge_t_ct = solver.Constraint(
0, 1, 'ct_edge[{}][{}]'.format((s, t), t))
edge_t_ct.SetCoefficient(nodes_var[t], 1)
edge_t_ct.SetCoefficient(edges_var[(s, t)], -1)
# Select at most one edge between two nodes
# If there is more than one direction between the nodes
if (t, s) in edges_var:
self_loop_ct = solver.Constraint(
0, 1, 'ct_self_loop[{}][{}]'.format(s, t))
self_loop_ct.SetCoefficient(edges_var[(s, t)], 1)
self_loop_ct.SetCoefficient(edges_var[(t, s)], 1)
# Connectivity
root_flow_ct = solver.Constraint(0, 0, 'root_flow_ct')
for n, _ in node_features.iterrows():
root_flow_ct.SetCoefficient(flow_var[(top, n)], 1)
if n != top:
root_flow_ct.SetCoefficient(nodes_var[n], -1)
flow_consumption_ct = solver.Constraint(0, 0,
'flow_consumption[{}]'.format(n))
for n2, _ in node_features.iterrows():
# Incoming flow
flow_consumption_ct.SetCoefficient(flow_var[(n2, n)], 1)
# Outgoing flow
if n2 != top:
flow_consumption_ct.SetCoefficient(flow_var[(n, n2)], -1)
flow_consumption_ct.SetCoefficient(nodes_var[n], -1)
# Flow must go through a selected edge
for src, tgt in flow_var:
if tgt != top:
edge_flow_ct = solver.Constraint(0, solver.infinity(),
'edge_flow[{}]'.format((src, tgt)))
if (src, tgt) in edges_var:
edge_flow_ct.SetCoefficient(
edges_var[(src, tgt)], nodes_scores.shape[0])
edge_flow_ct.SetCoefficient(flow_var[(src, tgt)], -1)
# Force tree structure
tree_ct = dict()
for n, _ in node_features.iterrows():
tree_ct[n] = solver.Constraint(0, 1, 'tree[{}]'.format(n))
for (s, t), _ in edge_features.iterrows():
tree_ct[t].SetCoefficient(edges_var[(s, t)], 1)
# Objective
obj = solver.Objective()
obj.SetMaximization()
for i, v in enumerate(nodes_var):
obj.SetCoefficient(nodes_var[v], nodes_scores[i])
for i, v in enumerate(edges_var):
obj.SetCoefficient(edges_var[v], edge_scores[i])
solver.Solve()
nodes = [True if nodes_var[n].solution_value() == 1.0 else False
for n, _ in node_features.iterrows()]
edges = [True if edges_var[e].solution_value() == 1.0 else False
for e, _ in edge_features.iterrows()]
return node_features.loc[nodes, :], edge_features.loc[edges, :]
def graph_local_representations(graph: AMR, node_data: tuple, edge_data: tuple) -> pd.DataFrame:
"""
Concatenate the local representations of all nodes and edges in the given graph.
Parameters:
graph (AMR): Graph for which to create the representations.
node_data (tuple): Node data tuple created by the LiuEtAl2015.calculate_node_data() function.
edge_data (tuple): Edge data tuple created by the LiuEtAl2015.calculate_edge_data() function.
Returns:
pd.DataFrame: Matrix containing all features for each node and edge in the `graph`.
"""
nodes_features = get_node_features(graph, node_data)
edge_features = get_edge_features(graph, edge_data, nodes_features)
return pd.concat([nodes_features, edge_features], axis=0).fillna(0.0)
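# Usage sketch (hypothetical objects): node rows and edge rows end up in one frame with a
# shared column set, and attributes that do not apply to a row are filled with 0.0, so a
# single weight vector can score both kinds of elements.
# >>> reprs = graph_local_representations(merged_graph, node_data, edge_data)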
def prepare_training_data(training_path, gold_path, alignment):
"""
Create the training instances, one for each node/edge in each graph in both training_path and gold_path.
Both training and gold paths must be aligned, so that each `gold_path` document corresponds to
the target value (the summary) of the corresponding `training_path` document.
Parameters:
training_path (Path): Training document.
gold_path (Path): Target/summary document.
alignment (Alignment): Concept alignments containing information of both train and target documents.
Returns:
DataFrame: Matrix containing the attribute representations for each node/edge in both training and gold documents.
"""
training_corpus = Document.read(training_path)
training_graph = training_corpus.merge_graphs(collapse_ner=True,
collapse_date=True)
node_data = calculate_node_data(training_corpus, alignment)
edge_data = calculate_edge_data(training_corpus)
summary_corpus = Document.read(gold_path)
gold_summary_graph = summary_corpus.merge_graphs(collapse_ner=True,
collapse_date=True)
sum_repr = graph_local_representations(gold_summary_graph,
node_data, edge_data)
sum_repr['name'] = training_path.stem
sum_repr['type'] = 'target'
train_repr = graph_local_representations(training_graph,
node_data, edge_data)
train_repr['name'] = training_path.stem
train_repr['type'] = 'train'
train_repr.at[training_graph.get_top(), 'top'] = True
final_reprs = pd.concat([train_repr, sum_repr])
final_reprs['concept'] = final_reprs['concept'].replace(0.0, np.nan)
return final_reprs
def update_weights(weights: np.array, train: pd.DataFrame, gold: pd.DataFrame,
top: str, loss: str = 'perceptron') -> Tuple[np.array, pd.DataFrame, pd.DataFrame]:
"""
Update a given array of weights via AdaGrad upon a given train-gold graph pair.
The given loss function (perceptron or ramp) is used to compute how the array
should be updated.
Parameters:
weights (np.array): Initial weight array.
train (pd.DataFrame): Training instance (all local representations, nodes and edges,
of a single merged AMR graph for the texts being summarized)
gold (pd.DataFrame): Target instance (all local representations, nodes and edges,
of a single merged AMR graph for the gold summary)
top (str): TOP node (variable) for the train graph.
loss (str): Which loss function to use (perceptron or ramp).
Returns:
tuple(np.array, pd.DataFrame, pd.DataFrame): Triple with the updated weights, along with the selected edges
(via ILP using the given weights) for both gold and train graphs,
respectively (in the case of using a ramp loss function, otherwise None).
"""
train_nodes = train.loc[train['n_bias'] == 1.0, :]
train_edges = train.loc[train['e_bias'] == 1.0, :]
if loss == 'perceptron':
gold_global = gold.sum(axis=0)
ilp_n, ilp_e = ilp_optimisation(train_nodes, train_edges, weights, top)
ilp_global = ilp_n.sum(axis=0) + ilp_e.sum(axis=0)
gold_n, gold_e = None, None
elif loss == 'ramp':
# Set to 1 all nodes/edges that are in training, but not in target
cost_idx = train.index.difference(gold.index)
cost = pd.Series(0.0, index=train.index)
cost.loc[cost_idx] = 1.0
# Run with negative costs
gold_n, gold_e = ilp_optimisation(train_nodes, train_edges, weights, top,
nodes_cost=-cost.loc[train_nodes.index],
edge_cost=-cost.loc[train_edges.index])
gold_global = gold_n.sum(axis=0) + gold_e.sum(axis=0)
# Run with positive cost
ilp_n, ilp_e = ilp_optimisation(train_nodes, train_edges, weights, top,
nodes_cost=cost.loc[train_nodes.index],
edge_cost=cost.loc[train_edges.index])
ilp_global = ilp_n.sum(axis=0) + ilp_e.sum(axis=0)
# Adagrad
gradient = ilp_global - gold_global
eta = 1.0
epsilon = 1.0
learning_rate = eta / np.sqrt(np.sum(gradient ** 2) + epsilon)
new_weights = weights - learning_rate * gradient
return new_weights, gold_e, ilp_e
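# Worked example of the update step above (toy numbers, two features): if the ILP solution
# under the current weights has feature totals [3, 1] and the gold solution has [2, 1],
# then gradient = [1, 0], learning_rate = 1 / sqrt(1 + 1) ~= 0.707, so the first weight
# decreases by about 0.707 and the second is unchanged.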
def train(training_path: Path, gold_path: Path, alignment: Alignment, loss: str) -> np.array:
"""
Train the weights for the scoring method using ILP and AdaGrad. The preprocessing is done in parallel.
Each node/edge from each AMR graph is represented as a set of binary attributes.
The importance score of a node/edge is given by the linear combination of its attributes given a weight vector.
The weight vector is initialized as a vector of 1, then it is updated via AdaGrad
using a loss function given as a parameter (perceptron or ramp) through supervised learning.
Parameters:
training_path (Path): The corpus to use as training.
gold_path (Path): The corpus to use as target.
alignment (Alignment): The concept alignments for both train and target corpora.
loss (str): Which loss function to use (perceptron or ramp)
Returns:
array: Optimized weights for the scoring of nodes and edges.
"""
# Create training instances in parallel through the prepare_training_data function
with ThreadPoolExecutor(max_workers=multiprocessing.cpu_count() - 1) as executor:
# Organize arguments for mapping
train_filepaths = list()
target_filepaths = list()
for instance_path in training_path.iterdir():
train_filepaths.append(instance_path)
target_filepaths.append(gold_path / instance_path.name)
alignment_arg = repeat(alignment)
# Create training and target representations
result = executor.map(prepare_training_data,
train_filepaths,
target_filepaths,
alignment_arg)
# Combine all results from the parallel processing
# Also provide one-hot encoding for concept attributes
local_reprs_df = pd.get_dummies( | pd.concat(result) | pandas.concat |
from Aligner import Aligner
from utils import *
import pandas as pd
class annotation2MRPC_Aligner(Aligner):
"""
gets gold crowdsourced alignments in a csv format
and converts them to a .tsv file that fits the huggingface 'transformers' MRPC format (a classification paraphrasing model)
"""
def __init__(self, data_path='.', mode='dev',
log_file='results/dev_log.txt', metric_precompute=True, output_file = './dev.tsv',
database='duc2004,duc2007,MultiNews'):
super().__init__(data_path=data_path, mode=mode,
log_file=log_file, metric_precompute=metric_precompute, output_file = output_file,
database=database)
self.filter_data = False
self.use_stored_alignment_database = False
self.alignment_database_list = []
self.docSentsOIE = True
self.alignment_database = pd.DataFrame(columns=['Quality', '#1 ID', '#2 ID', '#1 String', '#2 String','database', 'topic',
'summaryFile', 'scuSentCharIdx', 'scuSentence', 'documentFile', 'docSentCharIdx',
'docSentText', 'docSpanOffsets', 'summarySpanOffsets', 'docSpanText', 'summarySpanText'])
def main_filter(self, scu, doc_spans):
scu_offset_str = offset_list2str(scu['scuOffsets'])
id_scu = scu['topic'] + '_' + scu_offset_str
for doc_span in doc_spans:
doc_offset_str = offset_list2str(doc_span['docScuOffsets'])
id_doc_sent = scu['topic'] + '_' + doc_span['documentFile'] + '_' + doc_offset_str
label = 0  # label = 0 for all; positive samples' labels are changed later
self.alignment_database_list.append([label, id_scu, id_doc_sent,
scu['scuText'],
doc_span['docScuText'], scu['database'],
scu['topic'], scu['summaryFile'],
scu['scuSentCharIdx'],
scu['scuSentence'],
doc_span['documentFile'],
doc_span['docSentCharIdx'],
doc_span['docSentText'],
offset_list2str(
doc_span['docScuOffsets']),
offset_list2str(scu['scuOffsets']),
doc_span['docScuText'], scu['scuText']])
def metric_filter(self, scu):
if self.filter_data:
return super().metric_filter(scu)
return self.doc_sents
def scu_span_aligner(self):
if self.use_stored_alignment_database:
if self.mode == 'dev':
self.alignment_database = | pd.read_pickle("./span_alignment_database_dev.pkl") | pandas.read_pickle |
import zimp_clf_client
import mlflow
import pandas as pd
import os
import time
import logging
from zimp_clf_client.rest import ApiException
from experiment.config import Config
from sklearn.metrics import accuracy_score, balanced_accuracy_score, f1_score, precision_score, recall_score
def get_or_create_mlflow_experiment(experiment_name):
existing_exp = mlflow.get_experiment_by_name(experiment_name)
if existing_exp is not None:
return existing_exp
exp_id = mlflow.create_experiment(experiment_name)
return mlflow.get_experiment(exp_id)
class Experiment:
def __init__(self, config: Config):
self.config = config
# init classification API
configuration = zimp_clf_client.Configuration()
configuration.host = config.classification_service_url
api_client = zimp_clf_client.ApiClient(configuration=configuration)
api_client.rest_client.pool_manager.connection_pool_kw['retries'] = 10 # in case api is unstable
self.train_api = zimp_clf_client.TrainingApi(api_client)
self.predict_api = zimp_clf_client.PredictionApi(api_client)
self.download_api = zimp_clf_client.DownloadApi(api_client)
# init mlflow API
mlflow.set_tracking_uri(config.mlflow_url)
self.mlflow_experiment = get_or_create_mlflow_experiment(config.experiment_name)
# resource paths
self.train_path = os.path.join('resources', config.dataset, 'train.csv')
self.test_path = os.path.join('resources', config.dataset, 'test.csv')
def run(self):
with mlflow.start_run(experiment_id=self.mlflow_experiment.experiment_id,
run_name=self.config.run_name) as mlflow_run:
mlflow.log_param('model_type', self.config.model_type)
mlflow.log_param('zimp_mechanism', 'None')
mlflow.log_param('random_seed', self.config.random_seed)
mlflow.log_param('dataset', self.config.dataset)
# TRAIN
ref_time = time.time()
self.train_api.clf_train_post(file=self.train_path, model_type=self.config.model_type,
seed=self.config.random_seed, asynchronous='true')
self.wait_for_train_completion() # poll api until training is completed
mlflow.log_metric('train_time_sec', time.time() - ref_time)
# EVAL TRAIN
ref_time = time.time()
self.predict_file_async(self.train_path, metric_prefix='train_')
mlflow.log_metric('train_predict_time_sec', time.time() - ref_time)
# EVAL TEST
ref_time = time.time()
self.predict_file_async(self.test_path, metric_prefix='test_')
mlflow.log_metric('test_predict_time_sec', time.time() - ref_time)
self.store_model()
logging.debug(self.train_api.clf_training_status_get())
def exists_in_mlflow(self) -> bool:
"""
:return: True if a successful experiment exists in mlflow which has the same run name
"""
run_cnt = mlflow.search_runs(
experiment_ids=[self.mlflow_experiment.experiment_id],
filter_string=f'tags."mlflow.runName" = "{self.config.run_name}" and attributes.status = "FINISHED"').shape[0]
return run_cnt > 0
def store_model(self):
"""
retrieves trained model from clf-api and stores it in mlflow
:return:
"""
model_path = 'resources/model'
binary_file = self.download_api.clf_download_get(_preload_content=False).data
with open(model_path, 'wb') as f:
f.write(binary_file)
mlflow.log_artifact(model_path)
def predict_file_async(self, file_path, metric_prefix=""):
"""
sends complete file for prediction and polls for completion
:param file_path: path to the file which should be predicted ('text', 'target')
:return:
"""
tmp_file = 'prediction_input.csv'
df_pred = pd.read_csv(file_path)
df_pred['text'].to_csv(tmp_file, index=False)
result_id = self.predict_api.clf_file_predict_proba_post(file=tmp_file)['resultId']
self.wait_for_predict_completion(file_path, result_id, metric_prefix)
def get_predictions_for_file(self, file_path):
"""
retrieves predictions and related certainty for all texts in the supplied file
:param file_path: path to the file which should be predicted ('text', 'target')
:return: pandas df which contains loaded data plus prediction and certainty cols
"""
batch_size = 6 if self.config.model_type == 'BERT' else 128 # OOM-exception for BERT
df_pred = pd.read_csv(file_path)
df_pred['prediction'] = ''
df_pred['certainty'] = 0
df_pred['target'] = df_pred['target'].astype(str)
for idx in range(0, df_pred.shape[0], batch_size):
clf_response = self.get_api_prediction({'n': 1, 'texts': df_pred.loc[idx:idx+batch_size-1, 'text'].tolist()})
df_pred.loc[idx:idx+batch_size-1, 'prediction'] = [res['labels'][0]['label'] for res in clf_response]
df_pred.loc[idx:idx+batch_size-1, 'certainty'] = [res['labels'][0]['probability'] for res in clf_response]
return df_pred
def get_api_prediction(self, request_body):
"""
wrapper for predict_proba API call which adds a retry in case of gateway errors (may happen with slow bert model)
:param request_body:
:return:
"""
attempt_count = 0
while attempt_count < 10:
if attempt_count > 0:
logging.info("Retry API CALL")
try:
clf_response = self.predict_api.clf_m_predict_proba_post(body=request_body)
except ApiException as e:
logging.warning("Predict Call failed", e)
attempt_count += 1
else:
return clf_response
raise ApiException(0, 'API-Call fails consistently. Please check logs')
def safe_get_status(self):
try:
train_state = self.train_api.clf_training_status_get()
return train_state['isTrained']
except ApiException as e:
logging.warning("Status Call failed", e)
return False
def wait_for_train_completion(self):
wait_time = 1
while True:
if self.safe_get_status():
logging.info('Training completed.')
break
logging.info(f'Training not completed. Waiting for {int(wait_time)} seconds..')
time.sleep(int(wait_time))
wait_time += 0.1
def safe_get_predictions(self, result_id, prediction_path):
try:
csv_file = self.download_api.clf_file_predictions_id_get(id=result_id, _preload_content=False).data
with open(prediction_path, 'wb') as f:
f.write(csv_file)
except ApiException as e:
logging.warning("Prediction Poll Call failed", e)
if not os.path.exists(prediction_path):
return | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
import psutil
import time
import redis
import pytz
import pandas as pd
import numpy as np
import re
import subprocess
from apscheduler.schedulers.background import BackgroundScheduler
def job_renew():
r1 = redis.StrictRedis(host='', port=6379, password='')
r1.flushall()
cpu_interval = 30
endpoint = "SERVER"
refresh_interval = 300
#monitor the redis db status of multiple remote endpoints
def redis_agent():
_redis_cli = r'C:\ProgramData\Redis\redis-cli'
mem_regex = re.compile(r'(\w+):([0-9]+\.?[0-9]*\w)\r') #regex = re.compile(r'(\w+):([0-9]+\.?[0-9]*)\r')
key_regex = re.compile(r'(\w+):(\w+\=?[0-9]*)\,')
hosts = []
port = '6379'
passwd = ''
for host in hosts:
_cmd = '%s -h %s -p %s -a %s info' % (_redis_cli, host, port, passwd)
try:
child = subprocess.Popen(_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
info = child.stdout.read().decode()
except:
info = ''
memused = ''
memmax = ''
connected = ''
memdict = dict(mem_regex.findall(info))
if memdict:
if 'used_memory_rss_human' in memdict.keys():
memused = memdict['used_memory_rss_human']
if 'maxmemory_human' in memdict.keys():
memmax = memdict['maxmemory_human']
if 'connected_clients' in memdict.keys():
connected = memdict['connected_clients']
db0key = ''
db1key = ''
db2key = ''
db3key = ''
keydict = dict(key_regex.findall(info))
if keydict:
if 'db0' in keydict.keys():
db0key = keydict['db0']
if 'db1' in keydict.keys():
db1key = keydict['db1']
if 'db2' in keydict.keys():
db2key = keydict['db2']
if 'db3' in keydict.keys():
db3key = keydict['db3']
print('DB:{} mem:{} used:{} conn:{} db0:{} db1:{} db2:{} db3:{}'.format(host,memmax,memused,connected,db0key,db1key,db2key,db3key))
print('##'*40)
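# For reference, the regexes above parse the raw redis INFO reply, which contains lines
# such as (values illustrative):
# used_memory_rss_human:1.52M
# maxmemory_human:4.00G
# connected_clients:12
# db0:keys=153,expires=0,avg_ttl=0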
def job_agent():
payload = []
eps = ['EP1','EP2','EP3','EP4','EP5','EP6','EP7','EP8','EP9','EP10','EP11','EP12','EP13','EP14','EP15','EP16','EP17','EP18','EP19','EP20']
_rd = redis.StrictRedis(host=YOUHOSTIP, port=6379, password='', db=0, decode_responses=True)
#datadict = {"Host":str(net_addrs_status)[lstart:lstart+lend],"Mem_total":round(mem_status.total/1024/1024/1024,1),"ts":int(time.time()),"CPU_up":round(100-cpu_status.idle,1),"Mem_up":mem_status.percent}
for row in eps:
try:
datadict = _rd.hmget(row,['Host','Mem_total','ts','CPU_up','Mem_up'])
if None not in datadict:
payload.append(datadict)
except:
print('@{} {} is not on line'.format(time.strftime('%Y%m%d %H:%M:%S'),row))
if len(payload) == 0:
print('@{} abvalable mechines nums:0'.format(time.strftime('%Y%m%d %H:%M:%S')))
elif len(payload) <= 5: # do not dispatch run tasks when 5 or fewer endpoints are online
data = | pd.DataFrame(payload) | pandas.DataFrame |
import math
import numpy as np
import pandas as pd
from footings.utils import dispatch_function
def _month_diff(start, end):
months = (end.year - start.year) * 12 + (end.month - start.month)
if end.day > start.day:
months += 1
return months
def _day_diff(start, end):
return (end - start).days
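# Worked examples (the month difference rounds up once the end day passes the start day):
# _month_diff(pd.Timestamp('2020-01-15'), pd.Timestamp('2020-03-10')) -> 2
# _month_diff(pd.Timestamp('2020-01-15'), pd.Timestamp('2020-03-20')) -> 3
# _day_diff(pd.Timestamp('2020-01-01'), pd.Timestamp('2020-01-31')) -> 30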
@dispatch_function(key_parameters=("frequency",))
def freq_dispatcher(
start_dt: pd.Timestamp,
end_dt: pd.Timestamp,
col_date_nm: str,
frequency: str,
end_duration: str = None,
):
msg = "No registered function based on passed paramters and no default function."
raise NotImplementedError(msg)
@freq_dispatcher.register(frequency="Y")
def _(
start_dt: pd.Timestamp,
end_dt: pd.Timestamp,
col_date_nm: str,
end_duration: str = None,
):
periods = math.ceil(_month_diff(start_dt, end_dt) / 12) + 1
frame = pd.DataFrame()
frame[col_date_nm] = pd.to_datetime(
[start_dt + pd.DateOffset(years=period) for period in range(0, periods)]
)
if end_duration is not None:
frame[end_duration] = pd.to_datetime(
[start_dt + pd.DateOffset(years=period) for period in range(1, periods + 1)]
)
return frame
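# Usage sketch (column names are illustrative): dispatching with frequency='Y' over a
# two-and-a-half-year window yields one row per policy year, e.g.
# freq_dispatcher(start_dt=pd.Timestamp('2020-01-01'), end_dt=pd.Timestamp('2022-06-30'),
#                 col_date_nm='DATE_BD', frequency='Y', end_duration='DATE_ED')
# -> DATE_BD: 2020-01-01, 2021-01-01, 2022-01-01, 2023-01-01 (DATE_ED shifted one year ahead).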
@freq_dispatcher.register(frequency="Q")
def _(
start_dt: pd.Timestamp,
end_dt: pd.Timestamp,
col_date_nm: str,
end_duration: str = None,
):
periods = math.ceil(_month_diff(start_dt, end_dt) / 3) + 1
frame = pd.DataFrame()
frame[col_date_nm] = pd.to_datetime(
[start_dt + pd.DateOffset(months=period * 3) for period in range(0, periods)]
)
if end_duration is not None:
frame[end_duration] = pd.to_datetime(
[
start_dt + | pd.DateOffset(months=period * 3) | pandas.DateOffset |
# -*- coding: utf-8 -*-
"""
Created on Thu May 21 14:41:30 2020
@author: wjcongyu
"""
import _init_pathes
import os
import tensorflow as tf
from configs.cfgs import cfg
from models.risk_predictor import build_network
from data_process.data_generator1 import DataGenerator
from data_process.data_processor import readCsv
from models.backend import find_weights_of_last
import numpy as np
import argparse
import os.path as osp
from tensorflow.keras import models as KM
import pandas as pd
import datetime
parser = argparse.ArgumentParser()
parser.add_argument('-train_subsets', '--train_subsets', help='the subsets for training.', type = str, default ='1;2;3')
parser.add_argument('-eval_subsets', '--eval_subsets', help='the subset for test, others for training.', type = str, default ='4.lbl')
parser.add_argument('-batch_size', '--batch_size', help='the mini-batch size.', type = int, default = 72)
parser.add_argument('-cuda_device', '--cuda_device', help='runining on specified gpu', type = int, default = 0)
parser.add_argument('-save_root', '--save_root', help='root path to save the prediction results.', type = str, default = 'eval_results')#1:yes 0:no
parser.add_argument('-save_flag', '--save_flag', help='flag for saving result file name.', type = str, default = '')#1:yes 0:no
parser.add_argument('-treatment', '--treatment', help='the treatment for prediction.', type = str, default ='0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0')
parser.add_argument('-gt_ctimages', '--gt_ctimages', help='using ctimages or not.', type = int, default = 0)#1:yes 0:no
def main(args):
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.cuda_device)
eval_files = []
eval_subsets = args.eval_subsets.split(';')
for i in range(len(eval_subsets)):
eval_files.append(os.path.join(cfg.data_set, eval_subsets[i]))
val_data_generator = DataGenerator(eval_files, cfg, train_mode=False)
eval_sample_num = val_data_generator.load_dataset()
treattype = {0:'WithoutTreatment', 1:'WithTreatment'}
if args.treatment == '0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0':
model_dir_name = args.train_subsets +'_modelweights_'+ treattype[0]
else:
model_dir_name = args.train_subsets +'_modelweights_'+ treattype[1]
if args.gt_ctimages==0:
model_dir_name += '_Withoutimage'
#model_dir_name = args.train_subsets +'_modelweights_'+ treattype[1]
treatments = np.array(args.treatment.split(','), dtype=np.int32)
treatment_names = {'1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1':'GT',
'0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0':'NONE',
'1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0':'MPN',
'0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0':'CP',
'0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0':'OV',
'0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0':'TB',
'0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0':'ABD',
'0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0':'RV',
'0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0':'XBJ',
'0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0':'LQC',
'0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0':'CPP',
'0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0':'PPL',
'0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0':'MFN',
'0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0':'LFN',
'0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0':'LZD',
'0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0':'HPN',
'0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0':'IGN',
'0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0':'VC',
'0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0':'ACN',
'0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0':'ABX',
'0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1':'HFNC'}
model = build_network([ cfg.treatment_infosize, cfg.im_feedsize, cfg.patient_infosize], [cfg.time_range], False, name='risk_predictor')
feature_idx = [19, 31]
checkpoint_file = find_weights_of_last(os.path.join(cfg.CHECKPOINTS_ROOT, model_dir_name), 'risk_predictor')
print('############################',os.path.join(cfg.CHECKPOINTS_ROOT, model_dir_name))
if checkpoint_file != '':
print ('@@@@@@@@@@ loading pretrained from ', checkpoint_file)
model.load_weights(checkpoint_file)
else:
raise AssertionError('no weight file found!!!')
print (model.summary())
#print layer information
for i in range(len(model.layers)):
layer = model.layers[i]
print(i, layer.name, layer.output.shape)
#define the output of the network to get
outputs = [model.layers[i].output for i in feature_idx]
pred_model = KM.Model(inputs=model.inputs, outputs=outputs)
save_root = args.save_root
if not osp.exists(save_root):
os.mkdir(save_root)
feature_significance = []
risk_reg_preds = []
gt_hitday = []
gt_eventindicator = []
gt_features = []
gt_covid_severity = []
gt_treatments = []
gt_pids = []
for step in range(eval_sample_num//(args.batch_size)+1):
start = datetime.datetime.now()
evbt_painfo, evbt_treatinfo, evbt_ims, evbt_treattimes,evbt_censor_indicator, evbt_severity, evbt_pids \
= val_data_generator.next_batch(args.batch_size)
if args.treatment == '1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1':
feed_treatinfo = evbt_treatinfo
else:
feed_treatinfo= np.zeros_like(evbt_treatinfo)
feed_treatinfo[:,...] = treatments
if args.gt_ctimages==0:
feed_ims = tf.zeros_like(evbt_ims)
else:
feed_ims = evbt_ims
feed = [feed_treatinfo, feed_ims, evbt_painfo]
coff, reg_pred = pred_model(feed, training=False)
end = datetime.datetime.now()
print('processing time:', end-start)
risk_reg_preds.append(reg_pred)
feature_significance.append(coff)
gt_hitday.append(evbt_treattimes)
gt_eventindicator.append(evbt_censor_indicator)
gt_features.append(evbt_painfo)
gt_covid_severity.append(evbt_severity)
gt_treatments.append(evbt_treatinfo)
gt_pids.append(evbt_pids)
risk_reg_preds = np.concatenate(risk_reg_preds, axis=0)
feature_significance = np.concatenate(feature_significance, axis=0)
gt_hitday = np.concatenate(gt_hitday, axis=0)
gt_eventindicator = np.concatenate(gt_eventindicator, axis=0)
gt_features = np.concatenate(gt_features, axis=0)
gt_covid_severity = np.concatenate(gt_covid_severity, axis=0)
gt_treatments = np.concatenate(gt_treatments, axis=0)
gt_pids = np.concatenate(gt_pids, axis=0)
pinfo_header = readCsv(eval_files[0])[0][1:48]
pinfo_header = pinfo_header[0:2]+pinfo_header[3:]
csv_file = os.path.join(save_root, '{0}_{1}_risk_reg_preds.csv'.format(args.eval_subsets+args.save_flag, treatment_names[args.treatment]))
save_data = pd.DataFrame(risk_reg_preds, columns=['day '+str(i) for i in range(cfg.time_range)])
save_data.to_csv(csv_file,header=True, index=False)
csv_file = os.path.join(save_root, '{0}_{1}_gt_hitday.csv'.format(args.eval_subsets+args.save_flag, treatment_names[args.treatment]))
save_data = pd.DataFrame(gt_hitday, columns=['hit day'])
save_data.to_csv(csv_file,header=True, index=False)
csv_file = os.path.join(save_root, '{0}_{1}_indicator.csv'.format(args.eval_subsets+args.save_flag, treatment_names[args.treatment]))
save_data = pd.DataFrame(gt_eventindicator, columns=['indicator'])
save_data.to_csv(csv_file,header=True, index=False)
csv_file = os.path.join(save_root, '{0}_{1}_clinic_features.csv'.format(args.eval_subsets+args.save_flag, treatment_names[args.treatment]))
save_data = pd.DataFrame(gt_features, columns=pinfo_header)
save_data.to_csv(csv_file,header=True, index=False)
csv_file = os.path.join(save_root, '{0}_{1}_gt_severity.csv'.format(args.eval_subsets+args.save_flag, treatment_names[args.treatment]))
save_data = pd.DataFrame(gt_covid_severity, columns=['severity'])
save_data.to_csv(csv_file,header=True, index=False)
csv_file = os.path.join(save_root, '{0}_{1}_gt_treatment.csv'.format(args.eval_subsets+args.save_flag, treatment_names[args.treatment]))
treat_header = ['MPN','CP','OV','TB','ABD','RV','XBJ','LQC','CPP','PPL','MFN','LFN','LZD','HPN','IGN','VC','ACN','ABX','HFNC']
save_data = pd.DataFrame(gt_treatments, columns=treat_header)
save_data.to_csv(csv_file,header=True, index=False)
csv_file = os.path.join(save_root, '{0}_{1}_patient_ids.csv'.format(args.eval_subsets+args.save_flag, treatment_names[args.treatment]))
save_data = | pd.DataFrame(gt_pids, columns=['pid']) | pandas.DataFrame |
import time
from itertools import product
from typing import List, Dict, Tuple, Union
import pandas as pd
from scipy.spatial.distance import cosine
from scipy.stats import ks_2samp, wasserstein_distance
from sklearn.preprocessing import LabelEncoder
from typeguard import typechecked
from .Report import Report
def validate_create_report_attributes(enable_patterns_report: bool,
patterns_report_group_by_categorical_features: Union[str, List[str]],
patterns_report_group_by_numerical_features: Union[str, List[str]],
patterns_report_number_of_bins: Union[int, List[int]],
enable_parallel_coordinates_plot: bool,
cosine_similarity_threshold: float,
parallel_coordinates_q1_threshold: float,
parallel_coordinates_q2_threshold: float,
parallel_coordinates_features: Union[str, List[str]],
categorical_features: List[str],
numerical_features: List[str],
all_features: List[str]):
if type(enable_patterns_report) is not bool:
raise TypeError('provided enable_patterns_report is not valid. enable_patterns_report has to be a bool')
if type(patterns_report_group_by_categorical_features) is str \
and patterns_report_group_by_categorical_features != 'all':
raise AttributeError('''provided patterns_report_group_by_categorical_features is not valid.
patterns_report_group_by_categorical_features has to be "all" if the provided value is a string''')
if type(patterns_report_group_by_numerical_features) is str \
and patterns_report_group_by_numerical_features != 'all':
raise AttributeError('''provided patterns_report_group_by_numerical_features is not valid.
patterns_report_group_by_numerical_features has to be "all" if the provided value is a string''')
if type(patterns_report_group_by_categorical_features) is list \
and len(patterns_report_group_by_categorical_features) > 0:
unknown_features = [feature for feature in patterns_report_group_by_categorical_features if
feature not in categorical_features]
if len(unknown_features) > 0:
raise AttributeError(f'''provided patterns_report_group_by_categorical_features is not valid.
these features {unknown_features} do not exist in the categorical features''')
if type(patterns_report_group_by_numerical_features) is list \
and len(patterns_report_group_by_numerical_features) > 0:
unknown_features = [feature for feature in patterns_report_group_by_numerical_features if
feature not in numerical_features]
if len(unknown_features) > 0:
raise AttributeError(f'''provided patterns_report_group_by_numerical_features is not valid.
these features {unknown_features} do not exist in the numerical features''')
if type(patterns_report_number_of_bins) is list \
and type(patterns_report_group_by_numerical_features) is str:
raise AttributeError('''provided patterns_report_number_of_bins is not valid.
patterns_report_number_of_bins can be a list of ints if a list of numerical features were provided in patterns_report_group_by_numerical_features''')
if type(patterns_report_number_of_bins) is list \
and type(patterns_report_group_by_numerical_features) is list:
if len(patterns_report_number_of_bins) != len(patterns_report_group_by_numerical_features):
raise AttributeError('''provided patterns_report_number_of_bins is not valid.
patterns_report_number_of_bins list length has to be equal to the number of features provided in patterns_report_group_by_numerical_features''')
if type(enable_parallel_coordinates_plot) is not bool:
raise TypeError(
'provided enable_parallel_coordinates_plot is not valid. enable_parallel_coordinates_plot has to be a bool')
if type(cosine_similarity_threshold) is not float:
raise TypeError(
'provided cosine_similarity_threshold is not valid. cosine_similarity_threshold has to be a float')
if cosine_similarity_threshold <= 0.0 or cosine_similarity_threshold >= 1.0:
raise AttributeError(
'provided cosine_similarity_threshold is not valid. cosine_similarity_threshold has to be between 0.0 and 1.0')
if type(parallel_coordinates_q1_threshold) is not float:
raise TypeError(
'provided parallel_coordinates_q1_threshold is not valid. parallel_coordinates_q1_threshold has to be a float')
if type(parallel_coordinates_q2_threshold) is not float:
raise TypeError(
'provided parallel_coordinates_q2_threshold is not valid. parallel_coordinates_q2_threshold has to be a float')
if parallel_coordinates_q1_threshold <= 0.0 or parallel_coordinates_q1_threshold >= 1.0:
raise AttributeError(
'provided parallel_coordinates_q1_threshold is not valid. parallel_coordinates_q1_threshold has to be between 0.0 and 1.0')
if parallel_coordinates_q2_threshold <= 0.0 or parallel_coordinates_q2_threshold >= 1.0:
raise AttributeError(
'provided parallel_coordinates_q2_threshold is not valid. parallel_coordinates_q2_threshold has to be between 0.0 and 1.0')
if parallel_coordinates_q2_threshold <= parallel_coordinates_q1_threshold:
raise AttributeError('''provided parallel_coordinates_q1_threshold and parallel_coordinates_q2_threshold are not valid.
parallel_coordinates_q2_threshold has to greater than parallel_coordinates_q1_threshold''')
if type(parallel_coordinates_features) is str and parallel_coordinates_features != 'auto':
raise AttributeError('''provided parallel_coordinates_features is not valid.
parallel_coordinates_features has to be "auto" if the provided value is a string''')
if type(parallel_coordinates_features) is list and len(parallel_coordinates_features) > 0:
unknown_features = [feature for feature in parallel_coordinates_features if feature not in all_features]
if len(unknown_features) > 0:
raise AttributeError(f'''provided parallel_coordinates_features is not valid.
these features {unknown_features} do not exist in the dataframe''')
if type(parallel_coordinates_features) is list and len(parallel_coordinates_features) < 2:
raise AttributeError(f'''provided parallel_coordinates_features is not valid.
parallel_coordinates_features has to contain at least two features to plot''')
def validate_attributes(train_df, test_df, target_feature_name, error_column_name,
error_classes, acceptable_error_class, numerical_features, categorical_features):
if type(train_df) is not pd.DataFrame:
raise TypeError('provided train_df is not valid. train_df has to be a pandas dataframe')
if type(test_df) is not pd.DataFrame:
raise TypeError('provided test_df is not valid. test_df has to be a pandas dataframe')
train_columns = train_df.columns.to_list()
test_columns = test_df.columns.to_list()
if type(target_feature_name) is not str:
raise TypeError(f'''provided target_feature_name is not valid.
\ntarget_feature_name ({target_feature_name}) has to be a str''')
if target_feature_name not in train_columns:
raise AttributeError(f'provided target_feature_name ({target_feature_name}) is not in train_df')
if target_feature_name not in test_columns:
raise AttributeError(f'provided target_feature_name ({target_feature_name}) is not in test_df')
if type(error_column_name) is not str:
raise TypeError(f'''provided error_column_name is not valid.
\nerror_column_name ({error_column_name}) has to be a str''')
if error_column_name not in train_columns:
raise AttributeError(f'provided error_column_name ({error_column_name}) is not in train_df')
if error_column_name not in test_columns:
raise AttributeError(f'provided error_column_name ({error_column_name}) is not in test_df')
if acceptable_error_class is not None and type(acceptable_error_class) is not str:
raise TypeError(f'''provided acceptable_error_class is not valid.
\nacceptable_error_class ({acceptable_error_class}) has to be a str or None''')
if acceptable_error_class is not None and acceptable_error_class not in error_classes:
raise AttributeError(f'''provided acceptable_error_class is not valid.
\n{acceptable_error_class} has to be defined in error_classes''')
if numerical_features is None and categorical_features is None:
raise AttributeError('''both numerical_features and categorical_features are not defined.
\nyou need to provide one of them or both in order to proceed.''')
def _cosine_similarity(vector_a, vector_b):
return 1.0 - cosine(vector_a, vector_b)
@typechecked
class RegressionErrorAnalysisReport(Report):
"""
RegressionErrorAnalysisReport creates a report that analyzes the error in regression problems.
Attributes
----------
title : str
the title of the report
output_directory : str
the directory where the report folder will be created
train_df : pd.DataFrame
the training pandas dataframe of the regression problem which should include the target feature
test_df : pd.DataFrame
the testing pandas dataframe of the regression problem which should include the target feature
and the error column in order to calculate the error class
target_feature_name : str
the name of the regression target feature
error_column_name : str
the name of the calculated error column 'Prediction - Target' (see example on github for more information)
error_classes : Dict[str, Tuple]
a dictionary containing the definition of the error classes that will be created.
The key is the error_class name and the value is the minimum (inclusive) and maximum (exclusive)
which will be used to calculate the error_class of the test observations.
For example: error_classes = {
'EXTREME_UNDER_ESTIMATION': (-8.0, -4.0),
returns 'EXTREME_UNDER_ESTIMATION' if -8.0 <= error < -4.0
'HIGH_UNDER_ESTIMATION': (-4.0, -3.0),
returns 'HIGH_UNDER_ESTIMATION' if -4.0 <= error < -3.0
'MEDIUM_UNDER_ESTIMATION': (-3.0, -1.0),
returns 'MEDIUM_UNDER_ESTIMATION' if -3.0 <= error < -1.0
'LOW_UNDER_ESTIMATION': (-1.0, -0.5),
returns 'LOW_UNDER_ESTIMATION' if -1.0 <= error < -0.5
'ACCEPTABLE': (-0.5, 0.5),
returns 'ACCEPTABLE' if -0.5 <= error < 0.5
'OVER_ESTIMATING': (0.5, 3.0) }
returns 'OVER_ESTIMATING' if 0.5 <= error < 3.0
acceptable_error_class: str
the name of the acceptable error class that was defined in error_classes
numerical_features : List[str] default=None
a list of the numerical features to be included in the report
categorical_features : List[str] default=None
a list of the categorical features to be included in the report
subtitle : str default=None
an optional subtitle to describe your report
report_folder_name : str default=None
the name of the folder that will contain all the generated report files.
If not set, the title of the report will be used.
encryption_secret : str default=None
the 16 characters secret that will be used to encrypt the generated report data.
If it is not set, the generated data won't be encrypted.
generate_encryption_secret : bool default=False
if True, an encryption_secret will be generated and used to encrypt the report data.
You can read the encryption_secret attribute to get the generated secret.
Methods
-------
create_report()
creates the error analysis report
"""
def __init__(self,
title: str,
output_directory: str,
train_df: pd.DataFrame,
test_df: pd.DataFrame,
target_feature_name: str,
error_column_name: str,
error_classes: Dict[str, Tuple[float, float]],
acceptable_error_class: str,
numerical_features: List[str] = None,
categorical_features: List[str] = None,
subtitle: str = None,
report_folder_name: str = None,
encryption_secret: str = None,
generate_encryption_secret: bool = False):
super().__init__(title,
output_directory,
subtitle,
report_folder_name,
encryption_secret,
generate_encryption_secret)
validate_attributes(train_df,
test_df,
target_feature_name,
error_column_name,
error_classes,
acceptable_error_class,
numerical_features,
categorical_features)
self.train_df = train_df.copy()
self.test_df = test_df.copy()
self.target_feature_name = target_feature_name
self.error_column_name = error_column_name
self.error_classes = error_classes.copy()
self.acceptable_error_class = acceptable_error_class
self.numerical_features = numerical_features[:] if numerical_features is not None else None
self.categorical_features = categorical_features[:] if categorical_features is not None else None
self._training_data_name = 'Training data'
self._testing_data_name = 'Testing data'
self._error_class_col_name = 'ERROR_CLASS'
self._primary_datasets = [self._training_data_name, self.acceptable_error_class]
self._secondary_datasets = [self._testing_data_name]
self._secondary_datasets.extend(list(self.error_classes.keys()))
self._template_name = 'regression-error-analysis-report'
@typechecked
def create_report(self,
enable_patterns_report: bool = True,
patterns_report_group_by_categorical_features: Union[str, List[str]] = 'all',
patterns_report_group_by_numerical_features: Union[str, List[str]] = 'all',
patterns_report_number_of_bins: Union[int, List[int]] = 10,
enable_parallel_coordinates_plot: bool = True,
cosine_similarity_threshold: float = 0.8,
parallel_coordinates_q1_threshold: float = 0.25,
parallel_coordinates_q2_threshold: float = 0.75,
parallel_coordinates_features: Union[str, List[str]] = 'auto') -> None:
"""
Creates a report using the user defined data and the data calculated based on the error.
:param enable_patterns_report: enables the patterns report. default: True
:param patterns_report_group_by_categorical_features: categorical features to use in the patterns report. default: 'all'
:param patterns_report_group_by_numerical_features: numerical features to use in the patterns report. default: 'all'
:param patterns_report_number_of_bins: number of bins to use for each provided numerical feature
or one number of bins to use for all provided numerical features. default: 10
:param enable_parallel_coordinates_plot: enables the parallel coordinates plot. default: True
:param cosine_similarity_threshold: the cosine similarity threshold used to decide whether the categorical
distributions of the primary and secondary datasets are similar. default: 0.8
:param parallel_coordinates_q1_threshold: the first quantile threshold to be used
if parallel_coordinates_features == 'auto'. default: 0.25
:param parallel_coordinates_q2_threshold: the second quantile threshold to be used
if parallel_coordinates_features == 'auto'. default: 0.75
:param parallel_coordinates_features: The list of features to display on the parallel coordinates plot. default: 'auto'
- If parallel_coordinates_features is set to 'auto', OlliePy will select the features with a distribution shift based on 3 thresholds:
- cosine_similarity_threshold to be used to select categorical features if the cosine_similarity is lower than the threshold.
- parallel_coordinates_q1_threshold and parallel_coordinates_q2_threshold which are two quantile values.
if primary_quantile_1 >= secondary_quantile_2 or secondary_quantile_1 >= primary_quantile_2
then the numerical feature is selected and will be added to the plot.
:return: None
"""
self.report_data['report'] = {}
validate_create_report_attributes(enable_patterns_report,
patterns_report_group_by_categorical_features,
patterns_report_group_by_numerical_features,
patterns_report_number_of_bins,
enable_parallel_coordinates_plot,
cosine_similarity_threshold,
parallel_coordinates_q1_threshold,
parallel_coordinates_q2_threshold,
parallel_coordinates_features,
self.categorical_features,
self.numerical_features,
self.train_df.columns.tolist())
tic = time.perf_counter()
self._add_user_defined_data()
self._add_error_class_to_test_df()
self._add_datasets()
self._add_statistical_tests(cosine_similarity_threshold)
if self.categorical_features is not None and len(self.categorical_features) > 0:
self._add_categorical_count_plot()
if enable_parallel_coordinates_plot:
self._add_parallel_coordinates_plot(cosine_similarity_threshold,
parallel_coordinates_q1_threshold,
parallel_coordinates_q2_threshold,
parallel_coordinates_features)
if enable_patterns_report:
self._find_and_add_all_secondary_datasets_patterns(patterns_report_group_by_categorical_features,
patterns_report_group_by_numerical_features,
patterns_report_number_of_bins)
toc = time.perf_counter()
print(f"The report was created in {toc - tic:0.4f} seconds")
if self.encryption_secret:
print(f'Your encryption secret is {self.encryption_secret}')
def _add_user_defined_data(self) -> None:
"""
Adds user defined data to the report.
:return: None
"""
self._update_report({'primaryDatasets': self._primary_datasets})
self._update_report({'secondaryDatasets': self._secondary_datasets})
if self.numerical_features:
if self.target_feature_name not in self.numerical_features:
self.numerical_features.append(self.target_feature_name)
self._update_report({'numericalFeatures': self.numerical_features})
if self.categorical_features:
self._update_report({'categoricalFeatures': self.categorical_features})
self._update_report({'targetFeature': self.target_feature_name})
def _add_error_class_to_test_df(self) -> None:
"""
adds the error class to each observation in the test set (test_df) based on the
error classes provided by the user.
:return: None
"""
def add_error_class(error: float) -> str:
for error_class, min_max in self.error_classes.items():
minimum, maximum = min_max
if minimum <= error < maximum:
return error_class
return 'UNDEFINED_ERROR_CLASS'
self.test_df[self._error_class_col_name] = self.test_df[self.error_column_name].apply(add_error_class)
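# Worked example (using the illustrative classes from the class docstring): an error of
# -2.0 falls in 'MEDIUM_UNDER_ESTIMATION' because -3.0 <= -2.0 < -1.0, while an error of
# 4.0 matches no defined class and is labelled 'UNDEFINED_ERROR_CLASS'.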
def _add_datasets(self) -> None:
"""
Adds datasets to reports (info, stats, numerical data).
:return: None
"""
datasets_dict = {}
def add_dataset(df: pd.DataFrame, dataset_name: str) -> None:
"""
Adds a dataset stats and data to the datasets_dict.
:param df: pd.DataFrame, the selected dataset dataframe
:param dataset_name: str, the dataset name
:return: None
"""
stats = {}
data = {}
if self.numerical_features is not None and len(self.numerical_features) > 0:
for feature in self.numerical_features:
stats[feature] = {
'min': df.loc[:, feature].min(),
'mean': df.loc[:, feature].mean(),
'std': df.loc[:, feature].std(),
'median': df.loc[:, feature].median(),
'max': df.loc[:, feature].max(),
'count': int(df.loc[:, feature].count()),
'missingCount': int(df.loc[:, feature].isna().sum()),
}
data[feature] = df.loc[:, feature].values.tolist()
if self.categorical_features is not None and len(self.categorical_features) > 0:
for feature in self.categorical_features:
stats[feature] = {
'uniqueCount': int(df.loc[:, feature].nunique()),
'missingCount': int(df.loc[:, feature].isna().sum())
}
dataset_dict = {dataset_name: {
'info': {
'name': dataset_name,
'numberOfRows': df.shape[0],
'minError': df.loc[:, self.error_column_name].min(),
'meanError': df.loc[:, self.error_column_name].mean(),
'stdError': df.loc[:, self.error_column_name].std(),
'medianError': df.loc[:, self.error_column_name].median(),
'maxError': df.loc[:, self.error_column_name].max(),
'errors': df.loc[:, self.error_column_name].tolist(),
'stats': stats
},
'data': data
}}
datasets_dict.update(dataset_dict)
add_dataset(self.train_df, self._training_data_name)
add_dataset(self.test_df, self._testing_data_name)
for error_class_name in self.error_classes.keys():
selected_df = self.test_df.loc[self.test_df[self._error_class_col_name] == error_class_name, :]
add_dataset(selected_df, error_class_name)
self._update_report({'datasets': datasets_dict})
def _count_categories_and_merge_count_dataframes(self, feature_name: str, primary_dataset: str,
secondary_dataset: str,
normalize=False) -> pd.DataFrame:
"""
Counts the categories of the provided feature for the primary and secondary datasets, then merges
the count dataframes into a single dataframe that contains all the categories.
Missing values are filled with 0.
:param feature_name: the feature name
:param primary_dataset: the primary dataset name
:param secondary_dataset: the secondary dataset name
:param normalize: whether to normalize the categorical count, default: False
:return: the merged dataframe
"""
if primary_dataset == self._training_data_name:
primary_count_df = self.train_df.loc[:, feature_name].value_counts(normalize=normalize)
else:
primary_count_df = self.test_df.loc[
self.test_df[self._error_class_col_name] == primary_dataset, feature_name].value_counts(
normalize=normalize)
if secondary_dataset == self._testing_data_name:
secondary_count_df = self.test_df.loc[:, feature_name].value_counts(normalize=normalize)
else:
secondary_count_df = self.test_df.loc[
self.test_df[self._error_class_col_name] == secondary_dataset, feature_name].value_counts(
normalize=normalize)
primary_count_df = primary_count_df.reset_index() \
.rename({feature_name: primary_dataset, 'index': feature_name}, axis=1)
secondary_count_df = secondary_count_df.reset_index() \
.rename({feature_name: secondary_dataset, 'index': feature_name}, axis=1)
merged_cat_count = primary_count_df.merge(secondary_count_df, on=feature_name, how='outer').fillna(
0).sort_values(by=primary_dataset, ascending=False)
return merged_cat_count
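# Illustrative result (hypothetical feature 'color'): the merged dataframe has the columns
# ['color', primary_dataset, secondary_dataset]; a category present in only one dataset
# keeps its count for that dataset and gets 0 for the other, and rows are sorted by the
# primary dataset's counts in descending order.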
def _add_categorical_count_plot(self) -> None:
"""
Add the categorical count plots (stacked bar plot) data to the report
:return: None
"""
def add_categorical_count_data(feature_dictionary: Dict, feature_name: str, primary_dataset: str,
secondary_dataset: str) -> None:
"""
Calculates the value counts of the given categorical feature for each dataset,
merges the resulting value_counts() dataframes, and computes the data needed for the plotly
stacked bar plot.
:param feature_dictionary: the feature dictionary that will be added the categorical count plot data
:param feature_name: the feature name
:param primary_dataset: the primary dataset name
:param secondary_dataset: the secondary dataset name
:return: None
"""
merged_cat_count = self._count_categories_and_merge_count_dataframes(feature_name,
primary_dataset,
secondary_dataset,
normalize=False)
key = f'{primary_dataset}_{secondary_dataset}'
title = f'{primary_dataset} vs {secondary_dataset}'
categories = merged_cat_count.loc[:, feature_name].tolist()
primary_data = merged_cat_count.loc[:, primary_dataset].tolist()
secondary_data = merged_cat_count.loc[:, secondary_dataset].tolist()
feature_dictionary.update({key: {
'title': title,
'categories': categories,
'series': [
{
'name': primary_dataset,
'color': '#8180FF',
'data': primary_data
},
{
'name': secondary_dataset,
'color': '#FF938D',
'data': secondary_data
}
]
}})
categorical_count_dict = {}
for feature in self.categorical_features:
feature_dict = {}
for primary_dataset_name, secondary_dataset_name in product(self._primary_datasets,
self._secondary_datasets):
if primary_dataset_name != secondary_dataset_name:
add_categorical_count_data(feature_dict, feature, primary_dataset_name, secondary_dataset_name)
categorical_count_dict.update({feature: feature_dict})
self._update_report({'categorical_count_plots': categorical_count_dict})
def _get_primary_secondary_datasets(self, primary_dataset: str, secondary_dataset: str) -> Tuple[
pd.DataFrame, pd.DataFrame]:
"""
Finds the correct primary and secondary datasets and returns them.
:param primary_dataset: the name of the primary dataset
:param secondary_dataset: the name of the secondary dataset
:return: primary_df, secondary_df
"""
if primary_dataset == self._training_data_name:
primary_df = self.train_df.copy()
primary_df.loc[:, self._error_class_col_name] = self._training_data_name
else:
primary_df = self.test_df.loc[self.test_df[self._error_class_col_name] == primary_dataset, :].copy()
if secondary_dataset == self._testing_data_name:
secondary_df = self.test_df.copy()
secondary_df.loc[:, self._error_class_col_name] = self._testing_data_name
else:
secondary_df = self.test_df.loc[self.test_df[self._error_class_col_name] == secondary_dataset, :].copy()
return primary_df, secondary_df
def _add_parallel_coordinates_plot(self,
cosine_similarity_threshold,
parallel_coordinates_q1_threshold,
parallel_coordinates_q2_threshold,
parallel_coordinates_features) -> None:
"""
Checks for suitable features (numerical based on quantiles (default: 0.25, 0.75)
and categorical based on cosine similarity),
then adds the data needed for the plotly parallel coordinates plot.
:param cosine_similarity_threshold: the cosine similarity threshold for the categorical features
:param parallel_coordinates_q1_threshold: the first quantile threshold to be used
if parallel_coordinates_features == 'auto'. default: 0.25
:param parallel_coordinates_q2_threshold: the second quantile threshold to be used
if parallel_coordinates_features == 'auto'. default: 0.75
:param parallel_coordinates_features: The list of features to display on the parallel coordinates plot. default: 'auto'
- If parallel_coordinates_features is set to 'auto', OlliePy will select the features with a distribution shift based on 3 thresholds:
- cosine_similarity_threshold to be used to select categorical features if the cosine_similarity is lower than the threshold.
- parallel_coordinates_q1_threshold and parallel_coordinates_q2_threshold which are two quantile values.
if primary_quantile_1 >= secondary_quantile_2 or secondary_quantile_1 >= primary_quantile_2
then the numerical feature is selected and will be added to the plot.
:return:
"""
def add_parallel_coordinates(parallel_coordinates_dictionary: Dict, primary_dataset: str,
secondary_dataset: str) -> None:
"""
Decides which features will be added to the parallel coordinates plot based on predefined thresholds.
Then prepares the data that is expected by the plotly parallel coordinates plot.
:param parallel_coordinates_dictionary: the parallel coordinates data dictionary
:param primary_dataset: the name of the primary dataset
:param secondary_dataset: the name of the secondary dataset
:return: None
"""
selected_features = [] if parallel_coordinates_features == 'auto' else parallel_coordinates_features
first_quantile_threshold = parallel_coordinates_q1_threshold
second_quantile_threshold = parallel_coordinates_q2_threshold
primary_df, secondary_df = self._get_primary_secondary_datasets(primary_dataset, secondary_dataset)
if self.categorical_features is not None and parallel_coordinates_features == 'auto':
for categorical_feature in self.categorical_features:
merged_cat_count = self._count_categories_and_merge_count_dataframes(categorical_feature,
primary_dataset,
secondary_dataset,
normalize=True)
primary_vector = merged_cat_count.loc[:, primary_dataset].tolist()
secondary_vector = merged_cat_count.loc[:, secondary_dataset].tolist()
cosine_similarity = _cosine_similarity(primary_vector, secondary_vector)
if cosine_similarity < cosine_similarity_threshold:
selected_features.append(categorical_feature)
if self.numerical_features is not None and parallel_coordinates_features == 'auto':
for numerical_feature in self.numerical_features:
primary_q_1 = primary_df.loc[:, numerical_feature].quantile(first_quantile_threshold)
primary_q_2 = primary_df.loc[:, numerical_feature].quantile(second_quantile_threshold)
secondary_q_1 = secondary_df.loc[:, numerical_feature].quantile(first_quantile_threshold)
secondary_q_2 = secondary_df.loc[:, numerical_feature].quantile(second_quantile_threshold)
if primary_q_1 >= secondary_q_2 or secondary_q_1 >= primary_q_2:
selected_features.append(numerical_feature)
if len(selected_features) > 0:
key = f'{primary_dataset}_{secondary_dataset}'
combined_df = | pd.concat([primary_df, secondary_df], axis=0) | pandas.concat |
from airflow import DAG
import pandas as pd
import datetime as dt
from airflow.operators.python import PythonOperator
from minio import Minio
import os
import glob
import functions as f
data_lake_server= f.var['data_lake_server_airflow']
data_lake_login= f.var['data_lake_login']
data_lake_password= f.var['data_lake_password']
client = Minio(
endpoint= data_lake_server,
access_key= data_lake_login,
secret_key= data_lake_password,
secure=False
)
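# Hedged sanity-check sketch (optional, not part of the original DAG): the MinIO client
# can be probed before the extract tasks run, e.g.
#     assert client.bucket_exists('processing'), 'processing bucket is missing'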
dag = DAG(
dag_id="etl_client_clustering",
description="ETL - Client Clustering DataFrame",
start_date=dt.datetime(2021, 11, 29),
schedule_interval= "@once")
##################### olist_customers_dataset #####################
def extract_customers():
# load data to a tmp folder
client.fget_object(
bucket_name= 'processing',
object_name= 'olist_customers_dataset.parquet',
file_path= 'tmp/olist_customers_dataset.parquet'
)
extract_customers_task = PythonOperator(
task_id= "extract_customers",
python_callable= extract_customers,
dag= dag)
##################### olist_orders_dataset #####################
def extract_orders():
# load data to a tmp folder
client.fget_object(
bucket_name= 'processing',
object_name= 'olist_orders_dataset.parquet',
file_path= 'tmp/olist_orders_dataset.parquet'
)
extract_orders_task = PythonOperator(
task_id= "extract_orders",
python_callable= extract_orders,
dag= dag)
##################### olist_order_items_dataset #####################
def extract_order_items():
# load data to a tmp folder
client.fget_object(
bucket_name= 'processing',
object_name= 'olist_order_items_dataset.parquet',
file_path= 'tmp/olist_order_items_dataset.parquet'
)
extract_order_items_task = PythonOperator(
task_id= "extract_order_items",
python_callable= extract_order_items,
dag= dag)
##################### olist_geolocation_dataset #####################
def extract_geolocation():
# load data to a tmp folder
client.fget_object(
bucket_name= 'processing',
object_name= 'olist_geolocation_dataset.parquet',
file_path= 'tmp/olist_geolocation_dataset.parquet'
)
extract_geolocation_task = PythonOperator(
task_id= "extract_geolocation",
python_callable= extract_geolocation,
dag= dag)
def transform_data():
customers = pd.read_parquet('tmp/olist_customers_dataset.parquet')
orders = | pd.read_parquet('tmp/olist_orders_dataset.parquet') | pandas.read_parquet |