import requests_html, openpyxl, ntpath, os, datetime
import PySimpleGUI as sg
import numpy as np
import pandas as pd
from pathlib import Path
from bs4 import BeautifulSoup as BSoup
from requests.exceptions import ConnectionError
from requests.exceptions import ReadTimeout
from openpyxl.utils.dataframe import dataframe_to_rows
## function to return slices of a list as a list of lists
## slices([1, 2, 3, 4, 5], 2) --> [[1,2], [3,4], [5]]
def slices(list, slice):
for i in range(0, len(list), slice):
yield list[i : i + slice]
## function to read the fasta file you want to blast
## returns content of the file as list of query strings of a length of 250 sequences
## returns the names of the sequences you searched to later pass them in to the result list
def fasta_to_string(fasta_path, query_size):
## open fasta file and read content
with open(fasta_path, 'r') as input:
query = input.read()
## extract the sequence names from the fasta file
sequence_names = list(slices(query.split('\n')[::2][:-1], query_size))
## split query into different lists each containing 250 sequences, which is fast to blast
## join them into strings afterwards because the BOLD POST method expects a single string
query = query.split('\n')[:-1]
query = list(slices(query, query_size * 2))
query = ['\n'.join(sublist) for sublist in query]
## return query for blasting later and sequence names for adding to the results later
return query, sequence_names
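## Illustrative sketch (file name and sizes are hypothetical) of how the two
## returned lists line up: for a fasta with 4 sequences and query_size = 2 the
## function yields two query strings of 2 records each plus the matching names,
# queries, names = fasta_to_string('example.fasta', query_size=2)
# queries[0] -> '>seq1\nACGT...\n>seq2\nTTGA...'
# names[0]   -> ['>seq1', '>seq2']
## note that the [::2] slicing above assumes a strictly alternating name/sequence
## layout (exactly one sequence line per record).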
## function to post a query string to BOLD and return the Top20 result links from the response
def post_request(query, session):
seq_data = {
'tabtype': 'animalTabPane',
'historicalDB': '',
'searchdb': 'COX1',
'sequence': query
}
## send search request
r = session.post('https://boldsystems.org/index.php/IDS_IdentificationRequest', data = seq_data, timeout = 300)
## extract Top20 table links from the BOLD Result page
soup = BSoup(r.text, 'html5lib')
data = soup.find_all('span', style = 'text-decoration: none')
data = ['http://boldsystems.org' + data[i].get('result') for i in range(len(data))]
## return the data
return data
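## Hedged usage sketch chaining the helpers above (the fasta name is hypothetical;
## the BOLD form fields and the 'result' attribute scraped here are assumed to be
## unchanged on the server side):
# with requests_html.HTMLSession() as session:
#     queries, names = fasta_to_string('example.fasta', query_size=250)
#     links = []
#     for q in queries:
#         links.extend(post_request(q, session))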
## function to fetch html from a list of links
## takes a progressbar argument so it can update the window's progress bar
def requests(url_list, progressbar):
html = []
with requests_html.HTMLSession() as session:
for url in url_list:
r = session.get(url)
html.append(r.text)
progressbar.UpdateBar(round(100 / len(url_list) * (url_list.index(url) + 1)))
return html
## function to convert returned html in dataframes
def save_as_df(html_list, sequence_names):
## create a soup of every result page that is returned by requests
soups = [BSoup(html, 'html5lib') for html in html_list]
## find the resulttable in the html
tables = [soup.find('table', class_ = 'resultsTable noborder') for soup in soups]
## if None is returned add a NoMatch table instead
## create nomatch table before creating rest of dataframes
nomatch_array = np.array([['No Match'] * 9 + [''] for i in range(20)])
nomatch_df = pd.DataFrame(nomatch_array)
"""
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
from datetime import datetime
from inspect import signature
from io import StringIO
import os
from pathlib import Path
import sys
import numpy as np
import pytest
from pandas.compat import PY310
from pandas.errors import (
EmptyDataError,
ParserError,
ParserWarning,
)
from pandas import (
DataFrame,
Index,
Series,
Timestamp,
compat,
)
import pandas._testing as tm
from pandas.io.parsers import TextFileReader
from pandas.io.parsers.c_parser_wrapper import CParserWrapper
xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")
skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")
def test_override_set_noconvert_columns():
# see gh-17351
#
# Usecols needs to be sorted in _set_noconvert_columns based
# on the test_usecols_with_parse_dates test from test_usecols.py
class MyTextFileReader(TextFileReader):
def __init__(self) -> None:
self._currow = 0
self.squeeze = False
class MyCParserWrapper(CParserWrapper):
def _set_noconvert_columns(self):
if self.usecols_dtype == "integer":
# self.usecols is a set, which is documented as unordered
# but in practice, a CPython set of integers is sorted.
# In other implementations this assumption does not hold.
# The following code simulates a different order, which
# before GH 17351 would cause the wrong columns to be
# converted via the parse_dates parameter
self.usecols = list(self.usecols)
self.usecols.reverse()
return CParserWrapper._set_noconvert_columns(self)
data = """a,b,c,d,e
0,1,20140101,0900,4
0,1,20140102,1000,4"""
parse_dates = [[1, 2]]
cols = {
"a": [0, 0],
"c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")],
}
expected = DataFrame(cols, columns=["c_d", "a"])
parser = MyTextFileReader()
parser.options = {
"usecols": [0, 2, 3],
"parse_dates": parse_dates,
"delimiter": ",",
}
parser.engine = "c"
parser._engine = MyCParserWrapper(StringIO(data), **parser.options)
result = parser.read()
tm.assert_frame_equal(result, expected)
def test_read_csv_local(all_parsers, csv1):
prefix = "file:///" if compat.is_platform_windows() else "file://"
parser = all_parsers
fname = prefix + str(os.path.abspath(csv1))
result = parser.read_csv(fname, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738],
[1.047916, -0.041232, -0.16181208307, 0.212549],
[0.498581, 0.731168, -0.537677223318, 1.346270],
[1.120202, 1.567621, 0.00364077397681, 0.675253],
[-0.487094, 0.571455, -1.6116394093, 0.103469],
[0.836649, 0.246462, 0.588542635376, 1.062782],
[-0.157161, 1.340307, 1.1957779562, -1.097007],
],
columns=["A", "B", "C", "D"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
datetime(2000, 1, 10),
datetime(2000, 1, 11),
],
name="index",
),
)
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
def test_1000_sep(all_parsers):
parser = all_parsers
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({"A": [1, 10], "B": [2334, 13], "C": [5, 10.0]})
# -*- coding: utf-8 -*-
"""
These the test the public routines exposed in types/common.py
related to inference and not otherwise tested in types/test_common.py
"""
from warnings import catch_warnings, simplefilter
import collections
import re
from datetime import datetime, date, timedelta, time
from decimal import Decimal
from numbers import Number
from fractions import Fraction
import numpy as np
import pytz
import pytest
import pandas as pd
from pandas._libs import lib, iNaT, missing as libmissing
from pandas import (Series, Index, DataFrame, Timedelta,
DatetimeIndex, TimedeltaIndex, Timestamp,
Panel, Period, Categorical, isna, Interval,
DateOffset)
from pandas import compat
from pandas.compat import u, PY2, StringIO, lrange
from pandas.core.dtypes import inference
from pandas.core.dtypes.common import (
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_number,
is_integer,
is_float,
is_bool,
is_scalar,
is_scipy_sparse,
ensure_int32,
ensure_categorical)
from pandas.util import testing as tm
import pandas.util._test_decorators as td
@pytest.fixture(params=[True, False], ids=str)
def coerce(request):
return request.param
# collect all objects to be tested for list-like-ness; use tuples of objects,
# whether they are list-like or not (special casing for sets), and their ID
ll_params = [
([1], True, 'list'), # noqa: E241
([], True, 'list-empty'), # noqa: E241
((1, ), True, 'tuple'), # noqa: E241
(tuple(), True, 'tuple-empty'), # noqa: E241
({'a': 1}, True, 'dict'), # noqa: E241
(dict(), True, 'dict-empty'), # noqa: E241
({'a', 1}, 'set', 'set'), # noqa: E241
(set(), 'set', 'set-empty'), # noqa: E241
(frozenset({'a', 1}), 'set', 'frozenset'), # noqa: E241
(frozenset(), 'set', 'frozenset-empty'), # noqa: E241
(iter([1, 2]), True, 'iterator'), # noqa: E241
(iter([]), True, 'iterator-empty'), # noqa: E241
((x for x in [1, 2]), True, 'generator'), # noqa: E241
((x for x in []), True, 'generator-empty'), # noqa: E241
(Series([1]), True, 'Series'), # noqa: E241
(Series([]), True, 'Series-empty'), # noqa: E241
(Series(['a'])
import cv2
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from scipy import ndimage
import scipy.misc
from tqdm import tqdm
import utils.data as data
from utils.filename import *
from utils.image import *
from utils.params import *
from utils.preprocess import *
import utils.models as models
train_masks = data.read_train_masks()
""" Visualization utils"""
def vis_dataset(nrows = 5, ncols = 5, mask_alpha = 0.4, augment = False):
""" Sample some images from the dataset and show them in a grid."""
f, ax = plt.subplots(nrows = nrows, ncols = ncols, sharex = True, sharey = True, figsize=(20,20))
sampled_imgs = np.random.choice(train_masks['img'], nrows*ncols)
counter = 0
for i in range(nrows):
for j in range(ncols):
car_code, angle_code = filename_to_code(sampled_imgs[counter])
image = read_image(car_code, angle_code)
mask = read_image(car_code, angle_code, True)
if augment is True:
image = resize(image)
mask = resize(mask)
image = randomHueSaturationVariation(image, hue_shift_limit=(-50,50),
sat_shift_limit=(-5, 5), val_shift_limit=(-15, 15))
image, mask = randomShiftScaleRotate(image, mask, rotate_limit=(-5, 5))
image, mask = randomHorizontalFlip(image, mask)
ax[i, j].imshow(image)
ax[i, j].imshow(mask, alpha = mask_alpha)
counter += 1
plt.show()
# def vis_manufacturer_predictions(nrows = 5, ncols = 5):
# """ Sample some images from the dataset and show them in a grid."""
# model = models.get_manufacturer_model()
# model.load_weights('./models/manufacturer_model.best_weights.hdf5')
# f, ax = plt.subplots(nrows = nrows, ncols = ncols, sharex = True, sharey = True, figsize=(20,20))
# sampled_imgs = np.random.choice(train_masks['img'], nrows*ncols)
# counter = 0
# for i in range(nrows):
# for j in range(ncols):
# car_code, angle_code = filename_to_code(sampled_imgs[counter])
# image = read_image(car_code, angle_code)
# image = resize(image)
# x_batch = []
# x_batch.append(image)
# x_batch = np.array(x_batch, np.float32) /255
# pred = model.predict(x_batch).squeeze()
# ax[i, j].imshow(image)
# model.predict
# counter += 1
# plt.show()
# Sample some images from the dataset and show them in a grid
def vis_curropted_dataset():
""" List of incorrectly labeled images is given."""
""" Sample and show some images from it."""
curropted_masks = ['0d1a9caf4350_14', '1e89e1af42e7_07', '2a4a8964ebf3_08',
'2ea62c1beee7_03', '2faf504842df_03', '2faf504842df_12',
'3afec4b5ac07_05', '3afec4b5ac07_12', '3afec4b5ac07_13',
'3afec4b5ac07_14', '3bca821c8c41_13', '4a4364d7fc6d_06',
'4a4364d7fc6d_07', '4a4364d7fc6d_14', '4a4364d7fc6d_15',
'4baf50a3d8c2_05', '4e5ac4b9f074_11', '4f1f065d78ac_14',
'4f0397cf7937_05', '5df60cf7cab2_07', '5df60cf7cab2_15',
'6ba36af67cb0_07', '6bff9e10288e_01' ]
ncols = 2
nrows = 2
curropted_masks = np.random.choice(curropted_masks, ncols*nrows)
f, ax = plt.subplots(nrows = nrows, ncols = ncols, sharex = True, sharey = True, figsize=(20, 20))
sampled_imgs = np.random.choice(train_masks['img'], nrows*ncols)
counter = 0
for i in range(nrows):
for j in range(ncols):
if counter < len(curropted_masks):
car_code, angle_code = filename_to_code(curropted_masks[counter])
# print (car_code, angle_code)
image = read_image(car_code, angle_code)
ax[i, j].imshow(image)
mask = read_image(car_code, angle_code, True)
# mix = cv2.bitwise_and(image, image, mask = mask)
ax[i, j].imshow(mask, alpha = 0.9)
ax[i, j].set_title(curropted_masks[counter])
# ax[i, j].imshow(mix, cmap = 'Greys_r', alpha = 0.6)
counter += 1
plt.show()
def vis_manufacturer_distribution():
metadata = data.read_metadata()
metadata.index = metadata['id']
train_masks = data.read_train_masks()
train_ids = train_masks['img'].apply(lambda x: x[:-7])
train_ids = list(set(train_ids))
train_metadata = metadata.loc[train_ids]
plt.figure(figsize=(12, 10))
# train_metadata =
sns.countplot(y="make", data=train_metadata)
plt.show()
def plot_manufacturer_stats():
man_history_DF = pd.read_csv('logs/man.csv')
fig, axes = plt.subplots(nrows = 1, ncols = 2, figsize=(10, 5))
ax = man_history_DF[['acc', 'val_acc']].plot(ax = axes[0]);
ax.set_title('model accuracy')
ax.set_xlabel('epoch')
ax.set_ylabel('accuracy')
# pd.DataFrame(baseline_history.history)[['acc', 'val_acc']].plot()
ax = man_history_DF[['loss', 'val_loss']].plot(ax = axes[1])
ax.set_title('model loss')
ax.set_xlabel('epoch')
ax.set_ylabel('loss')
fig.show()
def plot_final_results():
""" Plots a comparison score between the three models"""
""" The results are hardcoded here for simplicity."""
res = [[0.7491, 0.743401], [0.8848, 0.8894190], [0.9886, 0.989057]]
res_pd = pd.DataFrame(res)
#!/usr/bin/env python
"""
Command-line tool to control the concavity constraining tools
Mudd et al., 2018
So far mostly testing purposes
B.G.
"""
from lsdtopytools import LSDDEM # I am telling python I will need this module to run.
from lsdtopytools import argparser_debug as AGPD # I am telling python I will need this module to run.
from lsdtopytools import quickplot as qp, quickplot_movern as qmn # We will need the plotting routines
import time as clock # Basic benchmarking
import sys # manage the argv
import pandas as pd
import lsdtopytools as lsd
import numpy as np
import numba as nb
import pandas as pd
from matplotlib import pyplot as plt
import sys
import matplotlib
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
import math
from lsdtopytools.numba_tools import travelling_salesman_algortihm, remove_outliers_in_drainage_divide
import random
import matplotlib.gridspec as gridspec
from multiprocessing import Pool, current_process
from scipy import spatial
class NoBasinFoundError(Exception):
pass
def main_concavity():
# Here are the different parameters and their default value fr this script
default_param = AGPD.get_common_default_param()
default_param["quick_movern"] = False
default_param["X"] = None
default_param["Y"] = None
default_param = AGPD.ingest_param(default_param, sys.argv)
# Checking the param
if(isinstance(default_param["X"],str)):
X = [float(default_param["X"])]
Y = [float(default_param["Y"])]
else:
try:
X = [float(i) for i in default_param["X"]]
Y = [float(i) for i in default_param["Y"]]
except:
pass
if(default_param["help"] or len(sys.argv)==2 or "help" in sys.argv ):
print("""
This command-line tool run concavity analysis tools from LSDTopoTools.
Description of the algorithms in Mudd et al., 2018 -> https://www.earth-surf-dynam.net/6/505/2018/
To use, run the script with relevant options, for example:
lsdtt-concavity-tools.py file=myraster.tif quick_movern X=5432 Y=78546
option available:
file: name of the raster (file=name.tif)
path: path to the file (default = current folder)
quick_movern: run disorder metrics and plot a result figure (you just need to write it)
X: X coordinate of the outlet (So far only single basin is supported as this is an alpha tool)
Y: Y coordinate of the outlet (So far only single basin is supported as this is an alpha tool)
help: if written, display this message. Documentation soon to be written.
""")
quit()
print("Welcome to the command-line tool to constrain your river network concavity. Refer to Mudd et al., 2018 -> https://www.earth-surf-dynam.net/6/505/2018/ for details about these algorithms.")
print("Let me first load the raster ...")
try:
mydem = LSDDEM(file_name = default_param["file"], path=default_param["path"], already_preprocessed = False, verbose = False)
except:
print("Testing data still to build")
print("Got it. Now dealing with the depressions ...")
mydem.PreProcessing(filling = True, carving = True, minimum_slope_for_filling = 0.0001) # Unnecessary if already preprocessed of course.
print("Done! Extracting the river network")
mydem.ExtractRiverNetwork( method = "area_threshold", area_threshold_min = 1000)
print("I have some rivers for you! Defining the watershed of interest...")
mydem.DefineCatchment( method="from_XY", X_coords = X, Y_coords = Y, test_edges = False, coord_search_radius_nodes = 25, coord_threshold_stream_order = 1)
print("I should have it now.")
print("I got all the common info, now I am running what you want!")
if(default_param["quick_movern"]):
print("Initialising Chi-culations (lol)")
mydem.GenerateChi()
print("Alright, getting the disorder metrics for each chi values. LSDTopoTools can split a lot of messages, sns.")
mydem.cppdem.calculate_movern_disorder(0.15, 0.05, 17, 1, 1000) # start theta, delta, n, A0, threshold
print("I am done, plotting the results now")
qmn.plot_disorder_results(mydem, legend = False, normalise = True, cumulative_best_fit = False)
qp.plot_check_catchments(mydem)
qmn.plot_disorder_map(mydem, cmap = "RdBu_r")
print("FInished with quick disorder metric")
print("Finished!")
def temp_concavity_FFS_all():
# Here are the different parameters and their default value fr this script
default_param = AGPD.get_common_default_param()
default_param["already_preprocessed"] = False
default_param["process_main"] = False
default_param["X"] = None
default_param["Y"] = None
default_param["AT"] = None
default_param["ATM"] = None
default_param["n_proc"] = None
default_param["min_DA"] = None
default_param["max_DA"] = None
default_param["prefix"] = ""
prefix = default_param["prefix"]
default_param = AGPD.ingest_param(default_param, sys.argv)
if(default_param["help"] or len(sys.argv)==1 or "help" in sys.argv):
print("""
Experimental command-line tools for concavity constraining. Deprecated now.
""")
quit()
X = float(default_param["X"])
Y = float(default_param["Y"])
min_DA = float(default_param["min_DA"])
max_DA = float(default_param["max_DA"])
area_threshold_main_basin = float(default_param["ATM"])
area_threshold = float(default_param["AT"])
n_proc = int(default_param["n_proc"])
dem_name = default_param["file"]
if(default_param["process_main"]):
lsd.concavity_automator.process_main_basin(dem_name, dem_path = "./" ,already_preprocessed = True , X_outlet = X, Y_outlet = Y, area_threshold = area_threshold, area_threshold_to_keep_river = area_threshold_main_basin, prefix = default_param["prefix"])
else:
lsd.concavity_automator.get_all_concavity_in_range_of_DA_from_baselevel(dem_name, dem_path = "./" ,already_preprocessed = True , X_outlet = X, Y_outlet = Y, min_DA = min_DA, max_DA = max_DA, area_threshold = area_threshold, n_proc = n_proc, prefix = default_param["prefix"])
lsd.concavity_automator.post_process_basins(prefix = default_param["prefix"])
for key,val in default_param.items():
default_param[key] = [val]
pd.DataFrame(default_param).to_csv(prefix +"log_concavity_FFS_sstuff.txt", index = False)
def concavity_single_basin():
"""
Command-line tool to constrain concavity for a single basin from its XY coordinates using Normalised disorder method.
Takes several arguments (the values after = are example values to adapt):
file=NameOfFile.tif -> The code NEEDS the neame of the raster to process.
already_preprocessed -> OPTIONAL Tell the code your raster does not need preprocessing, otherwise carve the DEM (see lsdtt-depressions for more options)
X=234 -> X Coordinate (in map unit) of the outlet (needs to be the exact pixel at the moment, will add a snapping option later)
Y=234 -> Y Coordinate (in map unit) of the outlet (needs to be the exact pixel at the moment, will add a snapping option later)
AT=5000 -> Area threshold in number of pixels to initiate a river: lower number <=> denser network (quickly increases the processing time)
prefix=test -> OPTIONAL Add a prefix to each outputted file (handy for automation)
mode=g -> DEFAULT is g. Processing mode: can be "g" for generating data, "p" for plotting previously generated data or "all" for everything.
Example:
lsdtt-concFFS-single file=DEM.tif already_preprocessed X=43422 Y=5353497 AT=2500 prefix=FeatherRiver mode=all
"""
# Here are the different parameters and their default value fr this script
default_param = AGPD.get_common_default_param()
default_param["already_preprocessed"] = False
default_param["X"] = None
default_param["Y"] = None
default_param["AT"] = None
default_param["prefix"] = ""
default_param["mode"] = "g"
default_param["n_tribs_by_combinations"] = 3
# Ingesting the parameters
default_param = AGPD.ingest_param(default_param, sys.argv)
prefix = default_param["prefix"]
if(default_param["help"] or len(sys.argv)==1 or "help" in sys.argv):
print("""
Command-line tool to constrain concavity for a single basin from its XY coordinates using Normalised disorder method.
Takes several arguments (the values after = are example values to adapt):
file=NameOfFile.tif -> The code NEEDS the name of the raster to process.
already_preprocessed -> OPTIONAL Tell the code your raster does not need preprocessing, otherwise carve the DEM (see lsdtt-depressions for more options)
X=234 -> X Coordinate (in map unit) of the outlet (needs to be the exact pixel at the moment, will add a snapping option later)
Y=234 -> Y Coordinate (in map unit) of the outlet (needs to be the exact pixel at the moment, will add a snapping option later)
AT=5000 -> Area threshold in number of pixels to initiate a river: lower number <=> denser network (quickly increases the processing time)
prefix=test -> OPTIONAL Add a prefix to each outputted file (handy for automation)
mode=g -> DEFAULT is g. Processing mode: can be "g" for generating data, "p" for plotting previously generated data or "all" for everything.
Example:
lsdtt-concFFS-single file=DEM.tif already_preprocessed X=43422 Y=5353497 AT=2500 prefix=FeatherRiver mode=all
""")
quit()
# Reformatting some values that sometimes are not formatted correctly
X = float(default_param["X"])
Y = float(default_param["Y"])
area_threshold = float(default_param["AT"])
dem_name = default_param["file"]
default_param["n_tribs_by_combinations"] = int(default_param["n_tribs_by_combinations"])
# Wrapper to the processing function (the convoluted ls method makes multiprocessing easier when processing several basins)
ls = [0,X,Y,area_threshold,prefix]
# Calling the requested codes
if("all" in default_param["mode"].lower() or "g" in default_param["mode"].lower()):
lsd.concavity_automator.process_basin(ls,ignore_numbering = True, overwrite_dem_name = dem_name, n_tribs_by_combo = default_param["n_tribs_by_combinations"])
if("all" in default_param["mode"].lower() or "p" in default_param["mode"].lower()):
lsd.concavity_automator.plot_basin(ls,ignore_numbering = True, overwrite_dem_name = dem_name)
# Saving a log of processing
for key,val in default_param.items():
default_param[key] = [val]
pd.DataFrame(default_param).to_csv(prefix +"log_concavity_FFS_single_basin_sstuff.txt", index = False)
def concavity_multiple_basin():
"""
Command-line tool to constrain concavity for multiple basins from their XY coordinates using the Normalised disorder method.
Takes several arguments (the values after = are example values to adapt):
file=NameOfFile.tif -> The code NEEDS the name of the raster to process.
already_preprocessed -> OPTIONAL Tell the code your raster does not need preprocessing, otherwise carve the DEM (see lsdtt-depressions for more options)
csv=outlets.csv -> Name of the csv file containing the following columns: "X", "Y" and "area_threshold" for each basins to investigate. Can be generated automatically from lsdtt-concFFS-spawn-outlets
n_proc=4 -> DEFAULT is 1. Number of processors to use in parallel when possible.
prefix=test -> OPTIONAL Add a prefix to each outputted file (handy for automation)
mode=g -> DEFAULT is g. Processing mode: can be "g" for generating data, "p" for plotting previously generated data, "d" for plotting disorder map (WARNING: takes time and memory) or "all" for everything.
Example:
lsdtt-concFFS-multiple file=DEM.tif already_preprocessed csv=FeatherRiveroutlets.csv prefix=FeatherRiver mode=g
"""
# Here are the different parameters and their default value fr this script
default_param = AGPD.get_common_default_param()
default_param["already_preprocessed"] = False
default_param["prefix"] = ""
default_param["csv"] = ""
default_param["n_proc"] = 1
default_param["area_thershold_basin_extraction"] = 500
default_param["precipitation_file"] = ""
default_param["mode"] = "g"
default_param["n_tribs_by_combinations"] = 3
default_param = AGPD.ingest_param(default_param, sys.argv)
prefix = default_param["prefix"]
if(default_param["help"] or len(sys.argv)==1 or "help" in sys.argv):
print("""
Command-line tool to constrain concavity for multiple basins from their XY coordinates using the Normalised disorder method.
Takes several arguments (the values after = are example values to adapt):
file=NameOfFile.tif -> The code NEEDS the name of the raster to process.
already_preprocessed -> OPTIONAL Tell the code your raster does not need preprocessing, otherwise carve the DEM (see lsdtt-depressions for more options)
csv=outlets.csv -> Name of the csv file containing the following columns: "X", "Y" and "area_threshold" for each basins to investigate. Can be generated automatically from lsdtt-concFFS-spawn-outlets
n_proc=4 -> DEFAULT is 1. Number of processors to use in parallel when possible.
prefix=test -> OPTIONAL Add a prefix to each outputted file (handy for automation)
mode=g -> DEFAULT is g. Processing mode: can be "g" for generating data, "p" for plotting previously generated data or "all" for everything.
Example:
lsdtt-concFFS-multiple file=DEM.tif already_preprocessed csv=FeatherRiveroutlets.csv prefix=FeatherRiver mode=g
""")
# Reading the csv file
df = pd.read_csv(default_param["csv"])
# Reformatting stuff
n_proc = int(default_param["n_proc"])
default_param["n_tribs_by_combinations"] = int(default_param["n_tribs_by_combinations"])
area_thershold_basin_extraction = float(default_param["area_thershold_basin_extraction"])
dem_name = default_param["file"]
if(default_param["precipitation_file"] == ""):
precipitation = False
else:
precipitation = True
# Processing options
if("all" in default_param["mode"].lower() or "g" in default_param["mode"].lower()):
lsd.concavity_automator.process_multiple_basins(dem_name, dem_path = "./",already_preprocessed = default_param["already_preprocessed"],
prefix = default_param["prefix"], X_outlets = df["X"].values, Y_outlets = df["Y"].values, n_proc = n_proc, area_threshold = df["area_threshold"].values,
area_thershold_basin_extraction = area_thershold_basin_extraction, n_tribs_by_combo = default_param["n_tribs_by_combinations"],
use_precipitation_raster = precipitation, name_precipitation_raster = default_param["precipitation_file"])
lsd.concavity_automator.post_process_analysis_for_Dstar(default_param["prefix"], n_proc = n_proc, base_raster_full_name = dem_name)
if("z" in default_param["mode"].lower()):
lsd.concavity_automator.post_process_analysis_for_Dstar(default_param["prefix"], n_proc = n_proc, base_raster_full_name = dem_name)
if("p" in default_param["mode"].lower()):
lsd.concavity_automator.plot_main_figures(default_param["prefix"])
if("all" in default_param["mode"].lower() or "d" in default_param["mode"].lower()):
lsd.concavity_automator.plot_multiple_basins(dem_name, dem_path = "./",already_preprocessed = default_param["already_preprocessed"],
prefix = default_param["prefix"], X_outlets = df["X"].values, Y_outlets = df["Y"].values, n_proc = n_proc, area_threshold = df["area_threshold"].values,
area_thershold_basin_extraction = area_thershold_basin_extraction, plot_Dstar = False, n_tribs_by_combo = default_param["n_tribs_by_combinations"])
lsd.concavity_automator.plot_Dstar_maps_for_all_concavities(default_param["prefix"], n_proc = n_proc)
# Saving logs
for key,val in default_param.items():
default_param[key] = [val]
pd.DataFrame(default_param).to_csv(prefix +"log_concavity_FFS_multiple_basin_sstuff.txt", index = False)
def spawn_XY_outlet():
"""
Command-line tool to prechoose the basins used for other analyses. Outputs a file with outlet coordinates readable by other command-line tools and a basin perimeter csv readable by GISs, so you can check that the basins correspond to your needs.
Takes several arguments (the values after = are example values to adapt):
file=NameOfFile.tif -> The code NEEDS the name of the raster to process.
already_preprocessed -> OPTIONAL Tell the code your raster does not need preprocessing, otherwise carve the DEM (see lsdtt-depressions for more options)
test_edges -> OPTIONAL will test if the extracted basins are potentially influenced by nodata and are therefore incomplete. WARNING: this will take out ANY basin potentially cut; if you know what you are doing, you can turn this off.
prefix=test -> OPTIONAL Add a prefix to each outputted file (handy for automation)
method=from_range -> DEFAULT from_range: determine the method to select basin. Can be
from_range -> select largest basins bigger than min_DA but smaller than max_DA (in m^2)
min_area -> select largest basins bigger than min_DA
main_basin -> select the largest basin
Other methods to come.
min_elevation=45 -> DEFAULT 0. Ignore any basin below that elevation
area_threshold=3500 -> DEFAULT 5000. River network area threshold in number of pixels (part of the basin selection is based on river junctions, which are HIGHLY sensitive to that variable).
Example:
lsdtt-concFFS-spawn-outlets file=DEM.tif already_preprocessed min_DA=1e7 max_DA=1e9 area_threshold=3500
"""
default_param = AGPD.get_common_default_param()
default_param["already_preprocessed"] = False
default_param["test_edges"] = False
default_param["area_threshold"] = 5000
default_param["method"] = "from_range"
default_param["min_DA"] = 1e6
default_param["max_DA"] = 1e9
default_param["min_elevation"] = 0;
default_param["prefix"] = "";
default_param = AGPD.ingest_param(default_param, sys.argv)
choice_of_method = ["min_area", "main_basin","from_range"]
if(default_param["help"] or len(sys.argv)==1 or "help" in sys.argv):
print("""
Command-line tool to prechoose the basins used for other analyses. Outputs a file with outlet coordinates readable by other command-line tools and a basin perimeter csv readable by GISs, so you can check that the basins correspond to your needs.
Takes several arguments (the values after = are example values to adapt):
file=NameOfFile.tif -> The code NEEDS the name of the raster to process.
already_preprocessed -> OPTIONAL Tell the code your raster does not need preprocessing, otherwise carve the DEM (see lsdtt-depressions for more options)
test_edges -> OPTIONAL will test if the extracted basins are potentially influenced by nodata and are therefore incomplete. WARNING: this will take out ANY basin potentially cut; if you know what you are doing, you can turn this off.
prefix=test -> OPTIONAL Add a prefix to each outputted file (handy for automation)
method=from_range -> DEFAULT from_range: determine the method to select basin. Can be
from_range -> select largest basins bigger than min_DA but smaller than max_DA (in m^2)
min_area -> select largest basins bigger than min_DA
main_basin -> select the largest basin
Other methods to come.
min_elevation=45 -> DEFAULT 0. Ignore any basin below that elevation
area_threshold=3500 -> DEFAULT 5000. River network area threshold in number of pixels (part of the basin selection is based on river junctions, which are HIGHLY sensitive to that variable).
Example:
lsdtt-concFFS-spawn-outlets file=DEM.tif already_preprocessed min_DA=1e7 max_DA=1e9 area_threshold=3500
""")
return 0;
# Checks if the method requested is valid or not
if(default_param["method"].lower() not in choice_of_method):
print("I cannot recognise the method! Please choose from:")
print(choice_of_method)
return 0
# Formatting parameters
area_threshold = int(default_param["area_threshold"])
min_DA = float(default_param["min_DA"])
max_DA = float(default_param["max_DA"])
min_elevation = float(default_param["min_elevation"])
# Reading DEM
mydem = LSDDEM(file_name = default_param["file"], path = default_param["path"], already_preprocessed = default_param["already_preprocessed"], remove_seas = True, sea_level = min_elevation)
if(default_param["already_preprocessed"] == False):
mydem.PreProcessing()
# Extracting basins
mydem.CommonFlowRoutines()
print("Done with flow routines")
mydem.ExtractRiverNetwork( method = "area_threshold", area_threshold_min = area_threshold)
# Get the outlet coordinates of all the extracted basins
print("Extracted rivers")
df_outlet = mydem.DefineCatchment( method = default_param["method"], min_area = min_DA, max_area = max_DA, test_edges = default_param["test_edges"])#, X_coords = [X_coordinates_outlets[7]], Y_coords = [Y_coordinates_outlets[7]])
print("Extracted")
for key,val in df_outlet.items():
df_outlet[key] = np.array(df_outlet[key])
# Getting the rivers
mydem.GenerateChi(theta = 0.4, A_0 = 1)
# Saving the rivers to csv
mydem.df_base_river.to_csv(default_param["prefix"]+"rivers.csv", index = False)
#Saving the outlet
df_outlet["area_threshold"] = np.full(df_outlet["X"].shape[0],area_threshold)
# print(df_outlet)
pd.DataFrame(df_outlet).to_csv(default_param["prefix"]+"outlets.csv", index = False)
# Getting the perimeter of basins
this = mydem.cppdem.extract_perimeter_of_basins()
df_perimeter = {"X":[],"Y":[],"Z":[],"IDs":[]}
for key,val in this.items():
df_perimeter["X"].append(np.array(val["X"]))
df_perimeter["Y"].append(np.array(val["Y"]))
df_perimeter["Z"].append(np.array(val["Z"]))
df_perimeter["IDs"].append(np.full(np.array(val["Z"]).shape[0], key))
for key,val in df_perimeter.items():
df_perimeter[key] = np.concatenate(val)
pd.DataFrame(df_perimeter).to_csv(default_param["prefix"]+"perimeters.csv", index = False)
def spawn_XY_outlet_subbasins():
default_param = AGPD.get_common_default_param()
default_param["already_preprocessed"] = False
default_param["X"] = 0
default_param["Y"] = 0
default_param["area_threshold"] = 5000
default_param["min_DA"] = 1e6
default_param["max_DA"] = 1e9
default_param["min_elevation"] = 0;
default_param["prefix"] = "";
default_param = AGPD.ingest_param(default_param, sys.argv)
if(default_param["help"] or len(sys.argv)==1 or "help" in sys.argv):
print("""
Command-line tool to extract basin information about all the subbasins within a main one. Outputs a file with outlet coordinates readable by other command-line tools and a basin perimeter csv readable by GISs, so you can check that the basins correspond to your needs.
Takes several arguments (the values after = are example values to adapt):
file=NameOfFile.tif -> The code NEEDS the name of the raster to process.
already_preprocessed -> OPTIONAL Tell the code your raster does not need preprocessing, otherwise carve the DEM (see lsdtt-depressions for more options)
prefix=test -> OPTIONAL Add a prefix to each outputted file (handy for automation)
min_elevation=45 -> DEFAULT 0. Ignore any basin below that elevation
area_threshold=3500 -> DEFAULT 5000. River network area threshold in number of pixels (part of the basin selection is based on river junctions, which are HIGHLY sensitive to that variable).
min_DA=1e7 -> minimum drainage area to extract a subbasin
max_DA=1e9 -> maximum drainage area for a subbasin
X=234 -> X Coordinate (in map unit) of the outlet (needs to be the exact pixel at the moment, will add a snapping option later)
Y=234 -> Y Coordinate (in map unit) of the outlet (needs to be the exact pixel at the moment, will add a snapping option later)
Example:
lsdtt-concFFS-spawn-outlets file=DEM.tif already_preprocessed min_DA=1e7 max_DA=1e9 area_threshold=3500
""")
return 0;
area_threshold = int(default_param["area_threshold"])
X = float(default_param["X"])
min_DA = float(default_param["min_DA"])
Y = float(default_param["Y"])
max_DA = float(default_param["max_DA"])
min_elevation = float(default_param["min_elevation"])
mydem = LSDDEM(file_name = default_param["file"], path = default_param["path"], already_preprocessed = default_param["already_preprocessed"], remove_seas = True, sea_level = min_elevation)
if(default_param["already_preprocessed"] == False):
mydem.PreProcessing()
# Extracting basins
mydem.CommonFlowRoutines()
mydem.ExtractRiverNetwork( method = "area_threshold", area_threshold_min = area_threshold)
# df_outlet = mydem.DefineCatchment( method = default_param["method"], min_area = min_DA, max_area = max_DA, test_edges = default_param["test_edges"])#, X_coords = [X_coordinates_outlets[7]], Y_coords = [Y_coordinates_outlets[7]])
df_outlet = mydem.cppdem.calculate_outlets_min_max_draining_to_baselevel(X, Y, min_DA, max_DA,500)
mydem.check_catchment_defined = True
for key,val in df_outlet.items():
df_outlet[key] = np.array(df_outlet[key])
mydem.GenerateChi(theta = 0.4, A_0 = 1)
mydem.df_base_river.to_csv(default_param["prefix"]+"rivers.csv", index = False)
df_outlet["area_threshold"] = np.full(df_outlet["X"].shape[0],area_threshold)
df_outlet = pd.DataFrame(df_outlet)
df_outlet.to_csv(default_param["prefix"]+"outlets.csv", index = False)
df_outlet["ID"] = np.array(list(range(df_outlet.shape[0])))
this = mydem.cppdem.extract_perimeter_of_basins()
df_perimeter = {"X":[],"Y":[],"Z":[],"IDs":[]}
for key,val in this.items():
df_perimeter["X"].append(np.array(val["X"]))
df_perimeter["Y"].append(np.array(val["Y"]))
df_perimeter["Z"].append(np.array(val["Z"]))
df_perimeter["IDs"].append(np.full(np.array(val["Z"]).shape[0], key))
## Log from the analysis
for key,val in df_perimeter.items():
df_perimeter[key] = np.concatenate(val)
pd.DataFrame(df_perimeter)
from bokeh.models import HoverTool
from bokeh.io import curdoc
from bokeh.layouts import column,row
import pandas as pd
from statement_parser import parse_ofx_statements
import holoviews as hv
from bokeh.models.formatters import DatetimeTickFormatter
pd.options.plotting.backend = 'holoviews'
merged_df = parse_ofx_statements()
def extract_month_year(date):
return f"{date.month_name()}, {date.year}"
def render_basic_graphs(df, transaction_type):
render_list = []
df['amount'] = abs(df['amount']).astype(float)
time_hover = HoverTool(tooltips=[("amount", "$@amount{0,0.00}"), ("date", '@date_formatted')])
time_line = df.plot.line(x='date', y='amount', logy=True, yformatter='$%.00f', title=f"{transaction_type} over time (Log scale)").opts(tools=[time_hover])
render_list.append(hv.render(time_line, backend='bokeh'))
cat_hover = HoverTool(tooltips=[("amount", "$@amount{0,0.00}"), ("category", "@category")])
categories_bar = df.groupby(['category'])['amount'].sum().plot.bar(x='category', y='amount',
yformatter='$%.00f', rot=90, title=f"{transaction_type} by Category").opts(tools=[cat_hover],
color='category', cmap='Category10')
render_list.append(hv.render(categories_bar, backend="bokeh"))
return render_list
def render_advanced_graphs(df, transaction_type):
render_list = []
df['amount'] = abs(df['amount']).astype(float)
cat_hover = HoverTool(tooltips=[("amount", "$@amount{0,0.00}"), ("category", "@category")])
df.set_index(pd.DatetimeIndex(df['date']), inplace=True)
df = df.groupby([pd.Grouper(freq='M'), 'category'])['amount'].sum()
formatter = DatetimeTickFormatter(days="%d-%b-%Y", months='%m/%Y', years='%m/%Y')
stacked_bar = df.plot.bar(stacked=True, xformatter=formatter, yformatter='$%.00f',
title=f"{transaction_type} per Category by Month").opts(tools=[cat_hover])
render_list.append(hv.render(stacked_bar, backend='bokeh'))
return render_list
def render_income_vs_expense_graphs(df):
render_list = []
df['amount'] = abs(df['amount']).astype(float)
time_hover = HoverTool(tooltips=[("amount", "$@amount{0,0.00}"), ("date", '@date_formatted')])
time_line = df.plot.line(x='date', y='amount', by='type', yformatter='$%.00f',
title="Income vs Expenses over time").opts(tools=[time_hover])
render_list.append(hv.render(time_line, backend='bokeh'))
cat_hover = HoverTool(tooltips=[("amount", "$@amount{0,0.00}"), ("type", "@type")])
df.set_index(pd.DatetimeIndex(df['date']), inplace=True)
df = df.groupby([pd.Grouper(freq='M'), 'type'])['amount'].sum()
df.to_csv("temp.csv")
df3 = pd.read_csv("temp.csv")
import pandas as pd
import numpy as np
from rdtools import energy_from_power
import pytest
# Tests for resampling at same frequency
def test_energy_from_power_calculation():
power_times = pd.date_range('2018-04-01 12:00', '2018-04-01 13:00', freq='15T')
result_times = power_times[1:]
power_series = pd.Series(data=4.0, index=power_times)
expected_energy_series = pd.Series(data=1.0, index=result_times)
expected_energy_series.name = 'energy_Wh'
result = energy_from_power(power_series, max_timedelta=pd.to_timedelta('15 minutes'))
pd.testing.assert_series_equal(result, expected_energy_series)
def test_energy_from_power_max_interval():
power_times = pd.date_range('2018-04-01 12:00', '2018-04-01 13:00', freq='15T')
result_times = power_times[1:]
power_series = pd.Series(data=4.0, index=power_times)
expected_energy_series = pd.Series(data=np.nan, index=result_times)
expected_energy_series.name = 'energy_Wh'
result = energy_from_power(power_series, max_timedelta=pd.to_timedelta('5 minutes'))
# We expect a series of NaNs, because max_timedelta is smaller than the
# time step of the power time series
pd.testing.assert_series_equal(result, expected_energy_series)
def test_energy_from_power_validation():
power_series = pd.Series(data=[4.0] * 4)
with pytest.raises(ValueError):
energy_from_power(power_series, max_timedelta=pd.to_timedelta('15 minutes'))
"""
"""
from __future__ import print_function
from future.utils import listvalues
import random
from KSIF.core import utils
from .utils import fmtp, fmtn, fmtpn, get_period_name
import numpy as np
import pandas as pd
from pandas.core.base import PandasObject
from tabulate import tabulate
from matplotlib import pyplot as plt
import sklearn.manifold
import sklearn.cluster
import sklearn.covariance
from scipy.optimize import minimize
import scipy.stats
from scipy.stats import t
try:
import prettyplotlib # NOQA
except ImportError:
pass
__author__ = '<NAME>'
__email__ = '<EMAIL>'
def set_riskfree_rate(rf, update_all=False):
"""
Set annual risk-free rate property of the class PerformanceStats.
Affects all instances of the PerformanceStats, unless the default
risk-free rate was overwritten before.
Args:
* rf (float): Annual interest rate
* update_all (bool): If True, all instances of PerformanceStats
will update their values. Note, this might be very slow
time in case of many objects available
"""
# Note that both daily and monthly rates are annualized in the same
# way as returns
PerformanceStats._yearly_rf = rf
PerformanceStats._monthly_rf = (np.power(1+rf, 1./12.) - 1.) * 12
PerformanceStats._daily_rf = (np.power(1+rf, 1./252.) - 1.) * 252
if update_all:
from gc import get_objects
for obj in get_objects():
if isinstance(obj, PerformanceStats):
obj.set_riskfree_rate(rf)
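# Worked example of the annualisation above (numbers are illustrative): with
# rf = 0.05 (5% per year), the monthly rate becomes (1.05 ** (1/12) - 1) * 12
# ~= 0.0489 and the daily rate (1.05 ** (1/252) - 1) * 252 ~= 0.0488, both a
# little below the nominal 5% because the geometric (compounded) equivalent
# rate is used rather than a simple division by 12 or 252.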
class PerformanceStats(object):
"""
PerformanceStats is a convenience class used for the performance
evaluation of a price series. It contains various helper functions
to help with plotting and contains a large amount of descriptive
statistics.
Args:
* prices (Series): A price series.
Attributes:
* name (str): Name, derived from price series name
* return_table (DataFrame): A table of monthly returns with
YTD figures as well.
* lookback_returns (Series): Returns for different
lookback periods (1m, 3m, 6m, ytd...)
* stats (Series): A series that contains all the stats
"""
# Annual risk-free rate for the calculation of Sharpe ratio.
# By default is equal to 0%
_yearly_rf = 0.
_monthly_rf = 0.
_daily_rf = 0.
def __init__(self, prices):
super(PerformanceStats, self).__init__()
self.prices = prices
self.name = self.prices.name
self._start = self.prices.index[0]
self._end = self.prices.index[-1]
self._update(self.prices)
def set_riskfree_rate(self, rf):
"""
Set annual risk-free rate property and calculate properly annualized
monthly and daily rates. Then performance stats are recalculated.
Affects only this instance of the PerformanceStats.
Args:
* rf (float): Annual risk-free rate
"""
self._yearly_rf = rf
self._monthly_rf = (np.power(1+rf, 1./12.) - 1.) * 12
self._daily_rf = (np.power(1+rf, 1./252.) - 1.) * 252
# Note, that we recalculate everything.
self._update(self.prices)
def _update(self, obj):
# calc
self._calculate(obj)
# update derived structure
# return table as dataframe for easier manipulation
self.return_table = pd.DataFrame(self.return_table).T
# name columns
if len(self.return_table.columns) == 13:
self.return_table.columns = ['Jan', 'Feb', 'Mar', 'Apr', 'May',
'Jun', 'Jul', 'Aug', 'Sep', 'Oct',
'Nov', 'Dec', 'YTD']
self.lookback_returns = pd.Series(
[self.mtd, self.three_month, self.six_month, self.ytd,
self.one_year, self.three_year, self.five_year,
self.ten_year, self.cagr],
['mtd', '3m', '6m', 'ytd', '1y', '3y', '5y', '10y', 'incep'])
self.lookback_returns.name = self.name
st = self._stats()
self.stats = pd.Series(
[getattr(self, x[0]) for x in st if x[0] is not None],
[x[0] for x in st if x[0] is not None]).drop_duplicates()
def _calculate(self, obj):
# default values
self.daily_mean = np.nan
self.daily_vol = np.nan
self.daily_sharpe = np.nan
self.best_day = np.nan
self.worst_day = np.nan
self.total_return = np.nan
self.cagr = np.nan
self.incep = np.nan
self.drawdown = np.nan
self.max_drawdown = np.nan
self.drawdown_details = np.nan
self.daily_skew = np.nan
self.daily_kurt = np.nan
self.monthly_returns = np.nan
self.avg_drawdown = np.nan
self.avg_drawdown_days = np.nan
self.monthly_mean = np.nan
self.monthly_vol = np.nan
self.monthly_sharpe = np.nan
self.best_month = np.nan
self.worst_month = np.nan
self.mtd = np.nan
self.three_month = np.nan
self.pos_month_perc = np.nan
self.avg_up_month = np.nan
self.avg_down_month = np.nan
self.monthly_skew = np.nan
self.monthly_kurt = np.nan
self.six_month = np.nan
self.yearly_returns = np.nan
self.ytd = np.nan
self.one_year = np.nan
self.yearly_mean = np.nan
self.yearly_vol = np.nan
self.yearly_sharpe = np.nan
self.best_year = np.nan
self.worst_year = np.nan
self.three_year = np.nan
self.win_year_perc = np.nan
self.twelve_month_win_perc = np.nan
self.yearly_skew = np.nan
self.yearly_kurt = np.nan
self.five_year = np.nan
self.ten_year = np.nan
self.return_table = {}
# end default values
if len(obj) == 0:
return
self.start = obj.index[0]
self.end = obj.index[-1]
# save daily prices for future use
self.daily_prices = obj
# M = month end frequency
self.monthly_prices = obj.resample('M').last()
# A == year end frequency
self.yearly_prices = obj.resample('A').last()
# let's save some typing
p = obj
mp = self.monthly_prices
yp = self.yearly_prices
if len(p) == 1:
return
# stats using daily data
self.returns = p.to_returns()
self.log_returns = p.to_log_returns()
r = self.returns
if len(r) < 2:
return
self.daily_mean = r.mean() * 252
self.daily_vol = r.std() * np.sqrt(252)
self.daily_sharpe = (self.daily_mean - self._daily_rf) / self.daily_vol
self.best_day = r.max()
self.worst_day = r.min()
self.total_return = obj[-1] / obj[0] - 1
# save ytd as total_return for now - if we get to real ytd
# then it will get updated
self.ytd = self.total_return
self.cagr = calc_cagr(p)
self.incep = self.cagr
self.drawdown = p.to_drawdown_series()
self.max_drawdown = self.drawdown.min()
self.drawdown_details = drawdown_details(self.drawdown)
if self.drawdown_details is not None:
self.avg_drawdown = self.drawdown_details['drawdown'].mean()
self.avg_drawdown_days = self.drawdown_details['days'].mean()
if len(r) < 4:
return
self.daily_skew = r.skew()
# if all zero/nan kurt fails division by zero
if len(r[(~np.isnan(r)) & (r != 0)]) > 0:
self.daily_kurt = r.kurt()
# stats using monthly data
self.monthly_returns = self.monthly_prices.to_returns()
mr = self.monthly_returns
if len(mr) < 2:
return
self.monthly_mean = mr.mean() * 12
self.monthly_vol = mr.std() * np.sqrt(12)
self.monthly_sharpe = ((self.monthly_mean - self._monthly_rf) /
self.monthly_vol)
self.best_month = mr.max()
self.worst_month = mr.min()
# -2 because p[-1] will be mp[-1]
self.mtd = p[-1] / mp[-2] - 1
# -1 here to account for first return that will be nan
self.pos_month_perc = len(mr[mr > 0]) / float(len(mr) - 1)
self.avg_up_month = mr[mr > 0].mean()
self.avg_down_month = mr[mr <= 0].mean()
# return_table
for idx in mr.index:
if idx.year not in self.return_table:
self.return_table[idx.year] = {1: 0, 2: 0, 3: 0,
4: 0, 5: 0, 6: 0,
7: 0, 8: 0, 9: 0,
10: 0, 11: 0, 12: 0}
if not np.isnan(mr[idx]):
self.return_table[idx.year][idx.month] = mr[idx]
# add first month
fidx = mr.index[0]
try:
self.return_table[fidx.year][fidx.month] = float(mp[0]) / p[0] - 1
except ZeroDivisionError:
self.return_table[fidx.year][fidx.month] = 0
# calculate the YTD values
for idx in self.return_table:
arr = np.array(listvalues(self.return_table[idx]))
self.return_table[idx][13] = np.prod(arr + 1) - 1
if len(mr) < 3:
return
denom = p[:p.index[-1] - pd.DateOffset(months=3)]
if len(denom) > 0:
self.three_month = p[-1] / denom[-1] - 1
if len(mr) < 4:
return
self.monthly_skew = mr.skew()
# if all zero/nan kurt fails division by zero
if len(mr[(~np.isnan(mr)) & (mr != 0)]) > 0:
self.monthly_kurt = mr.kurt()
denom = p[:p.index[-1] - pd.DateOffset(months=6)]
if len(denom) > 0:
self.six_month = p[-1] / denom[-1] - 1
self.yearly_returns = self.yearly_prices.to_returns()
yr = self.yearly_returns
if len(yr) < 2:
return
self.ytd = p[-1] / yp[-2] - 1
denom = p[:p.index[-1] - pd.DateOffset(years=1)]
if len(denom) > 0:
self.one_year = p[-1] / denom[-1] - 1
self.yearly_mean = yr.mean()
self.yearly_vol = yr.std()
self.yearly_sharpe = ((self.yearly_mean - self._yearly_rf) /
self.yearly_vol)
self.best_year = yr.max()
self.worst_year = yr.min()
# annualize stat for over 1 year
self.three_year = calc_cagr(p[p.index[-1] - pd.DateOffset(years=3):])
# -1 here to account for first return that will be nan
self.win_year_perc = len(yr[yr > 0]) / float(len(yr) - 1)
tot = 0
win = 0
for i in range(11, len(mr)):
tot = tot + 1
if mp[i] / mp[i - 11] > 1:
win = win + 1
self.twelve_month_win_perc = float(win) / tot
if len(yr) < 4:
return
self.yearly_skew = yr.skew()
# if all zero/nan kurt fails division by zero
if len(yr[(~np.isnan(yr)) & (yr != 0)]) > 0:
self.yearly_kurt = yr.kurt()
self.five_year = calc_cagr(p[p.index[-1] - pd.DateOffset(years=5):])
self.ten_year = calc_cagr(p[p.index[-1] - pd.DateOffset(years=10):])
# Copyright 2018 Twitter, Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
""" This module contains classes and methods for extracting metrics from the
Heron Topology Master instance. """
import logging
import warnings
import datetime as dt
from typing import Dict, List, Any, Callable, Union, Tuple, Optional
import pandas as pd
from requests.exceptions import HTTPError
from caladrius.metrics.heron.client import HeronMetricsClient
from caladrius.common.heron import tracker
from caladrius.config.keys import ConfKeys
LOG: logging.Logger = logging.getLogger(__name__)
# pylint: disable=too-many-locals, too-many-arguments
# Type definitions
ROW_DICT = Dict[str, Union[str, int, float, dt.datetime, None]]
# The TMaster metrics are aggregated into minute long periods by default
DEFAULT_METRIC_PERIOD: int = 60
def time_check(start: dt.datetime, end: dt.datetime,
time_limit_hrs: float) -> None:
""" Checks the time period, defined by the supplied start and end points,
against the period defined from now back by the supplied time limit in
hours. If the time check passes then nothing will be returned.
Arguments:
start (datetime): The start of the time period. Should be UTC.
end (datetime): The end of the time period. Should be UTC.
time_limit_hrs (float): The number of hours back from now that define
the allowed time period.
Raises:
RuntimeError: If the supplied time period is not within the defined
limit or if the end time is before the start time.
RuntimeWarning: If the supplied time period crosses the limits of the
metrics storage period.
"""
if end < start:
msg: str = (f"The supplied end time ({end.isoformat}) is before the "
f"supplied start time ({start.isoformat}). No data will "
f"be returned.")
LOG.error(msg)
raise RuntimeError(msg)
now: dt.datetime = dt.datetime.now(dt.timezone.utc)
limit: dt.datetime = now - dt.timedelta(hours=time_limit_hrs)
if start < limit and end < limit:
limit_msg: str = (f"The defined time period ({start.isoformat()} to "
f"{end.isoformat()}) is outside of the "
f"{time_limit_hrs} hours of data stored by the "
f"Topology Master. No data will be returned.")
LOG.error(limit_msg)
raise RuntimeError(limit_msg)
if start < limit and end > limit:
truncated_duration: float = round(((end - limit).total_seconds() /
3600), 2)
truncated_msg: str = (f"The start ({start.isoformat()}) of the "
f"supplied time window is beyond the "
f"{time_limit_hrs} hours stored by the Topology "
f"Master. Results will be limited to "
f"{truncated_duration} hours from "
f"{limit.isoformat()} to {end.isoformat()}")
LOG.warning(truncated_msg)
warnings.warn(truncated_msg, RuntimeWarning)
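# Hedged usage sketch (values are illustrative): a two-hour window ending now
# passes silently against the default three-hour TMaster retention limit,
# while a window starting four hours ago only triggers the truncation warning.
#
#   now = dt.datetime.now(dt.timezone.utc)
#   time_check(now - dt.timedelta(hours=2), now, time_limit_hrs=3.0)  # ok, returns None
#   time_check(now - dt.timedelta(hours=4), now, time_limit_hrs=3.0)  # RuntimeWarning, period truncated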
def instance_timelines_to_dataframe(
instance_timelines: dict, stream: Optional[str], measurement_name: str,
conversion_func: Callable[[str], Union[str, int, float]] = None,
source_component: str = None) -> pd.DataFrame:
""" Converts the timeline dictionaries of a *single metric* into a single
combined DataFrame for all instances. All timestamps are converted to UTC
Python datetime objects and the returned DataFrame (for each instance) is
sorted by ascending date.
Arguments:
instance_timelines (dict): A dictionary of instance metric timelines,
where each key is an instance name linking
to a dictionary of <timestamp> :
<measurement> pairs.
stream (str): The stream name that these metrics are related to.
measurement_name (str): The name of the measurements being processed.
This will be used as the measurement column
heading.
conversion_func (function): An optional function for converting the
measurement in the timeline. If not
supplied the measurement will be left as a
string.
Returns:
pandas.DataFrame: A DataFrame containing the timelines of all instances
in the supplied dictionary.
"""
output: List[ROW_DICT] = []
instance_name: str
timeline: Dict[str, str]
for instance_name, timeline in instance_timelines.items():
details = tracker.parse_instance_name(instance_name)
instance_list: List[ROW_DICT] = []
timestamp_str: str
measurement_str: str
for timestamp_str, measurement_str in timeline.items():
timestamp: dt.datetime = \
dt.datetime.utcfromtimestamp(int(timestamp_str))
if "nan" in measurement_str:
measurement: Union[str, int, float, None] = None
else:
if conversion_func:
measurement = conversion_func(measurement_str)
else:
measurement = measurement_str
row: ROW_DICT = {
"timestamp": timestamp,
"container": details["container"],
"task": details["task_id"],
"component": details["component"],
measurement_name: measurement}
if stream:
row["stream"] = stream
if source_component:
row["source_component"] = source_component
instance_list.append(row)
# Because the original dict returned by the tracker is
# unsorted we need to sort the rows by ascending time
instance_list.sort(
key=lambda instance: instance["timestamp"])
output.extend(instance_list)
return pd.DataFrame(output)
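# Hedged sketch of the expected input (instance names and values below are made
# up; the exact name encoding is whatever tracker.parse_instance_name accepts):
#
#   timelines = {"container_1_splitter_3": {"1528105200": "12.5",
#                                           "1528105260": "13.1"}}
#   df = instance_timelines_to_dataframe(timelines, "default", "latency_ms", float)
#
# which yields one row per (instance, timestamp) pair with the columns described
# in the docstring, plus a "stream" column because a stream name was supplied.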
def str_nano_to_float_milli(nano_str: str) -> float:
""" Converts a string of a nano measurement into a millisecond float value.
"""
return float(nano_str) / 1000000.0
class HeronTMasterClient(HeronMetricsClient):
""" Class for extracting metrics from the Heron Topology Master metrics
store. """
def __init__(self, config: dict) -> None:
super().__init__(config)
self.tracker_url = config[ConfKeys.HERON_TRACKER_URL.value]
self.time_limit_hrs = \
config.get(ConfKeys.HERON_TMASTER_METRICS_MAX_HOURS.value, 3)
LOG.info("Created Topology Master metrics client using Heron Tracker "
"at: %s", self.tracker_url)
def __hash__(self) -> int:
return hash(self.tracker_url)
def __eq__(self, other: object) -> bool:
if not isinstance(other, HeronTMasterClient):
return False
if self.tracker_url == other.tracker_url:
return True
return False
def _query_setup(self, topology_id: str, cluster: str, environ: str,
start: dt.datetime, end: dt.datetime,
) -> Tuple[Dict[str, Any], int, int]:
""" Helper method for setting up each of the query methods with the
required variables."""
time_check(start, end, self.time_limit_hrs)
start_time: int = int(round(start.timestamp()))
end_time: int = int(round(end.timestamp()))
logical_plan: Dict[str, Any] = tracker.get_logical_plan(
self.tracker_url, cluster, environ, topology_id)
return logical_plan, start_time, end_time
def get_component_service_times(self, topology_id: str, cluster: str,
environ: str, component_name: str,
start: int, end: int, logical_plan:
Dict[str, Any]=None) -> pd.DataFrame:
""" Gets the service times, as a timeseries, for every instance of the
specified component of the specified topology. The start and end times
define the window over which to gather the metrics. The window duration
should be less then 3 hours as this is the limit of what the Topology
master stores.
Arguments:
topology_id (str): The topology identification string.
cluster (str): The cluster the topology is running in.
environ (str): The environment the topology is running in (eg.
prod, devel, test, etc).
component_name (str): The name of the component whose metrics are
required.
start (int): Start time for the time period the query is run
against. This should be a UTC POSIX time integer
(seconds since epoch).
end (int): End time for the time period the query is run against.
This should be a UTC POSIX time integer (seconds since
epoch).
logical_plan (dict): Optional dictionary logical plan returned
by the Heron Tracker API. If not supplied
this method will call the API to get the
logical plan.
Returns:
pandas.DataFrame: A DataFrame containing the service time
measurements as a timeseries. Each row represents a measurement
(aggregated over one minute) with the following columns:
* timestamp: The UTC timestamp for the metric time period,
* component: The component this metric comes from,
* task: The instance ID number for the instance that the metric
comes from,
* container: The ID for the container this metric comes from,
* stream: The name of the incoming stream from which the tuples
that lead to this metric came from,
* latency_ms: The average execute latency measurement in
milliseconds for that metric time period.
"""
LOG.info("Getting service time metrics for component %s of topology "
"%s", component_name, topology_id)
if not logical_plan:
LOG.debug("Logical plan not supplied, fetching from Heron Tracker")
logical_plan = tracker.get_logical_plan(self.tracker_url, cluster,
environ, topology_id)
incoming_streams: List[Tuple[str, str]] = \
tracker.incoming_sources_and_streams(logical_plan, component_name)
metrics: List[str] = ["__execute-latency/" + source + "/" + stream
for source, stream in incoming_streams]
results: Dict[str, Any] = tracker.get_metrics_timeline(
self.tracker_url, cluster, environ, topology_id, component_name,
start, end, metrics)
output: pd.DataFrame = None
for stream_metric, instance_timelines in results["timeline"].items():
metric_list: List[str] = stream_metric.split("/")
incoming_source: str = metric_list[1]
incoming_stream: str = metric_list[2]
instance_tls_df: pd.DataFrame = instance_timelines_to_dataframe(
instance_timelines, incoming_stream, "latency_ms",
str_nano_to_float_milli, incoming_source)
if output is None:
output = instance_tls_df
else:
output = output.append(instance_tls_df, ignore_index=True)
return output
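# Illustrative call (topology, cluster, environ, component names and the
# POSIX-second window below are placeholders): fetch one bolt's execute
# latencies and average them per instance:
#
#   latencies = client.get_component_service_times(
#       "my-topology", "local", "devel", "my-bolt", 1506945600, 1506947400)
#   latencies.groupby("task")["latency_ms"].mean()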
def get_service_times(self, topology_id: str, cluster: str, environ: str,
start: dt.datetime, end: dt.datetime,
**kwargs: Union[str, int, float]) -> pd.DataFrame:
""" Gets the service times, as a timeseries, for every instance of the
of all the bolt components of the specified topology. The start and end
times define the window over which to gather the metrics. The window
duration should be less than 3 hours as this is the limit of what the
Topology master stores.
Arguments:
topology_id (str): The topology identification string.
start (datetime): utc datetime instance for the start of the
metrics gathering period.
end (datetime): utc datetime instance for the end of the
metrics gathering period.
**cluster (str): The cluster the topology is running in.
**environ (str): The environment the topology is running in (eg.
prod, devel, test, etc).
Returns:
pandas.DataFrame: A DataFrame containing the service time
measurements as a timeseries. Each row represents a measurement
(aggregated over one minute) with the following columns:
* timestamp: The UTC timestamp for the metric time period,
* component: The component this metric comes from,
* task: The instance ID number for the instance that the metric
comes from,
* container: The ID for the container this metric comes from,
* stream: The name of the incoming stream from which the tuples
that lead to this metric came from,
* latency_ms: The average execute latency measurement in
milliseconds for that metric time period.
"""
LOG.info("Getting service times for topology %s over a %d second "
"period from %s to %s", topology_id,
(end-start).total_seconds(), start.isoformat(),
end.isoformat())
logical_plan, start_time, end_time = self._query_setup(
topology_id, cluster, environ, start, end)
output: pd.DataFrame = None
bolts: Dict[str, Any] = logical_plan["bolts"]
bolt_component: str
for bolt_component in bolts:
try:
bolt_service_times: pd.DataFrame = \
self.get_component_service_times(topology_id,
cluster, environ,
bolt_component,
start_time, end_time,
logical_plan)
except HTTPError as http_error:
LOG.warning("Fetching execute latencies for component %s "
"failed with status code %s", bolt_component,
str(http_error.response.status_code))
else:
if output is None:
output = bolt_service_times
else:
output = output.append(bolt_service_times,
ignore_index=True)
return output
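# Illustrative call (placeholder names): service times for every bolt instance
# over the last two hours, averaged per component:
#
#   end = dt.datetime.utcnow()
#   start = end - dt.timedelta(hours=2)
#   service_times = client.get_service_times(
#       "my-topology", "local", "devel", start, end)
#   service_times.groupby("component")["latency_ms"].mean()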
def get_component_emission_counts(self, topology_id: str, cluster: str,
environ: str, component_name: str,
start: int, end: int,
logical_plan: Dict[str, Any] = None
) -> pd.DataFrame:
""" Gets the emit counts, as a timeseries, for every instance of the
specified component of the specified topology. The start and end times
define the window over which to gather the metrics. The window duration
should be less than 3 hours as this is the limit of what the Topology
master stores.
Arguments:
topology_id (str): The topology identification string.
cluster (str): The cluster the topology is running in.
environ (str): The environment the topology is running in (eg.
prod, devel, test, etc).
component_name (str): The name of the component whose metrics are
required.
start (int): Start time for the time period the query is run
against. This should be a UTC POSIX time integer
(seconds since epoch).
end (int): End time for the time period the query is run against.
This should be a UTC POSIX time integer (seconds since
epoch).
logical_plan (dict): Optional dictionary logical plan returned
by the Heron Tracker API. If not supplied
this method will call the API to get the
logical plan.
Returns:
pandas.DataFrame: A DataFrame containing the emit count
measurements as a timeseries. Each row represents a measurement
(aggregated over one minute) with the following columns:
* timestamp: The UTC timestamp for the metric time period,
* component: The component this metric comes from,
* task: The instance ID number for the instance that the metric
comes from,
* container: The ID for the container this metric comes from,
* stream: The name of the outgoing stream onto which the tuples
that lead to this metric were emitted,
* emit_count: The emit count in that metric time period.
"""
LOG.info("Getting emit count metrics for component %s of topology "
"%s", component_name, topology_id)
if not logical_plan:
LOG.debug("Logical plan not supplied, fetching from Heron Tracker")
logical_plan = tracker.get_logical_plan(self.tracker_url, cluster,
environ, topology_id)
outgoing_streams: List[str] = tracker.get_outgoing_streams(
logical_plan, component_name)
metrics: List[str] = ["__emit-count/" + stream
for stream in outgoing_streams]
results: Dict[str, Any] = tracker.get_metrics_timeline(
self.tracker_url, cluster, environ, topology_id, component_name,
start, end, metrics)
output: pd.DataFrame = None
for stream_metric, instance_timelines in results["timeline"].items():
outgoing_stream: str = stream_metric.split("/")[-1]
instance_tls_df: pd.DataFrame = instance_timelines_to_dataframe(
instance_timelines, outgoing_stream, "emit_count",
lambda m: int(float(m)))
if output is None:
output = instance_tls_df
else:
output = output.append(instance_tls_df, ignore_index=True)
return output
def get_emit_counts(self, topology_id: str, cluster: str, environ: str,
start: dt.datetime, end: dt.datetime,
**kwargs: Union[str, int, float]) -> pd.DataFrame:
""" Gets the emit counts, as a timeseries, for every instance of each
of the components of the specified topology. The start and end times
define the window over which to gather the metrics. The window duration
should be less than 3 hours as this is the limit of what the Topology
master stores.
Arguments:
topology_id (str): The topology identification string.
start (datetime): utc datetime instance for the start of the
metrics gathering period.
end (datetime): utc datetime instance for the end of the
metrics gathering period.
**cluster (str): The cluster the topology is running in.
**environ (str): The environment the topology is running in (eg.
prod, devel, test, etc).
Returns:
pandas.DataFrame: A DataFrame containing the emit count
measurements as a timeseries. Each row represents a measurement
(aggregated over one minute) with the following columns:
* timestamp: The UTC timestamp for the metric,
* component: The component this metric comes from,
* task: The instance ID number for the instance that the metric
comes from,
* container: The ID for the container this metric comes from,
* stream: The name of the outgoing stream onto which the tuples that
lead to this metric were emitted,
* emit_count: The emit count during the metric time period.
"""
LOG.info("Getting emit counts for topology %s over a %d second "
"period from %s to %s", topology_id,
(end-start).total_seconds(), start.isoformat(),
end.isoformat())
logical_plan, start_time, end_time = self._query_setup(
topology_id, cluster, environ, start, end)
output: pd.DataFrame = None
components: List[str] = (list(logical_plan["spouts"].keys()) +
list(logical_plan["bolts"].keys()))
for component in components:
try:
comp_emit_counts: pd.DataFrame = \
self.get_component_emission_counts(
topology_id, cluster, environ, component,
start_time, end_time, logical_plan)
except HTTPError as http_error:
LOG.warning("Fetching emit counts for component %s failed with"
" status code %s", component,
str(http_error.response.status_code))
if output is None:
output = comp_emit_counts
else:
output = output.append(comp_emit_counts, ignore_index=True)
return output
def get_component_execute_counts(self, topology_id: str, cluster: str,
environ: str, component_name: str,
start: int, end: int,
logical_plan: Dict[str, Any] = None
) -> pd.DataFrame:
""" Gets the execute counts, as a timeseries, for every instance of the
specified component of the specified topology. The start and end times
define the window over which to gather the metrics. The window duration
should be less than 3 hours as this is the limit of what the Topology
master stores.
Arguments:
topology_id (str): The topology identification string.
cluster (str): The cluster the topology is running in.
environ (str): The environment the topology is running in (eg.
prod, devel, test, etc).
component_name (str): The name of the component whose metrics are
required.
start (int): Start time for the time period the query is run
against. This should be a UTC POSIX time integer
(seconds since epoch).
end (int): End time for the time period the query is run against.
This should be a UTC POSIX time integer (seconds since
epoch).
logical_plan (dict): Optional dictionary logical plan returned
by the Heron Tracker API. If not supplied
this method will call the API to get the
logical plan.
Returns:
pandas.DataFrame: A DataFrame containing the execute count
measurements as a timeseries. Each row represents a measurement
(aggregated over one minute) with the following columns:
* timestamp: The UTC timestamp for the metric time period,
* component: The component this metric comes from,
* task: The instance ID number for the instance that the metric
comes from,
* container: The ID for the container this metric comes from.
* stream: The name of the incoming stream from which the tuples
that lead to this metric came from,
* execute_count: The execute count in that metric time period.
"""
LOG.info("Getting execute count metrics for component %s of topology "
"%s", component_name, topology_id)
if not logical_plan:
LOG.debug("Logical plan not supplied, fetching from Heron Tracker")
logical_plan = tracker.get_logical_plan(self.tracker_url, cluster,
environ, topology_id)
incoming_streams: List[Tuple[str, str]] = \
tracker.incoming_sources_and_streams(logical_plan, component_name)
metrics: List[str] = ["__execute-count/" + source + "/" + stream
for source, stream in incoming_streams]
results: Dict[str, Any] = tracker.get_metrics_timeline(
self.tracker_url, cluster, environ, topology_id, component_name,
start, end, metrics)
output: pd.DataFrame = None
for stream_metric, instance_timelines in results["timeline"].items():
metric_list: List[str] = stream_metric.split("/")
incoming_source: str = metric_list[1]
incoming_stream: str = metric_list[2]
instance_tls_df: pd.DataFrame = instance_timelines_to_dataframe(
instance_timelines, incoming_stream, "execute_count",
lambda m: int(float(m)), incoming_source)
if output is None:
output = instance_tls_df
else:
output = output.append(instance_tls_df, ignore_index=True)
return output
def get_execute_counts(self, topology_id: str, cluster: str, environ: str,
start: dt.datetime, end: dt.datetime,
**kwargs: Union[str, int, float]) -> pd.DataFrame:
""" Gets the execute counts, as a timeseries, for every instance of
each of the components of the specified topology. The start and end
times define the window over which to gather the metrics. The window
duration should be less than 3 hours as this is the limit of what the
Topology master stores.
Arguments:
topology_id (str): The topology identification string.
start (datetime): UTC datetime instance for the start of the
metrics gathering period.
end (datetime): UTC datetime instance for the end of the
metrics gathering period.
**cluster (str): The cluster the topology is running in.
**environ (str): The environment the topology is running in (eg.
prod, devel, test, etc).
Returns:
pandas.DataFrame: A DataFrame containing the execute count
measurements as a timeseries. Each row represents a measurement
(aggregated over one minute) with the following columns:
* timestamp: The UTC timestamp for the metric,
* component: The component this metric comes from,
* task: The instance ID number for the instance that the metric
comes from,
* container: The ID for the container this metric comes from.
* stream: The name of the incoming stream from which the tuples
that lead to this metric came from,
* source_component: The name of the component the stream's source
instance belongs to,
* execute_count: The execute count during the metric time period.
"""
LOG.info("Getting execute counts for topology %s over a %d second "
"period from %s to %s", topology_id,
(end-start).total_seconds(), start.isoformat(),
end.isoformat())
logical_plan, start_time, end_time = self._query_setup(
topology_id, cluster, environ, start, end)
output: pd.DataFrame = None
for component in logical_plan["bolts"].keys():
try:
comp_execute_counts: pd.DataFrame = \
self.get_component_execute_counts(topology_id, cluster,
environ, component,
start_time, end_time,
logical_plan)
except HTTPError as http_error:
LOG.warning("Fetching execute counts for component %s failed "
"with status code %s", component,
str(http_error.response.status_code))
if output is None:
output = comp_execute_counts
else:
output = output.append(comp_execute_counts, ignore_index=True)
return output
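# Illustrative end-to-end sketch (tracker URL, topology, cluster and environ
# names are placeholders, not values from this module):
#
#   config = {ConfKeys.HERON_TRACKER_URL.value: "http://heron-tracker:8888"}
#   client = HeronTMasterClient(config)
#   end = dt.datetime.utcnow()
#   start = end - dt.timedelta(hours=1)
#   emits = client.get_emit_counts("my-topology", "local", "devel", start, end)
#   executes = client.get_execute_counts("my-topology", "local", "devel",
#                                        start, end)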
def get_spout_complete_latencies(self, topology_id: str, cluster: str,
environ: str, component_name: str,
start: int, end: int,
logical_plan: Dict[str, Any] = None
) -> pd.DataFrame:
""" Gets the complete latency, as a timeseries, for every instance of
the specified component of the specified topology. The start and end
times define the window over which to gather the metrics. The window
duration should be less than 3 hours as this is the limit of what the
Topology master stores.
Arguments:
topology_id (str): The topology identification string.
cluster (str): The cluster the topology is running in.
environ (str): The environment the topology is running in (eg.
prod, devel, test, etc).
component_name (str): The name of the spout component whose
metrics are required.
start (int): Start time for the time period the query is run
against. This should be a UTC POSIX time integer
(seconds since epoch).
end (int): End time for the time period the query is run against.
This should be a UTC POSIX time integer (seconds since
epoch).
logical_plan (dict): Optional dictionary logical plan returned
by the Heron Tracker API. If not supplied
this method will call the API to get the
logical plan.
Returns:
pandas.DataFrame: A DataFrame containing the complete latency
measurements as a timeseries. Each row represents a measurement
(averaged over one minute) with the following columns:
* timestamp: The UTC timestamp for the metric,
* component: The component this metric comes from,
* task: The instance ID number for the instance that the metric
comes from,
* container: The ID for the container this metric comes from,
* stream: The name of the outgoing stream whose tuples the
complete latency was measured over,
* latency_ms: The average complete latency measurement in
milliseconds for that metric time period.
"""
LOG.info("Getting complete latency metrics for component %s of "
"topology %s", component_name, topology_id)
if not logical_plan:
LOG.debug("Logical plan not supplied, fetching from Heron Tracker")
logical_plan = tracker.get_logical_plan(self.tracker_url, cluster,
environ, topology_id)
outgoing_streams: List[str] = \
tracker.get_outgoing_streams(logical_plan, component_name)
metrics: List[str] = ["__complete-latency/" + stream
for stream in outgoing_streams]
results: Dict[str, Any] = tracker.get_metrics_timeline(
self.tracker_url, cluster, environ, topology_id, component_name,
start, end, metrics)
output: pd.DataFrame = None
for stream_metric, instance_timelines in results["timeline"].items():
metric_list: List[str] = stream_metric.split("/")
outgoing_stream: str = metric_list[1]
instance_tls_df: pd.DataFrame = instance_timelines_to_dataframe(
instance_timelines, outgoing_stream, "latency_ms",
str_nano_to_float_milli)
if output is None:
output = instance_tls_df
else:
output = output.append(instance_tls_df, ignore_index=True)
return output
def get_complete_latencies(self, topology_id: str, cluster: str,
environ: str, start: dt.datetime,
end: dt.datetime,
**kwargs: Union[str, int, float]
) -> pd.DataFrame:
""" Gets the complete latencies, as a timeseries, for every instance of
all the spout components of the specified topology. The start
and end times define the window over which to gather the metrics. The
window duration should be less than 3 hours as this is the limit of
what the Topology master stores.
Arguments:
topology_id (str): The topology identification string.
cluster (str): The cluster the topology is running in.
environ (str): The environment the topology is running in (eg.
prod, devel, test, etc).
start (datetime): utc datetime instance for the start of the
metrics gathering period.
end (datetime): utc datetime instance for the end of the
metrics gathering period.
Returns:
pandas.DataFrame: A DataFrame containing the complete latency
measurements as a timeseries. Each row represents a measurement
(aggregated over one minute) with the following columns:
* timestamp: The UTC timestamp for the metric,
* component: The component this metric comes from,
* task: The instance ID number for the instance that the metric
comes from,
* container: The ID for the container this metric comes from,
* stream: The name of the outgoing stream whose tuples the
complete latency was measured over,
* latency_ms: The average complete latency measurement in
milliseconds for that metric time period.
Raises:
RuntimeWarning: If the specified topology has a reliability mode
that does not enable complete latency.
"""
LOG.info("Getting complete latencies for topology %s over a %d second "
"period from %s to %s", topology_id,
(end-start).total_seconds(), start.isoformat(),
end.isoformat())
logical_plan, start_time, end_time = self._query_setup(
topology_id, cluster, environ, start, end)
# First we need to check that the supplied topology will actually have
# complete latencies. Only ATLEAST_ONCE and EXACTLY_ONCE will have
# complete latency values as acking is disabled for ATMOST_ONCE.
physical_plan: Dict[str, Any] = tracker.get_physical_plan(
self.tracker_url, cluster, environ, topology_id)
if (physical_plan["config"]
["topology.reliability.mode"] == "ATMOST_ONCE"):
rm_msg: str = (f"Topology {topology_id} reliability mode is set "
f"to ATMOST_ONCE. Complete latency is not "
f"available for these types of topologies")
LOG.warning(rm_msg)
warnings.warn(rm_msg, RuntimeWarning)
return pd.DataFrame()
# Standard packages
from netCDF4 import Dataset, num2date
from datetime import datetime
import numpy as np
import pandas as pd
#____________Selecting a season (DJF,DJFM,NDJFM,JJA)
def sel_season(var,dates,season,timestep):
#----------------------------------------------------------------------------------------
#print('____________________________________________________________________________________________________________________')
#print('Selecting only {0} data'.format(season))
#----------------------------------------------------------------------------------------
dates_pdh = pd.to_datetime(dates)
import time
from definitions_toxicity import ROOT_DIR
import pandas as pd
from src.preprocessing import custom_transformers as ct
from sklearn.pipeline import Pipeline
import nltk
import pickle
from src.preprocessing.text_utils import tokenize_by_sentences, fit_tokenizer, tokenize_text_with_sentences
import numpy as np
from sklearn.model_selection import train_test_split
from skmultilearn.model_selection import iterative_train_test_split
def load_text_data(data_filepath, text_column, copy_text_column=False, copy_col_name=None):
"""Data loader
:param data_filepath: file path to data
:type data_filepath: string
:param text_column: name of column with text
:type text_column: string
:param copy_text_column: whether to copy column with text into new one, defaults to False
:type copy_text_column: bool, optional
:param copy_col_name: name of new column for copying original one, defaults to None
:type copy_col_name: string, optional
:return: dataframe with text data
:rtype: Pandas dataframe
"""
df = pd.read_csv(ROOT_DIR + data_filepath)
if copy_text_column:
df[copy_col_name] = df[text_column].copy()
return df
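# A minimal usage sketch of the loader above. The relative file path and the
# column names are hypothetical and not used elsewhere in this project.
def _example_load_toxicity_frame() -> pd.DataFrame:
    """Illustrative only: loads a (hypothetical) raw comments file and keeps an
    untouched copy of the text column alongside the one that will be cleaned."""
    return load_text_data('/data/toxicity/train.csv', 'comment_text',
                          copy_text_column=True,
                          copy_col_name='raw_comment_text')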
def split_dataset(df, columns, labels, split='train_test', test_size=0.3, random_state=111, stratify=False, multiclass=False):
"""Data split function that can use both sklearn train_test_split and skmultilearn iterative_train_test_split for cases with imbalanced multilabel data.
:param df: dataframe that requires splitting into train, test and possibly validation sets
:type df: Pandas dataframe
:param columns: names of columns that will be left in train/test/valiation sets
:type columns: list
:param labels: name of columns that represent labels
:type labels: list
:param split: selection of how to split dataset: train_test or train_val_test, defaults to 'train_test'
:type split: str, optional
:param test_size: fraction of whole dataset to be used as test set, defaults to 0.3
:type test_size: float, optional
:param random_state: random state for splitting, defaults to 111
:type random_state: int, optional
:param stratify: whether to stratify the data, defaults to False
:type stratify: bool, optional
:param multiclass: whether dataset has multiclass labels, defaults to False
:type multiclass: bool, optional
:return: train, test and optionally validation sets
:rtype: Pandas dataframe
"""
# split on train and validation sets
assert split == 'train_test' or split == 'train_val_test', "Split attribute accepts only 'train_test' or 'train_val_test'"
strat = None
if stratify:
strat = df[labels]
if not multiclass:
x_tr, x_test, y_tr, y_test = train_test_split(df[columns],
df[labels],
test_size=test_size,
random_state=random_state,
stratify=strat)
else:
x_tr, y_tr, x_test, y_test = iterative_train_test_split(df[columns].values,
df[labels].values,
test_size=test_size)
x_tr = pd.DataFrame(x_tr, columns=columns)
y_tr = pd.DataFrame(y_tr, columns=labels)
x_test = pd.DataFrame(x_test, columns=columns)
y_test = pd.DataFrame(y_test, columns=labels)
#%%
import ee
from ee.data import exportTable
import eemont
import re
from datetime import datetime
import pandas as pd
import numpy as np
from pandas.core import frame
import geopandas as gpd
import matplotlib.pyplot as plt
import dload
from py01_helper_functions import ee_collection_pull, process_gdf
# %%
ee.Authenticate()
ee.Initialize()
# %%
sa_cereals_class = gpd.read_file('sa-cereals/sa_cereals_class.shp')
#%% DEFINE COLLECTION
# Landsat Spectral Indicies Collections
l5_EVI = "LANDSAT/LT05/C01/T1_8DAY_EVI"
l5_NDVI = "LANDSAT/LT05/C01/T1_8DAY_NDVI"
l5_NDSI = "LANDSAT/LT05/C01/T1_8DAY_NDSI"
l5_NBR = "LANDSAT/LT05/C01/T1_8DAY_NBRT"
l7_EVI = "LANDSAT/LE07/C01/T1_8DAY_EVI"
l7_NDVI = "LANDSAT/LE07/C01/T1_8DAY_NDVI"
l7_NDSI = "LANDSAT/LE07/C01/T1_8DAY_NDSI"
l7_NBR = "LANDSAT/LE07/C01/T1_8DAY_NBRT"
# Initial date of interest (inclusive).
l5_start_date = '1989-01-01'
# Final date of interest (exclusive).
l5_end_date = '1999-12-31'
# Initial date of interest (inclusive).
l7_start_date = '2020-01-01'
# Final date of interest (exclusive).
l7_end_date = '2021-06-30'
#%%
####################################
# EVI COLLECTIONS #
####################################
try:
sa_all_polygons_evi = pd.read_csv('sa_all_polygons_evi.csv')
except:
l5_evi_collection = ee.ImageCollection(l5_EVI)\
.filterDate(l5_start_date, l5_end_date)\
.maskClouds()\
.preprocess()
l7_evi_collection = ee.ImageCollection(l7_EVI)\
.filterDate(l7_start_date, l7_end_date)\
.maskClouds()\
.preprocess()
landsat_5_evi = process_gdf(geopandas_frame = sa_cereals_class,
collection = l5_evi_collection,
index = 'EVI')
landsat_7_evi = process_gdf(geopandas_frame = sa_cereals_class,
collection = l7_evi_collection,
index = 'EVI')
sa_all_polygons_evi = pd.concat([landsat_5_evi, landsat_7_evi])
sa_all_polygons_evi.to_csv('sa_all_polygons_evi.csv')
sa_all_polygons_evi = pd.read_csv('sa_all_polygons_evi.csv')
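# %%
# The EVI block above follows a cache-or-compute pattern that is repeated for
# each index below. A minimal generic sketch of that pattern (the helper name
# and the FileNotFoundError narrowing are illustrative, not part of this
# script; each real block additionally merges the Landsat 5 and Landsat 7
# pulls before caching):
def _cached_index_pull(csv_path, collection_id, index_name, start, end):
    try:
        # reuse a previously exported timeseries if it exists
        return pd.read_csv(csv_path)
    except FileNotFoundError:
        # otherwise pull, preprocess and sample the collection, then cache it
        collection = ee.ImageCollection(collection_id)\
            .filterDate(start, end)\
            .maskClouds()\
            .preprocess()
        frame = process_gdf(geopandas_frame=sa_cereals_class,
                            collection=collection,
                            index=index_name)
        frame.to_csv(csv_path)
        return pd.read_csv(csv_path)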
# %%
####################################
# NDVI COLLECTIONS #
####################################
try:
sa_all_polygons_ndvi = pd.read_csv('sa_all_polygons_ndvi.csv')
except:
l5_ndvi_collection = ee.ImageCollection(l5_NDVI)\
.filterDate(l5_start_date, l5_end_date)\
.maskClouds()\
.preprocess()
l7_ndvi_collection = ee.ImageCollection(l7_NDVI)\
.filterDate(l7_start_date, l7_end_date)\
.maskClouds()\
.preprocess()
landsat_5_ndvi = process_gdf(geopandas_frame = sa_cereals_class,
collection = l5_ndvi_collection,
index = 'NDVI')
landsat_7_ndvi = process_gdf(geopandas_frame = sa_cereals_class,
collection = l7_ndvi_collection,
index = 'NDVI')
sa_all_polygons_ndvi = pd.concat([landsat_5_ndvi, landsat_7_ndvi])
sa_all_polygons_ndvi.to_csv('sa_all_polygons_ndvi.csv')
sa_all_polygons_ndvi = pd.read_csv('sa_all_polygons_ndvi.csv')
# %%
####################################
# NDSI COLLECTIONS #
####################################
try:
sa_all_polygons_ndsi = pd.read_csv('sa_all_polygons_ndsi.csv')
except:
l5_ndsi_collection = ee.ImageCollection(l5_NDSI)\
.filterDate(l5_start_date, l5_end_date)\
.maskClouds()\
.preprocess()
l7_ndsi_collection = ee.ImageCollection(l7_NDSI)\
.filterDate(l7_start_date, l7_end_date)\
.maskClouds()\
.preprocess()
landsat_5_ndsi = process_gdf(geopandas_frame = sa_cereals_class,
collection = l5_ndsi_collection,
index = 'NDSI')
landsat_7_ndsi = process_gdf(geopandas_frame = sa_cereals_class,
collection = l7_ndsi_collection,
index = 'NDSI')
sa_all_polygons_ndsi = pd.concat([landsat_5_ndsi, landsat_7_ndsi])
sa_all_polygons_ndsi.to_csv('sa_all_polygons_ndsi.csv')
sa_all_polygons_ndsi = pd.read_csv('sa_all_polygons_ndsi.csv')
# %%
####################################
# NBR COLLECTIONS #
####################################
try:
sa_all_polygons_nbr = pd.read_csv('sa_all_polygons_nbrt.csv')
except:
l5_nbr_collection = ee.ImageCollection(l5_NBR)\
.filterDate(l5_start_date, l5_end_date)\
.maskClouds()\
.preprocess()
l7_nbr_collection = ee.ImageCollection(l7_NBR)\
.filterDate(l7_start_date, l7_end_date)\
.maskClouds()\
.preprocess()
landsat_5_nbr = process_gdf(geopandas_frame = sa_cereals_class,
collection = l5_nbr_collection,
index = 'NBRT')
landsat_7_nbr = process_gdf(geopandas_frame = sa_cereals_class,
collection = l7_nbr_collection,
index = 'NBRT')
sa_all_polygons_nbr = pd.concat([landsat_5_nbr, landsat_7_nbr])
import datetime as dt
import unittest
import pandas as pd
import numpy as np
import numpy.testing as npt
import seaice.nasateam as nt
import seaice.tools.plotter.daily_extent as de
class Test_BoundingDateRange(unittest.TestCase):
def test_standard(self):
today = dt.date(2015, 9, 22)
month_bounds = (-3, 1)
expected_bounds = (dt.date(2015, 6, 1), dt.date(2015, 10, 31))
actual = de._bounding_date_range(today, *month_bounds)
self.assertEqual(expected_bounds, actual)
def test_bounding_dates_overlap_year(self):
today = dt.date(2001, 1, 15)
month_bounds = (-1, 1)
expected_bounds = (dt.date(2000, 12, 1), dt.date(2001, 2, 28))
actual = de._bounding_date_range(today, *month_bounds)
self.assertEqual(expected_bounds, actual)
def test_bounding_dates_overlap_leap_year(self):
today = dt.date(2016, 1, 15)
month_bounds = (-1, 1)
expected_bounds = (dt.date(2015, 12, 1), dt.date(2016, 2, 29))
actual = de._bounding_date_range(today, *month_bounds)
self.assertEqual(expected_bounds, actual)
class Test_GetRecordYear(unittest.TestCase):
start_date = nt.BEGINNING_OF_SATELLITE_ERA
end_date = dt.date(2015, 12, 31)
date_index = pd.date_range(start_date, end_date)
base_series = pd.Series(index=date_index).fillna(5)
def _series(self, low=None, high=None, next_highest=None, next_lowest=None):
"""Return a series for easily testing record values. All the values are 5, with
different values set to the dates passed in as low, next_lowest, high,
and next_highest. The index of the returned series is from the beginning
of the satellite era to the end of 2015 (since that happens to be the
last complete year at the time of this writing).
"""
series = self.base_series.copy()
if high:
series[high] = 10
if next_highest:
series[next_highest] = 7
if next_lowest:
series[next_lowest] = 2
if low:
series[low] = 0
return series
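# For example (illustrative), self._series(high='2002-09-15',
# next_highest='2010-09-15') is 5 everywhere except 10 on 2002-09-15 and 7 on
# 2010-09-15, which is enough to make 2002 the record year and 2010 the
# runner-up in the tests below.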
def test_max(self):
"""Date: 4/2014, range: 1/2014 -> 5/2014, record:9/2002 , recordline:2002"""
series = self._series(high='2002-09-15')
date = pd.to_datetime('2014-04-15')
month_bounds = (-3, 1)
# expectation
expected = 2002
# execute
actual = de._get_record_year(series, date, month_bounds, 'max')
self.assertEqual(actual, expected)
def test_min(self):
"""Date: 4/2014, range: 1/2014 -> 5/2014, record:9/2002(min) , recordline:2002"""
series = self._series(low='2002-09-15')
date = pd.to_datetime('2014-04-15')
month_bounds = (-3, 1)
# expectation
expected = 2002
# execute
actual = de._get_record_year(series, date, month_bounds, 'min')
self.assertEqual(actual, expected)
def test_max_current_year_is_record(self):
"""Date: 4/2014, range: 1/2014 -> 5/2014, record:3/2014, recordline:2010"""
series = self._series(high='2014-03-15', next_highest='2010-09-15')
date = pd.to_datetime('2014-04-15')
month_bounds = (-3, 1)
# expectation
expected = 2010
# execute
actual = de._get_record_year(series, date, month_bounds, 'max')
self.assertEqual(actual, expected)
def test_min_current_year_is_record(self):
"""Date: 4/2014, range: 1/2014 -> 5/2014, record:3/2014(min), recordline:2010"""
series = self._series(low='2014-03-15', next_lowest='2010-09-15')
date = pd.to_datetime('2014-04-15')
month_bounds = (-3, 1)
# expectation
expected = 2010
# execute
actual = de._get_record_year(series, date, month_bounds, 'min')
self.assertEqual(actual, expected)
def test_min_record_year_is_included_in_month_bounds(self):
"""Date: 2/2015, range: 10/2014 -> 3/2015, record: 1/2014, recordline: 2013-2014"""
series = self._series(low='2014-04-20', next_lowest='1999-09-15')
date = pd.to_datetime('2015-02-15')
month_bounds = (-4, 1)
# expectation
expected = 2014
# execute
actual = de._get_record_year(series, date, month_bounds, 'min')
self.assertEqual(actual, expected)
def test_min_record_year_before_and_crossover_forward(self):
"""Date: 12/2015, range: 8/2015 -> 1/2016, record: 12/2014, recordline: 2014-2015"""
series = self._series(low='2014-09-20', next_lowest='1999-09-15')
date = pd.to_datetime('2015-12-15')
month_bounds = (-4, 1)
# expectation
expected = 2014
# execute
actual = de._get_record_year(series, date, month_bounds, 'min')
self.assertEqual(actual, expected)
def test_max_year_changeover_record_is_plotted_and_aligned(self):
"""Date: 1/2010, range: 10/2009 -> 2/2010, record:1/2004, recordline:2004"""
series = self._series(high='2004-01-27')
date = pd.to_datetime('2010-01-15')
month_bounds = (-3, 1)
# expectation
expected = 2004
# execute
actual = de._get_record_year(series, date, month_bounds, 'max')
self.assertEqual(actual, expected)
def test_min_year_changeover_record_is_plotted_and_aligned(self):
"""Date: 1/2010, range: 10/2009 -> 2/2010, record:1/2004(min), recordline:2003-2004"""
series = self._series(low='2004-01-27')
date = pd.to_datetime('2010-01-15')
month_bounds = (-3, 1)
# expectation
expected = 2004
# execute
actual = de._get_record_year(series, date, month_bounds, 'min')
self.assertEqual(actual, expected)
def test_max_year_changeover_record_is_plotted_not_aligned(self):
"""Date: 1/2010, range: 10/2009 -> 2/2010, record:11/2007 , recordline:2007-2008"""
series = self._series(high='2007-11-27')
date = pd.to_datetime('2010-01-15')
month_bounds = (-3, 1)
# expectation
expected = 2008
# execute
actual = de._get_record_year(series, date, month_bounds, 'max')
self.assertEqual(actual, expected)
def test_min_year_changeover_record_is_plotted_not_aligned(self):
"""Date: 1/2010, range: 10/2009 -> 2/2010, record:11/2007 , recordline:2007-2008"""
series = self._series(low='2007-11-27')
date = pd.to_datetime('2010-01-15')
month_bounds = (-3, 1)
# expectation
expected = 2008
# execute
actual = de._get_record_year(series, date, month_bounds, 'min')
self.assertEqual(actual, expected)
def test_max_year_changeover_record_is_plotted_with_current_year_plots_next_highest(self):
"""Date: 1/2010, range: 10/2009 -> 2/2010, record:11/2009 , recordline:2004-2005"""
series = self._series(high='2009-11-27', next_highest='2004-11-27')
date = pd.to_datetime('2010-01-15')
month_bounds = (-3, 1)
# expectation
expected = 2005
# execute
actual = de._get_record_year(series, date, month_bounds, 'max')
self.assertEqual(actual, expected)
def test_min_year_changeover_record_is_plotted_with_current_year_plots_next_highest(self):
"""Date: 1/2010, range: 10/2009 -> 2/2010, record:11/2009 , recordline:2004-2005"""
series = self._series(low='2009-11-27', next_lowest='2004-11-27')
date = pd.to_datetime('2010-01-15')
month_bounds = (-3, 1)
# expectation
expected = 2005
# execute
actual = de._get_record_year(series, date, month_bounds, 'min')
self.assertEqual(actual, expected)
def test_max_year_record_not_plotted_picks_most_months(self):
"""Date: 1/2010, range: 11/2009 -> 3/2010, record:10/2008, recordline:2007-2008"""
series = self._series(high='2008-10-27')
date = pd.to_datetime('2010-01-15')
month_bounds = (-2, 2)
# expectation
expected = 2008
# execute
actual = de._get_record_year(series, date, month_bounds, 'max')
self.assertEqual(actual, expected)
def test_min_year_record_not_plotted_picks_most_months(self):
"""Date: 1/2010, range: 11/2009 -> 3/2010, record:8/2008, recordline:2007-2008"""
series = self._series(low='2008-08-27')
date = pd.to_datetime('2010-01-15')
month_bounds = (-2, 2)
# expectation
expected = 2008
# execute
actual = de._get_record_year(series, date, month_bounds, 'min')
self.assertEqual(actual, expected)
def test_max_year_record_not_plotted_picks_most_months_next_highest_record(self):
"""Date: 1/2010, range: 10/2009 -> 2/2010, record: 8/2009, recordline: 2008-2009 """
series = self._series(high='2009-08-27', next_highest='2004-08-27')
date = pd.to_datetime('2010-01-15')
month_bounds = (-3, 1)
# expectation
expected = 2009
# execute
actual = de._get_record_year(series, date, month_bounds, 'max')
self.assertEqual(actual, expected)
def test_min_year_record_not_plotted_picks_most_months_next_highest_record(self):
"""Date: 1/2010, range:10/2009 -> 2/2010, record: 8/2009, recordline: 2008-2009"""
series = self._series(low='2009-08-27', next_lowest='2004-08-27')
date = pd.to_datetime('2010-01-15')
month_bounds = (-3, 1)
# expectation
expected = 2009
# execute
actual = de._get_record_year(series, date, month_bounds, 'min')
self.assertEqual(actual, expected)
def test_past_record_same_year(self):
"""Date: 9/2015, range:6/2015 -> 10/2015, record: 3/2015, recordline: 2010"""
series = self._series(low='2015-03-27', next_lowest='2010-03-28')
date = pd.to_datetime('2015-09-15')
month_bounds = (-3, 1)
# expectation
expected = 2010
# execute
actual = de._get_record_year(series, date, month_bounds, 'min')
self.assertEqual(actual, expected)
def test_past_record_same_year_with_overlap(self):
"""Date: 9/2015, range:6/2015 -> 1/2016, record: 3/2015, recordline: 2014-2015"""
series = self._series(low='2015-03-27', next_lowest='2010-03-28')
date = pd.to_datetime('2015-09-15')
month_bounds = (-3, 4)
# expectation
expected = 2014
# execute
actual = de._get_record_year(series, date, month_bounds, 'min')
self.assertEqual(actual, expected)
def test_max_year_record_not_plotted_same_most_months_picks_earlier_year(self):
"""Date: 1/2010, range: 11/2009 -> 2/2010, record: 8/2008 , recordline:2008-2009"""
series = self._series(high='2008-08-27')
date = pd.to_datetime('2010-01-15')
month_bounds = (-2, 1)
# expectation
expected = 2009
# execute
actual = de._get_record_year(series, date, month_bounds, 'max')
self.assertEqual(actual, expected)
def test_starts_january_contains_record_month_same_year(self):
"""Date: 12/09, range: 09/2009 -> 1/2010, record: 9/2008 , recordline:2008-2009"""
series = self._series(high='2008-09-22')
date = pd.to_datetime('2009-12-15')
month_bounds = (-3, 1)
# expectation
expected = 2008
# execute
actual = de._get_record_year(series, date, month_bounds, 'max')
self.assertEqual(actual, expected)
def test_starts_feb_contains_record_month_different_year(self):
"""Date: 1/10, range: 09/2009 -> 2/2010, record: 9/2008 , recordline:2008-2009"""
series = self._series(high='2008-09-22')
date = pd.to_datetime('2010-01-15')
month_bounds = (-4, 1)
# expectation
expected = 2009
# execute
actual = de._get_record_year(series, date, month_bounds, 'max')
self.assertEqual(actual, expected)
def test_min_year_record_not_plotted_same_most_months_picks_earlier_year(self):
"""Date: 1/2010, range: 11/2009 -> 2/2010, record:8/2008 , recordline:2008-2009"""
series = self._series(low='2008-08-27')
date = pd.to_datetime('2010-01-15')
month_bounds = (-2, 1)
# expectation
expected = 2009
# execute
actual = de._get_record_year(series, date, month_bounds, 'min')
self.assertEqual(actual, expected)
class Test_YearWithMostMonthsInIndex(unittest.TestCase):
def test_longer_year_earlier(self):
index = pd.date_range(start='1999-01-01', end='2000-01-31')
actual = de._year_with_most_months_in_index(index)
expected = 1999
self.assertEqual(actual, expected)
def test_longer_year_later(self):
index = pd.date_range(start='1999-11-01', end='2000-04-29')
actual = de._year_with_most_months_in_index(index)
expected = 2000
self.assertEqual(actual, expected)
def test_earlier_year_when_equal_months(self):
index = pd.date_range(start='1999-11-01', end='2000-02-29')
actual = de._year_with_most_months_in_index(index)
expected = 1999
self.assertEqual(actual, expected)
class Test_DateIndexPrependDays(unittest.TestCase):
def test_adds_days_to_beginning_of_date_index(self):
date_index = pd.date_range(start='2005-01-05', end='2005-01-10')
days = 5
actual = de._date_index_prepend_days(date_index, days)
expected = pd.date_range(start='2004-12-31', end='2005-01-10')
self.assertTrue(actual.equals(expected))
class Test__ExtendSmoothDivide(unittest.TestCase):
def test_does_all_the_things(self):
date_index = pd.date_range(start='2000-01-06', end='2000-01-08')
nday_average = 3
divisor = 1e3
df_index = pd.Index([6, 7, 8], name='day of year')
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import time
# In[2]:
df1=pd.read_csv("loan_data.csv")
df1.head()
# In[3]:
df1 = pd.get_dummies(df1,['purpose'],drop_first=True)
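# In[4]:
# Illustrative toy example (not loan_data.csv) of what the get_dummies call
# above does: with drop_first=True the first category is dropped, so a single
# 'purpose_*' indicator column remains for this two-category toy frame.
_demo = pd.DataFrame({'purpose': ['credit_card', 'educational', 'credit_card']})
_demo_encoded = pd.get_dummies(_demo, ['purpose'], drop_first=True)
# _demo_encoded has one column, 'purpose_educational', flagging the second row.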
"""
Tests the usecols functionality during parsing
for all of the parsers defined in parsers.py
"""
from io import StringIO
import numpy as np
import pytest
from pandas._libs.tslib import Timestamp
from pandas import DataFrame, Index
import pandas._testing as tm
_msg_validate_usecols_arg = (
"'usecols' must either be list-like "
"of all strings, all unicode, all "
"integers or a callable."
)
_msg_validate_usecols_names = (
"Usecols do not match columns, columns expected but not found: {0}"
)
def test_raise_on_mixed_dtype_usecols(all_parsers):
# See gh-12678
data = """a,b,c
1000,2000,3000
4000,5000,6000
"""
usecols = [0, "b", 2]
parser = all_parsers
with pytest.raises(ValueError, match=_msg_validate_usecols_arg):
parser.read_csv(StringIO(data), usecols=usecols)
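# Illustrative addition, not part of the original suite: the error message
# above also allows 'usecols' to be a callable, which is evaluated against
# each column name and keeps the columns for which it returns True.
def test_usecols_callable_sketch(all_parsers):
    parser = all_parsers
    data = "a,b,c\n1,2,3\n4,5,6"
    result = parser.read_csv(StringIO(data), usecols=lambda name: name != "b")
    expected = DataFrame({"a": [1, 4], "c": [3, 6]})
    tm.assert_frame_equal(result, expected)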
@pytest.mark.parametrize("usecols", [(1, 2), ("b", "c")])
def test_usecols(all_parsers, usecols):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
parser = all_parsers
result = parser.read_csv(StringIO(data), usecols=usecols)
expected = DataFrame([[2, 3], [5, 6], [8, 9], [11, 12]], columns=["b", "c"])
tm.assert_frame_equal(result, expected)
def test_usecols_with_names(all_parsers):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
parser = all_parsers
names = ["foo", "bar"]
result = parser.read_csv(StringIO(data), names=names, usecols=[1, 2], header=0)
expected = DataFrame([[2, 3], [5, 6], [8, 9], [11, 12]], columns=names)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"names,usecols", [(["b", "c"], [1, 2]), (["a", "b", "c"], ["b", "c"])]
)
def test_usecols_relative_to_names(all_parsers, names, usecols):
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
parser = all_parsers
result = parser.read_csv(StringIO(data), names=names, header=None, usecols=usecols)
expected = DataFrame([[2, 3], [5, 6], [8, 9], [11, 12]], columns=["b", "c"])
tm.assert_frame_equal(result, expected)
def test_usecols_relative_to_names2(all_parsers):
# see gh-5766
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
parser = all_parsers
result = parser.read_csv(
StringIO(data), names=["a", "b"], header=None, usecols=[0, 1]
)
expected = DataFrame([[1, 2], [4, 5], [7, 8], [10, 11]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
def test_usecols_name_length_conflict(all_parsers):
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
parser = all_parsers
msg = "Number of passed names did not match number of header fields in the file"
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), names=["a", "b"], header=None, usecols=[1])
def test_usecols_single_string(all_parsers):
# see gh-20558
parser = all_parsers
data = """foo, bar, baz
1000, 2000, 3000
4000, 5000, 6000"""
with pytest.raises(ValueError, match=_msg_validate_usecols_arg):
parser.read_csv(StringIO(data), usecols="foo")
@pytest.mark.parametrize(
"data", ["a,b,c,d\n1,2,3,4\n5,6,7,8", "a,b,c,d\n1,2,3,4,\n5,6,7,8,"]
)
def test_usecols_index_col_false(all_parsers, data):
# see gh-9082
parser = all_parsers
usecols = ["a", "c", "d"]
expected = DataFrame({"a": [1, 5], "c": [3, 7], "d": [4, 8]})
result = parser.read_csv(StringIO(data), usecols=usecols, index_col=False)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("index_col", ["b", 0])
@pytest.mark.parametrize("usecols", [["b", "c"], [1, 2]])
def test_usecols_index_col_conflict(all_parsers, usecols, index_col):
# see gh-4201: test that index_col as integer reflects usecols
parser = all_parsers
data = "a,b,c,d\nA,a,1,one\nB,b,2,two"
expected = DataFrame({"c": [1, 2]}, index=Index(["a", "b"], name="b"))
result = parser.read_csv(StringIO(data), usecols=usecols, index_col=index_col)
tm.assert_frame_equal(result, expected)
def test_usecols_index_col_conflict2(all_parsers):
# see gh-4201: test that index_col as integer reflects usecols
parser = all_parsers
data = "a,b,c,d\nA,a,1,one\nB,b,2,two"
expected = DataFrame({"b": ["a", "b"], "c": [1, 2], "d": ("one", "two")})
expected = expected.set_index(["b", "c"])
result = parser.read_csv(
StringIO(data), usecols=["b", "c", "d"], index_col=["b", "c"]
)
tm.assert_frame_equal(result, expected)
def test_usecols_implicit_index_col(all_parsers):
# see gh-2654
parser = all_parsers
data = "a,b,c\n4,apple,bat,5.7\n8,orange,cow,10"
result = parser.read_csv(StringIO(data), usecols=["a", "b"])
expected = DataFrame({"a": ["apple", "orange"], "b": ["bat", "cow"]}, index=[4, 8])
tm.assert_frame_equal(result, expected)
def test_usecols_regex_sep(all_parsers):
# see gh-2733
parser = all_parsers
data = "a b c\n4 apple bat 5.7\n8 orange cow 10"
result = parser.read_csv(StringIO(data), sep=r"\s+", usecols=("a", "b"))
expected = DataFrame({"a": ["apple", "orange"], "b": ["bat", "cow"]}, index=[4, 8])
tm.assert_frame_equal(result, expected)
def test_usecols_with_whitespace(all_parsers):
parser = all_parsers
data = "a b c\n4 apple bat 5.7\n8 orange cow 10"
result = parser.read_csv(StringIO(data), delim_whitespace=True, usecols=("a", "b"))
expected = DataFrame({"a": ["apple", "orange"], "b": ["bat", "cow"]}, index=[4, 8])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"usecols,expected",
[
# Column selection by index.
([0, 1], DataFrame(data=[[1000, 2000], [4000, 5000]], columns=["2", "0"])),
# Column selection by name.
(["0", "1"], DataFrame(data=[[2000, 3000], [5000, 6000]], columns=["0", "1"])),
],
)
def test_usecols_with_integer_like_header(all_parsers, usecols, expected):
parser = all_parsers
data = """2,0,1
1000,2000,3000
4000,5000,6000"""
result = parser.read_csv(StringIO(data), usecols=usecols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("usecols", [[0, 2, 3], [3, 0, 2]])
def test_usecols_with_parse_dates(all_parsers, usecols):
# see gh-9755
data = """a,b,c,d,e
0,1,20140101,0900,4
0,1,20140102,1000,4"""
parser = all_parsers
parse_dates = [[1, 2]]
cols = {
"a": [0, 0],
"c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")],
}
expected = DataFrame(cols, columns=["c_d", "a"])
result = parser.read_csv(StringIO(data), usecols=usecols, parse_dates=parse_dates)
tm.assert_frame_equal(result, expected)
def test_usecols_with_parse_dates2(all_parsers):
# see gh-13604
parser = all_parsers
data = """2008-02-07 09:40,1032.43
2008-02-07 09:50,1042.54
2008-02-07 10:00,1051.65"""
names = ["date", "values"]
usecols = names[:]
parse_dates = [0]
index = Index(
[
Timestamp("2008-02-07 09:40"),
Timestamp("2008-02-07 09:50"),
| Timestamp("2008-02-07 10:00") | pandas._libs.tslib.Timestamp |
"""
This module tests high level dataset API functions which require entire datasets, indices, etc
"""
from collections import OrderedDict
import pandas as pd
import pandas.testing as pdt
from kartothek.core.dataset import DatasetMetadata
from kartothek.core.index import ExplicitSecondaryIndex
def test_dataset_get_indices_as_dataframe_partition_keys_only(
dataset_with_index, store_session
):
expected = pd.DataFrame(
OrderedDict([("P", [1, 2])]),
index=pd.Index(["P=1/cluster_1", "P=2/cluster_2"], name="partition"),
)
ds = dataset_with_index.load_partition_indices()
result = ds.get_indices_as_dataframe(columns=dataset_with_index.partition_keys)
pdt.assert_frame_equal(result, expected)
def test_dataset_get_indices_as_dataframe(dataset_with_index, store_session):
expected = pd.DataFrame(
OrderedDict([("L", [1, 2]), ("P", [1, 2])]),
index=pd.Index(["P=1/cluster_1", "P=2/cluster_2"], name="partition"),
)
ds = dataset_with_index.load_partition_indices()
ds = ds.load_index("L", store_session)
result = ds.get_indices_as_dataframe()
pdt.assert_frame_equal(result, expected)
def test_dataset_get_indices_as_dataframe_duplicates():
ds = DatasetMetadata(
"some_uuid",
indices={
"l_external_code": ExplicitSecondaryIndex(
"l_external_code", {"1": ["part1", "part2"], "2": ["part1", "part2"]}
),
"p_external_code": ExplicitSecondaryIndex(
"p_external_code", {"1": ["part1"], "2": ["part2"]}
),
},
)
expected = pd.DataFrame(
OrderedDict(
[
("p_external_code", ["1", "1", "2", "2"]),
("l_external_code", ["1", "2", "1", "2"]),
]
),
index= | pd.Index(["part1", "part1", "part2", "part2"], name="partition") | pandas.Index |
# Licensed to Modin Development Team under one or more contributor license
# agreements. See the NOTICE file distributed with this work for additional
# information regarding copyright ownership. The Modin Development Team
# licenses this file to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#
# This file is copied and adapted from:
# http://github.com/modin-project/modin/master/modin/pandas/test/test_general.py
import sys
import pytest
import pandas
import numpy as np
from numpy.testing import assert_array_equal
import ray
from ray.util.client.ray_client_helpers import ray_start_client_server
modin_compatible_version = sys.version_info >= (3, 7, 0)
modin_installed = True
if modin_compatible_version:
try:
import modin # noqa: F401
except ModuleNotFoundError:
modin_installed = False
skip = not modin_compatible_version or not modin_installed
# These tests are written for versions of Modin that require python 3.7+
pytestmark = pytest.mark.skipif(skip, reason="Outdated or missing Modin dependency")
if not skip:
from ray.tests.modin.modin_test_utils import df_equals
import modin.pandas as pd
# Module scoped fixture. Will first run all tests without ray
# client, then rerun all tests with a single ray client session.
@pytest.fixture(params=[False, True], autouse=True, scope="module")
def run_ray_client(request):
if request.param:
with ray_start_client_server() as client:
yield client
else:
# Run without ray client (do nothing)
yield
# Cleanup state before rerunning tests with client
ray.shutdown()
random_state = np.random.RandomState(seed=42)
# Size of test dataframes
NCOLS, NROWS = (2 ** 6, 2 ** 8)
# Range for values for test data
RAND_LOW = 0
RAND_HIGH = 100
# Input data and functions for the tests
# The test data that we will test our code against
test_data = {
"int_data": {
"col{}".format(int((i - NCOLS / 2) % NCOLS + 1)): random_state.randint(
RAND_LOW, RAND_HIGH, size=(NROWS)
)
for i in range(NCOLS)
},
"float_nan_data": {
"col{}".format(int((i - NCOLS / 2) % NCOLS + 1)): [
x
if (j % 4 == 0 and i > NCOLS // 2) or (j != i and i <= NCOLS // 2)
else np.NaN
for j, x in enumerate(
random_state.uniform(RAND_LOW, RAND_HIGH, size=(NROWS))
)
]
for i in range(NCOLS)
},
}
test_data["int_data"]["index"] = test_data["int_data"].pop(
"col{}".format(int(NCOLS / 2))
)
for col in test_data["float_nan_data"]:
for row in range(NROWS // 2):
if row % 16 == 0:
test_data["float_nan_data"][col][row] = np.NaN
test_data_values = list(test_data.values())
test_data_keys = list(test_data.keys())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_isna(data):
pandas_df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
pandas_result = pandas.isna(pandas_df)
modin_result = pd.isna(modin_df)
df_equals(modin_result, pandas_result)
modin_result = pd.isna(pd.Series([1, np.nan, 2]))
pandas_result = pandas.isna(pandas.Series([1, np.nan, 2]))
df_equals(modin_result, pandas_result)
assert pd.isna(np.nan) == pandas.isna(np.nan)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_isnull(data):
pandas_df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
pandas_result = pandas.isnull(pandas_df)
modin_result = pd.isnull(modin_df)
df_equals(modin_result, pandas_result)
modin_result = pd.isnull(pd.Series([1, np.nan, 2]))
pandas_result = pandas.isnull(pandas.Series([1, np.nan, 2]))
df_equals(modin_result, pandas_result)
assert pd.isna(np.nan) == pandas.isna(np.nan)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_notna(data):
pandas_df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
pandas_result = pandas.notna(pandas_df)
modin_result = pd.notna(modin_df)
df_equals(modin_result, pandas_result)
modin_result = pd.notna(pd.Series([1, np.nan, 2]))
pandas_result = pandas.notna(pandas.Series([1, np.nan, 2]))
df_equals(modin_result, pandas_result)
assert pd.isna(np.nan) == pandas.isna(np.nan)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_notnull(data):
pandas_df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
pandas_result = pandas.notnull(pandas_df)
modin_result = pd.notnull(modin_df)
df_equals(modin_result, pandas_result)
modin_result = pd.notnull(pd.Series([1, np.nan, 2]))
pandas_result = pandas.notnull(pandas.Series([1, np.nan, 2]))
df_equals(modin_result, pandas_result)
assert pd.isna(np.nan) == pandas.isna(np.nan)
def test_merge():
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 0, 1],
"col4": [2, 4, 5, 6],
}
modin_df = pd.DataFrame(frame_data)
pandas_df = pandas.DataFrame(frame_data)
frame_data2 = {"col1": [0, 1, 2], "col2": [1, 5, 6]}
modin_df2 = pd.DataFrame(frame_data2)
pandas_df2 = pandas.DataFrame(frame_data2)
# encoding: utf-8
"""
.. codeauthor:: <NAME> <<EMAIL>>
"""
from __future__ import absolute_import, print_function, unicode_literals
import collections
import re
from textwrap import dedent
import pytablewriter as ptw
import pytest
import six # noqa: W0611
from pytablewriter.style import Align, FontSize, Style, ThousandSeparator
from tabledata import TableData
from termcolor import colored
from ..._common import print_test_result
from ...data import (
float_header_list,
float_value_matrix,
headers,
mix_header_list,
mix_value_matrix,
style_tabledata,
styles,
value_matrix,
value_matrix_iter,
value_matrix_iter_1,
value_matrix_with_none,
)
try:
import pandas as pd
SKIP_DATAFRAME_TEST = False
except ImportError:
SKIP_DATAFRAME_TEST = True
Data = collections.namedtuple("Data", "table indent header value is_formatting_float expected")
normal_test_data_list = [
Data(
table="",
indent=0,
header=headers,
value=value_matrix,
is_formatting_float=True,
expected=dedent(
"""\
| a | b | c |dd | e |
|--:|----:|---|--:|----|
| 1|123.1|a |1.0| 1|
| 2| 2.2|bb |2.2| 2.2|
| 3| 3.3|ccc|3.0|cccc|
"""
),
),
Data(
table="",
indent=0,
header=headers,
value=None,
is_formatting_float=True,
expected=dedent(
"""\
| a | b | c |dd | e |
|---|---|---|---|---|
"""
),
),
Data(
table="floating point",
indent=0,
header=headers,
value=[
["1", 123.09999999999999, "a", "1", 1],
[2, 2.2000000000000002, "bb", "2.2", 2.2000000000000002],
[3, 3.2999999999999998, "ccc", "3.2999999999999998", "cccc"],
],
is_formatting_float=True,
expected=dedent(
"""\
# floating point
| a | b | c |dd | e |
|--:|----:|---|--:|----|
| 1|123.1|a |1.0| 1|
| 2| 2.2|bb |2.2| 2.2|
| 3| 3.3|ccc|3.3|cccc|
"""
),
),
Data(
table="tablename",
indent=1,
header=headers,
value=value_matrix,
is_formatting_float=True,
expected=dedent(
"""\
## tablename
| a | b | c |dd | e |
|--:|----:|---|--:|----|
| 1|123.1|a |1.0| 1|
| 2| 2.2|bb |2.2| 2.2|
| 3| 3.3|ccc|3.0|cccc|
"""
),
),
Data(
table="",
indent=0,
header=headers,
value=value_matrix_with_none,
is_formatting_float=True,
expected=dedent(
"""\
| a | b | c |dd | e |
|--:|--:|---|--:|----|
| 1| |a |1.0| |
| |2.2| |2.2| 2.2|
| 3|3.3|ccc| |cccc|
| | | | | |
"""
),
),
Data(
table="",
indent=0,
header=mix_header_list,
value=mix_value_matrix,
is_formatting_float=True,
expected=dedent(
"""\
| i | f | c | if |ifc|bool| inf |nan|mix_num | time |
|--:|---:|----|---:|---|----|--------|---|-------:|-------------------------|
| 1|1.10|aa | 1.0| 1|X |Infinity|NaN| 1|2017-01-01T00:00:00 |
| 2|2.20|bbb | 2.2|2.2| |Infinity|NaN|Infinity|2017-01-02 03:04:05+09:00|
| 3|3.33|cccc|-3.0|ccc|X |Infinity|NaN| NaN|2017-01-01T00:00:00 |
"""
),
),
Data(
table="formatting float 1",
indent=0,
header=headers,
value=value_matrix,
is_formatting_float=True,
expected=dedent(
"""\
# formatting float 1
| a | b | c |dd | e |
|--:|----:|---|--:|----|
| 1|123.1|a |1.0| 1|
| 2| 2.2|bb |2.2| 2.2|
| 3| 3.3|ccc|3.0|cccc|
"""
),
),
Data(
table="formatting float 2",
indent=0,
header=float_header_list,
value=float_value_matrix,
is_formatting_float=True,
expected=dedent(
"""\
# formatting float 2
| a | b | c |
|---:|----------:|----:|
|0.01| 0.0012|0.000|
|1.00| 99.9000|0.010|
|1.20|999999.1230|0.001|
"""
),
),
Data(
table="not formatting float 1",
indent=0,
header=headers,
value=value_matrix,
is_formatting_float=False,
expected=dedent(
"""\
# not formatting float 1
| a | b | c |dd | e |
|--:|----:|---|--:|----|
| 1|123.1|a | 1| 1|
| 2| 2.2|bb |2.2| 2.2|
| 3| 3.3|ccc| 3|cccc|
"""
),
),
Data(
table="not formatting float 2",
indent=0,
header=float_header_list,
value=float_value_matrix,
is_formatting_float=False,
expected=dedent(
"""\
# not formatting float 2
| a | b | c |
|---:|---------:|----:|
|0.01| 0.00125| 0|
| 1| 99.9| 0.01|
| 1.2|999999.123|0.001|
"""
),
),
Data(
table="",
indent=0,
header=["Name", "xUnit", "Source", "Remarks"],
value=[
[
"Crotest",
"",
"[160]",
"MIT License. A tiny and simple test framework for Crystal\nwith common assertions and no pollution into Object class.",
"",
]
],
is_formatting_float=True,
expected=dedent(
"""\
| Name |xUnit|Source| Remarks |
|-------|-----|------|--------------------------------------------------------------------------------------------------------------------|
|Crotest| |[160] |MIT License. A tiny and simple test framework for Crystal with common assertions and no pollution into Object class.|
"""
),
),
Data(
table="",
indent=0,
header=["姓", "名", "生年月日", "郵便番号", "住所", "電話番号"],
value=[
["山田", "太郎", "2001/1/1", "100-0002", "東京都千代田区皇居外苑", "03-1234-5678"],
["山田", "次郎", "2001/1/2", "251-0036", "神奈川県藤沢市江の島1丁目", "03-9999-9999"],
],
is_formatting_float=True,
expected=dedent(
"""\
| 姓 | 名 |生年月日|郵便番号| 住所 | 電話番号 |
|----|----|--------|--------|--------------------------|------------|
|山田|太郎|2001/1/1|100-0002|東京都千代田区皇居外苑 |03-1234-5678|
|山田|次郎|2001/1/2|251-0036|神奈川県藤沢市江の島1丁目|03-9999-9999|
"""
),
),
Data(
table="quoted values",
indent=0,
header=['"quote"', '"abc efg"'],
value=[['"1"', '"abc"'], ['"-1"', '"efg"']],
is_formatting_float=True,
expected=dedent(
"""\
# quoted values
|quote|abc efg|
|----:|-------|
| 1|abc |
| -1|efg |
"""
),
),
Data(
table="not str headers",
indent=0,
header=[None, 1, 0.1],
value=[[None, 1, 0.1]],
is_formatting_float=True,
expected=dedent(
"""\
# not str headers
| | 1 |0.1|
|---|--:|--:|
| | 1|0.1|
"""
),
),
Data(
table="no uniform matrix",
indent=0,
header=["a", "b", "c"],
value=[["a", 0], ["b", 1, "bb"], ["c", 2, "ccc", 0.1]],
is_formatting_float=True,
expected=dedent(
"""\
# no uniform matrix
| a | b | c |
|---|--:|---|
|a | 0| |
|b | 1|bb |
|c | 2|ccc|
"""
),
),
Data(
table="line breaks",
indent=0,
header=["a\nb", "\nc\n\nd\n", "e\r\nf"],
value=[["v1\nv1", "v2\n\nv2", "v3\r\nv3"]],
is_formatting_float=True,
expected=dedent(
"""\
# line breaks
| a b | c d | e f |
|-----|-----|-----|
|v1 v1|v2 v2|v3 v3|
"""
),
),
Data(
table="empty header",
indent=0,
header=[],
value=value_matrix,
is_formatting_float=True,
expected=dedent(
"""\
# empty header
| A | B | C | D | E |
|--:|----:|---|--:|----|
| 1|123.1|a |1.0| 1|
| 2| 2.2|bb |2.2| 2.2|
| 3| 3.3|ccc|3.0|cccc|
"""
),
),
Data(
table="vertical bar",
indent=1,
header=["a|b", "|c||d|"],
value=[["|v1|v1|", "v2|v2"]],
is_formatting_float=True,
expected=r"""## vertical bar
| a\|b |\|c\|\|d\||
|-------|------|
|\|v1\|v1\||v2\|v2 |
""",
),
Data(
table="mixed value types",
indent=0,
header=["data", "v"],
value=[
[3.4375, 65.5397978633],
[65.5397978633, 127.642095727],
[189.74439359, 189.74439359],
[10064.0097539, 10001.907456],
["next", 10250.3166474],
],
is_formatting_float=True,
expected=dedent(
"""\
# mixed value types
| data | v |
|---------|-------:|
| 3.437| 65.54|
| 65.540| 127.64|
| 189.744| 189.74|
|10064.010|10001.91|
|next |10250.32|
"""
),
),
Data(
table="list of dict",
indent=0,
header=["A", "B", "C"],
value=[
{"A": 1},
{"B": 2.1, "C": "hoge"},
{"A": 0, "B": 0.1, "C": "foo"},
{},
{"A": -1, "B": -0.1, "C": "bar", "D": "extra"},
],
is_formatting_float=False,
expected=dedent(
"""\
# list of dict
| A | B | C |
|--:|---:|----|
| 1| | |
| | 2.1|hoge|
| 0| 0.1|foo |
| | | |
| -1|-0.1|bar |
"""
),
),
]
exception_test_data_list = [
Data(
table="",
indent=0,
header=[],
value=[],
is_formatting_float=True,
expected=ptw.EmptyTableDataError,
)
]
table_writer_class = ptw.MarkdownTableWriter
def trans_func(value):
if value is None:
return ""
if value is True:
return "X"
if value is False:
return ""
return value
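# Minimal usage sketch of the writer under test (illustrative, not part of the test suite);
# the attributes shown (headers, value_matrix, dumps) are the same ones exercised below.
#   writer = ptw.MarkdownTableWriter()
#   writer.headers = ["a", "b"]
#   writer.value_matrix = [[1, "x"], [2, "y"]]
#   print(writer.dumps())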
class Test_MarkdownTableWriter_write_new_line(object):
def test_normal(self, capsys):
writer = table_writer_class()
writer.write_null_line()
out, _err = capsys.readouterr()
assert out == "\n"
class Test_MarkdownTableWriter_write_table(object):
@pytest.mark.parametrize(
["table", "indent", "header", "value", "is_formatting_float", "expected"],
[
[
data.table,
data.indent,
data.header,
data.value,
data.is_formatting_float,
data.expected,
]
for data in normal_test_data_list
],
)
def test_normal(self, capsys, table, indent, header, value, is_formatting_float, expected):
writer = table_writer_class()
writer.table_name = table
writer.set_indent_level(indent)
writer.headers = header
writer.value_matrix = value
writer.is_formatting_float = is_formatting_float
writer.register_trans_func(trans_func)
writer.write_table()
out, err = capsys.readouterr()
print_test_result(expected=expected, actual=out, error=err)
assert out == expected
assert writer.dumps() == expected
def test_normal_single_tabledata(self, capsys):
writer = table_writer_class()
writer.from_tabledata(
TableData(
"loader_mapping",
["Name", "Loader"],
[
["csv", "CsvTableFileLoader"],
["excel", "ExcelTableFileLoader"],
["html", "HtmlTableFileLoader"],
["markdown", "MarkdownTableFileLoader"],
["mediawiki", "MediaWikiTableFileLoader"],
["json", "JsonTableFileLoader"],
["Long Format Name", "Loader"],
],
)
)
writer.write_table()
expected = dedent(
"""\
# loader_mapping
| Name | Loader |
|----------------|------------------------|
|csv |CsvTableFileLoader |
|excel |ExcelTableFileLoader |
|html |HtmlTableFileLoader |
|markdown |MarkdownTableFileLoader |
|mediawiki |MediaWikiTableFileLoader|
|json |JsonTableFileLoader |
|Long Format Name|Loader |
"""
)
out, err = capsys.readouterr()
print_test_result(expected=expected, actual=out, error=err)
assert out == expected
def test_normal_multiple_write(self, capsys):
writer = table_writer_class()
writer.is_write_null_line_after_table = True
writer.from_tabledata(
TableData(
"first",
["Name", "Loader"],
[["csv", "CsvTableFileLoader"], ["excel", "ExcelTableFileLoader"]],
)
)
writer.write_table()
writer.from_tabledata(
TableData("second", ["a", "b", "c"], [["1", "AA", "abc"], ["2", "BB", "zzz"]])
)
writer.write_table()
expected = dedent(
"""\
# first
|Name | Loader |
|-----|--------------------|
|csv |CsvTableFileLoader |
|excel|ExcelTableFileLoader|
# second
| a | b | c |
|--:|---|---|
| 1|AA |abc|
| 2|BB |zzz|
"""
)
out, err = capsys.readouterr()
print_test_result(expected=expected, actual=out, error=err)
assert out == expected
def test_normal_style_align(self):
writer = table_writer_class()
writer.from_tabledata(
TableData(
"auto align",
["left", "right", "center", "auto", "auto", "None"],
[
[0, "r", "center align", 0, "a", "n"],
[11, "right align", "bb", 11, "auto", "none (auto)"],
],
)
)
expected = dedent(
"""\
# auto align
|left| right | center |auto|auto| None |
|---:|-----------|------------|---:|----|-----------|
| 0|r |center align| 0|a |n |
| 11|right align|bb | 11|auto|none (auto)|
"""
)
out = writer.dumps()
print_test_result(expected=expected, actual=out)
assert out == expected
writer.table_name = "specify alignment for each column manually"
writer.styles = [
Style(align=Align.LEFT),
Style(align=Align.RIGHT),
Style(align=Align.CENTER),
Style(align=Align.AUTO),
Style(align=Align.AUTO),
None,
]
expected = dedent(
"""\
# specify alignment for each column manually
|left| right | center |auto|auto| None |
|----|----------:|:----------:|---:|----|-----------|
|0 | r|center align| 0|a |n |
|11 |right align| bb | 11|auto|none (auto)|
"""
)
out = writer.dumps()
print_test_result(expected=expected, actual=out)
assert out == expected
# test for backward compatibility
writer.styles = None
writer.align_list = [Align.LEFT, Align.RIGHT, Align.CENTER, Align.AUTO, Align.AUTO, None]
out = writer.dumps()
print_test_result(expected=expected, actual=out)
assert out == expected
def test_normal_style_thousand_separator(self, capsys):
writer = table_writer_class()
writer.from_tabledata(
TableData(
"",
["none_format", "thousand_separator_i", "thousand_separator_f", "f", "wo_f"],
[
[1000, 1234567, 1234567.8, 1234.5678, 1234567.8],
[1000, 1234567, 1234567.8, 1234.5678, 1234567.8],
],
)
)
writer.styles = [
Style(thousand_separator=ThousandSeparator.NONE),
Style(thousand_separator=ThousandSeparator.COMMA),
Style(thousand_separator=ThousandSeparator.COMMA),
Style(thousand_separator=ThousandSeparator.SPACE),
]
out = writer.dumps()
expected = dedent(
"""\
|none_format|thousand_separator_i|thousand_separator_f| f | wo_f |
|----------:|-------------------:|-------------------:|------:|--------:|
| 1000| 1,234,567| 1,234,567.8|1 234.6|1234567.8|
| 1000| 1,234,567| 1,234,567.8|1 234.6|1234567.8|
"""
)
print_test_result(expected=expected, actual=out)
assert out == expected
writer.styles = None
writer.format_list = [
ptw.Format.NONE,
ptw.Format.THOUSAND_SEPARATOR,
ptw.Format.THOUSAND_SEPARATOR,
ptw.Format.THOUSAND_SEPARATOR,
]
out = writer.dumps()
expected = dedent(
"""\
|none_format|thousand_separator_i|thousand_separator_f| f | wo_f |
|----------:|-------------------:|-------------------:|------:|--------:|
| 1000| 1,234,567| 1,234,567.8|1,234.6|1234567.8|
| 1000| 1,234,567| 1,234,567.8|1,234.6|1234567.8|
"""
)
print_test_result(expected=expected, actual=out)
assert out == expected
def test_normal_style_font_size(self):
writer = table_writer_class()
writer.table_name = "style test: font size will not be affected"
writer.headers = ["none", "empty_style", "tiny", "small", "medium", "large"]
writer.value_matrix = [[111, 111, 111, 111, 111, 111], [1234, 1234, 1234, 1234, 1234, 1234]]
writer.styles = [
None,
Style(),
Style(font_size=FontSize.TINY),
Style(font_size=FontSize.SMALL),
Style(font_size=FontSize.MEDIUM),
Style(font_size=FontSize.LARGE),
]
expected = dedent(
"""\
# style test: font size will not be affected
|none|empty_style|tiny|small|medium|large|
|---:|----------:|---:|----:|-----:|----:|
| 111| 111| 111| 111| 111| 111|
|1234| 1234|1234| 1234| 1234| 1234|
"""
)
out = writer.dumps()
print_test_result(expected=expected, actual=out)
assert out == expected
def test_normal_style_font_weight(self):
writer = table_writer_class()
writer.table_name = "style test: bold"
writer.headers = ["normal", "bold"]
writer.value_matrix = [[11, 11], [123456, 123456]]
writer.styles = [Style(font_weight="normal"), Style(font_weight="bold")]
expected = dedent(
"""\
# style test: bold
|normal| bold |
|-----:|---------:|
| 11| **11**|
|123456|**123456**|
"""
)
out = writer.dumps()
print_test_result(expected=expected, actual=out)
assert out == expected
def test_normal_style_mix(self):
writer = table_writer_class()
writer.from_tabledata(style_tabledata)
writer.styles = styles
expected = dedent(
"""\
# style test
|none|empty|tiny|small|medium|large|null w/ bold| L bold |S italic|L bold italic|
|---:|----:|---:|----:|-----:|----:|------------|-------:|-------:|------------:|
| 111| 111| 111| 111| 111| 111| | **111**| _111_| _**111**_|
|1234| 1234|1234| 1234| 1,234|1 234| |**1234**| _1234_| _**1234**_|
"""
)
out = writer.dumps()
print_test_result(expected=expected, actual=out)
assert out == expected
def test_normal_set_style(self):
writer = table_writer_class()
writer.table_name = "set style method"
writer.headers = ["normal", "style by idx", "style by header"]
writer.value_matrix = [[11, 11, 11], [123456, 123456, 123456]]
writer.set_style(1, Style(font_weight="bold", thousand_separator=","))
writer.set_style(
"style by header", Style(align="center", font_weight="bold", thousand_separator=" ")
)
expected = dedent(
"""\
# set style method
|normal|style by idx|style by header|
|-----:|-----------:|:-------------:|
| 11| **11**| **11** |
|123456| **123,456**| **123 456** |
"""
)
output = writer.dumps()
print_test_result(expected=expected, actual=output)
assert output == expected
writer.table_name = "change style"
writer.set_style(1, Style(align="right", font_style="italic"))
writer.set_style("style by header", Style())
expected = dedent(
"""\
# change style
|normal|style by idx|style by header|
|-----:|-----------:|--------------:|
| 11| _11_| 11|
|123456| _123456_| 123456|
"""
)
output = writer.dumps()
print_test_result(expected=expected, actual=output)
assert output == expected
def test_normal_ansi_color(self, capsys):
writer = table_writer_class()
writer.table_name = "ANCI escape sequence"
writer.headers = ["colored_i", "colored_f", "colored_s", "wo_anci"]
writer.value_matrix = [
[colored(111, "red"), colored(1.1, "green"), colored("abc", "blue"), "abc"],
[colored(0, "red"), colored(0.12, "green"), colored("abcdef", "blue"), "abcdef"],
]
writer.write_table()
expected = dedent(
"""\
# ANCI escape sequence
|colored_i|colored_f|colored_s|wo_anci|
|--------:|--------:|---------|-------|
| 111| 1.1|abc |abc |
| 0| 0.12|abcdef |abcdef |
"""
)
out, err = capsys.readouterr()
print_test_result(expected=expected, actual=out, error=err)
_ansi_escape = re.compile(r"(\x9b|\x1b\[)[0-?]*[ -\/]*[@-~]", re.IGNORECASE)
assert _ansi_escape.sub("", out) == expected
def test_normal_margin_1(self, capsys):
writer = table_writer_class()
writer.from_tabledata(TableData("", headers, value_matrix))
writer.margin = 1
writer.write_table()
expected = dedent(
"""\
| a | b | c | dd | e |
|----:|------:|-----|----:|------|
| 1 | 123.1 | a | 1.0 | 1 |
| 2 | 2.2 | bb | 2.2 | 2.2 |
| 3 | 3.3 | ccc | 3.0 | cccc |
"""
)
out, err = capsys.readouterr()
print_test_result(expected=expected, actual=out, error=err)
assert out == expected
def test_normal_margin_2(self, capsys):
writer = table_writer_class()
writer.from_tabledata(TableData("", headers, value_matrix))
writer.margin = 2
writer.write_table()
expected = dedent(
"""\
| a | b | c | dd | e |
|------:|--------:|-------|------:|--------|
| 1 | 123.1 | a | 1.0 | 1 |
| 2 | 2.2 | bb | 2.2 | 2.2 |
| 3 | 3.3 | ccc | 3.0 | cccc |
"""
)
out, err = capsys.readouterr()
print_test_result(expected=expected, actual=out, error=err)
assert out == expected
def test_normal_value_map(self):
writer = table_writer_class()
writer.headers = ["a", "b"]
writer.value_matrix = [["foo", True], ["bar", False]]
writer.register_trans_func(trans_func)
expected = dedent(
"""\
| a | b |
|---|---|
|foo|X |
|bar| |
"""
)
output = writer.dumps()
print_test_result(expected=expected, actual=output)
assert output == expected
def test_normal_avoid_overwrite_stream_by_dumps(self):
writer = table_writer_class()
writer.headers = ["a", "b"]
writer.value_matrix = [["foo", "bar"]]
writer.stream = six.StringIO()
expected = dedent(
"""\
| a | b |
|---|---|
|foo|bar|
"""
)
output = writer.dumps()
print_test_result(expected=expected, actual=output)
assert output == expected
print("--------------------")
writer.write_table()
output = writer.stream.getvalue()
print_test_result(expected=expected, actual=output)
assert output == expected
@pytest.mark.skipif("six.PY2")
def test_normal_escape_html_tag(self, capsys):
writer = table_writer_class()
writer.headers = ["no", "text"]
writer.value_matrix = [[1, "<caption>Table 'formatting for Jupyter Notebook.</caption>"]]
writer.is_escape_html_tag = True
writer.write_table()
expected = dedent(
"""\
|no | text |
|--:|---------------------------------------------------------------------------|
| 1|<caption>Table 'formatting for Jupyter Notebook.</caption>|
"""
)
out, err = capsys.readouterr()
print_test_result(expected=expected, actual=out, error=err)
assert out == expected
@pytest.mark.skipif("six.PY2")
def test_normal_escape_html_tag_from_tabledata(self, capsys):
writer = table_writer_class()
writer.from_tabledata(
TableData(
"",
["no", "text"],
[[1, "<caption>Table 'formatting for Jupyter Notebook.</caption>"]],
)
)
writer.is_escape_html_tag = True
writer.write_table()
expected = dedent(
"""\
|no | text |
|--:|---------------------------------------------------------------------------|
| 1|<caption>Table 'formatting for Jupyter Notebook.</caption>|
"""
)
out, err = capsys.readouterr()
print_test_result(expected=expected, actual=out, error=err)
assert out == expected
@pytest.mark.parametrize(
["table", "indent", "header", "value", "expected"],
[
[data.table, data.indent, data.header, data.value, data.expected]
for data in exception_test_data_list
],
)
def test_exception(self, table, indent, header, value, expected):
writer = table_writer_class()
writer.table_name = table
writer.set_indent_level(indent)
writer.headers = header
writer.value_matrix = value
with pytest.raises(expected):
writer.write_table()
class Test_MarkdownTableWriter_write_table_iter(object):
@pytest.mark.parametrize(
["table", "header", "value", "expected"],
[
[
"tablename",
["ha", "hb", "hc"],
value_matrix_iter,
dedent(
"""\
# tablename
| ha | hb | hc |
|---:|---:|---:|
| 1| 2| 3|
| 11| 12| 13|
| 1| 2| 3|
| 11| 12| 13|
| 101| 102| 103|
|1001|1002|1003|
"""
),
],
[
"mix length",
["string", "hb", "hc"],
value_matrix_iter_1,
dedent(
"""\
# mix length
| string | hb | hc |
|-----------------------------|----:|---:|
|a b c d e f g h i jklmn | 2.1| 3|
|aaaaa | 12.1| 13|
|bbb | 2| 3|
|cc | 12| 13|
|a | 102| 103|
| | 1002|1003|
"""
),
],
],
)
def test_normal(self, capsys, table, header, value, expected):
writer = table_writer_class()
writer.table_name = table
writer.headers = header
writer.value_matrix = value
writer.iteration_length = len(value)
writer.write_table_iter()
out, err = capsys.readouterr()
print_test_result(expected=expected, actual=out, error=err)
assert out == expected
@pytest.mark.parametrize(
["table", "header", "value", "expected"],
[[data.table, data.header, data.value, data.expected] for data in exception_test_data_list],
)
def test_exception(self, table, header, value, expected):
writer = table_writer_class()
writer.table_name = table
writer.headers = header
writer.value_matrix = value
with pytest.raises(expected):
writer.write_table_iter()
class Test_MarkdownTableWriter_dump(object):
def test_normal(self, tmpdir):
test_filepath = str(tmpdir.join("test.sqlite"))
writer = table_writer_class()
writer.headers = ["a", "b"]
writer.value_matrix = [["foo", "bar"]]
writer.dump(test_filepath)
expected = dedent(
"""\
| a | b |
|---|---|
|foo|bar|
"""
)
with open(test_filepath) as f:
output = f.read()
print_test_result(expected=expected, actual=output)
assert output == expected
class Test_MarkdownTableWriter_from_tablib(object):
def test_normal_multiple_write(self, capsys):
try:
import tablib
except ImportError:
pytest.skip("requires tablib")
data = tablib.Dataset()
data.headers = ["a", "b", "c"]
data.append(["1", "AA", "abc"])
data.append(["2", "BB", "zzz"])
writer = table_writer_class()
writer.from_tablib(data)
writer.write_table()
expected = dedent(
"""\
| a | b | c |
|--:|---|---|
| 1|AA |abc|
| 2|BB |zzz|
"""
)
out, err = capsys.readouterr()
print_test_result(expected=expected, actual=out, error=err)
assert out == expected
class Test_MarkdownTableWriter_line_break_handling(object):
@pytest.mark.parametrize(
["value", "expected"],
[
[
ptw.LineBreakHandling.REPLACE,
dedent(
"""\
|no | text |
|--:|------------|
| 1|first second|
"""
),
],
[
ptw.LineBreakHandling.ESCAPE,
r"""|no | text |
|--:|-------------|
| 1|first\nsecond|
""",
],
[
"escape",
r"""|no | text |
|--:|-------------|
| 1|first\nsecond|
""",
],
],
)
def test_normal_line(self, value, expected):
writer = table_writer_class()
writer.headers = ["no", "text"]
writer.value_matrix = [[1, "first\nsecond"]]
writer.line_break_handling = value
out = writer.dumps()
print_test_result(expected=expected, actual=out)
assert out == expected
@pytest.mark.skipif("SKIP_DATAFRAME_TEST is True")
class Test_MarkdownTableWriter_from_dataframe(object):
@pytest.mark.parametrize(
["add_index_column", "expected"],
[
[
False,
dedent(
"""\
# add_index_column: False
| A | B |
|--:|--:|
| 1| 10|
| 2| 11|
"""
),
],
[
True,
dedent(
"""\
# add_index_column: True
| | A | B |
|---|--:|--:|
|a | 1| 10|
|b | 2| 11|
"""
),
],
],
)
def test_normal(self, tmpdir, add_index_column, expected):
writer = table_writer_class()
writer.table_name = "add_index_column: {}".format(add_index_column)
        df = pd.DataFrame({"A": [1, 2], "B": [10, 11]}, index=["a", "b"])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 14 14:54:39 2017
@author: dhingratul
"""
import pandas as pd
countries = [
'Afghanistan', 'Albania', 'Algeria', 'Angola', 'Argentina',
'Armenia', 'Australia', 'Austria', 'Azerbaijan', 'Bahamas',
'Bahrain', 'Bangladesh', 'Barbados', 'Belarus', 'Belgium',
'Belize', 'Benin', 'Bhutan', 'Bolivia',
'Bosnia and Herzegovina'
]
employment_values = [
55.70000076, 51.40000153, 50.5, 75.69999695,
58.40000153, 40.09999847, 61.5, 57.09999847,
60.90000153, 66.59999847, 60.40000153, 68.09999847,
66.90000153, 53.40000153, 48.59999847, 56.79999924,
71.59999847, 58.40000153, 70.40000153, 41.20000076
]
# Employment data in 2007 for 20 countries
employment = pd.Series(employment_values, index=countries)
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
from sklearn import datasets, linear_model
from difflib import SequenceMatcher
import seaborn as sns
from statistics import mean
from ast import literal_eval
from scipy import stats
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from pygam import LinearGAM, s, l, f
from matplotlib import lines
import six
def extract_boar_teloFISH_as_list(path):
"""
    Pulls Kelly's teloFISH data for 40 boars into a list, to be made into a dataframe and joined
    with the main dataframe if possible.
    These excel files take a long time to load. The objective here is to synthesize all the excel files for
    telomere FISH data into one dataframe, then save that dataframe to a csv file to be retrieved later;
    loading one csv file containing all the data is much faster than loading the parts of the whole.
Along the way, we'll normalize the teloFISH data using controls internal to each excel file
"""
boar_teloFISH_list = []
for file in os.scandir(path):
if 'Hyb' in file.name:
print(f'Handling {file.name}...')
full_name = path + file.name
# making a dict of excel sheets, where KEY:VALUE pairs are SAMPLE ID:TELO DATA
telo_excel_dict = pd.read_excel(full_name, sheet_name=None, skiprows=4, usecols=[3], nrows=5000)
if 'Telomere Template' in telo_excel_dict.keys():
del telo_excel_dict['Telomere Template']
excel_file_list = []
for sample_id, telos in telo_excel_dict.items():
telos_cleaned = clean_individ_telos(telos)
if sample_id != 'Control':
excel_file_list.append([sample_id, telos_cleaned.values, np.mean(telos_cleaned)])
elif sample_id == 'Control':
control_value = np.mean(telos_cleaned)
#normalize teloFISH values by control value
for sample in excel_file_list:
sample_data = sample
#normalize individual telos
sample_data[1] = np.divide(sample_data[1], control_value)
#normalize telo means
sample_data[2] = np.divide(sample_data[2], control_value)
boar_teloFISH_list.append(sample_data)
print('Finished collecting boar teloFISH data')
return boar_teloFISH_list
def gen_missing_values_andimpute_or_randomsampledown(n_cells, telosPercell, df):
max_telos = n_cells * telosPercell
half_telos = (n_cells * telosPercell) / 2
if df.size > max_telos:
df_sampled = df.sample(max_telos)
return df_sampled
if df.size > 25 and df.size <= half_telos:
missing_data_difference = abs( (n_cells * telosPercell) - df.size )
rsampled = df.sample(missing_data_difference, replace=True, random_state=28)
concat_ed = pd.concat([rsampled, df], sort=False)
np.random.shuffle(concat_ed.to_numpy())
return concat_ed
if df.size > 25 and df.size < max_telos:
missing_data_difference = abs( (n_cells * telosPercell) - df.size )
rsampled = df.sample(missing_data_difference, random_state=28)
concat_ed = pd.concat([rsampled, df], sort=False)
np.random.shuffle(concat_ed.to_numpy())
return concat_ed
else:
return df
def clean_individ_telos(telo_data):
labels=[6, 172, 338, 504, 670, 836, 1002, 1168, 1334, 1500, 1666, 1832,
1998, 2164, 2330, 2496, 2662, 2828, 2994, 3160, 3326, 3492, 3658, 3824,
3990, 4156, 4322, 4488, 4654, 4820]
labels_offset_by6 = [(x-6) for x in labels]
telo_data = telo_data.drop(labels_offset_by6)
    telo_data = pd.to_numeric(telo_data.iloc[:,0], errors='coerce')
    return telo_data
# SPDX-FileCopyrightText: : 2020 @JanFrederickUnnewehr, The PyPSA-Eur Authors
#
# SPDX-License-Identifier: GPL-3.0-or-later
"""
This rule downloads the load data from `Open Power System Data Time series <https://data.open-power-system-data.org/time_series/>`_. For all countries in the network, the per country load timeseries with suffix ``_load_actual_entsoe_transparency`` are extracted from the dataset. After filling small gaps linearly and large gaps by copying time-slice of a given period, the load data is exported to a ``.csv`` file.
Relevant Settings
-----------------
.. code:: yaml
snapshots:
load:
url:
interpolate_limit:
time_shift_for_large_gaps:
manual_adjustments:
.. seealso::
Documentation of the configuration file ``config.yaml`` at
:ref:`load_cf`
Inputs
------
Outputs
-------
- ``resource/time_series_60min_singleindex_filtered.csv``:
"""
import logging
logger = logging.getLogger(__name__)
#from _helpers import configure_logging
import re
import pandas as pd
import numpy as np
import json
from shapely.geometry import LineString,Point
from sklearn.linear_model import Lasso
from sklearn.metrics import mean_absolute_error
#-----------------#
# utils functions #
#-----------------#
def unpack_param(df):
if 'param' in df.columns:
new = df.param.apply(string2list)
df.lat=df.lat.apply(string2list)
df.long=df.long.apply(string2list)
return df.merge(pd.DataFrame(list(new)), left_index=True, right_index=True)
else:
return 0
def string2list(string, with_None=True):
p = re.compile('(?<!\\\\)\'')
string = p.sub('\"', string)
if with_None:
p2 = re.compile('None')
string = p2.sub('\"None\"', string)
return json.loads(string)
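# Example of the normalisation performed by string2list (illustrative):
#   string2list("['48.5', None, '8.9']")  ->  ['48.5', 'None', '8.9']
# unescaped single quotes become double quotes and bare None is wrapped in quotes so that
# the string parses as JSON.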
#use to create geo object
def change2linestring(df):
# rows level process
df['linestring']=[]
for index in range(len(df['lat'])):
df['linestring'].append((df['long'][index],df['lat'][index]))
df['linestring']=LineString(df['linestring'])
return df
def addLinestring(df):
#dataframe level process
df=df.reset_index(drop=True)
df['linestring']='L'
df=df.apply(change2linestring,axis=1)
return df
def recalculate_pipe_capacity(pipe_diameter_mm):
"""Calculate pipe capacity based on diameter.
20 inch (500 mm) 50 bar -> 1.5 GW CH4 pipe capacity (LHV)
24 inch (600 mm) 50 bar -> 5 GW CH4 pipe capacity (LHV)
36 inch (900 mm) 50 bar -> 11.25 GW CH4 pipe capacity (LHV)
48 inch (1200 mm) 80 bar -> 21.7 GW CH4 pipe capacity (LHV)
Based on p.15 of (https://gasforclimate2050.eu/wp-content/uploads/2020/07/2020_European-Hydrogen-Backbone_Report.pdf"""
# slope
m0 = (5-1.5) / (600-500)
m1 = (11.25-5)/(900-600)
m2 = (21.7-11.25)/(1200-900)
if np.isnan(pipe_diameter_mm):
return np.nan
if pipe_diameter_mm<500:
return np.nan
if pipe_diameter_mm<600 and pipe_diameter_mm>=500:
return -16 + m0 * pipe_diameter_mm
if pipe_diameter_mm<900 and pipe_diameter_mm>=600:
return -7.5 + m1 * pipe_diameter_mm
else:
return -20.1 + m2 * pipe_diameter_mm
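# Sanity check against the anchor points quoted in the docstring (illustrative only):
#   recalculate_pipe_capacity(500)   -> 1.5   GW
#   recalculate_pipe_capacity(600)   -> 5.0   GW
#   recalculate_pipe_capacity(900)   -> 11.25 GW
#   recalculate_pipe_capacity(1200)  -> 21.7  GW
# diameters below 500 mm (or NaN) yield NaN.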
def convert_gas_to_hydrogen_capacity(gas_capacity):
return gas_capacity/3
def convert_gasVolume2hydrogenCapacity(df):
R_s = 518.4
# temperature [Kelvin] (assuming 10°Celsius)
T = 10 + 273.15
# density [kg/m^3]= pressure [kg/ms^2] / (T * R_s), 1 bar = 1e5 kg/(ms^2)
pressure = df.max_pressure_bar.fillna(45)
density = pressure * 1e5 / (T * R_s)
# mass flow [kg/ h], Mega = 1e6,
#mass_flow = df.max_cap_M_m3_per_d * 1e6 / 8760 * density
mass_flow = df.max_cap_M_m3_per_d * 1e6 / 24 * density
# gross calorific value (GCV in ENTSOT table) [kWh/kg]
gcv_lgas = 38.3 / 3.6
gcv_hgas = 47.3 / 3.6
# energy cap [MW] = mass_flow [kg/h] * gcv [kWh/kg] * 1e-3
energy_cap = mass_flow * 1e-3
energy_cap.loc[df.is_H_gas==1] *= gcv_hgas
energy_cap.loc[df.is_H_gas!=1] *= gcv_lgas
return energy_cap*1e-3 # to gw
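# Illustrative input frame (column names are the ones read above; values are made up):
#   pipes = pd.DataFrame({"max_cap_M_m3_per_d": [24.0],
#                         "max_pressure_bar": [50.0],
#                         "is_H_gas": [1]})
#   cap_gw = convert_gasVolume2hydrogenCapacity(pipes)  # Series of energy capacities in GW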
#-----------------#
# main functions #
#-----------------#
def load_preprocessing_dataset(IGGINL_df_path, entsog_df_path, EMAP_df_path):
# --------------------------------------------
#load&prepocess IGGINL df
#--------------------------------------------
    IGGINL = pd.read_csv(IGGINL_df_path, sep=';')
import pandas as pd
import numpy as np
import random
from human_ISH_config import *
import math
import os
import sklearn
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import f1_score
from sklearn.metrics import roc_auc_score
from sklearn.ensemble import RandomForestClassifier
# DATA_DIR: This is defined in human_ISH_config.py. This is the directory you have defined to store all the data.
PATH_TO_SZ_STUDY = os.path.join(DATA_DIR, "schizophrenia")
PATH_TO_SZ_POST_PROCESS = os.path.join(PATH_TO_SZ_STUDY, "post_process_on_sz")
def get_sz_labels_image_and_donor_level(label):
"""
This function is used to select a certain column from the info csv file to be later used as a label in downstream tasks.
The main columns that we were interested are: "description" and "smoker"
"description" indicates whether the donor was case or control, and "smoker" indicates whether they smoked or not.
This information is available from the Allen website.
:param label: string. The column name to be used as label
:return: None
"""
path_to_sz_info = os.path.join(PATH_TO_SZ_STUDY, "human_ISH_info.csv")
sz_info_df = pd.read_csv(path_to_sz_info)
if label == 'description':
new_df = pd.DataFrame(columns=['ID', label])
# --------------- image level ---------------
new_df['ID'] = sz_info_df['image_id']
diagnosis = list(sz_info_df['description'])
image_sz_count = 0
image_no_sz_count = 0
for i in range(len(diagnosis)):
if "schizophrenia" in diagnosis[i]:
diagnosis[i] = True
image_sz_count +=1
elif "control" in diagnosis[i]:
diagnosis[i] = False
image_no_sz_count +=1
else:
diagnosis[i] = None
new_df[label] = diagnosis
file_name = "sz_diagnosis_as_label_image_level.csv"
new_df.to_csv(os.path.join(PATH_TO_SZ_POST_PROCESS, file_name), index=None)
print ("image sz count: ", image_sz_count)
print ("image no sz count: ", image_no_sz_count)
print ("total: ", image_sz_count + image_no_sz_count)
# --------------- donor level ---------------
group_by_donor = sz_info_df.groupby('donor_id')
donor_list=[]
diagnosis_list = []
donor_sz_count = 0
donor_no_sz_count = 0
for key, item in group_by_donor:
donor_list.append(key)
diagnosis = list(item['description'])[0]
if "schizophrenia" in diagnosis:
diagnosis_list.append(True)
donor_sz_count +=1
elif "control" in diagnosis:
diagnosis_list.append(False)
donor_no_sz_count +=1
else:
diagnosis_list.append(None)
new_df = pd.DataFrame(columns=['ID', label])
new_df['ID']= donor_list
new_df[label] = diagnosis_list
file_name = "sz_diagnosis_as_label_donor_level.csv"
new_df.to_csv(os.path.join(PATH_TO_SZ_POST_PROCESS, file_name), index=None)
print ("donor sz count: ", donor_sz_count)
print ("donor no sz count: ", donor_no_sz_count)
print ("total: ", donor_sz_count + donor_no_sz_count)
elif label in ['donor_age', 'donor_sex', 'smoker', 'pmi', 'tissue_ph', 'donor_race']:
new_df = pd.DataFrame(columns=['ID', label])
# --------------- image level ---------------
new_df['ID'] = sz_info_df['image_id']
new_df[label] = list(sz_info_df[label])
file_name = label + "_as_label_image_level.csv"
new_df.to_csv(os.path.join(PATH_TO_SZ_POST_PROCESS, file_name), index=None)
# --------------- donor level ---------------
group_by_donor = sz_info_df.groupby('donor_id')
donor_list = []
label_list = []
for key, item in group_by_donor:
donor_list.append(key)
label_list.append(list(item[label])[0])
new_df = pd.DataFrame(columns=['ID', label])
new_df['ID'] = donor_list
new_df[label] = label_list
file_name = label + "_as_label_donor_level.csv"
new_df.to_csv(os.path.join(PATH_TO_SZ_POST_PROCESS, file_name), index=None)
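# Illustrative calls with the two label columns discussed in the docstring:
#   get_sz_labels_image_and_donor_level("description")  # case/control diagnosis
#   get_sz_labels_image_and_donor_level("smoker")        # smoking status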
def embeddings_per_gene_per_donor(path_to_per_gene_per_donor_level_files, input_type, ts, embeddings_df):
"""
This function gets an image-level embedding file and outputs a donor-level csv file for each gene.
Each gene will have a separate csv file: gene_name.csv
Each row in the csv file will represent a donor.
The number of rows in the csv file is the number of donors on which this specific gene was tested.
We will use image level embeddings, then group them by gene. So each group will be all the images that assay the same gene.
Then, within each group, we will group the images again by donor_id and use the mean() function to take the average of the embeddings.
:param path_to_per_gene_per_donor_level_files: the path in which per gene donor-level files should be saved.
The directory will be created if it doesn't alredy exist.
:param input_type: str. Determine the type of input vectors.
Could be: ['embed','demog','demog_and_embed','random','plain_resnet']
:param ts: str. The timestamp that indicates which files to use.
:param embeddings_df: pandas data frame. Image-level embeddings.
:return: a list of genes
"""
# the embeddings are image level
path_to_sz_info = os.path.join(PATH_TO_SZ_STUDY, "human_ISH_info.csv")
sz_info_df = pd.read_csv(path_to_sz_info)
# add two extra columns: gene_symbol and donor_id to the embeddings
# if the file already has donor_id, don't add it
left = embeddings_df
left_cols = list(embeddings_df)
right = sz_info_df
if 'donor_id' in left_cols:
merge_res = pd.merge(left, right[['image_id', 'gene_symbol']], how='left', on='image_id')
else:
merge_res = pd.merge(left, right[['image_id','gene_symbol', 'donor_id']], how='left', on='image_id')
genes = list(merge_res['gene_symbol'].unique())
if input_type == 'random' or input_type == 'resnet':
# random and resnet do not require a timestamp
if (not os.path.exists(path_to_per_gene_per_donor_level_files)):
os.mkdir(path_to_per_gene_per_donor_level_files)
per_gene_per_donor_path = os.path.join(path_to_per_gene_per_donor_level_files, input_type + "_per_gene_per_donor")
if (not os.path.exists(per_gene_per_donor_path)):
os.mkdir(per_gene_per_donor_path)
else:
if (not os.path.exists(path_to_per_gene_per_donor_level_files)):
os.mkdir(path_to_per_gene_per_donor_level_files)
per_gene_per_donor_path = os.path.join(path_to_per_gene_per_donor_level_files, ts+ "_" + input_type +"_per_gene_per_donor")
if (not os.path.exists(per_gene_per_donor_path)):
os.mkdir(per_gene_per_donor_path)
group_by_gene = merge_res.groupby('gene_symbol')
for key, item in group_by_gene:
# key is gene_symbol
# item is the group of images that assay that gene
item = item.drop(columns=['image_id'])
group_by_donor = item.groupby('donor_id').mean()
gene_name = key
group_by_donor.to_csv(os.path.join(per_gene_per_donor_path, gene_name + ".csv"))
return genes
def demog_info_as_training(list_of_columns_to_get, path_to_image_level_embeddings, ts):
"""
For every image, it extracts the demographics info and adds them as new columns to the embeddings.
For 'smoker', 'donor_sex', and 'donor_race', it performs one-hot coding.
Everything needs to be image-level
Once we have the image-level embeddings, we can then aggregate to donor-level
:param path_to_image_level_embeddings: str. Patch to image-level embeddings. These are the embeddings that you want
to concatenae the demographic info to.
:param list_of_columns_to_get: list of demographic info columns to use
:param ts: timestamp indicating which model's embeddings to use
:return: None
"""
path_to_sz_info = os.path.join(PATH_TO_SZ_STUDY, "human_ISH_info.csv")
sz_info_df = pd.read_csv(path_to_sz_info)
list_of_columns_to_get = ['image_id', 'donor_id'] + list_of_columns_to_get
demog_df = sz_info_df[list_of_columns_to_get]
# ------ handle one-hot encoding ------
columns_needing_one_hot = ['smoker', 'donor_sex', 'donor_race']
keep = []
for item in columns_needing_one_hot:
if item in list_of_columns_to_get:
keep.append(item)
one_hot_dfs = []
for item in keep:
item_one_hot = pd.get_dummies(demog_df[item], prefix=item)
one_hot_dfs.append(item_one_hot)
for item in one_hot_dfs:
demog_df = pd.concat([demog_df, item], axis=1)
smoker_one_hot = pd.get_dummies(demog_df['smoker'], prefix='smoker')
sex_one_hot = pd.get_dummies(demog_df['donor_sex'], prefix='sex')
race_one_hot = pd.get_dummies(demog_df['donor_race'], prefix='race')
demog_df = demog_df.drop(columns=['smoker', 'donor_sex', 'donor_race'])
demog_df = pd.concat([demog_df, smoker_one_hot], axis=1)
demog_df = pd.concat([demog_df, sex_one_hot], axis=1)
demog_df = pd.concat([demog_df, race_one_hot], axis=1)
# -------------------------------------
file_name = ts+ "_demog_info_as_training_image_level.csv"
demog_df.to_csv(os.path.join(PATH_TO_SZ_POST_PROCESS, file_name), index=None)
grouped_df = demog_df.groupby(['donor_id']).mean()
grouped_df = grouped_df.drop(columns=['image_id'])
file_name = ts + "_demog_info_as_training_donor_level.csv"
grouped_df.to_csv(os.path.join(PATH_TO_SZ_POST_PROCESS, file_name))
# ---- merge with image-level embeddings ----
embeds_df = pd.read_csv( path_to_image_level_embeddings)
left = embeds_df
right = demog_df
merged_res = pd.merge(left, right, how='left', on='image_id')
file_name = ts + "_demog_info_and_embeddings_as_training_image_level.csv"
merged_res.to_csv(os.path.join(PATH_TO_SZ_POST_PROCESS, file_name), index=None)
grouped_df = merged_res.groupby(['donor_id']).mean()
grouped_df = grouped_df.drop(columns=['image_id'])
file_name = ts + "_demog_info_and_embeddings_as_training_donor_level.csv"
grouped_df.to_csv(os.path.join(PATH_TO_SZ_POST_PROCESS, file_name))
# -------------------------------------
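# Illustrative call (the column list mirrors the demographic fields handled above; the
# embeddings path and timestamp are placeholders):
#   demog_info_as_training(['donor_age', 'donor_sex', 'smoker', 'pmi', 'tissue_ph', 'donor_race'],
#                          path_to_image_level_embeddings="embeddings_image_level.csv",
#                          ts="2021_01_01")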
def perform_logistic_regression(path_to_embed_file, path_to_labels_file,level, n_splits =5, n_jobs = 1):
"""
This function performs logistic regression on a given dataset.
:param path_to_embed_file: str. Path to the donor-level embedding file that will be used as training input for the logistic regression model.
:param path_to_labels_file: str. Path to the file that will be used as ground truth lables for the logistic regression model.
:param level: the aggregation level of embeddings. It could be 'donor', 'gene', or 'image'
:param n_splits: number of splits for cross-validation
:param n_jobs: number of jobs
:return: a pandas data frame that has the AUC and f1 score.
"""
embeds_df = pd.read_csv(path_to_embed_file)
labels = pd.read_csv(path_to_labels_file)
labels = labels.rename(columns={'ID': level+'_id'})
left = embeds_df
right = labels
merge_res = pd.merge(left, right, how='left', on=level+"_id")
scores = []
skf = StratifiedKFold(n_splits=n_splits)
col_titles = list(embeds_df)[1:]
#col_titles = [str(item) for item in range(128)]
X = merge_res[col_titles]
Y = merge_res['disease_diagnosis']
f1_score_values = []
auc_values = []
for i, (train_idx, test_idx) in enumerate(skf.split(X, Y)):
model = LogisticRegression(penalty='none', n_jobs=n_jobs, max_iter=1000)
X_train = X.iloc[train_idx, :]
y_train = Y.iloc[train_idx]
X_test = X.iloc[test_idx, :]
y_test = Y.iloc[test_idx]
model.fit(X_train, y_train)
# Extract predictions from fitted model
preds = list(model.predict(X_test))
# probs for classes ordered in same manner as model.classes_
# model.classes_ >> array([False, True])
probas = pd.DataFrame(model.predict_proba(
X_test), columns=model.classes_)
# Get metrics for each model
f1 = f1_score(y_test, preds)
auc = roc_auc_score(y_test, probas[True])
f1_score_values.append(f1)
auc_values.append(auc)
print ("Finished fold: ", i+1)
print ("----" * 20)
f1 = np.mean(f1_score_values)
auc = np.mean(auc_values)
print ("FINAL: ", f1, auc)
measures = {'level': level,
'f1': f1,
'AUC': auc}
scores.append(measures)
    return pd.DataFrame(scores, columns=['level', 'AUC', 'f1'])
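# Illustrative call (file names are placeholders): the labels CSV is expected to provide an
# 'ID' column plus a boolean 'disease_diagnosis' column, the embeddings CSV a 'donor_id' column.
#   scores_df = perform_logistic_regression(
#       path_to_embed_file="embeddings_donor_level.csv",
#       path_to_labels_file="diagnosis_labels_donor_level.csv",
#       level="donor", n_splits=5)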
import numpy as np
import os
import io
import glob
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
import tensorflow_datasets as tfds
import itertools
import pickle
from collections import defaultdict
from sklearn.manifold import TSNE
from dtaidistance import dtw_ndim
from tslearn.metrics import cdist_dtw
from data.dataloader import Dataloader
from data.datagenerator import DatasetGenerator
from models.Blocks import get_positional_encoding
from matplotlib.widgets import MultiCursor, Slider, Button, TextBox
from util.misc import init_tf_gpus
color_pallete = ["#e6194B", "#ffe119", "#4363d8", "#f58231", "#42d4f4", "#f032e6", "#fabebe", "#469990", "#e6beff", "#9A6324", "#000000", "#800000", "#aaffc3", "#000075", "#a9a9a9", "#ffffff", "#3cb44b"]
def plot_to_image(figure):
"""Converts the matplotlib plot specified by 'figure' to a PNG image and
returns it. The supplied figure is closed and inaccessible after this call."""
# Save the plot to a PNG in memory.
buf = io.BytesIO()
plt.savefig(buf, format='png')
# Closing the figure prevents it from being displayed directly inside
# the notebook.
plt.close(figure)
buf.seek(0)
# Convert PNG buffer to TF image
image = tf.image.decode_png(buf.getvalue(), channels=4)
# Add the batch dimension
image = tf.expand_dims(image, 0)
return image
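# Typical TensorBoard pairing (sketch; assumes a summary file writer already exists):
#   with file_writer.as_default():
#       tf.summary.image("confusion_matrix", plot_to_image(figure), step=epoch)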
def plot_confusion_matrix(cm, class_names):
"""
Returns a matplotlib figure containing the plotted confusion matrix.
Args:
cm (array, shape = [n, n]): a confusion matrix of integer classes
class_names (array, shape = [n]): String names of the integer classes
"""
figure = plt.figure(figsize=(2, 2))
plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
plt.title("Confusion matrix")
plt.colorbar()
tick_marks = np.arange(len(class_names))
plt.xticks(tick_marks, class_names, rotation=45)
plt.yticks(tick_marks, class_names)
# Normalize the confusion matrix.
cm = np.around(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis], decimals=2)
# Use white text if squares are dark; otherwise black.
threshold = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
color = "white" if cm[i, j] > threshold else "black"
plt.text(j, i, cm[i, j], horizontalalignment="center", color=color)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
return figure
def plot_generated_signals(signals_0, signals_1):
    n_samples = 5  # len(signals_0)
colors = ["blue", "red", "green"]
x = range(len(signals_0[0]))
n_signals = len(signals_0[0][0])
fig, axs = plt.subplots(n_samples, 2, sharex=True)
for sample in range(n_samples):
for sig in range(n_signals):
axs[sample, 0].plot(x, signals_0[sample, :, sig], color=colors[sig])
axs[sample, 1].plot(x, signals_1[sample, :, sig], color=colors[sig])
plt.tight_layout()
return fig
def collect_labels(data):
arousal = []
valence = []
for _, label in data:
arousal.append(label.numpy()[0])
valence.append(label.numpy()[1])
labels = {"arousal": arousal,
"valence": valence}
    labels = pd.DataFrame(labels)
    return labels
# -*- coding: utf-8 -*-
"""
@created: 01/29/21
@modified: 01/29/21
@author: <NAME>
CentraleSupelec
MICS laboratory
9 rue <NAME>, Gif-Sur-Yvette, 91190 France
Defines internal classes user-level functions for building and plotting double heatmaps.
"""
import copy
from dataclasses import dataclass, field
import numpy as np
import pandas as pd
import os
from typing import Callable, Dict, List, Tuple, Union
from tqdm import tqdm
import sys
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import statsmodels.sandbox.stats.multicomp as mp
from scipy.stats import fisher_exact
# type aliases
DataFrame = pd.core.frame.DataFrame
Vector = Union[List[float], np.ndarray, pd.core.series.Series]
Table = Union[List[List[float]], np.ndarray, pd.core.frame.DataFrame]
def _cont_table(A: Vector, B: Vector) -> Table:
"""
Compute 2 x 2 contignecy table from A and B binary vectors.
Parameters
----------
A: array-like
A vector of binary entries
B: array-like
A vector of binary entries
Returns
-------
tab: array-like
A 2x2 contigency table.
"""
tab = np.zeros((2, 2))
A_anti = np.where(A==1, 0, 1)
B_anti = np.where(B==1, 0, 1)
tab[0,0] = np.sum(A*B)
tab[0,1] = np.sum(A*B_anti)
tab[1,0] = np.sum(A_anti*B)
tab[1,1] = np.sum(A_anti*B_anti)
return tab
def _odds_ratio(tab: Table) -> float:
"""
Computes the odds ratio of a contigency table
-------------------
a b
c d
-------------------
as (a/b)/(c/d) or ad/bc
Parameters
----------
tab: array-like
The table.
Returns
-------
_odds_ratio: float
"""
if tab[0,1] == 0 or tab[1,0] == 0:
_odds_ratio = tab[0,0] * tab[1,1] / max(tab[1,0], tab[0,1], 1)
else:
_odds_ratio = tab[0,0] * tab[1,1] / (tab[1,0] * tab[0,1])
return _odds_ratio
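# Worked example (illustrative): with exactly one observation per cell,
#   _cont_table(np.array([1, 1, 0, 0]), np.array([1, 0, 1, 0]))  ->  [[1, 1], [1, 1]]
# and the corresponding odds ratio is (1*1)/(1*1) = 1.0.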
class _DoubleHeatmapBuild(object):
def __init__(self, pair_count="cooccurrence", pair_ratio="odds", pair_test="fisher_exact"):
"""
Parameters
----------
pair_count: str, default="cooccurrence"
Either a string or a callable taking as input two iterables of the same size (lists or arrays) and that
returns a float. For each pair of variables, this will be plotted in one half of the heatmap.
pair_ratio: str, default="odds"
Either a string, a dataframe or a callable taking as input two iterables of the same size (lists or arrays)
and that returns a float. For each pair of variables, this will be plotted in one half of the heatmap.
pair_test: str, default="fisher_exact"
Either a string None or a callable taking as input two iterables of the same size (lists or arrays) and that
returns a p-value. Pairs that have a significant test will have a star above their cell.
"""
self.pair_count = pair_count
self.pair_ratio = pair_ratio
self.pair_test = pair_test
def _pair_count(self, A, B):
        if isinstance(self.pair_count, str) and self.pair_count == "cooccurrence":
assert set(A).issubset(set([0,1]))
assert set(B).issubset(set([0,1]))
return sum((A==1) & (B==1))
elif isinstance(self.pair_count, Callable):
return self.pair_count(A,B)
else:
raise ValueError("Invalid value for parameter 'pair_count'. Specify a Callable or one of 'cooccurrence'")
def _pair_ratio(self, A, B):
if isinstance(self.pair_ratio, str) and self.pair_ratio == "odds":
c_tab = _cont_table(A, B)
ratio = _odds_ratio(c_tab)
return ratio
elif isinstance(self.pair_ratio, Callable):
return self.pair_ratio(A,B)
else:
raise ValueError("Invalid value for parameter 'pair_ratio'. Specify a Callable or one of 'cooccurrence'")
def _pair_test(self, A, B):
if self.pair_test is None:
return None
if type(self.pair_test) == str and self.pair_test == "fisher_exact":
c_tab = _cont_table(A, B)
_, pval = fisher_exact(c_tab)
return pval
else:
return self.pair_test(A,B)
def _build_half_matrix(self, df, pair, use_diagonal=True):
"""
Builds a half matrix of size (n_var, n_var) from a matrix of size (n_obs, n_var).
Parameters
----------
df: array-like, (n_obs, n_var)
It defines the values used to build the half matrix
pair:
A callable function taking as input two iterables of the same size (lists or arrays) and that returns a
float. For each pair of variables, the float will be fill the half-matrix.
Returns
-------
half_df:
Half-filled matrix
"""
vars = df.columns.tolist()
n_vars = len(vars)
m_half = []
if use_diagonal:
for i in tqdm(range(n_vars)):
l_half = [np.nan for _ in range(n_vars)]
for j in range(0, i + 1):
l_half[j] = pair(df[vars[i]], df[vars[j]])
m_half.append(l_half)
else:
m_half.append([np.nan for _ in range(n_vars)])
for i in tqdm(range(1, n_vars)):
l_half = [np.nan for _ in range(n_vars)]
for j in range(0, i):
l_half[j] = pair(df[vars[i]], df[vars[j]])
m_half.append(l_half)
df_half = pd.DataFrame(m_half, vars)
df_half.columns = df.columns
return df_half
def build_half_matrices(self, df_values, df_active=None):
"""
Builds one, two or three half-matrices from a matrix of activation and a matrix of values of size (n_obs, n_var).
Each half-matrix is a square matrix of size (n_var, n_var).
Parameters
----------
df_values: array-like, (n_obs, n_var)
It defines the values used to build the half matrices of ratios and tests in observations x variables
format.
df_active: array-like, (n_obs, n_var) default=None
If None, df_active=df_values. It defines the binary activation indicator of variables in sample used to
build the half matrix of counts in observations x variables format.
Returns
-------
dfs: dict of dataframe
Dict containing the half-matrices of "count", "ratio" and "test"
"""
if df_active is None:
df_active = df_values
if self.pair_count is None:
df_count = None
else:
df_count = self._build_half_matrix(df_active, self._pair_count)
if self.pair_ratio is None:
df_ratio = None
else:
df_ratio = self._build_half_matrix(df_values, self._pair_ratio, use_diagonal=False)
if self.pair_test is None:
df_test = None
else:
df_test = self._build_half_matrix(df_values, self._pair_test, use_diagonal=False)
return {"count": df_count, "ratio": df_ratio, "test": df_test}
def build_double_heatmap(df_values, df_active=None, pair_count="cooccurrence", pair_ratio="odds",
pair_test="fisher_exact"):
"""
Builds one, two or three half-matrices from a matrix of activation and a matrix of values of size (n_obs, n_var).
Each half-matrix is a square matrix of size (n_var, n_var).
Parameters
----------
pair_count: str, default="cooccurrence"
Either a string or a callable taking as input two iterables of the same size (lists or arrays) and that
returns a float. For each pair of variables, this will be plotted in one half of the heatmap.
pair_ratio: str, default="odds"
Either a string, a dataframe or a callable taking as input two iterables of the same size (lists or arrays)
and that returns a float. For each pair of variables, this will be plotted in one half of the heatmap.
pair_test: str, default="fisher_exact"
Either a string None or a callable taking as input two iterables of the same size (lists or arrays) and that
returns a p-value. Pairs that have a significant test will have a star above their cell.
df_values: array-like, (n_obs, n_var)
It defines the values used to build the half matrices of ratios and tests in observations x variables
format.
df_active: array-like, (n_obs, n_var) default=None
If None, df_active=df_values. It defines the binary activation indicator of variables in sample used to
build the half matrix of counts in observations x variables format.
Returns
-------
dfs: dict of dataframe
Dict containing the half-matrices of "count", "ratio" and "test"
"""
builder = _DoubleHeatmapBuild(pair_count, pair_ratio, pair_test)
return builder.build_half_matrices(df_values, df_active)
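# Minimal usage sketch (illustrative): a binary observations x variables matrix in, a dict of
# half-filled square matrices out.
#   df = pd.DataFrame({"g1": [1, 0, 1, 1], "g2": [1, 0, 0, 1], "g3": [0, 1, 0, 1]})
#   halves = build_double_heatmap(df)
#   halves["count"]  # co-occurrence counts
#   halves["ratio"]  # odds ratios
#   halves["test"]   # Fisher exact p-values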
def default_field(obj):
return field(default_factory=lambda: obj)
@dataclass
class DoubleHeatmapConfig:
figure: Dict[str, Union[str,tuple]] = default_field({
"figsize": (8,8),
"dpi": 300,
"n_grid": 10,
})
heatmap: Dict[str, Union[int, str, bool, float]] = default_field({
"orientation" : "antidiagonal",
"xticklabels" : True,
"yticklabels" : True,
"ticks_labelsize" : 8,
"xticks_labelrotation" : 90,
"yticks_labelrotation" : 0,
"linecolor" : "white",
"linewidths" : 0.5,
"square" : True,
})
legend: Dict[str, Union[int, float, str]] = default_field({
'edgecolor': 'k',
'fancybox': False,
'facecolor': 'w',
'fontsize': 10,
'framealpha': 1,
'frameon': False,
'handle_length': 1,
'handle_height': 1.125,
'title_fontsize': 12,
})
count: Dict[str, Union[int, float, str, bool]] = default_field({
'boundaries' : [1,5,10,15,20,50,200,500],
'auto_boundaries' : {"n": 7, "decimals": 0, "middle": None, "regular": True},
'cmap' : sns.color_palette("Blues", n_colors=7, as_cmap=True),
'cbar_fraction' : 0.25,
'cbar_aspect' : None,
'cbar_reverse' : True,
'cbar_xy' : (0, 0.5),
'cbar_title' : "Counts",
'cbar_title_fontsize' : 12,
'cbar_title_pad' : 6,
'cbar_ticks_rotation' : 0,
'cbar_ticks_length' : 5,
'cbar_ticks_labelsize': 8,
'cbar_ticks_pad' : 4,
})
ratio: Dict[str, Union[int, float, str]] = default_field({
'boundaries' : [0.001, 0.01, 0.1, 1, 10, 100, 1000],
'auto_boundaries' : {"n": 7, "decimals": 0, "middle": None, "regular": True},
'cmap' : sns.diverging_palette(50, 200, s=90, l=50, sep=1, as_cmap=True),
'cbar_fraction' : 0.25,
'cbar_aspect' : None,
'cbar_reverse' : False,
'cbar_xy' : (0.5, 0.1),
'cbar_title' : "Ratios",
'cbar_title_pad' : 6,
'cbar_title_fontsize' : 12,
'cbar_ticks_rotation' : 0,
'cbar_ticks_length' : 5,
'cbar_ticks_labelsize': 8,
'cbar_ticks_pad' : 4,
})
test: Dict[str, Union[int, float, str]] = default_field({
'pval_level': 0.05,
'fwer_level': 0.05,
'fdr_level': 0.1,
'fwer_size': 10,
'fwer_marker': '*',
'fwer_color': 'black',
'fdr_size': 1,
'fdr_marker': 's',
'fdr_color': 'black',
})
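# The defaults above can be overridden field by field before plotting (illustrative):
#   cfg = DoubleHeatmapConfig()
#   cfg.heatmap["orientation"] = "diagonal"
#   cfg.count["cbar_title"] = "Co-occurrences"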
class _DoubleHeatmapPlot(object):
def __init__(self, df_count: DataFrame, df_ratio: DataFrame, df_test: DataFrame, config: DoubleHeatmapConfig):
"""
Plots double heatmap.
Parameters
----------
df_count: pandas.core.frame.DataFrame
Pandas half-filled dataframe of counts.
df_ratio: pandas.core.frame.DataFrame
Pandas half-filled dataframe of ratios.
df_test: pandas.core.frame.DataFrame
Pandas half-filled dataframe of p-values.
config: DoubleHeatmapConfig
Graphical parameters.
"""
self.df_count = df_count.copy()
self.df_ratio = df_ratio.copy()
self.df_test = df_test.copy()
self.n_var = self.df_count.shape[0]
self.config = config
self._check_config(config)
self._automatic_config()
def _check_config(self, config):
for cmap in [self.config.ratio["cmap"], self.config.count["cmap"]]:
if not isinstance(config.ratio["cmap"], cm.colors.LinearSegmentedColormap):
raise ValueError("""Please specify color maps of that are instances of LinearSegmentedColormap
as produced by the sns.color_palette with cmap=True function for instance""")
if self.config.heatmap["orientation"] not in ["diagonal", "antidiagonal"]:
raise ValueError("%s is invalid for heatmap orientation. Choose 'diagonal' or 'antidiagonal'" %
self.config.heatmap["orientation"] == "antidiagonal")
def _automatic_boundaries(self, df, use_diagonal=True, n=9, middle=None, decimals=1):
if use_diagonal:
vals = np.array([self.df_ratio.iloc[i,j] for i in range(self.n_var) for j in range(i)])
else:
vals = np.array([self.df_ratio.iloc[i,j] for i in range(1,self.n_var) for j in range(i-1)])
min_val = np.round(min(vals), decimals=decimals)
max_val = np.round(max(vals), decimals=decimals)
if middle is not None:
            below_middle = pd.qcut(vals[vals < middle], q=(n-1)//2).categories.mid.values
            below_middle = list(np.round(below_middle, decimals=decimals))
            above_middle = pd.qcut(vals[vals > middle], q=(n-1)//2).categories.mid.values
            above_middle = list(np.round(above_middle, decimals=decimals))
            boundaries = [min_val] + below_middle + [middle] + above_middle + [max_val]
else:
            inbetween = pd.qcut(vals, q=n-1)
import pandas as pd
from SALib.analyze.radial_ee import analyze as ee_analyze
from SALib.analyze.sobol_jansen import analyze as jansen_analyze
from SALib.plotting.bar import plot as barplot
# results produced with
# python launch.py --specific_inputs oat_mc_10_samples.csv --num_cores 48
# python launch.py --specific_inputs oat_cim_extremes.csv --num_cores 2
# python launch.py --specific_inputs moat_10_samples.csv --num_cores 46
from .settings import *
data_dir = indir
problem = {
'num_vars': 53,
'names': ['Farm___Crops___variables___Dryland_Winter_Barley___root_depth_m',
'Farm___Crops___variables___Dryland_Winter_Barley___water_use_ML_per_Ha',
'Farm___Crops___variables___Dryland_Winter_Barley___yield_per_Ha',
'Farm___Crops___variables___Dryland_Winter_Canola___root_depth_m',
'Farm___Crops___variables___Dryland_Winter_Canola___water_use_ML_per_Ha',
'Farm___Crops___variables___Dryland_Winter_Canola___yield_per_Ha',
'Farm___Crops___variables___Dryland_Winter_Wheat___root_depth_m',
'Farm___Crops___variables___Dryland_Winter_Wheat___water_use_ML_per_Ha',
'Farm___Crops___variables___Dryland_Winter_Wheat___yield_per_Ha',
'Farm___Crops___variables___Irrigated_Winter_Barley___root_depth_m',
'Farm___Crops___variables___Irrigated_Winter_Barley___water_use_ML_per_Ha',
'Farm___Crops___variables___Irrigated_Winter_Barley___yield_per_Ha',
'Farm___Crops___variables___Irrigated_Winter_Canola___root_depth_m',
'Farm___Crops___variables___Irrigated_Winter_Canola___water_use_ML_per_Ha',
'Farm___Crops___variables___Irrigated_Winter_Canola___yield_per_Ha',
'Farm___Crops___variables___Irrigated_Winter_Wheat___root_depth_m',
'Farm___Crops___variables___Irrigated_Winter_Wheat___water_use_ML_per_Ha',
'Farm___Crops___variables___Irrigated_Winter_Wheat___yield_per_Ha',
'Farm___Fields___soil___zone_10___TAW_mm',
'Farm___Fields___soil___zone_11___TAW_mm',
'Farm___Fields___soil___zone_12___TAW_mm',
'Farm___Fields___soil___zone_1___TAW_mm',
'Farm___Fields___soil___zone_2___TAW_mm',
'Farm___Fields___soil___zone_3___TAW_mm',
'Farm___Fields___soil___zone_4___TAW_mm',
'Farm___Fields___soil___zone_5___TAW_mm',
'Farm___Fields___soil___zone_6___TAW_mm',
'Farm___Fields___soil___zone_7___TAW_mm',
'Farm___Fields___soil___zone_8___TAW_mm',
'Farm___Fields___soil___zone_9___TAW_mm',
'Farm___Irrigations___Gravity___cost_per_Ha',
'Farm___Irrigations___Gravity___head_pressure',
'Farm___Irrigations___Gravity___irrigation_efficiency',
'Farm___Irrigations___Gravity___pumping_cost_per_ML',
'Farm___Irrigations___PipeAndRiser___cost_per_Ha',
'Farm___Irrigations___PipeAndRiser___head_pressure',
'Farm___Irrigations___PipeAndRiser___irrigation_efficiency',
'Farm___Irrigations___PipeAndRiser___pumping_cost_per_ML',
'Farm___Irrigations___Spray___cost_per_Ha',
'Farm___Irrigations___Spray___head_pressure',
'Farm___Irrigations___Spray___irrigation_efficiency',
'Farm___Irrigations___Spray___pumping_cost_per_ML',
'Farm___zone_10___Irrigation', 'Farm___zone_11___Irrigation',
'Farm___zone_2___Irrigation', 'Farm___zone_4___Irrigation',
'Farm___zone_6___Irrigation', 'Farm___zone_7___Irrigation',
'Farm___zone_8___Irrigation', 'Farm___zone_9___Irrigation',
'policy___goulburn_allocation_scenario', 'policy___gw_cap',
'policy___gw_restriction'],
'bounds': [(0.80008164104, 1.49988829764),
(1.50055050742, 2.99888102069),
(1.5019032420200003, 3.4997506932099998),
(0.800586478968, 1.4996985073),
(2.50048002895, 5.9984797603299995),
(0.801052350325, 2.59824297051),
(0.800504246618, 1.49975544648),
(2.5014981435299997, 5.9979681912),
(1.5004709810799999, 5.99716646463),
(0.800280272497, 1.49937425734),
(1.5009590614, 2.9992559947000004),
(2.50329796931, 6.996816011819999),
(0.800211596215, 1.49974890273),
(2.0025975557, 5.99742468979),
(1.3008100600299999, 4.99958661017),
(0.8000586077680001, 1.7993585851400002),
(2.50005748529, 5.99920182664),
(1.5021921746899998, 7.99719295089),
(150.013080285, 199.99630294),
(145.01266211, 184.97447762599998),
(145.036691741, 184.96132256099997),
(145.017973816, 184.964659778),
(145.009985077, 184.987775366),
(100.017759932, 159.950281059),
(100.00893349, 159.939807798),
(150.002663759, 199.995911171),
(150.049539279, 199.966206716),
(75.011883698, 109.982509833),
(100.007801344, 159.986958043),
(145.015806747, 184.983072651),
(2000.04766978, 2499.9660698000002),
(8.00489093285, 14.999582054100001),
(0.500092622216, 0.8998440697460001),
(8.0072724319, 14.9995752798),
(2000.65212205, 3299.41488388),
(8.00365090987, 14.9983740134),
(0.600018657025, 0.899703908987),
(8.005434387660001, 14.9933485659),
(2500.62094903, 3499.76177012),
(25.0039236705, 34.9957834096),
(0.7001056060199999, 0.8998137827079999),
(30.000316497100002, 59.9914045149),
(0.0, 1.0),
(0.0, 1.0),
(0.0, 1.0),
(0.0, 2.0),
(0.0, 1.0),
(0.0, 1.0),
(0.0, 1.0),
(0.0, 2.0),
(0.0, 2.0),
(0.600156362739, 0.999676343195),
(0.0, 1.0)]
}
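# Added sketch (not part of the original analysis, never called below): tabulate the
# SALib-style problem definition above as a DataFrame so the 53 parameter names and
# their (lower, upper) bounds are easy to inspect side by side.
def problem_bounds_table(problem_spec):
    """Illustrative helper only; assumes the problem dict defined above."""
    return pd.DataFrame(problem_spec['bounds'],
                        index=problem_spec['names'],
                        columns=['lower', 'upper'])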
def collect_results(problem, oat_length, reps, np_res, numeric_vals):
jansen_results_df = pd.DataFrame()
ee_results_df = pd.DataFrame()
rep_length = oat_length * reps
_, cols = np_res.shape
for col in range(cols):
cn = col_names[col]
res = np_res[:rep_length, col]
si = jansen_analyze(problem, res, reps, seed=101)
js_df = si.to_df()
js_df.columns = ['{}_{}'.format(cn, suf) for suf in js_df.columns]
jansen_results_df = pd.concat([jansen_results_df, js_df], axis=1)
si = ee_analyze(problem, numeric_vals[:rep_length],
res, reps, seed=101)
ee_df = si.to_df()
ee_df.columns = ['{}_{}'.format(cn, suf) for suf in ee_df.columns]
ee_results_df = pd.concat([ee_results_df, ee_df], axis=1)
return jansen_results_df, ee_results_df
# End collect_results()
def plot_results(jansen_results_df, ee_results_df, target_metric):
# STs = [c for c in jansen_results_df.columns if '_conf' not in c and target_metric in c]
idx = [True if 'irrigation' in r.lower() else False for r in jansen_results_df.index]
# ax = jansen_results_df.loc[idx, STs].plot(kind='bar', figsize=(10,6))
# ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
tgt_idx = [c for c in ee_results_df.columns if target_metric.lower() in c.lower()]
ax = ee_results_df.loc[idx, tgt_idx].plot(kind='bar', figsize=(10,6))
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
# End plot_results()
template_df = pd.read_csv(f'{data_dir}example_sample.csv', index_col=0)
is_perturbed = (template_df != template_df.iloc[0]).any()
perturbed_cols = template_df.loc[:, is_perturbed].columns
target_num_vars = problem['num_vars']
oat_length = target_num_vars + 1
target_metric = "SW Allocation Index"
### Extreme values without interactions ###
numeric_samples = pd.read_csv(f'{data_dir}extreme_numeric_samples.csv', index_col=0)
numeric_samples = numeric_samples[perturbed_cols]
numeric_vals = numeric_samples.values
extreme_results = pd.read_csv(f'{data_dir}no_irrigation_extreme_results.csv', index_col=0)
np_res = extreme_results.values
col_names = extreme_results.columns
extreme_results = {}
for i in range(len(col_names)):
x_diff = (numeric_vals[0, :] - numeric_vals[1, :])
y_diff = (np_res[0, i] - np_res[1, i])
extreme_results[col_names[i]] = y_diff / x_diff
# End for
no_ext_results = pd.DataFrame(extreme_results, index=perturbed_cols).T
no_ext_results.columns = [c.replace('Farm___Irrigations___', '') for c in no_ext_results.columns]
tgt_cols = [c for c in no_ext_results.columns if 'gravity___irrigation_efficiency' in c.lower()]
# no_ext_results.loc[tgt_idx, tgt_cols].plot(kind='bar', legend=None)
### Extremes with interactions ###
extreme_results = pd.read_csv(f'{data_dir}with_irrigation_extreme_results.csv', index_col=0)
np_res = extreme_results.values
col_names = extreme_results.columns
extreme_results = {}
for i in range(len(col_names)):
x_diff = (numeric_vals[0, :] - numeric_vals[1, :])
y_diff = (np_res[0, i] - np_res[1, i])
extreme_results[col_names[i]] = abs(y_diff / x_diff)
# End for
with_ext_results = pd.DataFrame(extreme_results, index=perturbed_cols).T
tgt_idx = [c for c in with_ext_results.index if target_metric.lower() in c.lower()]
with_ext_results.columns = [c.replace('Farm___Irrigations___', '') for c in with_ext_results.columns]
tgt_cols = [c for c in with_ext_results.columns if 'gravity___irrigation_efficiency' in c.lower()]
import matplotlib.pyplot as plt
fig, axes = plt.subplots(1,2, figsize=(12,4), sharey=True)
no_ext_results.loc[tgt_idx, tgt_cols].plot(kind='bar', legend=None,
title='Disabled interactions',
ax=axes[0])
axes[0].set_ylabel('Absolute Change')
with_ext_results.loc[tgt_idx, tgt_cols].plot(kind='bar',
legend=None,
title='Enabled interactions',
ax=axes[1]).legend(
bbox_to_anchor=(1.75, 0.65)
)
fig.suptitle("Extremity Testing", x=0.4, y=1.05, fontsize=14)
fig.tight_layout()
fig.savefig(f'{fig_dir}extremity_testing_results.png', dpi=300, bbox_inches='tight')
### Larger samples
# Without irrigation interaction with SW model
numeric_samples = pd.read_csv(f'{data_dir}oat_mc_10_numeric_samples.csv', index_col=0)
numeric_samples = numeric_samples[perturbed_cols]
numeric_vals = numeric_samples.values
oat_10_no_irrigation_results = pd.read_csv(f'{data_dir}oat_no_irrigation_10_results.csv', index_col=0)
np_res = oat_10_no_irrigation_results.values
mu_star_col = target_metric + '_mu_star'
sigma_col = target_metric + '_sigma'
fig, axes = plt.subplots(1,2, figsize=(12,4), sharey=True)
res = {'mu_star': {}, 'sigma': {}}
tgt_param = 'Farm___Irrigations___Gravity___irrigation_efficiency'
for reps in range(1, 11):
jansen_results_df, ee_results_df = collect_results(problem, oat_length, reps, np_res, numeric_vals)
runs = reps * oat_length
res['mu_star'][runs] = ee_results_df.loc[tgt_param, mu_star_col]
res['sigma'][runs] = ee_results_df.loc[tgt_param, sigma_col]
oat_no_interaction = pd.DataFrame(data=res)
oat_no_interaction.plot(kind='bar', ax=axes[0], title='Disabled Interactions')
# With irrigation interaction with SW model
oat_10_with_irrigation_results = pd.read_csv(f'{data_dir}oat_with_irrigation_10_results.csv', index_col=0)
np_res = oat_10_with_irrigation_results.values
res = {'mu_star': {}, 'sigma': {}}
for reps in range(1, 11):
jansen_results_df, ee_results_df = collect_results(problem, oat_length, reps, np_res, numeric_vals)
runs = reps * oat_length
res['mu_star'][runs] = ee_results_df.loc[tgt_param, mu_star_col]
res['sigma'][runs] = ee_results_df.loc[tgt_param, sigma_col]
oat_with_interaction = | pd.DataFrame(data=res) | pandas.DataFrame |
#!/usr/bin/env python
import os
import glob
import sys
import shutil
import pdb
import re
from argparse import ArgumentParser
import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
import seaborn as sns
sys.path.insert(0,'..')
import ESM_utils as esm
from scipy.optimize import curve_fit
from sklearn.preprocessing import MinMaxScaler
def create_ab_prob_all_visits_df(file_paths, genetic_df, clinical_df, pib_df):
ab_prob_df_list = []
for i, fp in enumerate(file_paths):
ab_curr_prob_df = pd.read_csv(file_paths[i], index_col=0)
visit = file_paths[i].split(".")[-2].split("_")[-1]
ab_curr_prob_df.loc[:, 'visit'] = visit
#drop participants that did not pass QC according to PUP's PET processing
for sub in ab_curr_prob_df.index:
if not ((pib_df['IMAGID'] == sub) & (pib_df['visit'] == visit)).any():
ab_curr_prob_df = ab_curr_prob_df[ab_curr_prob_df.index != sub]
ab_prob_df_list.append(ab_curr_prob_df)
#concatenate all dataframes
ab_prob_all_visits_df = pd.concat(ab_prob_df_list)
#add metadata to the dataframe
ab_prob_all_visits_df = add_metadata_to_amyloid_df(ab_prob_all_visits_df,
genetic_df,
clinical_df)
return ab_prob_all_visits_df
def add_metadata_to_amyloid_df(df, genetic_df, clinical_df):
for sub in df.index:
sub_df = df[df.index == sub]
visits = list(sub_df.visit)
mutation = genetic_df[(genetic_df.IMAGID == sub)].Mutation.values[0]
for i in range(0, len(visits)):
visit = visits[i]
dian_eyo = clinical_df[(clinical_df.IMAGID == sub) & (clinical_df.visit == visit)].DIAN_EYO.values
age = clinical_df[(clinical_df.IMAGID == sub) & (clinical_df.visit == visit)].VISITAGEc.values
if len(dian_eyo) == 0:
print(sub + " " + visit)
if len(dian_eyo) > 0:
df.loc[(df.index == sub) & (df.visit == visit), "DIAN_EYO"] = dian_eyo[0]
df.loc[(df.index == sub) & (df.visit == visit), "VISITAGEc"] = age[0]
df.loc[(df.index == sub) & (df.visit == visit), "visitNumber"] = i + 1
df.loc[(df.index == sub) & (df.visit == visit), "Mutation"] = mutation
return df
def get_rois_to_analyze(roi_colnames, rois_to_exclude):
roi_cols_to_exclude = []
for col in roi_colnames:
for rte in rois_to_exclude:
if rte in col.lower():
roi_cols_to_exclude.append(col)
    roi_cols_to_keep = [x for x in roi_colnames if x not in roi_cols_to_exclude]
    return roi_cols_to_keep, roi_cols_to_exclude
def exclude_subcortical_rois(df, roi_cols_to_exclude):
df[roi_cols_to_exclude] = 0
return df
def stripplot_subcortical_mc_nc(ab_prob_df):
plt.figure(figsize=(10,10))
nrows = 2
ncols = 2
subcortical_rois = ["Left Thalamus", "Left Caudate", "Left Putamen", "Left Globus Pallidus"]
for i, roi in enumerate(subcortical_rois):
j = i + 1
plt.subplot(nrows, ncols, j)
sns.stripplot(x="Mutation", y=roi, data=ab_prob_df, size=3)
plt.title(roi, fontsize=12)
plt.ylabel("")
#plt.xticks(["Noncarrier", "Mutation Carrier"])
plt.tight_layout()
plt.savefig(os.path.join("../../figures", "mc_nc_roi_stripplot.png"))
plt.close()
def sort_df(ab_prob_df):
# sort subjects
ind_sorter = pd.DataFrame(ab_prob_df,copy=True)
ind_sorter.loc[:,'mean'] = ab_prob_df.mean(axis=1)
ind_order = ind_sorter.sort_values('mean',axis=0,ascending=True).index
# column sorter
col_sorter = pd.DataFrame(ab_prob_df,copy=True)
col_sorter.loc['mean'] = ab_prob_df.mean(axis=0)
col_order = col_sorter.sort_values('mean',axis=1,ascending=False).columns
ab_prob_df_sorted = ab_prob_df.loc[ind_order, col_order]
return ab_prob_df_sorted
def fsigmoid(x, a, b):
# Define sigmoid function
return 1.0 / (1.0 + np.exp(-a*(x-b)))
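# Added sketch (illustrative only, not called anywhere): fsigmoid equals 0.5 at
# x == b and saturates towards 0/1 away from b, which is the shape curve_fit fits below.
def _fsigmoid_example():
    x = np.array([0.0, 5.0, 10.0])
    return fsigmoid(x, a=1.0, b=5.0)  # approximately [0.0067, 0.5, 0.9933]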
def zscore_mc_nc(ab_prob_df_mc, ab_prob_df_nc, roi_cols):
ab_prob_df_mc_zscore = ab_prob_df_mc.copy()
for roi in roi_cols:
mc_roi_vals = ab_prob_df_mc.loc[:, roi]
nc_roi_vals = ab_prob_df_nc.loc[:, roi]
mc_roi_vals_zscore = (mc_roi_vals-nc_roi_vals.mean())/nc_roi_vals.std()
ab_prob_df_mc_zscore.loc[:, roi] = np.absolute(mc_roi_vals_zscore)
scaler = MinMaxScaler()
ab_prob_df_mc_zscore[roi_cols] = scaler.fit_transform(ab_prob_df_mc_zscore[roi_cols])
return ab_prob_df_mc_zscore
def sigmoid_normalization(ab_prob_df):
'''
For each ROI, a sigmoidal function is fit to the values across all
individuals to estimate the parameters of a sigmoid for this ROI.
The original ROI signal is rescaled by a multiple of this sigmoid
(1/2 the original signal + 1/2 orig * sigmoid).
ab_prob_df -- A subject x ROI matrix of AB binding probabilities (pandas DataFrame).
'''
# sort the original signal first
ab_prob_df_sorted = sort_df(ab_prob_df)
ab_prob_df_scaled = pd.DataFrame(index=ab_prob_df_sorted.index, columns=ab_prob_df_sorted.columns)
for roi in ab_prob_df_sorted.columns:
vals = ab_prob_df_sorted[roi]
vals_idx = np.arange(0, len(vals))
popt, pcov = curve_fit(fsigmoid, vals_idx, vals, method='dogbox', bounds=([0,0],[1, len(vals)]))
x = np.linspace(0, len(vals), num=len(vals))
y = fsigmoid(x, *popt)
# wt1 and wt2 correspond to how much we're scaling the contribution of original
# and rescaled signals
wt1, wt2 = 1, 1
vals_scaled = (wt1*y + wt2*vals) / 2
ab_prob_df_scaled.loc[:, roi] = vals_scaled.values
ab_prob_df_scaled = ab_prob_df_scaled.loc[ab_prob_df.index, ab_prob_df.columns]
return ab_prob_df_scaled
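# Added sketch with hypothetical random data (not part of the original pipeline and
# never called): shows the intended call shape of sigmoid_normalization on a
# subject x ROI probability matrix.
def _sigmoid_normalization_example(n_subjects=20, n_rois=4):
    demo = pd.DataFrame(np.random.rand(n_subjects, n_rois),
                        columns=['roi_%d' % i for i in range(n_rois)],
                        index=['sub_%d' % i for i in range(n_subjects)])
    return sigmoid_normalization(demo)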
def plot_roi_sub_heatmap(ab_prob_df, roi_cols):
path = os.path.join("../../figures/roi_sub_heatmap.png")
esm.Plot_Probabilites(ab_prob_df[roi_cols], cmap="Spectral_r", figsize=(20,10), path=path)
def main(args):
parser = ArgumentParser()
parser.add_argument("--ab_prob_matrix_dir",
help="Please pass the files directory containing the PiB-PET probability matrices")
parser.add_argument("--esm_input_file",
help="Please provide desired ESM input filename.")
parser.add_argument("--connectivity_type",
help="Specify type of connectivity, e.g. FC or ACP",
default="ACP")
parser.add_argument("--epicenters_for_esm",
help="Please provide a list of regions to test as \
epicenters (all lower-case)",
nargs="+",
type=str,
default=None)
parser.add_argument("--zscore",
help="Should the amyloid beta probabilities be z-scored.",
default=False,
type=bool)
parser.add_argument("--threshold",
help="Should the amyloid beta probabilities be thresholded.",
default=False,
type=bool)
parser.add_argument("--scale",
type=bool,
default=False,
help="Should the amyloid beta probabilities be within ROI sigmoid normalized.")
parser.add_argument("--visitNumber",
default=1,
type=int)
results = parser.parse_args() if args is None else parser.parse_args(args)
#results = parser.parse_args(args)
ab_prob_matrix_dir = results.ab_prob_matrix_dir
print(ab_prob_matrix_dir)
esm_input_file = results.esm_input_file
connectivity_type = results.connectivity_type
epicenters_for_esm = results.epicenters_for_esm
zscore = results.zscore
scale = results.scale
threshold = results.threshold
visitNumber = results.visitNumber
if connectivity_type == "ACP":
conn_file = '../../data/DIAN/connectivity_matrices/Matrix_ACP.mat'
elif connectivity_type == "FC":
conn_file = '../../data/DIAN/connectivity_matrices/DIAN_FC_NC_Correlation_Matrix_Avg_ReducedConfounds.mat'
if scale == True:
esm_input_file = esm_input_file + "_scaled"
file_paths = sorted(glob.glob(ab_prob_matrix_dir))
pib_df = pd.read_csv("../../data/DIAN/participant_metadata/pib_D1801.csv")
genetic_df = | pd.read_csv("../../data/DIAN/participant_metadata/GENETIC_D1801.csv") | pandas.read_csv |
# testing_framework.py
# This script evaluates an arbitrary number of classifier objects and outputs the results.
# It builds a class that takes in a model object and outputs a dataframe with the predictions.
# The benefit of this approach is that we can initialize the class a single time, then feed different datasets in to test.
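# (Sketch of the intended flow; the FitPipelines interface is assumed here rather than
#  taken from dev_testing.testing_funs: build the evaluator once from a set of candidate
#  pipelines, then score it against each (X, y) pair loaded below.)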
import pandas as pd
import numpy as np
from dev_testing.testing_funs import FitPipelines
from sklearn.datasets import load_boston, load_diabetes
from sklearn import linear_model
import lightgbm as lgb
from transformations.transformations import DiaPoly
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import RandomForestRegressor
dataset = load_boston()
X, y = dataset.data, dataset.target
X = | pd.DataFrame(X) | pandas.DataFrame |
from simulationClasses import DCChargingStations, Taxi, Bus, BatterySwappingStation
import numpy as np
import pandas as pd
from scipy import stats, integrate
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from matplotlib.dates import DateFormatter, HourLocator, MinuteLocator, AutoDateLocator
import seaborn as sns
import csv
import sys
from datetime import datetime,date,timedelta
import random
from math import ceil
import math
sns.set_context("paper")
sns.set(font_scale=2)
sns.set_style("whitegrid", {
"font.family": "serif",
"font.serif": ["Times", "Palatino", "serif"],
'grid.color': '.9',
'grid.linestyle': '--',
})
taxiChargingStation = DCChargingStations(5)
taxiFleet =[]
for i in range(100):
newTaxi = Taxi()
newTaxi.useSwapping = 0
taxiFleet.append(newTaxi)
busChargingStation = DCChargingStations(5)
busFleet = []
for i in range(20):
newBus = Bus()
newBus.useSwapping = 0
busFleet.append(newBus)
time = 0
taxiIncome = []
busIncome = []
taxiChargerIncome = []
busChargerIncome = []
while time < 24*60*7:
tempTaxiFleet = []
todayTaxiIncome = 0
todayBusIncome = 0
for runningTaxi in taxiFleet:
runningTaxi.decideChargeMode(time)
if runningTaxi.chargingMode == 1:
taxiChargingStation.addCharge(runningTaxi)
else:
runningTaxi.getTravelSpeed(time)
tempTaxiFleet.append(runningTaxi)
taxiFleet = tempTaxiFleet
tempChargingVehicles = []
for chargingTaxi in taxiChargingStation.chargingVehicles:
chargingTaxi.decideChargeMode(time)
if chargingTaxi.chargingMode == 0:
chargingTaxi.getTravelSpeed(time)
taxiFleet.append(chargingTaxi)
else:
chargingTaxi.charge(time,0,taxiChargingStation.chargeSpeed)
tempChargingVehicles.append(chargingTaxi)
taxiChargingStation.chargingVehicles = tempChargingVehicles
while taxiChargingStation.numberOfStations - len(taxiChargingStation.chargingVehicles) > 0:
if len(taxiChargingStation.pendingVehicles) > 0:
newChargeTaxi = taxiChargingStation.pendingVehicles.pop(0)
newChargeTaxi.charge(time,0,taxiChargingStation.chargeSpeed)
taxiChargingStation.chargingVehicles.append(newChargeTaxi)
else:
break
taxiChargingStation.charge()
tempBusFleet = []
for runningBus in busFleet:
runningBus.decideChargeMode(time)
if runningBus.chargingMode == 1:
busChargingStation.addCharge(runningBus)
else:
runningBus.getTravelSpeed(time)
tempBusFleet.append(runningBus)
busFleet = tempBusFleet
tempChargingVehicles = []
for chargingBus in busChargingStation.chargingVehicles:
chargingBus.decideChargeMode(time)
if chargingBus.chargingMode == 0:
chargingBus.getTravelSpeed(time)
busFleet.append(chargingBus)
else:
chargingBus.charge(time, 0, busChargingStation.chargeSpeed)
tempChargingVehicles.append(chargingBus)
busChargingStation.chargingVehicles = tempChargingVehicles
while busChargingStation.numberOfStations - len(busChargingStation.chargingVehicles) > 0:
if len(busChargingStation.pendingVehicles) > 0:
newChargeBus = busChargingStation.pendingVehicles.pop(0)
newChargeBus.charge(time, 0, busChargingStation.chargeSpeed)
busChargingStation.chargingVehicles.append(newChargeBus)
else:
break
busChargingStation.charge()
for taxi in taxiFleet + taxiChargingStation.chargingVehicles + taxiChargingStation.pendingVehicles:
todayTaxiIncome += taxi.income
for bus in busFleet + busChargingStation.chargingVehicles + busChargingStation.pendingVehicles:
todayBusIncome += bus.income
taxiIncome.append([time,todayTaxiIncome,len(taxiFleet),len(taxiChargingStation.chargingVehicles),len(taxiChargingStation.pendingVehicles)])
busIncome.append([time,todayBusIncome,len(busFleet),len(busChargingStation.chargingVehicles),len(busChargingStation.pendingVehicles)])
taxiChargerIncome.append([time,taxiChargingStation.income])
busChargerIncome.append([time, busChargingStation.income])
time += 1
taxiIncomeDataFrame = | pd.DataFrame(taxiIncome,columns=["time","income","running","charging","waiting"]) | pandas.DataFrame |
import time
import os
import math
import argparse
from glob import glob
from collections import OrderedDict
import random
import warnings
from datetime import datetime
import yaml
import gc
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import pandas as pd
import joblib
import cv2
from sklearn.model_selection import KFold, StratifiedKFold, train_test_split
from skimage.io import imread
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
from torch.utils.data.sampler import WeightedRandomSampler
import torch.backends.cudnn as cudnn
import torchvision
from albumentations.augmentations import transforms
from albumentations.core.composition import Compose, OneOf, KeypointParams
from albumentations.pytorch.transforms import ToTensor
from albumentations.core.transforms_interface import NoOp
from lib.datasets import PoseDataset
from lib.utils.utils import *
from lib.models.model_factory import get_pose_model
from lib.optimizers import RAdam
from lib.decodes import decode
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--name', default=None,
help='model name: (default: arch+timestamp)')
parser.add_argument('--epochs', default=50, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('-b', '--batch_size', default=32, type=int,
metavar='N', help='mini-batch size (default: 32)')
# model
parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet18',
help='model architecture: (default: resnet18)')
parser.add_argument('--input_w', default=224, type=int)
parser.add_argument('--input_h', default=224, type=int)
parser.add_argument('--freeze_bn', default=False, type=str2bool)
parser.add_argument('--rot', default='trig', choices=['eular', 'trig', 'quat'])
# loss
parser.add_argument('--loss', default='L1Loss')
# optimizer
parser.add_argument('--optimizer', default='RAdam')
parser.add_argument('--lr', '--learning-rate', default=1e-3, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float,
help='momentum')
parser.add_argument('--weight_decay', default=0, type=float,
help='weight decay')
parser.add_argument('--nesterov', default=False, type=str2bool,
help='nesterov')
# scheduler
parser.add_argument('--scheduler', default='CosineAnnealingLR',
choices=['CosineAnnealingLR', 'ReduceLROnPlateau', 'MultiStepLR'])
parser.add_argument('--min_lr', default=1e-5, type=float,
help='minimum learning rate')
parser.add_argument('--factor', default=0.1, type=float)
parser.add_argument('--patience', default=2, type=int)
parser.add_argument('--milestones', default='1,2', type=str)
parser.add_argument('--gamma', default=2/3, type=float)
# dataset
parser.add_argument('--cv', default=False, type=str2bool)
parser.add_argument('--n_splits', default=5, type=int)
# augmentation
parser.add_argument('--hflip', default=False, type=str2bool)
parser.add_argument('--hflip_p', default=0.5, type=float)
parser.add_argument('--shift', default=True, type=str2bool)
parser.add_argument('--shift_p', default=0.5, type=float)
parser.add_argument('--shift_limit', default=0.1, type=float)
parser.add_argument('--scale', default=True, type=str2bool)
parser.add_argument('--scale_p', default=0.5, type=float)
parser.add_argument('--scale_limit', default=0.1, type=float)
parser.add_argument('--hsv', default=True, type=str2bool)
parser.add_argument('--hsv_p', default=0.5, type=float)
parser.add_argument('--hue_limit', default=20, type=int)
parser.add_argument('--sat_limit', default=0, type=int)
parser.add_argument('--val_limit', default=0, type=int)
parser.add_argument('--brightness', default=True, type=str2bool)
parser.add_argument('--brightness_p', default=0.5, type=float)
parser.add_argument('--brightness_limit', default=0.2, type=float)
parser.add_argument('--contrast', default=True, type=str2bool)
parser.add_argument('--contrast_p', default=0.5, type=float)
parser.add_argument('--contrast_limit', default=0.2, type=float)
parser.add_argument('--iso_noise', default=False, type=str2bool)
parser.add_argument('--iso_noise_p', default=0.5, type=float)
parser.add_argument('--clahe', default=False, type=str2bool)
parser.add_argument('--clahe_p', default=0.5, type=float)
parser.add_argument('--num_workers', default=4, type=int)
parser.add_argument('--resume', action='store_true')
args = parser.parse_args()
return args
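# Example invocation (script name hypothetical; the flags are the ones defined above):
#   python pose_train.py --arch resnet18 --input_w 224 --input_h 224 -b 32 --epochs 50 --optimizer RAdam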
def train(config, train_loader, model, criterion, optimizer, epoch):
avg_meter = AverageMeter()
model.train()
pbar = tqdm(total=len(train_loader))
for i, (input, target) in enumerate(train_loader):
input = input.cuda()
target = target.cuda()
output = model(input)
loss = criterion(output, target.float())
# compute gradient and do optimizing step
optimizer.zero_grad()
loss.backward()
optimizer.step()
avg_meter.update(loss.item(), input.size(0))
postfix = OrderedDict([('loss', avg_meter.avg)])
pbar.set_postfix(postfix)
pbar.update(1)
pbar.close()
return avg_meter.avg
def validate(config, val_loader, model, criterion):
avg_meters = {'loss': AverageMeter()}
# switch to evaluate mode
model.eval()
with torch.no_grad():
pbar = tqdm(total=len(val_loader))
for i, (input, target) in enumerate(val_loader):
input = input.cuda()
target = target.cuda()
output = model(input)
            losses = {}
            losses['loss'] = criterion(output, target.float())
avg_meters['loss'].update(losses['loss'].item(), input.size(0))
postfix = OrderedDict([('loss', avg_meters['loss'].avg)])
pbar.set_postfix(postfix)
pbar.update(1)
pbar.close()
return avg_meters['loss'].avg
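# Typical call pattern (sketch only; the loaders, model, criterion and optimizer are
# assumed to be built in main() below):
#   for epoch in range(config['epochs']):
#       train_loss = train(config, train_loader, model, criterion, optimizer, epoch)
#       val_loss = validate(config, val_loader, model, criterion)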
def main():
config = vars(parse_args())
if config['name'] is None:
config['name'] = '%s_%s' % (config['arch'], datetime.now().strftime('%m%d%H'))
if not os.path.exists('models/pose/%s' % config['name']):
os.makedirs('models/pose/%s' % config['name'])
if config['resume']:
with open('models/pose/%s/config.yml' % config['name'], 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
config['resume'] = True
with open('models/pose/%s/config.yml' % config['name'], 'w') as f:
yaml.dump(config, f)
print('-'*20)
for key in config.keys():
print('- %s: %s' % (key, str(config[key])))
print('-'*20)
cudnn.benchmark = True
df = pd.read_csv('inputs/train.csv')
img_ids = df['ImageId'].values
pose_df = | pd.read_csv('processed/pose_train.csv') | pandas.read_csv |
""" Stockgrid View """
__docformat__ = "numpy"
import logging
from typing import List, Tuple
import pandas as pd
import requests
from gamestonk_terminal.decorators import log_start_end
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_dark_pool_short_positions(sort_field: str, ascending: bool) -> pd.DataFrame:
"""Get dark pool short positions. [Source: Stockgrid]
Parameters
----------
sort_field : str
Field for which to sort by, where 'sv': Short Vol. (1M),
'sv_pct': Short Vol. %%, 'nsv': Net Short Vol. (1M),
'nsv_dollar': Net Short Vol. ($100M), 'dpp': DP Position (1M),
'dpp_dollar': DP Position ($1B)
ascending : bool
Data in ascending order
Returns
----------
pd.DataFrame
Dark pool short position data
"""
d_fields_endpoints = {
"sv": "Short+Volume",
"sv_pct": "Short+Volume+%25",
"nsv": "Net+Short+Volume",
"nsv_dollar": "Net+Short+Volume+$",
"dpp": "Dark+Pools+Position",
"dpp_dollar": "Dark+Pools+Position+$",
}
field = d_fields_endpoints[sort_field]
if ascending:
order = "asc"
else:
order = "desc"
link = f"https://stockgridapp.herokuapp.com/get_dark_pool_data?top={field}&minmax={order}"
response = requests.get(link)
df = pd.DataFrame(response.json()["data"])
df = df[
[
"Ticker",
"Date",
"Short Volume",
"Short Volume %",
"Net Short Volume",
"Net Short Volume $",
"Dark Pools Position",
"Dark Pools Position $",
]
]
return df
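# Example usage (illustrative): largest dark pool positions by dollar value first.
#   positions_df = get_dark_pool_short_positions(sort_field="dpp_dollar", ascending=False)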
@log_start_end(log=logger)
def get_short_interest_days_to_cover(sort_field: str) -> pd.DataFrame:
"""Get short interest and days to cover. [Source: Stockgrid]
Parameters
----------
sort_field : str
Field for which to sort by, where 'float': Float Short %%,
'dtc': Days to Cover, 'si': Short Interest
Returns
----------
pd.DataFrame
Short interest and days to cover data
"""
link = "https://stockgridapp.herokuapp.com/get_short_interest?top=days"
r = requests.get(link)
df = pd.DataFrame(r.json()["data"])
d_fields = {
"float": "%Float Short",
"dtc": "Days To Cover",
"si": "Short Interest",
}
df = df[
["Ticker", "Date", "%Float Short", "Days To Cover", "Short Interest"]
].sort_values(
by=d_fields[sort_field],
ascending=bool(sort_field == "dtc"),
)
return df
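# Example usage (illustrative): stocks ranked by days to cover.
#   dtc_df = get_short_interest_days_to_cover(sort_field="dtc")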
@log_start_end(log=logger)
def get_short_interest_volume(ticker: str) -> Tuple[pd.DataFrame, List]:
"""Get price vs short interest volume. [Source: Stockgrid]
Parameters
----------
ticker : str
Stock to get data from
Returns
----------
pd.DataFrame
Short interest volume data
List
Price data
"""
link = f"https://stockgridapp.herokuapp.com/get_dark_pool_individual_data?ticker={ticker}"
response = requests.get(link)
df = pd.DataFrame(response.json()["individual_short_volume_table"]["data"])
df["date"] = pd.to_datetime(df["date"])
return df, response.json()["prices"]["prices"]
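# Example usage (illustrative ticker): the call returns a (DataFrame, price list) pair.
#   short_vol_df, prices = get_short_interest_volume("AAPL")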
@log_start_end(log=logger)
def get_net_short_position(ticker: str) -> pd.DataFrame:
"""Get net short position. [Source: Stockgrid]
Parameters
----------
ticker: str
Stock to get data from
Returns
----------
pd.DataFrame
Net short position
"""
link = f"https://stockgridapp.herokuapp.com/get_dark_pool_individual_data?ticker={ticker}"
response = requests.get(link)
df = pd.DataFrame(response.json()["individual_dark_pool_position_data"])
df["dates"] = | pd.to_datetime(df["dates"]) | pandas.to_datetime |
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
        # unable to create tz-aware 'A' and 'C' freq
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assertIsNotNone(expected.freq)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assertIsNone(masked.freq)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
tm.assert_isinstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
tm.assert_isinstance(s[5], Timestamp)
tm.assert_isinstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
self.assertRaises(ValueError, date_range, start, end,
freq='s', periods=10)
def test_timestamp_to_datetime(self):
_skip_if_no_pytz()
rng = date_range('20090415', '20090519',
tz='US/Eastern')
stamp = rng[0]
dtval = stamp.to_pydatetime()
self.assertEqual(stamp, dtval)
self.assertEqual(stamp.tzinfo, dtval.tzinfo)
def test_index_convert_to_datetime_array(self):
_skip_if_no_pytz()
def _check_rng(rng):
converted = rng.to_pydatetime()
tm.assert_isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
tm.assert_isinstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
self.assertEqual(rng[0].second, 1)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
self.assertIs(result.index, rng)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_setitem_timestamp(self):
# 2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012',
freq=datetools.bday)
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
rng2 = rng[::2][::-1]
self.assertRaises(ValueError, rng2.get_indexer, rng,
method='pad')
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertTrue((tmp['dates'].values == ex_vals).all())
def test_to_datetime_unit(self):
epoch = 1370745748
s = Series([ epoch + t for t in range(20) ])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = concat([Series([ epoch + t for t in range(20) ]).astype(float),Series([np.nan])],ignore_index=True)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
series = Series(dates)
self.assertTrue(np.issubdtype(series.dtype, np.dtype('M8[ns]')))
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all())
def test_index_astype_datetime64(self):
idx = Index([datetime(2012, 1, 1)], dtype=object)
if not _np_version_under1p7:
raise nose.SkipTest("test only valid in numpy < 1.7")
casted = idx.astype(np.dtype('M8[D]'))
expected = DatetimeIndex(idx.values)
tm.assert_isinstance(casted, DatetimeIndex)
self.assertTrue(casted.equals(expected))
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
self.assertTrue(np.issubdtype(result.dtype, np.dtype('M8[ns]')))
mask = result.isnull()
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
self.assertTrue(np.issubdtype(result['B'].dtype, np.dtype('M8[ns]')))
mask = com.isnull(result)['B']
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
result = repr(series)
expected = ('0 1970-01-01 00:00:00\n'
'1 1970-01-01 00:00:00.000001\n'
'2 1970-01-01 00:00:00.000002\n'
'3 NaT\n'
'dtype: datetime64[ns]')
self.assertEqual(result, expected)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_string_na_nat_conversion(self):
# GH #999, #858
from pandas.compat import parse_date
strings = np.array(['1/1/2000', '1/2/2000', np.nan,
'1/4/2000, 12:34:56'], dtype=object)
expected = np.empty(4, dtype='M8[ns]')
for i, val in enumerate(strings):
if com.isnull(val):
expected[i] = iNaT
else:
expected[i] = parse_date(val)
result = tslib.array_to_datetime(strings)
assert_almost_equal(result, expected)
result2 = to_datetime(strings)
tm.assert_isinstance(result2, DatetimeIndex)
assert_almost_equal(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
result = to_datetime(malformed)
assert_almost_equal(result, malformed)
self.assertRaises(ValueError, to_datetime, malformed,
errors='raise')
idx = ['a', 'b', 'c', 'd', 'e']
series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
'1/5/2000'], index=idx, name='foo')
dseries = Series([to_datetime('1/1/2000'), np.nan,
to_datetime('1/3/2000'), np.nan,
to_datetime('1/5/2000')], index=idx, name='foo')
result = to_datetime(series)
dresult = to_datetime(dseries)
expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
for i in range(5):
x = series[i]
if isnull(x):
expected[i] = iNaT
else:
expected[i] = to_datetime(x)
assert_series_equal(result, expected)
self.assertEqual(result.name, 'foo')
assert_series_equal(dresult, expected)
self.assertEqual(dresult.name, 'foo')
def test_to_datetime_iso8601(self):
result = to_datetime(["2012-01-01 00:00:00"])
exp = Timestamp("2012-01-01 00:00:00")
self.assertEqual(result[0], exp)
result = to_datetime(['20121001']) # bad iso 8601
exp = Timestamp('2012-10-01')
self.assertEqual(result[0], exp)
def test_to_datetime_default(self):
rs = to_datetime('2001')
xp = datetime(2001, 1, 1)
self.assertTrue(rs, xp)
#### dayfirst is essentially broken
#### to_datetime('01-13-2012', dayfirst=True)
#### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True))
def test_to_datetime_on_datetime64_series(self):
# #2699
s = Series(date_range('1/1/2000', periods=10))
result = to_datetime(s)
self.assertEqual(result[0], s[0])
def test_to_datetime_with_apply(self):
# this is only locale tested with US/None locales
_skip_if_has_locale()
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1,2,3])
expected = pd.to_datetime(td, format='%b %y')
result = td.apply(pd.to_datetime, format='%b %y')
assert_series_equal(result, expected)
td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3])
self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y'))
self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y'))
expected = pd.to_datetime(td, format='%b %y', coerce=True)
result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', coerce=True))
assert_series_equal(result, expected)
def test_nat_vector_field_access(self):
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(idx, field)
expected = [getattr(x, field) if x is not NaT else -1
for x in idx]
self.assert_numpy_array_equal(result, expected)
def test_nat_scalar_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(NaT, field)
self.assertEqual(result, -1)
self.assertEqual(NaT.weekday(), -1)
def test_to_datetime_types(self):
# empty string
result = to_datetime('')
self.assertIs(result, NaT)
result = to_datetime(['', ''])
self.assertTrue(isnull(result).all())
# ints
result = Timestamp(0)
expected = to_datetime(0)
self.assertEqual(result, expected)
# GH 3888 (strings)
expected = to_datetime(['2012'])[0]
result = to_datetime('2012')
self.assertEqual(result, expected)
### array = ['2012','20120101','20120101 12:01:01']
array = ['20120101','20120101 12:01:01']
expected = list(to_datetime(array))
result = lmap(Timestamp,array)
tm.assert_almost_equal(result,expected)
### currently fails ###
### result = Timestamp('2012')
### expected = to_datetime('2012')
### self.assertEqual(result, expected)
def test_to_datetime_unprocessable_input(self):
# GH 4928
self.assert_numpy_array_equal(
to_datetime([1, '1']),
np.array([1, '1'], dtype='O')
)
self.assertRaises(TypeError, to_datetime, [1, '1'], errors='raise')
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view('M8[us]')
as_obj = scalar.astype('O')
index = DatetimeIndex([scalar])
self.assertEqual(index[0], scalar.astype('O'))
value = Timestamp(scalar)
self.assertEqual(value, as_obj)
def test_to_datetime_list_of_integers(self):
rng = date_range('1/1/2000', periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
self.assertTrue(rng.equals(result))
def test_to_datetime_dt64s(self):
in_bound_dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
for dt in in_bound_dts:
self.assertEqual(
pd.to_datetime(dt),
Timestamp(dt)
)
oob_dts = [
np.datetime64('1000-01-01'),
np.datetime64('5000-01-02'),
]
for dt in oob_dts:
self.assertRaises(ValueError, pd.to_datetime, dt, errors='raise')
self.assertRaises(ValueError, tslib.Timestamp, dt)
self.assertIs(pd.to_datetime(dt, coerce=True), NaT)
def test_to_datetime_array_of_dt64s(self):
dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
self.assert_numpy_array_equal(
pd.to_datetime(dts, box=False),
np.array([Timestamp(x).asm8 for x in dts])
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64('9999-01-01')]
self.assertRaises(
ValueError,
pd.to_datetime,
dts_with_oob,
coerce=False,
errors='raise'
)
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=True),
np.array(
[
Timestamp(dts_with_oob[0]).asm8,
Timestamp(dts_with_oob[1]).asm8,
iNaT,
],
dtype='M8'
)
)
# With coerce=False and errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=False),
np.array(
[dt.item() for dt in dts_with_oob],
dtype='O'
)
)
def test_index_to_datetime(self):
idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
result = idx.to_datetime()
expected = DatetimeIndex(datetools.to_datetime(idx.values))
self.assertTrue(result.equals(expected))
today = datetime.today()
idx = Index([today], dtype=object)
result = idx.to_datetime()
expected = DatetimeIndex([today])
self.assertTrue(result.equals(expected))
def test_to_datetime_freq(self):
xp = bdate_range('2000-1-1', periods=10, tz='UTC')
rs = xp.to_datetime()
self.assertEqual(xp.freq, rs.freq)
self.assertEqual(xp.tzinfo, rs.tzinfo)
def test_range_misspecified(self):
# GH #1095
self.assertRaises(ValueError, date_range, '1/1/2000')
self.assertRaises(ValueError, date_range, end='1/1/2000')
self.assertRaises(ValueError, date_range, periods=10)
self.assertRaises(ValueError, date_range, '1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, end='1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, periods=10, freq='H')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
self.assertIn('2000', str(e))
def test_reindex_with_datetimes(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_promote_datetime_date(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq('4H', method='ffill')
expected = ts[5:].asfreq('4H', method='ffill')
assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
self.assert_numpy_array_equal(result, expected)
def test_asfreq_normalize(self):
rng = date_range('1/1/2000 09:30', periods=20)
norm = date_range('1/1/2000', periods=20)
vals = np.random.randn(20)
ts = Series(vals, index=rng)
result = ts.asfreq('D', normalize=True)
norm = date_range('1/1/2000', periods=20)
expected = Series(vals, index=norm)
assert_series_equal(result, expected)
vals = np.random.randn(20, 3)
ts = DataFrame(vals, index=rng)
result = ts.asfreq('D', normalize=True)
expected = DataFrame(vals, index=norm)
assert_frame_equal(result, expected)
def test_date_range_gen_error(self):
rng = date_range('1/1/2000 00:00', '1/1/2000 00:18', freq='5min')
self.assertEqual(len(rng), 4)
def test_first_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.first('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.first('10d')
self.assertEqual(len(result), 10)
result = ts.first('3M')
expected = ts[:'3/31/2000']
assert_series_equal(result, expected)
result = ts.first('21D')
expected = ts[:21]
assert_series_equal(result, expected)
result = ts[:0].first('3M')
| assert_series_equal(result, ts[:0]) | pandas.util.testing.assert_series_equal |
from flask import Flask, render_template, request
import numpy as np
import pandas as pd
import scipy.stats as stats
import matplotlib.pyplot as plt
import sklearn
import seaborn as sns
sns.set_style("whitegrid")
from sklearn.model_selection import cross_val_score
from sklearn import preprocessing
from sklearn import datasets, linear_model
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from pandas import datetime
import h5py
import warnings
warnings.filterwarnings('ignore')
app = Flask(__name__)
@app.route('/')
def home():
return render_template('login.html')
@app.route('/index', methods=['GET','POST'])
def index():
user=request.form['un']
pas=request.form['pw']
cr=pd.read_excel('cred.xlsx')
un=np.asarray(cr['Username']).tolist()
pw=np.asarray(cr['Password']).tolist()
cred = dict(zip(un, pw))
if user in un:
if(cred[user]==pas):
return render_template('index.html')
else:
k=1
return render_template('login.html',k=k)
else:
k=1
return render_template('login.html',k=k)
@app.route('/data_viz')
def data_viz():
return render_template("data_viz.html")
@app.route('/file_upload')
def file_upload():
return render_template("file_upload.html")
@app.route('/upload_printed', methods=['GET','POST'])
def upload_printed():
abc=request.files['printed_doc']
test1=pd.read_csv(abc)
test=test1
train=pd.read_csv('train.csv')
#test=pd.read_csv('test.csv')
store=pd.read_csv('stores.csv')
feature=pd.read_csv('features.csv')
print("\nEXPLORING store.csv")
print("\n",store.head().append(store.tail()),"\n")
print("Structure of Store:\n",store.shape, "\n")
print("Number of missing values:\n",store.isnull().sum().sort_values(ascending=False),"\n")
print("\nEXPLORING feature.csv")
print(feature.head().append(feature.tail()),"\n")
print("Structure of Feature: ",feature.shape,"\n")
print("Summary Statistic:\n",feature.describe(),"\n")
print("Number of missing values:\n",feature.isnull().sum().sort_values(ascending=False),"\n")
print("\nFINDING OUT THE MISSING PERCENTAGE OF DATA ACROSS EACH FEATURE")
feature_percent_missing = feature.isnull().sum()*100/len(feature)
feature_data_type = feature.dtypes
feature_summary = pd.DataFrame({"Percent_missing": feature_percent_missing.round(2),
"Datatypes": feature_data_type})
print('\n',feature_summary)
print("\nEXPLORING train.csv")
print(train.head().append(train.tail()),"\n")
print("Structure of train:\n",train.shape,"\n")
print("Summary Statistic:\n",train.describe(),"\n")
print("Number of missing values:\n",train.isnull().sum().sort_values(ascending=False),"\n")
print("\nEXPLORING test.csv")
print(test.head().append(test.tail()),"\n")
print("Structure of test:\n",test.shape,"\n")
print("Summary Statistic:\n",test.describe(),"\n")
print("Number of missing values:\n",test.isnull().sum().sort_values(ascending=False),"\n")
print('\nJOINING TABLES:')
combined_train = pd.merge(train, store, how="left", on="Store")
combined_test = pd.merge(test, store, how="left", on="Store")
combined_train = pd.merge(combined_train, feature, how = "inner", on=["Store","Date"])
combined_test = pd.merge(combined_test, feature, how = "inner", on=["Store","Date"])
combined_train = combined_train.drop(["IsHoliday_y"], axis=1)
combined_test = combined_test.drop(["IsHoliday_y"], axis=1)
print(combined_train.head(),"\n", combined_train.shape,"\n")
print(combined_test.head(),"\n", combined_test.shape,"\n")
print(combined_train.describe())
print(combined_test.describe())
print('\nDATA PREPROCESSING:')
print('\nREPLACING MISSING VALUES BY 0')
print('\nCHECKING FOR THE TOTAL NUMBER OF MISSING VALUES IN combined_train AND combined_test AND THEN REPLACING THEM WITH 0')
print(combined_test.isnull().sum())
print(combined_train.isnull().sum())
processed_train = combined_train.fillna(0)
processed_test = combined_test.fillna(0)
print('\nREPLACING NEGATIVE MARKDOWN EVENTS BY 0 IN processed_train AND processed_test')
processed_train.loc[processed_train['Weekly_Sales'] < 0.0,'Weekly_Sales'] = 0.0
processed_train.loc[processed_train['MarkDown2'] < 0.0,'MarkDown2'] = 0.0
processed_train.loc[processed_train['MarkDown3'] < 0.0,'MarkDown3'] = 0.0
print('\n',processed_train.describe())
processed_test.loc[processed_test['MarkDown1'] < 0.0,'MarkDown1'] = 0.0
processed_test.loc[processed_test['MarkDown2'] < 0.0,'MarkDown2'] = 0.0
processed_test.loc[processed_test['MarkDown3'] < 0.0,'MarkDown3'] = 0.0
processed_test.loc[processed_test['MarkDown5'] < 0.0,'MarkDown5'] = 0.0
print('\n',processed_test.describe())
print('\nPERFORMING ONE HOT ENCODING FOR CATEGORICAL DATA AND BOOLEAN DATA:')
print('\n',processed_train.dtypes, processed_test.dtypes)
cat_col = ['IsHoliday_x','Type']
for col in cat_col:
lbl = preprocessing.LabelEncoder()
lbl.fit(processed_train[col].values.astype('str'))
processed_train[col] = lbl.transform(processed_train[col].values.astype('str'))
for col in cat_col:
lbl = preprocessing.LabelEncoder()
lbl.fit(processed_test[col].values.astype('str'))
processed_test[col] = lbl.transform(processed_test[col].values.astype('str'))
processed_test.to_csv("Processed_data/processed_test.csv", index=False)
print('\n',processed_test.head())
processed_train = processed_train[['Store', 'Dept', 'Date', 'Unemployment', 'IsHoliday_x', 'Type', 'Size',
'Temperature', 'Fuel_Price', 'MarkDown1', 'MarkDown2', 'MarkDown3',
'MarkDown4', 'MarkDown5', 'CPI', 'Weekly_Sales']]
processed_train.to_csv("Processed_data/processed_train.csv", index=False)
print('\n',processed_train.head())
'''print('\nVISUALIZATION OF HISTORIC DATA:')
store['Type'].value_counts().plot(kind='bar')
plt.title('Total number of each type of stores')
plt.xlabel('Type')
plt.ylabel('Number of Stores')
plt.show()
a=sns.catplot(x="Type", y="Size", data=store);
a.fig.suptitle('Sizes of each type of store')
a=train[['Store', 'Dept']].drop_duplicates()
a.plot(kind='scatter', x='Store',y='Dept')
plt.title('Departments across every store')
print('\nPLOTTING CORRELATION HEATMAP:')
corr=processed_train.corr()
sns.heatmap(corr, xticklabels=corr.columns,yticklabels=corr.columns)
print('\nPLOTTING CORRELATION MATRIX:')
cmap = cmap=sns.diverging_palette(5, 250, as_cmap=True)
def magnify():
return [dict(selector="th",props=[("font-size", "7pt")]),
dict(selector="td",props=[('padding', "0em 0em")]),
dict(selector="th:hover",props=[("font-size", "12pt")]),
dict(selector="tr:hover td:hover",props=[('max-width', '200px'),('font-size', '12pt')])]
corr.style.background_gradient(cmap, axis=1)\
.set_properties(**{'max-width': '80px', 'font-size': '10pt'})\
.set_caption("Hover to magify")\
.set_precision(2)\
.set_table_styles(magnify())'''
dfabc=processed_train[['Date','Store','Dept','IsHoliday_x','Unemployment','Fuel_Price','Temperature','Type','MarkDown1','MarkDown2','MarkDown3','MarkDown4','MarkDown5','CPI','Weekly_Sales']]
dfabc["MarkDownValue"] = dfabc["MarkDown1"].add(dfabc["MarkDown2"])
dfabc["MarkDownValue"] = dfabc["MarkDownValue"].add(dfabc["MarkDown3"])
dfabc["MarkDownValue"] = dfabc["MarkDownValue"].add(dfabc["MarkDown4"])
dfabc["MarkDownValue"] = dfabc["MarkDownValue"].add(dfabc["MarkDown5"])
dfabc = dfabc[dfabc.MarkDownValue != 0.0]
dfdef=processed_test[['Date','Store','Dept','IsHoliday_x','Type','MarkDown1','MarkDown2','MarkDown3','MarkDown4','MarkDown5','CPI']]
dfdef["MarkDownValue"] = dfdef["MarkDown1"].add(dfdef["MarkDown2"])
dfdef["MarkDownValue"] = dfdef["MarkDownValue"].add(dfdef["MarkDown3"])
dfdef["MarkDownValue"] = dfdef["MarkDownValue"].add(dfdef["MarkDown4"])
dfdef["MarkDownValue"] = dfdef["MarkDownValue"].add(dfdef["MarkDown5"])
dfdef = dfdef[dfdef.MarkDownValue != 0.0]
dfx=dfabc
dfx=pd.get_dummies(dfx, columns=['Dept','Store','Type'])
dfx['Day']=dfx['Date'].str[0:2]
dfx['Month']=dfx['Date'].str[3:5]
dfx['Year']=dfx['Date'].str[6:10]
dfx['Day']=pd.to_numeric(dfx['Day'])
dfx['Month']=pd.to_numeric(dfx['Month'])
dfx['Year']=pd.to_numeric(dfx['Year'])
dftest=dfdef
dftest=pd.get_dummies(dftest, columns=['Dept','Store','Type'])
dftest['Day']=dftest['Date'].str[0:2]
dftest['Month']=dftest['Date'].str[3:5]
dftest['Year']=dftest['Date'].str[6:10]
dftest['Day']=pd.to_numeric(dftest['Day'])
dftest['Month']= | pd.to_numeric(dftest['Month']) | pandas.to_numeric |
import pandas as pd
import numpy as np
import xgboost as xgb
train_data_df = pd.read_csv('train.csv')
test_data_df = | pd.read_csv('test.csv') | pandas.read_csv |
import inspect
import operator as op
from typing import *
import pandas as pd
import pypika as pk
from dateutil.relativedelta import relativedelta
from pandas.io.formats.style import Styler
from pypika import DatePart # noqa
from pypika import Order # noqa
from pypika import Case, Criterion # noqa
from pypika import CustomFunction as cfn # noqa
from pypika import MSSQLQuery as Query # noqa
from pypika import Table as T # noqa
from pypika import functions as fn # noqa
from pypika.analytics import RowNumber # noqa
from pypika.terms import PseudoColumn # noqa
from seaborn import diverging_palette
from smseventlog import config as cf
from smseventlog import date, delta, dt
from smseventlog import functions as f
from smseventlog import getlog
from smseventlog import styles as st
from smseventlog.database import db
from smseventlog.utils import dbmodel as dbm
log = getlog(__name__)
week_letter = 'W'
"""
- Queries control how data is queried/filtered from database.
- Can be consumed by tables/views, reports, etc
- da is 'default args' to be passed to filter when query is executed
"""
class Filter():
def __init__(self, parent):
# fltr has to belong to a query object
criterion, fields = {}, {}
select_table = parent.select_table
f.set_self(vars())
def add(self, field=None, val=None, vals=None, opr=None, term=None, table=None, ct=None):
"""Add query filter
Parameters
----------
field : str, optional
field to filter on, default None
val : str, optional
val to filter on, default None
vals : dict, optional
dict of {field: val}, default None
opr : operator, optional
eg opr.eq, default None
term : str, optional
filter term eg "between", default None
table : str | pk.Table, optional
table if not using query's default table, default None
ct : pk.Criterion, optional
fully formed criterion, eg multiple statements or "or" etc, default None
Returns
-------
qr.Filter
self
"""
if not vals is None:
# not pretty, but pass in field/val with dict a bit easier
field = list(vals.keys())[0]
val = list(vals.values())[0]
if table is None:
table = self.select_table
elif isinstance(table, str):
table = T(table)
field_ = table.field(field)
if ct is None:
if not term is None:
func = getattr(field_, term)
# between
if val:
ct = func(*val)
else:
ct = func()
elif isinstance(val, str):
val = val.replace('*', '%')
if '%' in val:
ct = field_.like(val)
else:
if opr is None:
opr = op.eq
ct = opr(field_, val)
elif isinstance(val, (int, float)):
if opr is None:
opr = op.eq
ct = opr(field_, val)
elif isinstance(val, (dt, date)):
if opr is None:
opr = op.ge
ct = opr(field_, val)
self.add_criterion(ct=ct)
return self
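# Example calls (field names are hypothetical, shown only to illustrate the parameters):
#   fltr.add(field='MineSite', val='FortHills')                  # equality
#   fltr.add(field='DateAdded', val=dt(2020, 1, 1), opr=op.ge)   # explicit comparison operator
#   fltr.add(field='Unit', val='F3*')                            # wildcard is converted to LIKE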
def add_criterion(self, ct):
# check for duplicate criterion, use str(ct) as dict key for actual ct
# can also use this to pass in a completed pk criterion eg (T().field() == val)
self.criterion[str(ct)] = ct
if isinstance(ct, pk.terms.ComplexCriterion):
return # can't use fields in a ComplexCriterion for later access
if hasattr(ct, 'left'):
field = ct.left.name
elif hasattr(ct, 'term'):
field = list(ct.term.fields_())[0].name
self.fields[field.lower()] = ct
def check_criterion(self, field):
# check if field is in criterion fields - not sure if I need this
lst = list(filter(lambda x: field.lower() in x.lower(), self.criterion))
ans = True if lst else False
return ans
def get_criterion(self, field: str) -> str:
"""Return criterion containing selected field
Parameters
----------
field : str
partial field name to match
Returns
-------
str
criterion string (need to access object through dict keys)
Examples
--------
>>> ct = fltr.get_criterion('PartName')
fltr.criterion[ct] = ...
"""
lst = list(filter(lambda x: field.lower() in x.lower(), self.criterion))
ans = lst[0] if lst else None
return ans
def get_all_criterion(self):
return self.criterion.values()
def expand_criterion(self):
return Criterion.all(self.get_all_criterion())
def is_init(self):
return len(self.criterion) > 0
def print_criterion(self):
for ct in self.criterion.values():
print('\t', list(ct.tables_)[0], ct)
class QueryBase(object):
def __init__(
self,
parent=None,
minesite: str = None,
da: dict = None,
theme: str = 'light',
select_tablename: str = None,
use_cached_df: bool = False):
formats, default_dtypes, stylemap_cols = {}, {}, {}
background_gradients = []
last_sql = None
_minesite_default = 'FortHills'
# color_good = 240 if theme == 'light' else 120
cmap = diverging_palette(240, 10, sep=10, n=21, as_cmap=True)
sql = None
df = pd.DataFrame()
df_loaded = False
m = cf.config['TableName']
color = cf.config['color']
name = self.__class__.__name__
# loop base classes to get first working title, need this to map view_cols
for base_class in inspect.getmro(self.__class__):
title = m['Class'].get(base_class.__name__, None)
if not title is None:
break
# loop through base classes till we find a working select_table
if select_tablename is None:
for base_class in inspect.getmro(self.__class__):
select_tablename = m['Select'].get(base_class.__name__, None)
if not select_tablename is None:
break
select_table = T(select_tablename)
# try to get updatetable, if none set as name of select table
if not select_tablename is None:
update_tablename = m['Update'].get(name, select_tablename)
update_table = getattr(dbm, update_tablename, None)
# set dict for db > view_col conversion later
view_cols = f.get_dict_db_view(title=title)
f.set_self(vars())
self.set_fltr()
@property
def minesite(self):
# can either pass in a minesite for reports/etc, or use GUI parent's
if hasattr(self, '_minesite') and not self._minesite is None:
return self._minesite
elif not self.parent is None:
return self.parent.minesite
else:
from ..gui import _global as gbl
return gbl.get_minesite()
@minesite.setter
def minesite(self, val):
self._minesite = val
def get_sql(self, last_query=False, **kw) -> str:
"""Return sql from query object.\n
Parameters
----------
last_query : bool, optional
Refresh using last saved sql query, by default False\n
Returns
-------
str
SQL string, consumed in database.get_df
"""
if last_query:
if not self.last_sql is None:
return self.last_sql
else:
self.parent.update_statusbar('No previous query saved yet.')
return
sql, da = self.sql, self.da
if sql is None:
q = self.q
if hasattr(self, 'process_criterion'):
self.process_criterion()
if not da is None and hasattr(self, 'set_default_args'):
self.set_default_args(**da)
# NOTE could build functionality for more than one subquery
fltr2 = self.fltr2
if fltr2.is_init() and hasattr(self, 'sq0'):
self.sq0 = self.sq0.where(fltr2.expand_criterion())
if hasattr(self, 'get_query'): # need to do this after init for queries with subqueries
q = self.get_query()
# no select cols defined yet
if q.get_sql() == '':
q = q.select(*self.cols)
q = q.where(self.fltr.expand_criterion())
# allow adding a wrapper eg for row numbering, but keep filters with base query
if hasattr(self, 'wrapper_query'):
q = self.wrapper_query(q)
sql = str(q)
self.last_sql = sql
return sql
def set_fltr(self):
self.fltr = Filter(parent=self)
self.fltr2 = Filter(parent=self)
def set_lastperiod(self, days=7):
if hasattr(self, 'date_col') and not self.date_col is None:
vals = {self.date_col: dt.now().date() + delta(days=days * -1)}
self.fltr.add(vals=vals, opr=op.ge)
return True
else:
return False
def set_lastweek(self):
return self.set_lastperiod(days=7)
def set_lastmonth(self):
return self.set_lastperiod(days=31)
def get_updatetable(self):
tablename = self.select_tablename if self.update_table is None else self.update_tablename
return getattr(dbm, tablename) # db model definition, NOT instance
def add_extra_cols(self, cols: list):
"""Add extra columns to query
Parameters
----------
cols : list | string
Item(s) to add
"""
if not isinstance(cols, list):
cols = [cols]
self.cols = self.cols + cols
def add_fltr_args(self, args, subquery=False):
"""Add multiple filters to self.fltr as list
Parameters
----------
args : list
list of key:vals with optional other args
subquery : bool, optional
use self.fltr2, default False
"""
if not isinstance(args, list):
args = [args]
fltr = self.fltr if not subquery else self.fltr2
for da in args:
fltr.add(**da)
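# Example args list (keys/values are hypothetical, for illustration only):
#   query.add_fltr_args([
#       dict(vals=dict(MineSite='FortHills')),
#       dict(vals=dict(StatusEvent='Open'), opr=op.ne)])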
def _set_default_filter(self, do=False, **kw):
"""Just used for piping"""
if do and hasattr(self, 'set_default_filter'):
self.set_default_filter(**kw)
return self
def _set_base_filter(self, do=False, **kw):
"""Just used for piping"""
if do and hasattr(self, 'set_base_filter'):
self.set_base_filter(**kw)
return self
def process_df(self, df):
"""Placeholder for piping"""
return df
def _process_df(self, df, do=True):
"""Wrapper to allow skipping process_df for testing/troubleshooting"""
if do:
return df.pipe(self.process_df)
else:
return df
@property
def df(self):
if not self.df_loaded:
self.get_df()
return self._df
@df.setter
def df(self, data):
self._df = data
def _get_df(self, default=False, base=False, prnt=False, skip_process=False, **kw) -> pd.DataFrame:
"""Execute query and return dataframe
Parameters
----------
default : bool, optional
self.set_default_filter if default=True, default False
base : bool, optional
self.set_base_filter, default False
prnt : bool, optional
Print query sql, default False
skip_process : bool, optional
Allow skipping process_df for troubleshooting, default False
Returns
---
pd.DataFrame
"""
self._set_default_filter(do=default, **kw) \
._set_base_filter(do=base, **kw)
sql = self.get_sql(**kw)
if prnt:
print(sql)
return pd \
.read_sql(sql=sql, con=db.engine) \
.pipe(f.default_df) \
.pipe(f.convert_df_view_cols, m=self.view_cols) \
.pipe(self._process_df, do=not skip_process) \
.pipe(f.set_default_dtypes, m=self.default_dtypes)
def get_df(self, cached: bool = False, **kw) -> pd.DataFrame:
"""Wrapper for _get_df
Parameters
----------
cached : bool, default False
Use cached df if already loaded
Returns
---
pd.DataFrame
"""
if (self.use_cached_df or cached) and self.df_loaded:
return self.df
try:
self.df = self._get_df(**kw)
self.df_loaded = True
self.fltr.print_criterion()
finally:
# always reset filter after every refresh call
self.set_fltr()
return self.df
def get_stylemap(self, df: pd.DataFrame, col: str = None) -> Tuple[dict, dict]:
"""Convert irow, icol stylemap to indexes
- Consumed by datamodel set_stylemap()
Returns
------
tuple
tuple of defaultdicts bg, text colors
"""
if df.shape[0] <= 0 or not hasattr(self, 'update_style'):
return None
if col is None:
# calc style for full dataframe
style = df.style.pipe(self.update_style)
else:
# calc style for specific cols
# NOTE need to have manually defined dict of sort cols - functions per query
m = self.stylemap_cols[col]
df = df[m['cols']] # get slice of df
style = df.style.pipe(m['func'], **m.get('da', {}))
style._compute()
return st.convert_stylemap_index_color(style=style)
def subset_notnull(self, style: Styler, cols: Union[str, List[str]]) -> pd.Series:
"""Subset df column(s) to only not null rows
Parameters
----------
style : Styler
cols : Union[str, List[str]]
Returns
-------
pd.Series
true/false mask where all rows in cols are not null
"""
cols = f.as_list(cols)
return pd.IndexSlice[style.data[cols].notnull().all(axis=1), cols]
def set_minesite(self, table: str = 'UnitID'):
self.fltr.add(vals=dict(MineSite=self.minesite), table=table)
def expand_monthly_index(self, df, d_rng=None):
"""Expand monthly PeriodIndex to include missing months"""
s = df.index
if d_rng is None:
# expand to min and max existing dates
try:
d_rng = (s.min().to_timestamp(), s.max().to_timestamp() + relativedelta(months=1))
except:
log.info('No rows in monthly index to expand.')
return df
idx = pd.date_range(d_rng[0], d_rng[1], freq='M').to_period()
return df \
.merge(pd.DataFrame(index=idx), how='right', left_index=True, right_index=True) \
.rename_axis(s.name)
def table_with_args(table, args):
def fmt(arg):
if isinstance(arg, bool):
return f"'{arg}'"
elif isinstance(arg, int):
return str(arg)
else:
return f"'{arg}'"
str_args = ', '.join(fmt(arg) for arg in args.values())
return f'{table}({str_args})'
# data range funcs
def first_last_month(d):
d_lower = dt(d.year, d.month, 1)
d_upper = d_lower + relativedelta(months=1) + delta(days=-1)
return (d_lower, d_upper)
def last_day_month(d):
return first_last_month(d)[1]
def df_period(freq: str, n: int = 0, ytd: bool = False, n_years: int = 1) -> pd.DataFrame:
"""Return df of periods for specified freq
Parameters
----------
freq : str
M or W
n : int, optional
filter last n periods, default 0
ytd : bool, optional
filter periods to start of year, default False
n_years : int
number of previous years
Returns
-------
pd.DataFrame
df of periods
"""
freq = dict(month='M', week='W').get(freq, freq) # convert from month/week
d_upper = dt.now()
d_lower = d_upper + delta(days=-365 * n_years)
idx = pd.date_range(d_lower, d_upper, freq=freq).to_period()
# fmt_week = f'%Y-%{week_letter}'
fmt_week = '%G-%V'
m = dict(
W=dict(fmt_str=fmt_week),
M=dict(fmt_str='%Y-%m')) \
.get(freq)
def _rename_week(df, do=False):
if not do:
return df
return df \
.assign(name=lambda x: x.period.dt.strftime(f'Week %{week_letter}'))
def _filter_ytd(df, do=ytd):
if not do:
return df
return df[df.period >= str(df.period.max().year)]
df = pd.DataFrame(index=idx)
return df \
.assign(
start_date=lambda x: | pd.to_datetime(x.index.start_time.date) | pandas.to_datetime |
from decouple import config
import pandas as pd
import pymssql
import utility
import os
# Load environment variables
server = config('SERVER')
user = config('DB_USER')
password = config('DB_PASSWORD')
database = config('DATABASE')
try:
conn = pymssql.connect(server, user, password, database)
except pymssql.OperationalError as err:
print('Error!', err)
exit(1)
# utility.print_products(conn)
sql_query = pd.read_sql_query('''
SELECT tw_Id, tw_Symbol, tw_Nazwa, tw_Opis, tc_CenaNetto7 FROM dbo.tw__Towar INNER JOIN dbo.tw_Cena ON tw_Id = tc_IdTowar WHERE tw_Zablokowany = 0
'''
, conn)
df = | pd.DataFrame(sql_query) | pandas.DataFrame |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import statsmodels
from matplotlib import pyplot
from scipy import stats
import statsmodels.api as sm
import warnings
from itertools import product
import datetime as dt
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.stattools import kpss
from pandas import DataFrame
from pandas import concat
from pandas import Series
from math import sqrt
from sklearn.metrics import mean_squared_error
# convert series to supervised learning
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j + 1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j + 1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j + 1, i)) for j in range(n_vars)]
# put it all together
agg = concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
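# Quick illustration (made-up numbers, not part of the original script): passing a
# two-column frame with n_in=1, n_out=1 yields columns var1(t-1), var2(t-1), var1(t), var2(t),
# with the first row dropped because its lagged values are NaN, e.g.
#   series_to_supervised(DataFrame({'a': [1, 2, 3], 'b': [10, 20, 30]}), 1, 1)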
data = pd.read_csv('Data/All_Merged.csv') # , parse_dates=[0], date_parser=dateparse
data.isna().sum()
# Inserting 0 for NA
data.fillna(0, inplace=True)
# plt.figure(figsize=[10,4])
# plt.title('BTC Price (USD) Daily')
# plt.plot(data.price, '-', label='Daily')
# Monthly
data['date'] = pd.to_datetime(data['date'])
data['date'] = data['date'].dt.tz_localize(None)
data = data.groupby([pd.Grouper(key='date', freq='M')]).first().reset_index()
data = data.set_index('date')
data['price'].fillna(method='ffill', inplace=True)
# Decomposition - only for price though!
# decomposition = sm.tsa.seasonal_decompose(data.price)
#
# trend = decomposition.trend
# seasonal = decomposition.seasonal
# residual = decomposition.resid
#
# fig = plt.figure(figsize=(10,8))
#
# plt.subplot(411)
# plt.plot(data.price, label='Original')
# plt.legend(loc='best')
# plt.subplot(412)
# plt.plot(trend, label='Trend')
# plt.legend(loc='best')
# plt.subplot(413)
# plt.plot(seasonal,label='Seasonality')
# plt.legend(loc='best')
# plt.subplot(414)
# plt.plot(residual, label='Residuals')
# plt.legend(loc='best')
#
# fig.suptitle('Decomposition of Prices Data')
# plt.show()
# Setting the data structure
reframed = series_to_supervised(data, 1, 1)
# Also removing the lagged price, as this will be created in the ARIMA
reframed.drop(reframed.columns[[0,8, 9, 10, 11, 12, 13]], axis=1, inplace=True)
print(reframed.head())
# split data
split_date = '2018-06-25'
reframed_train = reframed.loc[reframed.index <= split_date].copy()
reframed_test = reframed.loc[reframed.index > split_date].copy()
# Just trying ARIMA on the original data
# This is a seasonal ARIMA (SARIMA), so effectively an extra result on top of a regular ARIMA
# Help for the commentary can be found here: https://machinelearningmastery.com/sarima-for-time-series-forecasting-in-python/
# It fits fine if the data is not split into train and test..
# Initial approximation of parameters
Qs = range(0, 2)
qs = range(0, 3)
Ps = range(0, 3)
ps = range(0, 3)
D=1
d=1
parameters = product(ps, qs, Ps, Qs)
parameters_list = list(parameters)
len(parameters_list)
x_train = reframed_train.iloc[:,:-1].values
y_train = reframed_train.iloc[:,-1]
x_test = reframed_test.iloc[:,:-1].values
y_test = reframed_test.iloc[:,-1]
# Model Selection
results = []
best_aic = float("inf")
warnings.filterwarnings('ignore')
for param in parameters_list:
try:
model=sm.tsa.statespace.SARIMAX(endog=y_train, exog=x_train, order=(param[0], d, param[1]),
seasonal_order=(param[2], D, param[3], 12),enforce_stationarity=True,
enforce_invertibility=True).fit(disp=-1)
except ValueError:
print('wrong parameters:', param)
continue
aic = model.aic
if aic < best_aic:
best_model = model
best_aic = aic
best_param = param
results.append([param, model.aic])
# Best Models
result_table = pd.DataFrame(results)
result_table.columns = ['parameters', 'aic']
print(result_table.sort_values(by = 'aic', ascending=True).head())
print(best_model.summary())
# Residual plot of the best model
fig = plt.figure(figsize=(10,4))
best_model.resid.plot()
fig.suptitle('Residual Plot of the Best Model')
print("Dickey–Fuller test:: p=%f" % sm.tsa.stattools.adfuller(best_model.resid)[1])
# Dickey–Fuller test:: p=0.xxx -> Residuals are stationary
df_month2 = data[['price']]
future = pd.DataFrame()
df_month2 = pd.concat([df_month2, future])
df_month2['forecast'] = best_model.predict(start = len(x_train), end = len(x_train)+len(x_test)-1, exog=x_test)
plt.figure(figsize=(8,4))
df_month2.price.plot()
df_month2.forecast.plot(color='r', ls='--', label='Predicted Price')
plt.legend()
plt.title('Bitcoin Prices (USD) Predicted vs Actuals, by months')
plt.ylabel('mean USD')
plt.show()
# Daily version
df = pd.read_csv('Data/All_Merged.csv')
df.isna().sum()
# Inserting 0 for NA
df.fillna(0, inplace=True)
# Date type
df['date'] = pd.to_datetime(df['date'])
df['date'] = df['date'].dt.tz_localize(None)
df = df.groupby([pd.Grouper(key='date', freq='D')]).first().reset_index()
df = df.set_index('date')
df['price'].fillna(method='ffill', inplace=True)
# Setting the data structure
daily_re = series_to_supervised(df, 1, 1)
price = daily_re.iloc[:,0]
da_price = daily_re.iloc[:,0]
daily_re.drop(daily_re.columns[[0,8, 9, 10, 11, 12, 13]], axis=1, inplace=True)
y = daily_re.iloc[:,-1]
print(daily_re.head())
# split data
split_date = '2018-07-11'
daily_re_train = daily_re.loc[daily_re.index <= split_date].copy()
daily_re_test = daily_re.loc[daily_re.index > split_date].copy()
da_price = da_price.loc[da_price.index > split_date].copy()
da_price = da_price.values
# As in boosting - but without validation, only with a rolling window
pred_day = 2901-16 # Predict for this day, for the next H-1 days. Note indexing of days start from 0.
H = 30 # Forecast horizon, in days. Note there are about 252 trading days in a year
train_size = int(365 * 0.75) # Use 3 years of data as train set. Note there are about 252 trading days in a year
val_size = int(365 * 0.25)
train_val_size = train_size + val_size # Size of train+validation set
print("No. of days in train+validation set = " + str(train_val_size))
qs = range(0, 3)
ps = range(1, 3)
d = 1
parameters = product(ps, qs)
parameters_list = list(parameters)
len(parameters_list)
# ARIMA again, but not seasonal, and with training and test data, at the daily level
# Initial approximation of parameters
pred = pd.DataFrame()
while daily_re.index[pred_day] < daily_re.index[len(daily_re) - 1]:
x_da_train = daily_re.iloc[pred_day - train_val_size:pred_day,:-1].values
y_da_train = daily_re.iloc[pred_day - train_val_size:pred_day,-1]
x_da_test = daily_re.iloc[pred_day:pred_day + H,:-1].values
y_da_test = daily_re.iloc[pred_day:pred_day + H,-1]
# Model Selection
results = []
best_bic = float("inf")
warnings.filterwarnings('ignore')
for param in parameters_list:
try:
model=sm.tsa.ARIMA(endog=y_da_train, exog=x_da_train, order=(param[0], d, param[1])).fit(disp=-1)
except ValueError:
print('wrong parameters:', param)
continue
bic = model.bic
if bic < best_bic:
best_model = model
best_bic = bic
best_param = param
results.append([param, model.aic])
# Best Models
# result_table = pd.DataFrame(results)
# result_table.columns = ['parameters', 'bic']
# print(result_table.sort_values(by = 'bic', ascending=True).head())
# print(best_model.summary())
append = best_model.predict(start = len(x_da_train), end = len(x_da_train)+len(x_da_test)-1, exog=x_da_test).T
pred = pd.concat([pred, append], ignore_index=True)
pred_day = pred_day + H
pred_day = 2901-16 # Reset
price2 = price.iloc[pred_day:]
pred['prev_price'] = price2.values
pred.index = price2.index
pred['pred'] = pred.sum(axis=1)
# price2 = price2.values
# pred = pred.values
# Residual plot of the best model
# fig = plt.figure(figsize=(10,4))
# best_model.resid.plot()
# fig.suptitle('Residual Plot of the Best Model')
# print("Dickey–Fuller test:: p=%f" % sm.tsa.stattools.adfuller(best_model.resid)[1])
# Dickey–Fuller test:: p=0.001213 -> Residuals are stationary
df_month2 = df[['price']]
future = pd.DataFrame()
df_month2 = | pd.concat([df_month2, future]) | pandas.concat |
import pandas as pd
import config
from data_process import get_data
from visualization import plot_indicator, plot_monthly_return_comp_etf, plot_yearly_return_comp_etf
from trade import get_trading_records
import yfinance as yf
def run() -> None:
for indicator in config.TECHNICAL_INDICATORS:
# visualize indicators from 2021-06-01 to 2021-09-30
df = get_data(ticker="ADS.DE",indicator=indicator, start="2021-06-01", end="2021-10-01")
plot_indicator(df, config.saving_path_indicator_visualization, "ADS.DE", indicator, show_signals=False)
#trade on all dax 30 from 2020-11-01 to 2021-10-31
dax_trading_records = | pd.DataFrame() | pandas.DataFrame |
import timeit
from typing import Union
import numpy as np
import pandas as pd
import copy
from carla.evaluation.distances import get_distances
from carla.evaluation.nearest_neighbours import yNN, yNN_prob, yNN_dist
from carla.evaluation.manifold import yNN_manifold, sphere_manifold
from carla.evaluation.process_nans import remove_nans
from carla.evaluation.redundancy import redundancy
from carla.evaluation.success_rate import success_rate, individual_success_rate
from carla.evaluation.diversity import individual_diversity, avg_diversity
from carla.evaluation.violations import constraint_violation
from carla.evaluation.recourse_time import recourse_time_taken
from carla.models.api import MLModel
from carla.models.catalog import MLModelCatalog
from carla.recourse_methods.api import RecourseMethod
from carla.recourse_methods.processing import get_drop_columns_binary
class Benchmark:
"""
The benchmarking class contains all measurements.
It is possible to run only individual evaluation metrics or all via one single call.
For every given factual, the benchmark object will generate one counterfactual example with
the given recourse method.
Parameters
----------
mlmodel: carla.models.MLModel
Black Box model we want to explain
recourse_method: carla.recourse_methods.RecourseMethod
Recourse method we want to benchmark
factuals: pd.DataFrame
Instances for which we want to find counterfactuals
Methods
-------
compute_ynn:
Computes y-Nearest-Neighbours for generated counterfactuals
compute_average_time:
Computes average time for generated counterfactual
compute_distances:
Calculates the distance measure and returns it as dataframe
compute_constraint_violation:
Computes the constraint violation per factual as dataframe
compute_redundancy:
Computes redundancy for each counterfactual
compute_success_rate:
Computes success rate for the whole recourse method.
run_benchmark:
Runs every measurement and returns every value as dict.
"""
def __init__(
self,
mlmodel: Union[MLModel, MLModelCatalog],
recourse_method: RecourseMethod,
factuals: pd.DataFrame,
dataset: pd.DataFrame = None
) -> None:
self._mlmodel = mlmodel
self._recourse_method = recourse_method
self._full_dataset = dataset
start = timeit.default_timer()
self._counterfactuals = recourse_method.get_counterfactuals(factuals)
stop = timeit.default_timer()
self._timer = stop - start
# Avoid using scaling and normalizing more than once
if isinstance(mlmodel, MLModelCatalog):
self._mlmodel.use_pipeline = False # type: ignore
self._factuals = copy.deepcopy(factuals)
# Normalizing and encoding factual for later use
self._enc_norm_factuals = recourse_method.encode_normalize_order_factuals(
factuals, with_target=True
)
def compute_ynn(self) -> pd.DataFrame:
"""
Computes y-Nearest-Neighbours for generated counterfactuals
Returns
-------
pd.DataFrame
"""
_, counterfactuals_without_nans = remove_nans(
self._factuals, self._counterfactuals
)
if counterfactuals_without_nans.empty:
ynn = np.nan
else:
ynn = yNN(
counterfactuals_without_nans, self._recourse_method, self._mlmodel, 5
)
columns = ["y-Nearest-Neighbours"]
return | pd.DataFrame([[ynn]], columns=columns) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 18 10:32:02 2021
@author: Avram
"""
import time
import math
import numpy as np
import pandas as pd
from pathlib import Path
import glob
from .utilities import t_to_d
class PtracMod:
"""A class to store ptrac information for moderator studies."""
def __init__(self, key, thick, mode="n", folder=Path(".")):
"""Initialize TargetData class."""
self.key = key
self.mode = mode
self.thick = thick
self.data = pd.DataFrame()
self.data_axial = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 14 18:12:10 2018
@author: Kazuki
"""
import numpy as np
import pandas as pd
import os, gc
from tqdm import tqdm
from multiprocessing import cpu_count, Pool
import utils
os.system(f'rm -rf ../data')
os.system(f'mkdir ../data')
os.system(f'rm -rf ../feature')
os.system(f'mkdir ../feature')
COLUMN_TO_TYPE = {
'object_id': np.int32,
'mjd' : np.float32,
'passband' : np.int8,
'flux' : np.float32,
'flux_err' : np.float32,
'detected' : np.int8
}
def preprocess(df):
df['flux_ratio_sq'] = np.power(df['flux'] / df['flux_err'], 2.0)
df['flux_by_flux_ratio_sq'] = df['flux'] * df['flux_ratio_sq']
df['date'] = df.mjd.astype(int)
df['year'] = ( df.date - df.groupby(['object_id']).date.transform('min') )/365
df['year'] = df['year'].astype(int)
# df['month'] = ( df.date - df.groupby(['object_id']).date.transform('min') )/30
# df['month'] = df['month'].astype(int)
#
# df['3month'] = ( df.date - df.groupby(['object_id']).date.transform('min') )/90
# df['3month'] = df['3month'].astype(int)
df['flux_norm1'] = df.flux / df.groupby(['object_id']).flux.transform('max')
df['flux_norm2'] = (df.flux - df.groupby(['object_id']).flux.transform('min')) / df.groupby(['object_id']).flux.transform('max')
df['flux_norm3'] = df.flux / df.groupby(['object_id', 'passband']).flux.transform('max')
return
def multi(splitn):
df = test_log[test_log.object_id%utils.SPLIT_SIZE == splitn].reset_index(drop=True)
preprocess(df)
df.to_pickle(f'../data/test_log{splitn:02}.pkl')
return
def ddf_to_wfd(df, n, oid_start):
"""
te.object_id.max()
130788054
"""
df['object_id_bk'] = df.object_id.copy()
df['object_id'] = df.object_id.rank(method='dense')
# oid_start = oid_max + 1
li = []
for i in tqdm(range(n)):
tmp = df.sample(frac=1, random_state=i).drop_duplicates(['object_id', 'date'])
tmp.object_id += oid_start
oid_start = tmp.object_id.max() #+ 1
# print(tmp.object_id.min(), tmp.object_id.max())
li.append(tmp)
df = pd.concat(li, ignore_index=True)
meta = df[train.columns.tolist()+['object_id_bk']].drop_duplicates('object_id')
log = df[train_log.columns]
meta.ddf = 0
return meta, log, meta.object_id.max()
# =============================================================================
# main
# =============================================================================
if __name__ == "__main__":
utils.start(__file__)
# =================
# train
# =================
train = pd.read_csv('../input/training_set_metadata.csv')
(train['hostgal_photoz'] == 0).to_pickle('../data/tr_is_gal.pkl')
train[train['hostgal_photoz'] == 0][['object_id']].reset_index(drop=True).to_pickle('../data/tr_oid_gal.pkl')
train[train['hostgal_photoz'] != 0][['object_id']].reset_index(drop=True).to_pickle('../data/tr_oid_exgal.pkl')
train.to_pickle('../data/train.pkl')
train[['target']].to_pickle('../data/target.pkl')
train_log = pd.read_csv('../input/training_set.csv.zip', dtype=COLUMN_TO_TYPE)
train_log = | pd.merge(train_log, train[['object_id', 'distmod']], on='object_id', how='left') | pandas.merge |
import matplotlib
# matplotlib.use('pgf')
# pgf_with_pdflatex = {
# "pgf.texsystem": "pdflatex",
# "pgf.preamble": [
# r"\usepackage[utf8x]{inputenc}",
# r"\usepackage[T1]{fontenc}",
# r"\usepackage{cmbright}",
# ]
# }
# matplotlib.rcParams.update(pgf_with_pdflatex)
import pandas
import re
import numpy
from matplotlib import pyplot
matplotlib.style.use('ggplot')
pyplot.interactive(False)
def to_min_secs(x, pos):
x = int(x)
minutes = x // 60
seconds = x % 60
return '{:02d}:{:02d}'.format(minutes, seconds)
def build_dataframe_case(case):
# mobility data
mobility_columns = ['module', 'max_speed', 'min_speed', 'start_time', 'stop_time',
'total_co2', 'total_dist', 'total_time']
case_df_mobility = pandas.read_csv(case + '_stats_veinsmobility.csv')
case_df_mobility.columns = mobility_columns
mobility_search_re = 'ProvidenciaExampleScenario.(.+?).veinsmobility'
case_df_mobility['module'] = case_df_mobility['module'].map(lambda x: re.search(mobility_search_re, x).group(1))
case_df_mobility.set_index(['module'], inplace=True)
# appl data (sent warnings, arrived at dest)
appl_columns = ['module', 'arrived', 'rcvd_warnings', 'sent_warnings']
case_df_appl = pandas.read_csv(case + '_stats_appl.csv')
case_df_appl.columns = appl_columns
appl_search_re = 'ProvidenciaExampleScenario.(.+?).appl'
case_df_appl['module'] = case_df_appl['module'].map(lambda x: re.search(appl_search_re, x).group(1))
case_df_appl['arrived'] = case_df_appl['arrived'].map({1: True, 0: False})
case_df_appl.set_index(['module'], inplace=True)
case_df_speed = | pandas.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
# @Time : 2021/9/7 21:16
# @Author : <NAME>
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import re
import ast
file1 = 'output_search-cell-nas-bench-201_GDAS-cifar100-BN1_seed-61-T-14-Sep-at-19-06-38.log'
file2 = 'output_search-cell-nas-bench-201_GDAS-cifar100-BN1_seed-61-T-14-Sep-at-19-08-07.log'
file3 = 'output_search-cell-nas-bench-201_GDAS-cifar100-BN1_seed-61-T-14-Sep-at-19-11-04.log'
file4 = 'output_search-cell-nas-bench-201_GDAS-cifar100-BN1_seed-61-T-14-Sep-at-19-13-13.log'
file5 = 'output_search-cell-nas-bench-201_GDAS-cifar10-BN1_seed-61-T-14-Sep-at-20-33-15.log'
file6 = 'output_search-cell-nas-bench-201_GDAS-cifar10-BN1_seed-61-T-14-Sep-at-20-34-48.log'
file7 = 'output_search-cell-nas-bench-201_GDAS-cifar10-BN1_seed-61-T-14-Sep-at-20-36-25.log'
file8 = 'output_search-cell-nas-bench-201_GDAS-cifar10-BN1_seed-61-T-14-Sep-at-20-37-52.log'
file9 = 'output_search-cell-nas-bench-201_GDAS-cifar10-BN1_seed-610915-T-14-Sep-at-09-50-01.log'
file10 = 'output_search-cell-nas-bench-201_GDAS-cifar10-BN1_seed-610915-T-14-Sep-at-09-51-36.log'
file11 = 'output_search-cell-nas-bench-201_GDAS-cifar100-BN1_seed-610915-T-14-Sep-at-09-46-41.log'
file12 = 'output_search-cell-nas-bench-201_GDAS-cifar100-BN1_seed-610915-T-14-Sep-at-09-48-42.log'
file13 = 'output_search-cell-nas-bench-201_GDAS-cifar100-BN1_seed-61-T-13-Sep-at-08-10-48.log' # Leaf-0001
file14 = 'output_search-cell-nas-bench-201_GDAS-cifar100-BN1_seed-61-T-13-Sep-at-08-12-23.log' # joey-4T4
file_proposal = 'FedNAS_Search_darts.log'
file_proposal1 = 'Ours_Search_darts.log'
file_proposal2 = 'FedNAS_128.log'
def before():
tep = pd.DataFrame()
files = [file1, file2, file3, file4]
cifar10_files = [file7, file8]
files610915 = [file9, file10, file11, file12]
# files = files610915
files = cifar10_files
files = [file13, file14]
# names = ['Personalize Arch+DL', 'DL only', 'only Personalized Arch', 'FL']
# names = ['cifar10_ousr', 'cifar10_baseline', 'cifar100_ours', 'cifar100_baseline']
names = ['pFed_NAS', 'baseline']
for file in files:
result = []
for user in range(5):
result.append([])
for line in open(file):
for user in range(5):
a = re.search('^User {}'.format(user), line)
if a:
if 'evaluate' in line:
result[user].append(float(re.findall('accuracy@1=(.+?)%',line)[0]))
result = pd.DataFrame(result)
result = result.T
result.columns = ['user0', 'user1', 'user2', 'user3', 'user4']
result['avg'] = result.mean(axis = 1)
tep[file] = result['avg']
tep.columns = names
a = tep.plot()
plt.tick_params(labelsize = 15)
plt.xlabel("Training Rounds", size = 15)
plt.ylabel("Mean Accuracy", size = 15)
plt.grid(linestyle = '-.')
plt.legend(prop = {'size':12})
plt.savefig('Figure3.eps', dpi = 600, format = 'eps')
plt.show()
tep = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
'''
@test($$;type(pd))
@alt(全ての|すべての|全)
@alt(の名前|名)
@alt(丸める|四捨五入する)
@alt(丸めて|四捨五入して)
@prefix(df;データフレーム)
@prefix(ds;データ列)
@prefix(col;カラム)
@alt(日付データ|タイムスタンプ[型|]|Pandasの日付型|datetime64型)
@prefix(value;[文字列|日付|])
データ列を使う
データ列をインポートする
'''
dateList = [ | pd.to_datetime('12-12-12') | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Produce a JSON file used to enhance structural metadata in the IIIF manifests.
"""
import json
import tqdm
import click
import pandas as pd
from get_annotations import get_annotations_df
from helpers import write_to_csv, get_tag, get_transcription, get_source
from helpers import CACHE
def get_fragment_selector(target):
"""Return the fragmenet selector coordinates."""
if isinstance(target, dict):
return target['selector']['value'].lstrip('?xywh=')
return None
def get_lark(part_of):
"""Return the logical ARK."""
tmp = part_of.rstrip('/manifest.json')
return tmp.split('/iiif/')[1]
def add_fields(df):
"""Add fields to the dataframe."""
df['tag'] = df['body'].apply(get_tag)
df['transcription'] = df['body'].apply(get_transcription)
df['lark'] = df['partOf'].apply(get_lark)
df['source'] = df['target'].apply(get_source)
df['selector'] = df['target'].apply(get_fragment_selector)
return df
def filter_title_transcriptions(df):
"""Filter the title transcriptions."""
df = df[df['motivation'] == 'describing']
df = df[df['tag'] == 'title']
return df
def add_fragment_selectors_to_cols(df):
"""Add fragement selector coordinates to columns of the dataframe."""
df['x'], df['y'], df['w'], df['h'] = df['selector'].str.split(pat=',').str
df[['x', 'y', 'w', 'h']] = df[['x', 'y', 'w', 'h']].apply(pd.to_numeric)
return df
@CACHE.memoize(typed=True, expire=3600, tag='its_title_index')
def get_title_index_df():
"""Return title index as a dataframe."""
url = 'https://annotations.libcrowds.com/annotations/playbills-results/'
df = get_annotations_df(url)
df = add_fields(df)
df = filter_title_transcriptions(df)
df = add_fragment_selectors_to_cols(df)
groups = df.groupby('source', as_index=False)
out = []
for source, group_df in tqdm.tqdm(groups, desc='Processing',
unit='annotation'):
sorted_df = group_df.sort_values(by=['y', 'x'], ascending=True)
titles = sorted_df['transcription'].tolist()
lark = sorted_df.iloc[0]['lark']
title = titles[0]
if len(titles) > 1:
title += ', etc.'
row = {
'l-ark': lark,
'canvas-ark': source.split('/iiif/')[-1],
'title-summary': json.dumps(title).strip('"') # JSON-escape
}
out.append(row)
out_df = | pd.DataFrame(out) | pandas.DataFrame |
#!/usr/bin/python3
import math as m
from datetime import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tkinter import *
from pandastable import Table, TableModel
import pandas as pd
import h5_spectrum as H5
STAT_NORMAL = np.dtype([(H5.MEAN_MEMBER, np.float64),
(H5.STANDARD_DEVIATION_MEMBER, np.float64),
(H5.NUMBER_OF_SAMPLES_MEMBER, np.int32),
(H5.SUM_MEMBER, np.float64),
(H5.SUM_OF_SQUARES_MEMBER, np.float64)])
# structure to store and perform online computation of basic descriptive indexes for a normally distributed variable
# Perform online computation by adding individual elements to the object as described by:
# Reference: @ARTICLE{Welford62noteon,
# author = {Author(s) <NAME> and <NAME>},
# title = {Note on a method for calculating corrected sums of squares and products},
# journal = {Technometrics},
# year = {1962},
# pages = {419--420}
# }
# TODO: Discuss algorithm variation on https://stackoverflow.com/questions/5543651/computing-standard-deviation-in-a-stream
class Normal:
def __init__(self):
self.mean_value = np.NaN # mean_value = ((count*mean_value)+ X )/(count+1)
self.std_value = np.NaN # std_value**2 is updated as ((n-2)/(n-1))*std_{n-1}**2 + (1/n)*(X_n - mean_{n-1})**2
self.count = 0 # count = count + 1
self.sum = 0.0 # to reduce the computational effort and rounding error on the average computation
self.sum_squares = 0.0 # to reduce the computational effort and reduce error on the standard deviation computation
# add element to the standard normal distribution
def add_element(self, new_element):
# local variable to help on the computation
old_mean = 0.0
delta = 0.0
# select appropriate update procedure according to the number of elements.
# for better efficiency, first consider an existing vector with 2 or more samples already registered
if self.count > 1:
old_mean = self.mean_value
self.sum = self.sum + new_element
self.count += 1
self.mean_value = self.sum / self.count
self.sum_squares = self.sum_squares + ((new_element-old_mean)*(new_element-self.mean_value))
# self.std_value = m.sqrt(self.sum_squares / self.count) # To be used if one wants to keep std_value updated
else:
# if there are 0 elements (a negative count is treated as 0), store the first element
if self.count < 1:
self.mean_value = new_element
self.count = 1
self.sum = new_element
# else, if there is one element
else:
self.count = 2
self.mean_value = (self.mean_value + new_element) / self.count
self.sum = self.sum + new_element
delta = new_element-self.mean_value
self.sum_squares = delta*delta
# to update std_value if automatic update is not used
def std_update(self) -> float:
# std_value**2 = ((n-2)/(n-1))*std_{n-1}**2 + (1/n)*(X_n - mean_{n-1})**2
if self.count > 1:
self.std_value = m.sqrt(self.sum_squares / self.count)
return self.std_value
# add a set to the standard normal distribution. Consider that the populations described by the two objects do not overlap
# https://en.wikipedia.org/wiki/Pooled_variance#Population-based_statistics
def add_set(self, new_set):
# TODO: handle cases where one of the sets has one or two elements only
if self.sum_squares == np.NaN:
self.std_update()
if new_set.sum_squares == np.NaN:
new_set.std_update()
old_set = self
# TODO: handle case where
self.count = old_set.count + new_set.count
self.mean_value = (old_set.sum + new_set.sum) / self.count
self.sum = old_set.sum + new_set.sum
# TODO: handle cases to compute the sum_square, allowing to further add single elements to the object
self.sum_squares = np.NaN
self.std_value = m.sqrt(((((old_set.count*old_set.std_value**2) + (new_set.count*new_set.std_value**2))*self.count)+((old_set.count*new_set.count)*((old_set.mean_value-new_set.mean_value)**2)))/(self.count**2))
def np_set(self, data):
self.mean_value = data[H5.MEAN_MEMBER]
self.std_value = data[H5.STANDARD_DEVIATION_MEMBER]
self.count = data[H5.NUMBER_OF_SAMPLES_MEMBER]
self.sum = data[H5.SUM_MEMBER]
self.sum_squares = data[H5.SUM_OF_SQUARES_MEMBER]
def print(self, reference):
print(reference+"(\u03BC:{}, \u03C3:{}, #:{}, \u03A3:{}, SS:{})".format(self.mean_value, self.std_value, self.count, self.sum, self.sum_squares))
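# Minimal usage sketch of the running-statistics accumulator (values are hypothetical):
#   stats = Normal()
#   for sample in (1.0, 2.0, 4.0):
#       stats.add_element(sample)
#   stats.std_update()   # std_value is only refreshed when explicitly requested
#   stats.print("example ")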
# program log function
def log_message (message):
process_timestamp = datetime.now()
print("{}: ".format(process_timestamp)+message)
# quick plot function for dataframe
def plot_dataframe(dataframe: pd.DataFrame, x_label = "Frequency[Hz]", y_label = ""):
xy_array = dataframe.to_numpy(dtype='float32')
x_axis = dataframe.columns.to_numpy(dtype='float64')
y_axis = dataframe.index.to_numpy(dtype='float64')
if y_axis[0] > 1000:
y_label = "Time [sec]"
else:
if y_axis[len(y_axis)-1] < -80:
y_label = "Level [dBm/m²]"
else:
y_label = "Level [dB\u03BCV/m]"
plt.pcolormesh(x_axis, y_axis, xy_array)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.show()
# call pandas tables to visualize the dataframe. Will halt execution
class table_dataframe(Frame):
def __init__(self, df_data: pd.DataFrame, parent=None):
self.parent = parent
Frame.__init__(self)
self.main = self.master
self.main.geometry('600x400+200+100')
self.main.title('Table app')
f = Frame(self.main)
f.pack(fill=BOTH,expand=1)
if not isinstance(df_data, pd.DataFrame):
df_data_type = type(df_data)
if isinstance(df_data, tuple):
df_data = | pd.DataFrame(df_data) | pandas.DataFrame |
from flask import Flask, render_template, request, session, redirect, url_for
from datetime import datetime, timedelta
import pandas as pd
import sqlite3, hashlib, os, random, os, dotenv
app = Flask(__name__)
app.secret_key = "super secret key"
dotenv.load_dotenv()
MAPBOX_TOKEN = os.getenv('MAPBOX_TOKEN')
conn = sqlite3.connect('data/web.db', check_same_thread=False)
@app.route('/dashboard/')
def dashboard():
w_all = pd.read_sql("select * from w_all", conn)
w_sales_history = pd.read_sql("select * from w_sales_history", conn)
data = {"all": w_all, "sales_history": w_sales_history}
yesterday = datetime.now() - timedelta(days=1)
return render_template('dashboard.html', date=yesterday.strftime("%d %b %Y"), data=data)
@app.route('/sales/')
def sales():
w_sales = pd.read_sql("select * from w_sales", conn)
w_sales_days = pd.read_sql("select * from w_sales_days", conn)
data = {"sales": w_sales, "days": w_sales_days}
yesterday = datetime.now() - timedelta(days=1)
return render_template('sales.html', date=yesterday.strftime("%d %b %Y"), data=data)
@app.route('/delivery/')
def pod():
w_delivery = pd.read_sql("select * from w_delivery", conn)
w_courier = pd.read_sql("select * from w_courier", conn)
data = {"delivery": w_delivery, "courier": w_courier}
yesterday = datetime.now() - timedelta(days=1)
return render_template('delivery.html', date=yesterday.strftime("%d %b %Y"), data=data)
@app.route('/', methods=['GET','POST'])
@app.route('/login/', methods=['GET','POST'])
def login():
msg = ''
sts = ''
if request.method == 'POST':
usr = request.form['username']
pwd = request.form['password']
hash_pwd = hashlib.sha1(pwd.encode('utf-8')).hexdigest()
cur = conn.cursor()
cur.execute("SELECT password FROM w_user WHERE username='{}'".format(usr))
key = cur.fetchone()
if key:
if hash_pwd == key[0]:
cur2 = conn.cursor()
login = cur2.execute("SELECT count_login FROM w_user where username='{}'".format(usr)).fetchone()[0]
cur2.execute("UPDATE w_user SET count_login={}, datetime_register='{}' WHERE username='{}'".format(login+1, datetime.now(), usr))
conn.commit()
session['username'] = usr
return redirect(url_for('dashboard'))
else:
msg = 'Username or password invalid'
sts = 'NOT OK'
else:
msg = 'Username or password invalid'
sts = 'NOT OK'
return render_template('login.html', data=msg)
@app.route("/logout/")
def logout():
session['username'] = None
return login()
@app.route('/signup/', methods=['GET','POST'])
def signup():
msg = ''
sts = ''
img = os.listdir('static/images/captcha')[random.randint(0,9)]
if request.method == 'POST':
usr = request.form['username']
pwd = request.form['password']
repwd = request.form['repassword']
icaptcha = request.form['icaptcha'].replace('.png','')
captcha = request.form['captcha']
cur = conn.cursor()
if pwd != repwd:
msg = "Your password is not same"
sts = 'NOT OK'
elif len(usr)<5:
msg = "Username too short"
sts = 'NOT OK'
elif len(pwd)<5:
msg = "Password too short"
sts = 'NOT OK'
elif icaptcha != hashlib.sha1(captcha.encode('utf-8')).hexdigest():
msg = "You type captcha wrong"
sts = 'NOT OK'
elif cur.execute("SELECT count(*) FROM w_user WHERE username='{}'".format(usr)).fetchone()[0] > 0:
msg = "This username is not available"
sts = 'NOT OK'
else:
try:
hash_pwd = hashlib.sha1(pwd.encode('utf-8')).hexdigest()
sql_insert = """
INSERT INTO w_user VALUES ('{}', '{}', '{}', null, 0);""".format(usr, hash_pwd, datetime.now())
cur.execute(sql_insert)
conn.commit()
msg = "Thanks for Register. You can now login with your username."
sts = 'OK'
except Exception as e:
msg = str(e)
msg += "Registration failed. Contact our support for more information."
sts = 'NOT OK'
data = {"msg": msg, "img": img, "sts": sts}
return render_template('signup.html', data=data)
@app.route('/location/')
def location():
w_branch = | pd.read_sql("select * from w_branch", conn) | pandas.read_sql |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/3/21 17:40
Desc: 天天基金网-基金档案-投资组合
http://fundf10.eastmoney.com/ccmx_000001.html
"""
import pandas as pd
import requests
from bs4 import BeautifulSoup
from akshare.utils import demjson
def fund_portfolio_hold_em(symbol: str = "162411", date: str = "2020") -> pd.DataFrame:
"""
天天基金网-基金档案-投资组合-基金持仓
http://fundf10.eastmoney.com/ccmx_000001.html
:param symbol: 基金代码
:type symbol: str
:param date: 查询年份
:type date: str
:return: 基金持仓
:rtype: pandas.DataFrame
"""
url = "http://fundf10.eastmoney.com/FundArchivesDatas.aspx"
params = {
"type": "jjcc",
"code": symbol,
"topline": "200",
"year": date,
"month": "",
"rt": "0.913877030254846",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{") : -1])
soup = BeautifulSoup(data_json["content"], "lxml")
item_label = [
item.text.split("\xa0\xa0")[1]
for item in soup.find_all("h4", attrs={"class": "t"})
]
big_df = pd.DataFrame()
for item in range(len(item_label)):
temp_df = pd.read_html(data_json["content"], converters={"股票代码": str})[item]
del temp_df["相关资讯"]
temp_df["占净值比例"] = temp_df["占净值比例"].str.split("%", expand=True).iloc[:, 0]
temp_df.rename(columns={"持股数(万股)": "持股数", "持仓市值(万元)": "持仓市值"}, inplace=True)
temp_df.rename(columns={"持股数(万股)": "持股数", "持仓市值(万元人民币)": "持仓市值"}, inplace=True)
temp_df["季度"] = item_label[item]
temp_df = temp_df[
[
"序号",
"股票代码",
"股票名称",
"占净值比例",
"持股数",
"持仓市值",
"季度",
]
]
        big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df["占净值比例"] = pd.to_numeric(big_df["占净值比例"], errors="coerce")
big_df["持股数"] = pd.to_ | numeric(big_df["持股数"], errors="coerce") | pandas.to_numeric |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path for the configuration file where the time series data values CSV
--outFile: Path for the configuration file where the time series data values INI
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
self.fileLocation = os.path.exists(os.path.abspath(inputFileName))
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
        return (lambda x: datetime.datetime.strptime(x, formatString))  # 2020-06-09 19:14:00.000
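    # Example (sketch): the returned callable is suitable for read_csv's date_parser
    # argument, e.g. parser("2020-06-09 19:14:00.000") -> datetime(2020, 6, 9, 19, 14).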
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
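    # Note: methods 2 and 3 return every column name, while method 1 (index_col=0) drops
    # the first column because it is consumed as the index.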
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
analysisFrame.apply(pandas.to_numeric, errors='coerce') # Fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
            if pandas.api.types.is_string_dtype(analysisFrame[columnName]):
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
            for (columnName, columnData) in analysisFrame.items():
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
        Calculates the mean with a weighted-average (multiplication) method, since direct division can produce infinity or NaN.
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
def _calculateStd(self, data):
"""
        Calculates the standard deviation with a multiplication-based method, since direct division can produce an infinity or NaN.
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
            sd = numpy.float128(0)  # accumulator for the sum of squared deviations
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
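    # Sanity-check sketch (assuming finite, float64-representable data): the value returned
    # above should agree with numpy.std(numpy.asarray(data, dtype=numpy.float64), ddof=1).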
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
            # Clean out anomaly due to random invalid inputs.
if (columnName is not None):
meanValue = self._calculateMean(dataAnalysisFrame[columnName])
                if numpy.isnan(meanValue):
meanValue = numpy.float128(1)
sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
                if numpy.isnan(sigmaValue):
sigmaValue = numpy.float128(1)
multiplier = numpy.float128(multiplierSigma) # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7
sigmaRangeValue = (sigmaValue * multiplier)
                if numpy.isnan(sigmaRangeValue):
sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
            # Clean out anomaly due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
            logicVector = (dataAnalysisFrame[columnName] >= 1)
            dataAnalysisCleaned = dataAnalysisFrame[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
            replacementList = [pandas.NaT, numpy.inf, -numpy.inf, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
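    # Example (sketch): a row whose column holds the literal string 'inf' or 'NULL' is first
    # mapped to NaN by the replace() call above and then removed by dropna().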
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": pandas.StringDtype(),
"nearMissDefect": pandas.StringDtype(),
"nearMissOverflow": pandas.StringDtype(),
"replayUNC": pandas.StringDtype(),
"Drive_Id": pandas.StringDtype(),
"indirectionMisses": pandas.StringDtype(),
"BitErrorsHost16": pandas.StringDtype(),
"BitErrorsHost17": pandas.StringDtype(),
"BitErrorsHost18": pandas.StringDtype(),
"BitErrorsHost19": pandas.StringDtype(),
"BitErrorsHost20": pandas.StringDtype(),
"BitErrorsHost21": pandas.StringDtype(),
"BitErrorsHost22": pandas.StringDtype(),
"BitErrorsHost23": pandas.StringDtype(),
"BitErrorsHost24": pandas.StringDtype(),
"BitErrorsHost25": pandas.StringDtype(),
"BitErrorsHost26": pandas.StringDtype(),
"BitErrorsHost27": pandas.StringDtype(),
"BitErrorsHost28": pandas.StringDtype(),
"BitErrorsHost29": pandas.StringDtype(),
"BitErrorsHost30": pandas.StringDtype(),
"BitErrorsHost31": pandas.StringDtype(),
"BitErrorsHost32": pandas.StringDtype(),
"BitErrorsHost33": pandas.StringDtype(),
"BitErrorsHost34": pandas.StringDtype(),
"BitErrorsHost35": pandas.StringDtype(),
"BitErrorsHost36": pandas.StringDtype(),
"BitErrorsHost37": pandas.StringDtype(),
"BitErrorsHost38": pandas.StringDtype(),
"BitErrorsHost39": pandas.StringDtype(),
"BitErrorsHost40": pandas.StringDtype(),
"XORRebuildSuccess": pandas.StringDtype(),
"XORRebuildFail": pandas.StringDtype(),
"BandReloForError": pandas.StringDtype(),
"mrrSuccess": pandas.StringDtype(),
"mrrFail": pandas.StringDtype(),
"mrrNudgeSuccess": pandas.StringDtype(),
"mrrNudgeHarmless": pandas.StringDtype(),
"mrrNudgeFail": pandas.StringDtype(),
"totalErases": pandas.StringDtype(),
"dieOfflineCount": pandas.StringDtype(),
"curtemp": pandas.StringDtype(),
"mintemp": pandas.StringDtype(),
"maxtemp": pandas.StringDtype(),
"oventemp": pandas.StringDtype(),
"allZeroSectors": pandas.StringDtype(),
"ctxRecoveryEvents": pandas.StringDtype(),
"ctxRecoveryErases": pandas.StringDtype(),
"NSversionMinor": pandas.StringDtype(),
"lifeMinTemp": pandas.StringDtype(),
"lifeMaxTemp": pandas.StringDtype(),
"powerCycles": pandas.StringDtype(),
"systemReads": pandas.StringDtype(),
"systemWrites": pandas.StringDtype(),
"readRetryOverflow": pandas.StringDtype(),
"unplannedPowerCycles": pandas.StringDtype(),
"unsafeShutdowns": pandas.StringDtype(),
"defragForcedReloCount": pandas.StringDtype(),
"bandReloForBDR": pandas.StringDtype(),
"bandReloForDieOffline": pandas.StringDtype(),
"bandReloForPFail": pandas.StringDtype(),
"bandReloForWL": pandas.StringDtype(),
"provisionalDefects": pandas.StringDtype(),
"uncorrectableProgErrors": pandas.StringDtype(),
"powerOnSeconds": pandas.StringDtype(),
"bandReloForChannelTimeout": pandas.StringDtype(),
"fwDowngradeCount": pandas.StringDtype(),
"dramCorrectablesTotal": pandas.StringDtype(),
"hb_id": pandas.StringDtype(),
"dramCorrectables1to1": pandas.StringDtype(),
"dramCorrectables4to1": pandas.StringDtype(),
"dramCorrectablesSram": pandas.StringDtype(),
"dramCorrectablesUnknown": pandas.StringDtype(),
"pliCapTestInterval": pandas.StringDtype(),
"pliCapTestCount": pandas.StringDtype(),
"pliCapTestResult": pandas.StringDtype(),
"pliCapTestTimeStamp": pandas.StringDtype(),
"channelHangSuccess": pandas.StringDtype(),
"channelHangFail": pandas.StringDtype(),
"BitErrorsHost41": pandas.StringDtype(),
"BitErrorsHost42": pandas.StringDtype(),
"BitErrorsHost43": pandas.StringDtype(),
"BitErrorsHost44": pandas.StringDtype(),
"BitErrorsHost45": pandas.StringDtype(),
"BitErrorsHost46": pandas.StringDtype(),
"BitErrorsHost47": pandas.StringDtype(),
"BitErrorsHost48": pandas.StringDtype(),
"BitErrorsHost49": pandas.StringDtype(),
"BitErrorsHost50": pandas.StringDtype(),
"BitErrorsHost51": pandas.StringDtype(),
"BitErrorsHost52": pandas.StringDtype(),
"BitErrorsHost53": pandas.StringDtype(),
"BitErrorsHost54": pandas.StringDtype(),
"BitErrorsHost55": pandas.StringDtype(),
"BitErrorsHost56": pandas.StringDtype(),
"mrrNearMiss": pandas.StringDtype(),
"mrrRereadAvg": pandas.StringDtype(),
"readDisturbEvictions": pandas.StringDtype(),
"L1L2ParityError": pandas.StringDtype(),
"pageDefects": pandas.StringDtype(),
"pageProvisionalTotal": pandas.StringDtype(),
"ASICTemp": pandas.StringDtype(),
"PMICTemp": pandas.StringDtype(),
"size": pandas.StringDtype(),
"lastWrite": pandas.StringDtype(),
"timesWritten": pandas.StringDtype(),
"maxNumContextBands": pandas.StringDtype(),
"blankCount": pandas.StringDtype(),
"cleanBands": pandas.StringDtype(),
"avgTprog": pandas.StringDtype(),
"avgEraseCount": pandas.StringDtype(),
"edtcHandledBandCnt": pandas.StringDtype(),
"bandReloForNLBA": pandas.StringDtype(),
"bandCrossingDuringPliCount": pandas.StringDtype(),
"bitErrBucketNum": pandas.StringDtype(),
"sramCorrectablesTotal": pandas.StringDtype(),
"l1SramCorrErrCnt": pandas.StringDtype(),
"l2SramCorrErrCnt": pandas.StringDtype(),
"parityErrorValue": pandas.StringDtype(),
"parityErrorType": pandas.StringDtype(),
"mrr_LutValidDataSize": pandas.StringDtype(),
"pageProvisionalDefects": pandas.StringDtype(),
"plisWithErasesInProgress": pandas.StringDtype(),
"lastReplayDebug": pandas.StringDtype(),
"externalPreReadFatals": pandas.StringDtype(),
"hostReadCmd": pandas.StringDtype(),
"hostWriteCmd": pandas.StringDtype(),
"trimmedSectors": pandas.StringDtype(),
"trimTokens": pandas.StringDtype(),
"mrrEventsInCodewords": pandas.StringDtype(),
"mrrEventsInSectors": pandas.StringDtype(),
"powerOnMicroseconds": pandas.StringDtype(),
"mrrInXorRecEvents": pandas.StringDtype(),
"mrrFailInXorRecEvents": pandas.StringDtype(),
"mrrUpperpageEvents": pandas.StringDtype(),
"mrrLowerpageEvents": pandas.StringDtype(),
"mrrSlcpageEvents": pandas.StringDtype(),
"mrrReReadTotal": pandas.StringDtype(),
"powerOnResets": pandas.StringDtype(),
"powerOnMinutes": pandas.StringDtype(),
"throttleOnMilliseconds": pandas.StringDtype(),
"ctxTailMagic": pandas.StringDtype(),
"contextDropCount": pandas.StringDtype(),
"lastCtxSequenceId": pandas.StringDtype(),
"currCtxSequenceId": pandas.StringDtype(),
"mbliEraseCount": pandas.StringDtype(),
"pageAverageProgramCount": pandas.StringDtype(),
"bandAverageEraseCount": pandas.StringDtype(),
"bandTotalEraseCount": pandas.StringDtype(),
"bandReloForXorRebuildFail": pandas.StringDtype(),
"defragSpeculativeMiss": pandas.StringDtype(),
"uncorrectableBackgroundScan": pandas.StringDtype(),
"BitErrorsHost57": pandas.StringDtype(),
"BitErrorsHost58": pandas.StringDtype(),
"BitErrorsHost59": pandas.StringDtype(),
"BitErrorsHost60": pandas.StringDtype(),
"BitErrorsHost61": pandas.StringDtype(),
"BitErrorsHost62": pandas.StringDtype(),
"BitErrorsHost63": pandas.StringDtype(),
"BitErrorsHost64": pandas.StringDtype(),
"BitErrorsHost65": pandas.StringDtype(),
"BitErrorsHost66": pandas.StringDtype(),
"BitErrorsHost67": pandas.StringDtype(),
"BitErrorsHost68": pandas.StringDtype(),
"BitErrorsHost69": pandas.StringDtype(),
"BitErrorsHost70": pandas.StringDtype(),
"BitErrorsHost71": pandas.StringDtype(),
"BitErrorsHost72": pandas.StringDtype(),
"BitErrorsHost73": pandas.StringDtype(),
"BitErrorsHost74": pandas.StringDtype(),
"BitErrorsHost75": pandas.StringDtype(),
"BitErrorsHost76": pandas.StringDtype(),
"BitErrorsHost77": pandas.StringDtype(),
"BitErrorsHost78": pandas.StringDtype(),
"BitErrorsHost79": pandas.StringDtype(),
"BitErrorsHost80": pandas.StringDtype(),
"bitErrBucketArray1": pandas.StringDtype(),
"bitErrBucketArray2": pandas.StringDtype(),
"bitErrBucketArray3": pandas.StringDtype(),
"bitErrBucketArray4": pandas.StringDtype(),
"bitErrBucketArray5": pandas.StringDtype(),
"bitErrBucketArray6": pandas.StringDtype(),
"bitErrBucketArray7": pandas.StringDtype(),
"bitErrBucketArray8": pandas.StringDtype(),
"bitErrBucketArray9": pandas.StringDtype(),
"bitErrBucketArray10": pandas.StringDtype(),
"bitErrBucketArray11": pandas.StringDtype(),
"bitErrBucketArray12": pandas.StringDtype(),
"bitErrBucketArray13": pandas.StringDtype(),
"bitErrBucketArray14": pandas.StringDtype(),
"bitErrBucketArray15": pandas.StringDtype(),
"bitErrBucketArray16": pandas.StringDtype(),
"bitErrBucketArray17": pandas.StringDtype(),
"bitErrBucketArray18": pandas.StringDtype(),
"bitErrBucketArray19": pandas.StringDtype(),
"bitErrBucketArray20": pandas.StringDtype(),
"bitErrBucketArray21": pandas.StringDtype(),
"bitErrBucketArray22": pandas.StringDtype(),
"bitErrBucketArray23": pandas.StringDtype(),
"bitErrBucketArray24": pandas.StringDtype(),
"bitErrBucketArray25": pandas.StringDtype(),
"bitErrBucketArray26": pandas.StringDtype(),
"bitErrBucketArray27": pandas.StringDtype(),
"bitErrBucketArray28": pandas.StringDtype(),
"bitErrBucketArray29": pandas.StringDtype(),
"bitErrBucketArray30": pandas.StringDtype(),
"bitErrBucketArray31": pandas.StringDtype(),
"bitErrBucketArray32": pandas.StringDtype(),
"bitErrBucketArray33": pandas.StringDtype(),
"bitErrBucketArray34": pandas.StringDtype(),
"bitErrBucketArray35": pandas.StringDtype(),
"bitErrBucketArray36": pandas.StringDtype(),
"bitErrBucketArray37": pandas.StringDtype(),
"bitErrBucketArray38": pandas.StringDtype(),
"bitErrBucketArray39": pandas.StringDtype(),
"bitErrBucketArray40": pandas.StringDtype(),
"bitErrBucketArray41": pandas.StringDtype(),
"bitErrBucketArray42": pandas.StringDtype(),
"bitErrBucketArray43": pandas.StringDtype(),
"bitErrBucketArray44": pandas.StringDtype(),
"bitErrBucketArray45": pandas.StringDtype(),
"bitErrBucketArray46": pandas.StringDtype(),
"bitErrBucketArray47": pandas.StringDtype(),
"bitErrBucketArray48": pandas.StringDtype(),
"bitErrBucketArray49": pandas.StringDtype(),
"bitErrBucketArray50": pandas.StringDtype(),
"bitErrBucketArray51": pandas.StringDtype(),
"bitErrBucketArray52": pandas.StringDtype(),
"bitErrBucketArray53": pandas.StringDtype(),
"bitErrBucketArray54": pandas.StringDtype(),
"bitErrBucketArray55": pandas.StringDtype(),
"bitErrBucketArray56": pandas.StringDtype(),
"bitErrBucketArray57": pandas.StringDtype(),
"bitErrBucketArray58": pandas.StringDtype(),
"bitErrBucketArray59": pandas.StringDtype(),
"bitErrBucketArray60": pandas.StringDtype(),
"bitErrBucketArray61": | pandas.StringDtype() | pandas.StringDtype |
"""
Tests for CBMonthEnd CBMonthBegin, SemiMonthEnd, and SemiMonthBegin in offsets
"""
from datetime import (
date,
datetime,
)
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
from pandas._libs.tslibs.offsets import (
CBMonthBegin,
CBMonthEnd,
CDay,
SemiMonthBegin,
SemiMonthEnd,
)
from pandas import (
DatetimeIndex,
Series,
_testing as tm,
date_range,
)
from pandas.tests.tseries.offsets.common import (
Base,
assert_is_on_offset,
assert_offset_equal,
)
from pandas.tests.tseries.offsets.test_offsets import _ApplyCases
from pandas.tseries import offsets as offsets
from pandas.tseries.holiday import USFederalHolidayCalendar
class CustomBusinessMonthBase:
def setup_method(self, method):
self.d = datetime(2008, 1, 1)
self.offset = self._offset()
self.offset1 = self.offset
self.offset2 = self._offset(2)
def test_eq(self):
assert self.offset2 == self.offset2
def test_mul(self):
pass
def test_hash(self):
assert hash(self.offset2) == hash(self.offset2)
def test_roundtrip_pickle(self):
def _check_roundtrip(obj):
unpickled = tm.round_trip_pickle(obj)
assert unpickled == obj
_check_roundtrip(self._offset())
_check_roundtrip(self._offset(2))
_check_roundtrip(self._offset() * 2)
def test_copy(self):
# GH 17452
off = self._offset(weekmask="Mon Wed Fri")
assert off == off.copy()
class TestCustomBusinessMonthEnd(CustomBusinessMonthBase, Base):
_offset = CBMonthEnd
def test_different_normalize_equals(self):
# GH#21404 changed __eq__ to return False when `normalize` does not match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset) == "<CustomBusinessMonthEnd>"
assert repr(self.offset2) == "<2 * CustomBusinessMonthEnds>"
def test_call(self):
with tm.assert_produces_warning(FutureWarning):
# GH#34171 DateOffset.__call__ is deprecated
assert self.offset2(self.d) == datetime(2008, 2, 29)
def testRollback1(self):
assert CDay(10).rollback(datetime(2007, 12, 31)) == datetime(2007, 12, 31)
def testRollback2(self):
assert CBMonthEnd(10).rollback(self.d) == datetime(2007, 12, 31)
def testRollforward1(self):
assert CBMonthEnd(10).rollforward(self.d) == datetime(2008, 1, 31)
def test_roll_date_object(self):
offset = CBMonthEnd()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
assert result == datetime(2012, 8, 31)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 28)
offset = offsets.Day()
result = offset.rollback(dt)
assert result == datetime(2012, 9, 15)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
on_offset_cases = [
(CBMonthEnd(), datetime(2008, 1, 31), True),
(CBMonthEnd(), datetime(2008, 1, 1), False),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, d, expected = case
assert_is_on_offset(offset, d, expected)
apply_cases: _ApplyCases = [
(
CBMonthEnd(),
{
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 2, 7): datetime(2008, 2, 29),
},
),
(
2 * CBMonthEnd(),
{
datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 2, 7): datetime(2008, 3, 31),
},
),
(
-CBMonthEnd(),
{
datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 2, 8): datetime(2008, 1, 31),
},
),
(
-2 * CBMonthEnd(),
{
datetime(2008, 1, 1): datetime(2007, 11, 30),
datetime(2008, 2, 9): datetime(2007, 12, 31),
},
),
(
CBMonthEnd(0),
{
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 2, 7): datetime(2008, 2, 29),
},
),
]
@pytest.mark.parametrize("case", apply_cases)
def test_apply(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + CBMonthEnd(10)
assert result == datetime(2013, 7, 31)
result = dt + CDay(100) - CDay(100)
assert result == dt
off = CBMonthEnd() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 7, 29)
assert rs == xp
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2012, 5, 31)
assert rs == xp
def test_holidays(self):
# Define a TradingDay offset
holidays = ["2012-01-31", datetime(2012, 2, 28), np.datetime64("2012-02-29")]
bm_offset = CBMonthEnd(holidays=holidays)
dt = datetime(2012, 1, 1)
assert dt + bm_offset == datetime(2012, 1, 30)
assert dt + 2 * bm_offset == datetime(2012, 2, 27)
@pytest.mark.filterwarnings("ignore:Non:pandas.errors.PerformanceWarning")
def test_datetimeindex(self):
from pandas.tseries.holiday import USFederalHolidayCalendar
hcal = USFederalHolidayCalendar()
freq = CBMonthEnd(calendar=hcal)
assert date_range(start="20120101", end="20130101", freq=freq).tolist()[
0
] == datetime(2012, 1, 31)
class TestCustomBusinessMonthBegin(CustomBusinessMonthBase, Base):
_offset = CBMonthBegin
def test_different_normalize_equals(self):
# GH#21404 changed __eq__ to return False when `normalize` does not match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset) == "<CustomBusinessMonthBegin>"
assert repr(self.offset2) == "<2 * CustomBusinessMonthBegins>"
def test_call(self):
with tm.assert_produces_warning(FutureWarning):
# GH#34171 DateOffset.__call__ is deprecated
assert self.offset2(self.d) == datetime(2008, 3, 3)
def testRollback1(self):
assert CDay(10).rollback(datetime(2007, 12, 31)) == datetime(2007, 12, 31)
def testRollback2(self):
assert CBMonthBegin(10).rollback(self.d) == datetime(2008, 1, 1)
def testRollforward1(self):
assert CBMonthBegin(10).rollforward(self.d) == datetime(2008, 1, 1)
def test_roll_date_object(self):
offset = CBMonthBegin()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
assert result == datetime(2012, 9, 3)
result = offset.rollforward(dt)
assert result == datetime(2012, 10, 1)
offset = offsets.Day()
result = offset.rollback(dt)
assert result == datetime(2012, 9, 15)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
on_offset_cases = [
(CBMonthBegin(), datetime(2008, 1, 1), True),
(CBMonthBegin(), datetime(2008, 1, 31), False),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, dt, expected = case
assert_is_on_offset(offset, dt, expected)
apply_cases: _ApplyCases = [
(
CBMonthBegin(),
{
datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 2, 7): datetime(2008, 3, 3),
},
),
(
2 * CBMonthBegin(),
{
datetime(2008, 1, 1): datetime(2008, 3, 3),
datetime(2008, 2, 7): datetime(2008, 4, 1),
},
),
(
-CBMonthBegin(),
{
datetime(2008, 1, 1): datetime(2007, 12, 3),
datetime(2008, 2, 8): datetime(2008, 2, 1),
},
),
(
-2 * CBMonthBegin(),
{
datetime(2008, 1, 1): datetime(2007, 11, 1),
datetime(2008, 2, 9): datetime(2008, 1, 1),
},
),
(
CBMonthBegin(0),
{
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 7): datetime(2008, 2, 1),
},
),
]
@pytest.mark.parametrize("case", apply_cases)
def test_apply(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + CBMonthBegin(10)
assert result == datetime(2013, 8, 1)
result = dt + CDay(100) - CDay(100)
assert result == dt
off = CBMonthBegin() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 7, 1)
assert rs == xp
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2012, 6, 1)
assert rs == xp
def test_holidays(self):
# Define a TradingDay offset
holidays = ["2012-02-01", datetime(2012, 2, 2), np.datetime64("2012-03-01")]
bm_offset = CBMonthBegin(holidays=holidays)
dt = datetime(2012, 1, 1)
assert dt + bm_offset == datetime(2012, 1, 2)
assert dt + 2 * bm_offset == datetime(2012, 2, 3)
@pytest.mark.filterwarnings("ignore:Non:pandas.errors.PerformanceWarning")
def test_datetimeindex(self):
hcal = USFederalHolidayCalendar()
cbmb = CBMonthBegin(calendar=hcal)
assert date_range(start="20120101", end="20130101", freq=cbmb).tolist()[
0
] == datetime(2012, 1, 3)
class TestSemiMonthEnd(Base):
_offset = SemiMonthEnd
offset1 = _offset()
offset2 = _offset(2)
def test_offset_whole_year(self):
dates = (
datetime(2007, 12, 31),
datetime(2008, 1, 15),
datetime(2008, 1, 31),
datetime(2008, 2, 15),
datetime(2008, 2, 29),
datetime(2008, 3, 15),
datetime(2008, 3, 31),
datetime(2008, 4, 15),
datetime(2008, 4, 30),
datetime(2008, 5, 15),
datetime(2008, 5, 31),
datetime(2008, 6, 15),
datetime(2008, 6, 30),
datetime(2008, 7, 15),
datetime(2008, 7, 31),
datetime(2008, 8, 15),
datetime(2008, 8, 31),
datetime(2008, 9, 15),
datetime(2008, 9, 30),
datetime(2008, 10, 15),
datetime(2008, 10, 31),
datetime(2008, 11, 15),
datetime(2008, 11, 30),
datetime(2008, 12, 15),
datetime(2008, 12, 31),
)
for base, exp_date in zip(dates[:-1], dates[1:]):
assert_offset_equal(SemiMonthEnd(), base, exp_date)
# ensure .apply_index works as expected
s = DatetimeIndex(dates[:-1])
with tm.assert_produces_warning(None):
# GH#22535 check that we don't get a FutureWarning from adding
# an integer array to PeriodIndex
result = SemiMonthEnd() + s
exp = DatetimeIndex(dates[1:])
tm.assert_index_equal(result, exp)
# ensure generating a range with DatetimeIndex gives same result
result = date_range(start=dates[0], end=dates[-1], freq="SM")
exp = DatetimeIndex(dates, freq="SM")
tm.assert_index_equal(result, exp)
offset_cases = []
offset_cases.append(
(
SemiMonthEnd(),
{
datetime(2008, 1, 1): datetime(2008, 1, 15),
datetime(2008, 1, 15): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 15),
datetime(2006, 12, 14): datetime(2006, 12, 15),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2007, 1, 15),
datetime(2007, 1, 1): datetime(2007, 1, 15),
datetime(2006, 12, 1): datetime(2006, 12, 15),
datetime(2006, 12, 15): datetime(2006, 12, 31),
},
)
)
offset_cases.append(
(
SemiMonthEnd(day_of_month=20),
{
datetime(2008, 1, 1): datetime(2008, 1, 20),
datetime(2008, 1, 15): datetime(2008, 1, 20),
datetime(2008, 1, 21): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 20),
datetime(2006, 12, 14): datetime(2006, 12, 20),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2007, 1, 20),
datetime(2007, 1, 1): datetime(2007, 1, 20),
datetime(2006, 12, 1): datetime(2006, 12, 20),
datetime(2006, 12, 15): datetime(2006, 12, 20),
},
)
)
offset_cases.append(
(
SemiMonthEnd(0),
{
datetime(2008, 1, 1): datetime(2008, 1, 15),
datetime(2008, 1, 16): datetime(2008, 1, 31),
datetime(2008, 1, 15): datetime(2008, 1, 15),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2006, 12, 31),
datetime(2007, 1, 1): datetime(2007, 1, 15),
},
)
)
offset_cases.append(
(
SemiMonthEnd(0, day_of_month=16),
{
datetime(2008, 1, 1): datetime(2008, 1, 16),
datetime(2008, 1, 16): datetime(2008, 1, 16),
datetime(2008, 1, 15): datetime(2008, 1, 16),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2006, 12, 31),
datetime(2007, 1, 1): datetime(2007, 1, 16),
},
)
)
offset_cases.append(
(
SemiMonthEnd(2),
{
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2006, 12, 29): datetime(2007, 1, 15),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
datetime(2007, 1, 16): datetime(2007, 2, 15),
datetime(2006, 11, 1): datetime(2006, 11, 30),
},
)
)
offset_cases.append(
(
SemiMonthEnd(-1),
{
datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2008, 6, 15),
datetime(2008, 12, 31): datetime(2008, 12, 15),
datetime(2006, 12, 29): datetime(2006, 12, 15),
datetime(2006, 12, 30): datetime(2006, 12, 15),
datetime(2007, 1, 1): datetime(2006, 12, 31),
},
)
)
offset_cases.append(
(
SemiMonthEnd(-1, day_of_month=4),
{
datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2007, 1, 4): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2008, 6, 4),
datetime(2008, 12, 31): datetime(2008, 12, 4),
datetime(2006, 12, 5): datetime(2006, 12, 4),
datetime(2006, 12, 30): datetime(2006, 12, 4),
datetime(2007, 1, 1): datetime(2006, 12, 31),
},
)
)
offset_cases.append(
(
SemiMonthEnd(-2),
{
datetime(2007, 1, 1): datetime(2006, 12, 15),
datetime(2008, 6, 30): datetime(2008, 5, 31),
datetime(2008, 3, 15): datetime(2008, 2, 15),
datetime(2008, 12, 31): datetime(2008, 11, 30),
datetime(2006, 12, 29): datetime(2006, 11, 30),
datetime(2006, 12, 14): datetime(2006, 11, 15),
datetime(2007, 1, 1): datetime(2006, 12, 15),
},
)
)
@pytest.mark.parametrize("case", offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
@pytest.mark.parametrize("case", offset_cases)
def test_apply_index(self, case):
# https://github.com/pandas-dev/pandas/issues/34580
offset, cases = case
s = DatetimeIndex(cases.keys())
exp = DatetimeIndex(cases.values())
with tm.assert_produces_warning(None):
# GH#22535 check that we don't get a FutureWarning from adding
# an integer array to PeriodIndex
result = offset + s
tm.assert_index_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = offset.apply_index(s)
tm.assert_index_equal(result, exp)
on_offset_cases = [
(datetime(2007, 12, 31), True),
(datetime(2007, 12, 15), True),
(datetime(2007, 12, 14), False),
(datetime(2007, 12, 1), False),
(datetime(2008, 2, 29), True),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
dt, expected = case
assert_is_on_offset(SemiMonthEnd(), dt, expected)
@pytest.mark.parametrize("klass", [Series, DatetimeIndex])
def test_vectorized_offset_addition(self, klass):
s = klass(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
with tm.assert_produces_warning(None):
# GH#22535 check that we don't get a FutureWarning from adding
# an integer array to PeriodIndex
result = s + SemiMonthEnd()
result2 = SemiMonthEnd() + s
exp = klass(
[
Timestamp("2000-01-31 00:15:00", tz="US/Central"),
Timestamp("2000-02-29", tz="US/Central"),
],
name="a",
)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
s = klass(
[
Timestamp("2000-01-01 00:15:00", tz="US/Central"),
Timestamp("2000-02-01", tz="US/Central"),
],
name="a",
)
with tm.assert_produces_warning(None):
# GH#22535 check that we don't get a FutureWarning from adding
# an integer array to PeriodIndex
result = s + SemiMonthEnd()
result2 = SemiMonthEnd() + s
exp = klass(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
class TestSemiMonthBegin(Base):
_offset = SemiMonthBegin
offset1 = _offset()
offset2 = _offset(2)
def test_offset_whole_year(self):
dates = (
datetime(2007, 12, 15),
datetime(2008, 1, 1),
datetime(2008, 1, 15),
datetime(2008, 2, 1),
datetime(2008, 2, 15),
datetime(2008, 3, 1),
datetime(2008, 3, 15),
datetime(2008, 4, 1),
datetime(2008, 4, 15),
datetime(2008, 5, 1),
datetime(2008, 5, 15),
datetime(2008, 6, 1),
datetime(2008, 6, 15),
datetime(2008, 7, 1),
datetime(2008, 7, 15),
datetime(2008, 8, 1),
datetime(2008, 8, 15),
datetime(2008, 9, 1),
datetime(2008, 9, 15),
datetime(2008, 10, 1),
datetime(2008, 10, 15),
datetime(2008, 11, 1),
datetime(2008, 11, 15),
datetime(2008, 12, 1),
datetime(2008, 12, 15),
)
for base, exp_date in zip(dates[:-1], dates[1:]):
assert_offset_equal(SemiMonthBegin(), base, exp_date)
# ensure .apply_index works as expected
s = DatetimeIndex(dates[:-1])
with tm.assert_produces_warning(None):
# GH#22535 check that we don't get a FutureWarning from adding
# an integer array to PeriodIndex
result = SemiMonthBegin() + s
exp = DatetimeIndex(dates[1:])
tm.assert_index_equal(result, exp)
# ensure generating a range with DatetimeIndex gives same result
result = date_range(start=dates[0], end=dates[-1], freq="SMS")
exp = DatetimeIndex(dates, freq="SMS")
tm.assert_index_equal(result, exp)
offset_cases = [
(
SemiMonthBegin(),
{
datetime(2008, 1, 1): datetime(2008, 1, 15),
datetime(2008, 1, 15): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 14): datetime(2006, 12, 15),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2007, 1, 1): datetime(2007, 1, 15),
datetime(2006, 12, 1): datetime(2006, 12, 15),
datetime(2006, 12, 15): datetime(2007, 1, 1),
},
),
(
SemiMonthBegin(day_of_month=20),
{
datetime(2008, 1, 1): datetime(2008, 1, 20),
datetime(2008, 1, 15): datetime(2008, 1, 20),
datetime(2008, 1, 21): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 14): datetime(2006, 12, 20),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2007, 1, 1): datetime(2007, 1, 20),
datetime(2006, 12, 1): datetime(2006, 12, 20),
datetime(2006, 12, 15): datetime(2006, 12, 20),
},
),
(
SemiMonthBegin(0),
{
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 16): datetime(2008, 2, 1),
datetime(2008, 1, 15): datetime(2008, 1, 15),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 2): datetime(2006, 12, 15),
datetime(2007, 1, 1): datetime(2007, 1, 1),
},
),
(
SemiMonthBegin(0, day_of_month=16),
{
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 16): datetime(2008, 1, 16),
datetime(2008, 1, 15): datetime(2008, 1, 16),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2007, 1, 5): datetime(2007, 1, 16),
datetime(2007, 1, 1): datetime(2007, 1, 1),
},
),
(
SemiMonthBegin(2),
{
datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 15),
datetime(2006, 12, 1): datetime(2007, 1, 1),
datetime(2006, 12, 29): datetime(2007, 1, 15),
datetime(2006, 12, 15): datetime(2007, 1, 15),
datetime(2007, 1, 1): datetime(2007, 2, 1),
datetime(2007, 1, 16): datetime(2007, 2, 15),
datetime(2006, 11, 1): datetime(2006, 12, 1),
},
),
(
SemiMonthBegin(-1),
{
datetime(2007, 1, 1): datetime(2006, 12, 15),
datetime(2008, 6, 30): datetime(2008, 6, 15),
datetime(2008, 6, 14): datetime(2008, 6, 1),
datetime(2008, 12, 31): datetime(2008, 12, 15),
datetime(2006, 12, 29): datetime(2006, 12, 15),
datetime(2006, 12, 15): datetime(2006, 12, 1),
datetime(2007, 1, 1): datetime(2006, 12, 15),
},
),
(
SemiMonthBegin(-1, day_of_month=4),
{
datetime(2007, 1, 1): datetime(2006, 12, 4),
datetime(2007, 1, 4): datetime(2007, 1, 1),
datetime(2008, 6, 30): datetime(2008, 6, 4),
datetime(2008, 12, 31): datetime(2008, 12, 4),
datetime(2006, 12, 5): datetime(2006, 12, 4),
datetime(2006, 12, 30): datetime(2006, 12, 4),
datetime(2006, 12, 2): datetime(2006, 12, 1),
datetime(2007, 1, 1): datetime(2006, 12, 4),
},
),
(
SemiMonthBegin(-2),
{
datetime(2007, 1, 1): datetime(2006, 12, 1),
datetime(2008, 6, 30): datetime(2008, 6, 1),
datetime(2008, 6, 14): datetime(2008, 5, 15),
datetime(2008, 12, 31): datetime(2008, 12, 1),
datetime(2006, 12, 29): datetime(2006, 12, 1),
datetime(2006, 12, 15): datetime(2006, 11, 15),
datetime(2007, 1, 1): datetime(2006, 12, 1),
},
),
]
@pytest.mark.parametrize("case", offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
@pytest.mark.parametrize("case", offset_cases)
def test_apply_index(self, case):
offset, cases = case
s = DatetimeIndex(cases.keys())
with tm.assert_produces_warning(None):
# GH#22535 check that we don't get a FutureWarning from adding
# an integer array to PeriodIndex
result = offset + s
exp = DatetimeIndex(cases.values())
tm.assert_index_equal(result, exp)
on_offset_cases = [
(datetime(2007, 12, 1), True),
(datetime(2007, 12, 15), True),
(datetime(2007, 12, 14), False),
(datetime(2007, 12, 31), False),
(datetime(2008, 2, 15), True),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
dt, expected = case
assert_is_on_offset(SemiMonthBegin(), dt, expected)
@pytest.mark.parametrize("klass", [Series, DatetimeIndex])
def test_vectorized_offset_addition(self, klass):
s = klass(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
with tm.assert_produces_warning(None):
# GH#22535 check that we don't get a FutureWarning from adding
# an integer array to PeriodIndex
result = s + | SemiMonthBegin() | pandas._libs.tslibs.offsets.SemiMonthBegin |
# coding: utf-8
"""Extract AA mutations from NT mutations
Author: <NAME> - Vector Engineering Team (<EMAIL>)
"""
import pandas as pd
from scripts.fasta import read_fasta_file
from scripts.util import translate
def extract_aa_mutations(
dna_mutation_file, gene_or_protein_file, reference_file, mode="gene"
):
# Load the reference sequence
with open(reference_file, "r") as fp:
lines = fp.readlines()
ref = read_fasta_file(lines)
ref_seq = list(ref.values())[0]
# JSON to dataframe
gene_or_protein_df = | pd.read_json(gene_or_protein_file) | pandas.read_json |
"""Class definition for the DataSetParser ABC and FeaturizerMixin."""
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Callable, Generator, List, Tuple, Type
import numpy as np
import pandas as pd
from sklearn.preprocessing import RobustScaler
class FeaturizerMixin:
"""Mixin to provide secondary featurization functionality."""
def featurize_secondary(self):
"""
Perform secondary featurization.
Sequentially trigger each featurizer to extract secondary features.
The extracted secondary metafeatures are stored in each featurizer's
`sec_metafeatures` and `sec_test_metafeatures` attributes.
These extracted metafeatures will then be collected and appended column-wise
        to the `metafeatures` and `test_metafeatures` attributes of the DataSetParser
subclass instance.
"""
for featurizer in self.featurizers:
if type(featurizer).__name__ == "RawDataSetFeaturizerViaLambda":
featurizer.featurize(
self._create_raw_generator(),
keys=self.metafeatures,
test_keys=self.test_metafeatures,
multiprocess=self._multiprocess_raw_secondary,
)
else:
featurizer.featurize(
meta_df=self.metafeatures,
test_meta_df=self.test_metafeatures,
)
self.__add_secondary_metafeatures()
def __add_secondary_metafeatures(self):
"""Add secondary features to the training and test metafeature attributes."""
# Get secondary feature names
if self.metafeatures is not None:
sec_feature_names = list(self.metafeatures) + [
name
for featurizer in self.featurizers
for name in featurizer.sec_feature_names
]
elif self.test_metafeatures is not None:
sec_feature_names = list(self.test_metafeatures) + [
name
for featurizer in self.featurizers
for name in featurizer.sec_feature_names
]
if self.metafeatures is not None:
sec_metafeatures = [x.sec_metafeatures for x in self.featurizers]
self.metafeatures = pd.concat(
[self.metafeatures, *sec_metafeatures],
axis=1,
ignore_index=True,
)
self.metafeatures.columns = sec_feature_names
if self.test_metafeatures is not None:
sec_test_metafeatures = [
x.sec_test_metafeatures for x in self.featurizers
]
self.test_metafeatures = pd.concat(
[self.test_metafeatures, *sec_test_metafeatures],
axis=1,
ignore_index=True,
)
self.test_metafeatures.columns = sec_feature_names
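# Sketch of the featurizer interface implied by FeaturizerMixin above (attribute names are
# exactly those read in featurize_secondary; everything else about a featurizer is open):
#   - featurize(meta_df=..., test_meta_df=...), or for RawDataSetFeaturizerViaLambda a call
#     with the raw generator plus keys/test_keys/multiprocess
#   - sec_feature_names, sec_metafeatures, sec_test_metafeatures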
class DataSetParser(ABC, FeaturizerMixin):
"""
Abstract base class to load and extract metafeatures from raw data sets.
FeaturizerMixin provides the `.featurize` method.
Instance attributes:
src {Path}
-- Path to data set file on disk.
metafeatures {pd.DataFrame}
-- Metafeatures extracted from the raw data set. Each metafeature
row corresponds to a feature column in the raw data set.
labels {pd.Series}
-- Label corresponding to each metafeature.
test_src {Path}
-- Optional path to test raw data set file on disk. This attribute
applies more to the subclasses of MetaDataSetParser.
test_metafeatures {pd.DataFrame}
-- Optional metafeatures extracted from the test raw data set.
test_labels {pd.Series}
-- Optional labels corresponding to each test metafeature row.
scaler {RobustScaler}
            -- A scaler used to normalize metafeatures before serving them
for training.
featurizers: {List}
-- A list of featurizers that performs secondary metafeaturizations.
Class attributes:
NUM_BASE_METAFEATURES {int}
-- Number of base metafeatures.
Used to separate base and secondary metafeatures.
Abstract methods:
load_data_set
-- Load the data set and perform necessarily cleaning and parsing.
featurize_base
-- Featurize base metafeatures.
normalize_features
-- Performs normalization on the metafeatures and test metafeatures
(if provided).
_create_raw_generator
-- Returns a generator of raw data sets. This supports the
MetaDataSetFeaturizerViaLambda class functionality.
"""
    NUM_BASE_METAFEATURES = 7  # Includes (total_val, min, max, mean, std, num_nans, num_distincts)
def __init__(self):
"""Init function."""
self.src: Path = None
self.labels: pd.Series = None
self.metafeatures: pd.DataFrame = None
self.test_src: Path = None
self.test_labels: pd.Series = None
self.test_metafeatures: pd.DataFrame = None
self.scaler: Type[RobustScaler] = None
self.featurizers: List = []
self._multiprocess_raw_secondary: bool = False # Multiprocessing of raw dataframe(s)
@abstractmethod
def load_data_set(self):
"""Load data set from source."""
raise NotImplementedError
@abstractmethod
def featurize_base(self):
"""Featurize base metafeatures."""
raise NotImplementedError
@abstractmethod
def normalize_features(self):
"""Normalize metafeatures for training."""
raise NotImplementedError
@abstractmethod
def _create_raw_generator(
self
) -> Generator[Tuple[str, Callable[[], pd.DataFrame]], None, None]:
raise NotImplementedError
def _select_metafeatures(
self, df: pd.DataFrame, mark: str = "*"
) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""
Select metafeatures to normalize and to retain for training.
The following criteria is used.
Metafeatures to:
- normalize: Numerical columns
- not normalize but retain for training: Features whose title ends with `mark`.
Remainder metafeatures are dropped.
Note:
Columns are tracked by indices instead of names to avoid problems when
there are duplicated columnn names.
Arguments:
df {pd.DataFrame}
-- Metafeatures dataframe.
mark {str}
-- Character to append to names of columns that should not be
                normalized but retained for training.
Returns:
Tuple[pd.DataFrame, pd.DataFrame]
-- (metafeatures_to_normalize, metafeatures_to_retain)
"""
idx_to_normalize: List[int] = []
idx_to_retain: List[int] = []
IGNORE_COLS = (
"attribute_name", # Already represented as ngrams
"sample", # Ignore sample columns which may be of type int
"total_val", # Intent prediction should not be based on # data points
"num_distincts", # Use `normalized_distinct_rate` instead
"num_nans", # Captured in `nan_rate`
)
for i, col in enumerate(df.columns):
if col in IGNORE_COLS:
continue
# Save columns that are either numeric or that have been marked
# into appropriate groups
            if col[-1] == mark:
idx_to_retain.append(i)
elif self._is_numeric(df.iloc[:, i]):
idx_to_normalize.append(i)
features_to_normalize = df.iloc[:, idx_to_normalize]
features_to_retain = df.iloc[:, idx_to_retain]
return features_to_normalize, features_to_retain
def _is_numeric(self, series: pd.Series) -> bool:
return | pd.api.types.is_numeric_dtype(series) | pandas.api.types.is_numeric_dtype |
from hddm.simulators import *
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import seaborn as sns
import pymc as pm
import os
import warnings
import hddm
import pandas as pd
from kabuki.analyze import _post_pred_generate, _parents_to_random_posterior_sample
from statsmodels.distributions.empirical_distribution import ECDF
from hddm.model_config import model_config
# Basic utility
def prettier_tag(tag):
len_tag = len(tag)
if len_tag == 1:
return tag[0]
else:
return "(" + ", ".join([str(t) for t in tag]) + ")"
# Plot Composer Functions
def plot_posterior_pair(
model,
plot_func=None,
save=False,
path=None,
figsize=(8, 6),
format="png",
samples=100,
parameter_recovery_mode=False,
**kwargs
):
"""Generate posterior pair plots for each observed node.
Arguments:
model: kabuki.Hierarchical
The (constructed and sampled) kabuki hierarchical model to
create the posterior predictive from.
Optional:
samples: int <default=100>
How many posterior samples to use.
columns: int <default=3>
How many columns to use for plotting the subjects.
bins: int <default=100>
How many bins to compute the data histogram over.
figsize: (int, int) <default=(8, 6)>
save: bool <default=False>
Whether to save the figure to a file.
path: str <default=None>
Save figure into directory prefix
format: str or list of strings <default='png'>
-- Save figure to an image file of type 'format'. If more than one format is
given, multiple files are created
parameter_recovery_mode: bool <default=False>
If the data attached to the model supplied under the model argument
has the format expected of the simulator_h_c() function from the simulators.hddm_dataset_generators
module, then parameter_recovery_mode = True can be used to supply ground-truth parameterizations to the
plot_func argument described below.
plot_func: function <default=_plot_posterior_pdf_node>
Plotting function to use for each observed node
(see default function for an example).
Note:
This function changes the current value and logp of the nodes.
"""
if hasattr(model, "model"):
kwargs["model_"] = model.model
else:
kwargs["model_"] = "ddm_vanilla"
if plot_func is None:
plot_func = _plot_func_pair
observeds = model.get_observeds()
kwargs["figsize"] = figsize
kwargs["n_samples"] = samples
# Plot different conditions (new figure for each)
for tag, nodes in observeds.groupby("tag"):
# Plot individual subjects (if present)
for subj_i, (node_name, bottom_node) in enumerate(nodes.iterrows()):
if "subj_idx" in bottom_node:
if str(node_name) == "wfpt":
kwargs["title"] = str(subj_i)
else:
kwargs["title"] = str(node_name)
if parameter_recovery_mode:
kwargs["node_data"] = model.data.loc[bottom_node["node"].value.index]
g = plot_func(bottom_node["node"], **kwargs)
plt.show()
# Save figure if necessary
if save:
print("passing_print")
if len(tag) == 0:
fname = "ppq_subject_" + str(subj_i)
else:
fname = "ppq_" + ".".join(tag) + "_subject_" + str(subj_i)
if path is None:
path = "."
if isinstance(format, str):
format = [format]
print(["%s.%s" % (os.path.join(path, fname), x) for x in format])
[
g.fig.savefig("%s.%s" % (os.path.join(path, fname), x), format=x)
for x in format
]
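# Hedged usage sketch (added; not in the original file). Assumes `m` is an
# already-sampled HDDM model; the output directory below is a placeholder.
def _demo_plot_posterior_pair(m):
    plot_posterior_pair(
        m,
        samples=50,                  # posterior draws per subject
        figsize=(6, 6),
        save=True,
        path="figures/pair_plots",   # hypothetical, must exist beforehand
        format="png",
    )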
def plot_from_data(
df,
generative_model="ddm_vanilla",
plot_func=None,
columns=None,
save=False,
path=None,
groupby="subj_idx",
figsize=(8, 6),
format="png",
**kwargs
):
"""Plot data from a hddm ready DataFrame.
Arguments:
df : pd.DataFrame
HDDM ready dataframe.
value_range : numpy.ndarray
Array to evaluate the likelihood over.
Optional:
columns : int <default=3>
How many columns to use for plotting the subjects.
bins : int <default=100>
How many bins to compute the data histogram over.
figsize : (int, int) <default=(8, 6)>
save : bool <default=False>
Whether to save the figure to a file.
path : str <default=None>
Save figure into directory prefix
format : str or list of strings
Save figure to an image file of type 'format'. If more than one format is
given, multiple files are created
plot_func : function <default=_plot_func_posterior_pdf_node_nn>
Plotting function to use for each observed node
(see default function for an example).
Note:
This function changes the current value and logp of the nodes.
"""
kwargs["model_"] = generative_model
title_ = kwargs.pop("title", "")
ax_title_size = kwargs.pop("ax_title_fontsize", 10)
if type(groupby) == str:
groupby = [groupby]
if plot_func is None:
plot_func = _plot_func_posterior_pdf_node_nn
if columns is None:
# If there are less than 3 items to plot per figure,
# only use as many columns as there are items.
max_items = max([len(i[1]) for i in df.groupby(groupby).groups.items()])
columns = min(3, max_items)
n_plots = len(df.groupby(groupby))
# Plot different conditions (new figure for each)
fig = plt.figure(figsize=figsize)
fig.suptitle(title_, fontsize=12)
fig.subplots_adjust(top=0.9, hspace=0.4, wspace=0.3)
i = 1
for group_id, df_tmp in df.groupby(groupby):
nrows = int(np.ceil(n_plots / columns))
# Plot individual subjects (if present)
ax = fig.add_subplot(nrows, columns, i)
# Allow kwargs to pass to the plot_func, whether this is the first plot
# (useful to generate legends only for the first subplot)
if i == 1:
kwargs["add_legend"] = True
else:
kwargs["add_legend"] = False
# Make axis title
tag = ""
for j in range(len(groupby)):
tag += groupby[j] + "(" + str(group_id[j]) + ")"
if j < (len(groupby) - 1):
tag += "_"
print(tag)
ax.set_title(tag, fontsize=ax_title_size)
# Call plot function on ax
# This function should manipulate the ax object, and is expected to not return anything.
plot_func(df_tmp, ax, **kwargs)
i += 1
# Save figure if necessary
if save:
fname = "ppq_" + tag
if path is None:
path = "."
if isinstance(format, str):
format = [format]
[
fig.savefig("%s.%s" % (os.path.join(path, fname), x), format=x)
for x in format
]
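# Hedged usage sketch (added; not in the original file). Assumes `df` is an
# HDDM-ready DataFrame with 'rt', 'response' and 'subj_idx' columns; the
# simulator-based axis function below accepts raw DataFrames directly.
def _demo_plot_from_data(df):
    plot_from_data(
        df,
        generative_model="ddm_vanilla",
        plot_func=_plot_func_posterior_node_from_sim,  # defined further below
        groupby=["subj_idx"],
        value_range=np.array([-4, 4]),  # forwarded to the axis-level function
        bin_size=0.1,
    )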
def plot_posterior_predictive(
model,
plot_func=None,
required_method="pdf",
columns=None,
save=False,
path=None,
figsize=(8, 6),
format="png",
num_subjs=None,
parameter_recovery_mode=False,
**kwargs
):
"""Plot the posterior predictive distribution of a kabuki hierarchical model.
Arguments:
model : kabuki.Hierarchical
The (constructed and sampled) kabuki hierarchical model to
create the posterior predictive from.
value_range : numpy.ndarray
Array to evaluate the likelihood over.
Optional:
samples : int <default=10>
How many posterior samples to generate the posterior predictive over.
columns : int <default=3>
How many columns to use for plotting the subjects.
bins : int <default=100>
How many bins to compute the data histogram over.
figsize : (int, int) <default=(8, 6)>
save : bool <default=False>
Whether to save the figure to a file.
path : str <default=None>
Save figure into directory prefix
format : str or list of strings
Save figure to an image file of type 'format'. If more than one format is
given, multiple files are created
parameter_recovery_mode: bool <default=False>
If the data attached to the model supplied under the model argument
has the format expected of the simulator_h_c() function from the simulators.hddm_dataset_generators
module, then parameter_recovery_mode = True can be used to supply ground-truth parameterizations to the
plot_func argument described below.
plot_func : function <default=_plot_func_posterior_pdf_node_nn>
Plotting function to use for each observed node
(see default function for an example).
Note:
This function changes the current value and logp of the nodes.
"""
if hasattr(model, "model"):
kwargs["model_"] = model.model
else:
kwargs["model_"] = "ddm_vanilla"
if plot_func is None:
plot_func = _plot_func_posterior_pdf_node_nn
observeds = model.get_observeds()
if columns is None:
# If there are less than 3 items to plot per figure,
# only use as many columns as there are items.
max_items = max([len(i[1]) for i in observeds.groupby("tag").groups.items()])
columns = min(3, max_items)
# Plot different conditions (new figure for each)
for tag, nodes in observeds.groupby("tag"):
fig = plt.figure(figsize=figsize) # prev utils.pretty_tag
fig.suptitle(prettier_tag(tag), fontsize=12)
fig.subplots_adjust(top=0.85, hspace=0.4, wspace=0.3)
nrows = num_subjs or np.ceil(len(nodes) / columns)
if len(nodes) - (int(nrows) * columns) > 0:
nrows += 1
# Plot individual subjects (if present)
i = 0
for subj_i, (node_name, bottom_node) in enumerate(nodes.iterrows()):
i += 1
if not hasattr(bottom_node["node"], required_method):
continue # skip nodes that do not define the required_method
ax = fig.add_subplot(int(np.ceil(nrows)), columns, subj_i + 1)
if "subj_idx" in bottom_node:
ax.set_title(str(bottom_node["subj_idx"]))
# Allow kwargs to pass to the plot_func, whether this is the first plot
# (useful to generate legends only for the first subplot)
if i == 1:
kwargs["add_legend"] = True
else:
kwargs["add_legend"] = False
if parameter_recovery_mode:
kwargs["parameter_recovery_mode"] = True
kwargs["node_data"] = model.data.loc[bottom_node["node"].value.index]
# Call plot function on ax
# This function should manipulate the ax object, and is expected to not return anything.
plot_func(bottom_node["node"], ax, **kwargs)
if i > (np.ceil(nrows) * columns):
warnings.warn("Too many nodes. Consider increasing number of columns.")
break
if num_subjs is not None and i >= num_subjs:
break
# Save figure if necessary
if save:
fname = "ppq_" + ".".join(tag)
if path is None:
path = "."
if isinstance(format, str):
format = [format]
[
fig.savefig("%s.%s" % (os.path.join(path, fname), x), format=x)
for x in format
]
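# Hedged usage sketch (added; not in the original file). `m` is assumed to be
# a fitted hddm model; value_range is required by the default axis function.
def _demo_plot_posterior_predictive(m):
    plot_posterior_predictive(
        m,
        value_range=np.arange(-4, 4, 0.02),
        samples=10,
        columns=3,
        figsize=(10, 8),
    )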
# AXIS MANIPULATORS ---------------
def _plot_func_posterior_pdf_node_nn(
bottom_node,
axis,
value_range=None,
samples=10,
bin_size=0.2,
plot_likelihood_raw=False,
**kwargs
):
"""Calculate posterior predictives from raw likelihood values and plot it on top of a histogram of the real data.
The function does not define a figure, but manipulates an axis object.
Arguments:
bottom_node : pymc.stochastic
Bottom node to compute posterior over.
axis : matplotlib.axis
Axis to plot into.
value_range : numpy.ndarray
Range over which to evaluate the likelihood.
Optional:
model : str <default='ddm_vanilla'>
str that defines the generative model underlying the kabuki model from which the bottom_node
argument derives.
samples : int <default=10>
Number of posterior samples to use.
bin_size: float <default=0.2>
Size of bins for the data histogram.
plot_likelihood_raw : bool <default=False>
Whether or not to plot likelihoods sample wise.
add_legend : bool <default=True>
Whether or not to add a legend to the plot
linewidth : float <default=0.5>
Linewidth of histogram outlines.
"""
# Setup -----
color_dict = {
-1: "black",
0: "black",
1: "green",
2: "blue",
3: "red",
4: "orange",
5: "purple",
6: "brown",
}
model_ = kwargs.pop("model_", "ddm_vanilla")
add_legend = kwargs.pop("add_legend", True)
alpha_line = kwargs.pop("alpha", 0.05)
lw_ = kwargs.pop("linewidth", 0.5)
choices = model_config[model_]["choices"]
n_choices = model_config[model_]["n_choices"]
if value_range is None:
# Infer from data by finding the min and max from the nodes
raise NotImplementedError("value_range keyword argument must be supplied.")
bins = np.arange(value_range[0], value_range[-1], bin_size)
if n_choices == 2:
like = np.empty((samples, len(value_range)), dtype=np.float32)
pdf_in = value_range
else:
like = np.empty((samples, len(value_range), n_choices), dtype=np.float32)
pdf_in = np.zeros((len(value_range), 2))
pdf_in[:, 0] = value_range
# -----
# Get posterior parameters and plot corresponding likelihoods (if desired) ---
for sample in range(samples):
# Get random posterior sample
_parents_to_random_posterior_sample(bottom_node)
# Generate likelihood for parents parameters
if n_choices == 2:
like[sample, :] = bottom_node.pdf(pdf_in)
if plot_likelihood_raw:
axis.plot(
value_range,
like[sample, :],
color="black",
lw=1.0,
alpha=alpha_line,
)
else:
c_cnt = 0
for choice in choices:
pdf_in[:, 1] = choice
like[sample, :, c_cnt] = bottom_node.pdf(pdf_in)
if plot_likelihood_raw:
axis.plot(
pdf_in[:, 0],
like[sample, :, c_cnt],
color=color_dict[choice],
lw=1.0,
alpha=alpha_line,
)
c_cnt += 1
# -------
# If we don't plot raw likelihoods, we generate a mean likelihood from the samples above
# and plot it as a line with uncertainty bars
if not plot_likelihood_raw:
y = like.mean(axis=0)
try:
y_std = like.std(axis=0)
except FloatingPointError:
print(
"WARNING! %s threw FloatingPointError over std computation. Setting to 0 and continuing."
% bottom_node.__name__
)
y_std = np.zeros_like(y)
if n_choices == 2:
axis.plot(value_range, y, label="post pred", color="black")
axis.fill_between(
value_range, y - y_std, y + y_std, color="black", alpha=0.5
)
else:
c_cnt = 0
for choice in choices:
axis.plot(
value_range,
y[:, c_cnt],
label="post pred",
color=color_dict[choice],
)
axis.fill_between(
value_range,
y[:, c_cnt] - y_std[:, c_cnt],
y[:, c_cnt] + y_std[:, c_cnt],
color=color_dict[choice],
alpha=0.5,
)
c_cnt += 1
# Plot data
if len(bottom_node.value) != 0:
if n_choices == 2:
rt_dat = bottom_node.value.copy()
if np.sum(rt_dat.rt < 0) == 0:
rt_dat.loc[rt_dat.response != 1, "rt"] = (-1) * rt_dat.rt[
rt_dat.response != 1
].values
axis.hist(
rt_dat.rt.values,
density=True,
color="blue",
label="data",
bins=bins,
linestyle="-",
histtype="step",
lw=lw_,
)
else:
for choice in choices:
weights = np.tile(
(1 / bin_size) / bottom_node.value.shape[0],
reps=bottom_node.value[bottom_node.value.response == choice].shape[
0
],
)
if np.sum(bottom_node.value.response == choice) > 0:
axis.hist(
bottom_node.value.rt[bottom_node.value.response == choice],
bins=np.arange(value_range[0], value_range[-1], bin_size),
weights=weights,
color=color_dict[choice],
label="data",
linestyle="dashed",
histtype="step",
lw=lw_,
)
axis.set_ylim(bottom=0) # Likelihood and histogram can only be positive
# Add a custom legend
if add_legend:
# If two choices only --> show data in blue, posterior samples in black
if n_choices == 2:
custom_elems = []
custom_titles = []
custom_elems.append(Line2D([0], [0], color="blue", lw=1.0, linestyle="-"))
custom_elems.append(Line2D([0], [0], color="black", lw=1.0, linestyle="-"))
custom_titles.append("Data")
custom_titles.append("Posterior")
# If more than two choices --> more styling
else:
custom_elems = [
Line2D([0], [0], color=color_dict[choice], lw=1) for choice in choices
]
custom_titles = ["response: " + str(choice) for choice in choices]
custom_elems.append(
Line2D([0], [0], color="black", lw=1.0, linestyle="dashed")
)
custom_elems.append(Line2D([0], [0], color="black", lw=1.0, linestyle="-"))
custom_titles.append("Data")
custom_titles.append("Posterior")
axis.legend(custom_elems, custom_titles, loc="upper right")
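# Hedged sketch (added; not in the original file): the axis-level function can
# also be driven directly, e.g. to embed a posterior-predictive panel in a
# custom figure. `bottom_node` is assumed to be an observed wfpt node.
def _demo_axis_level_plot(bottom_node):
    fig, ax = plt.subplots(figsize=(4, 3))
    _plot_func_posterior_pdf_node_nn(
        bottom_node, ax,
        value_range=np.arange(-4, 4, 0.02),
        samples=20,
        bin_size=0.2,
    )
    return fig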
def _plot_func_posterior_node_from_sim(
bottom_node,
axis,
value_range=None,
samples=10,
bin_size=0.1,
add_posterior_uncertainty=True,
add_posterior_mean=True,
**kwargs
):
"""Calculate posterior predictive for a certain bottom node and plot a histogram using the supplied axis element.
:Arguments:
bottom_node : pymc.stochastic
Bottom node to compute posterior over.
axis : matplotlib.axis
Axis to plot into.
value_range : numpy.ndarray
Range over which to evaluate the likelihood.
:Optional:
samples : int (default=10)
Number of posterior samples to use.
bin_size : float (default=0.1)
Width of the bins used for the data histogram.
add_posterior_uncertainty: bool (default=True)
Plot individual posterior samples or not.
add_posterior_mean: bool (default=True)
Whether to add a mean posterior (histogram from a dataset collapsed across posterior samples)
alpha: float (default=0.05)
alpha (transparency) level for plot elements from single posterior samples.
linewidth: float (default=0.5)
linewidth used for histograms
add_legend: bool (default=True)
whether or not to add a legend to the current axis.
legend_loc: str <default='upper right'>
string defining legend position. Find the rest of the options in the matplotlib documentation.
legend_shadow: bool <default=True>
Add shadow to legend box?
legend_fontsize: float <default=12>
Fontsize of legend.
model_: str (default='lca_no_bias_4')
string that defines the generative model used (e.g. 'ddm', 'ornstein', etc.).
"""
color_dict = {
-1: "black",
0: "black",
1: "green",
2: "blue",
3: "red",
4: "orange",
5: "purple",
6: "brown",
}
if value_range is None:
# Infer from data by finding the min and max from the nodes
raise NotImplementedError("value_range keyword argument must be supplied.")
if len(value_range) == 1:
value_range = (-value_range[0], value_range[0])
else:
value_range = (value_range[0], value_range[-1])
# Extract some parameters from kwargs
bins = np.arange(value_range[0], value_range[1], bin_size)
# add_uc = kwargs.pop('add_posterior_uncertainty', True)
# add_mean = kwargs.pop('add_posterior_mean', True)
sample_hist_alpha = kwargs.pop("alpha", 0.05)
lw_ = kwargs.pop("linewidth", 0.5)
add_legend = kwargs.pop("add_legend", True)
model_ = kwargs.pop("model_", "lca_no_bias_4")
choices = model_config[model_]["choices"]
n_choices = model_config[model_]["n_choices"]
legend_loc = kwargs.pop("legend_loc", "upper right")
legend_fs = kwargs.pop("legend_fontsize", 12)
legend_shadow = kwargs.pop("legend_shadow", True)
# like = np.empty((samples, len(value_range)), dtype=np.float32)
if type(bottom_node) == pd.DataFrame:
samples = None
data_tmp = bottom_node
data_only = 1
else:
samples = _post_pred_generate(
bottom_node,
samples=samples,
data=None,
append_data=False,
add_model_parameters=False,
)
data_tmp = bottom_node.value
data_only = 0
# Go sample by sample (to show uncertainty)
if add_posterior_uncertainty and not data_only:
for sample in samples:
if n_choices == 2:
if np.sum(sample.rt < 0) == 0:
sample.loc[sample.response != 1, "rt"] = (-1) * sample.rt[
sample.response != 1
].values
axis.hist(
sample.rt,
bins=bins,
density=True,
color="black",
label="posterior",
histtype="step",
lw=lw_,
alpha=sample_hist_alpha,
)
else:
for choice in choices:
weights = np.tile(
(1 / bin_size) / sample.shape[0],
reps=sample.loc[sample.response == choice, :].shape[0],
)
axis.hist(
sample.rt[sample.response == choice],
bins=bins,
weights=weights,
color=color_dict[choice],
label="posterior",
histtype="step",
lw=lw_,
alpha=sample_hist_alpha,
)
# Add a 'mean' line
if add_posterior_mean and not data_only:
concat_data = | pd.concat(samples) | pandas.concat |
#!/usr/bin/env python3
import os, re, sys, logging, csv, multiprocessing
import pandas as pd
from itertools import groupby
import itertools, functools
try:
from Bio.Alphabet import generic_dna, IUPAC
Bio_Alphabet = True
except ImportError:
Bio_Alphabet = None
# usages of generic_dna, IUPAC are not supported in Biopython 1.78 (September 2020).
print(f"The installed BioPython is a new version that has removed the Alphabet module.",file=sys.stderr)
from Bio.Seq import Seq
from collections import OrderedDict
from HTGTSrep.lib import loggingRun, getInputSeq, getCSV, collapse_db
def mutV(ig_dict):
''' Parse V_BTOP string and generate converted V string
'''
# parse V_BTOP, do it in a naive way
V_BTOP = ig_dict['V_BTOP']
V_mutNum = 0
V_string = '-' * (ig_dict['V_GERM_START_VDJ'] - 1)
match_breaks = []
for m in re.finditer(r'\d+', V_BTOP):
match_breaks.append(m.start(0))
match_breaks.append(m.end(0))
match_breaks.append(len(V_BTOP))
for i in range(0, len(match_breaks)):
if i == len(match_breaks)-1: continue
match = V_BTOP[match_breaks[i]:match_breaks[i+1]]
if match:
try:
match = int(match)
V_string += '.' * match
except:
mismatches = [match[i: i+2] for i in range(0, len(match), 2)]
for mismatch in mismatches:
if mismatch[1] == '-': continue
if mismatch[0] == '-':
V_string += '-'
else:
V_string += mismatch[0]
if mismatch[0] in 'ATCG': V_mutNum += 1
if len(V_string) > ig_dict['V_GENE_LEN']:
logging.error('Error: V mutation string longer than V gene, %s' \
% ig_dict['SEQUENCE_ID'])
sys.exit()
else:
V_string += '-' * (ig_dict['V_GENE_LEN'] - len(V_string))
ig_dict['V_ALLELE_NUC'] = V_string
ig_dict['V_MUTATION'] = V_mutNum
return ig_dict
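# Hedged illustration (added; not in the original script) of the BTOP
# convention mutV() walks: integers are runs of matches, letter pairs give the
# query/germline bases at a mismatch, and '-' on either side marks an indel.
def _demo_btop():
    ig = {
        'V_BTOP': '3AG2',          # 3 matches, query A vs germline G, 2 matches
        'V_GERM_START_VDJ': 1,
        'V_GENE_LEN': 6,
        'SEQUENCE_ID': 'demo_read',
    }
    out = mutV(dict(ig))
    assert out['V_ALLELE_NUC'] == '...A..'
    assert out['V_MUTATION'] == 1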
def dmask(ig_dict, repo_dict):
"""
Join gapped germline sequences aligned with sample sequences
Arguments:
ig_dict = iterable yielding dictionaries of sample sequence data
repo_dict = dictionary of IMGT gapped germline sequences
Returns:
dictionary of germline_type: germline_sequence
"""
align = ig_dict
vgene = ig_dict['V_ALLELE']
if vgene in repo_dict:
vseq = repo_dict[vgene]
ig_dict['V_GENE_GAP_LEN'] = len(vseq)
vstart = int(float(align['V_GERM_START_IMGT'])) - 1
vlen = int(float(align['V_GERM_LENGTH_IMGT']))
vpad = vlen - len(vseq[vstart:])
if vpad < 0: vpad = 0
germ_vseq = vseq[vstart:(vstart + vlen)] + ('N' * vpad)
else:
return ig_dict
dgene = ig_dict['D_ALLELE']
if dgene != '-':
if dgene in repo_dict:
dseq = repo_dict[dgene]
# Germline start
try: dstart = int(float(align['D_GERM_START'])) - 1
except (TypeError, ValueError): dstart = 0
# Germline length
try: dlen = int(float(align['D_GERM_LENGTH']))
except (TypeError, ValueError): dlen = 0
germ_dseq = repo_dict[dgene][dstart:(dstart + dlen)]
else:
# logging.warning('Error: D gene %s not in the IMGT-gapped seq file' % dgene)
return ig_dict
else:
germ_dseq = ''
jgene = ig_dict['J_ALLELE']
if jgene != '-':
if jgene in repo_dict:
jseq = repo_dict[jgene]
# Germline start
try: jstart = int(float(align['J_GERM_START'])) - 1
except (TypeError, ValueError): jstart = 0
# Germline length
try: jlen = int(float(align['J_GERM_LENGTH']))
except (TypeError, ValueError): jlen = 0
jpad = jlen - len(jseq[jstart:])
if jpad < 0: jpad = 0
germ_jseq = jseq[jstart:(jstart + jlen)] + ('N' * jpad)
else:
# logging.warning('Error: J gene %s not in the IMGT-gapped seq file' % jgene)
return ig_dict
else:
try: jlen = int(float(align['J_GERM_LENGTH']))
except (TypeError, ValueError): jlen = 0
germ_jseq = 'N' * jlen
# Assemble pieces starting with V-region
germ_seq = germ_vseq
regions = 'V' * len(germ_vseq)
try:
np1_len = int(float(align['NP1_LENGTH']))
except (TypeError, ValueError):
np1_len = 0
# N/P nucleotide additions after V
if np1_len < 0:
logging.error('NP1_LENGTH is negative for %s' % ig_dict['SEQUENCE_ID'])
return ig_dict
regions += 'N' * np1_len
germ_seq += 'N' * np1_len
# Add D-region
germ_seq += germ_dseq
regions += 'D' * len(germ_dseq)
# 'VD>', germ_seq, '\nVD>', regions
try:
np2_len = int(float(align['NP2_LENGTH']))
except (TypeError, ValueError):
np2_len = 0
# NP nucleotide additions
if np2_len < 0:
logging.error('NP2_LENGTH is negative for %s' % ig_dict['SEQUENCE_ID'])
return ig_dict
regions += 'N' * np2_len
germ_seq += 'N' * np2_len
# Add J-region
germ_seq += germ_jseq
regions += 'J' * len(germ_jseq)
# Full length and regions of germline seq, might be useful in the future
germlines_full = germ_seq
germlines_regions = regions
seq_dmask = germ_seq[:len(germ_vseq)] + \
'N' * (len(germ_seq) - len(germ_vseq) - len(germ_jseq)) + \
germ_seq[-len(germ_jseq):]
ig_dict['GERMLINE_IMGT_D_MASK'] = seq_dmask
return ig_dict
def gapV(ig_dict, repo_dict):
"""
Insert gaps into V region and update alignment information
Arguments:
ig_dict : Dictionary of parsed IgBlast output
repo_dict : Dictionary of IMGT gapped germline sequences
Returns:
dict : Updated with SEQUENCE_IMGT, V_GERM_START_IMGT, and V_GERM_LENGTH_IMGT fields
"""
seq_imgt = '.' * (int(ig_dict['V_GERM_START_VDJ']) - 1) + ig_dict['SEQUENCE_VDJ']
#print("seq_imgt before gapping in gapV()", seq_imgt, file = sys.stderr)
# present before gapping
# Find gapped germline V segment
vkey = ig_dict['V_ALLELE']
#print("keys in repo_dict:", repo_dict.keys(), file=sys.stderr)
#print("vkey of interest:", vkey, file=sys.stderr)
if vkey in repo_dict:
vgap = repo_dict[vkey]
#print("vgap present?", vgap, file=sys.stderr)
# Iterate over gaps in the germline segment
gaps = re.finditer(r'\.', vgap)
#print("vgap present?", gaps, file=sys.stderr)
gapcount = int(ig_dict['V_GERM_START_VDJ']) - 1
#print("gapcount present?", gapcount, file=sys.stderr)
for gap in gaps:
i = gap.start()
# Break if gap begins after V region
if i >= ig_dict['V_GERM_LENGTH_VDJ'] + gapcount:
break
# Insert gap into IMGT sequence
seq_imgt = seq_imgt[:i] + '.' + seq_imgt[i:]
# Update gap counter
gapcount += 1
#print("seq_imgt after gapping", seq_imgt, file=sys.stderr)
ig_dict['SEQUENCE_IMGT'] = seq_imgt
# Update IMGT positioning information for V
ig_dict['V_GERM_START_IMGT'] = 1
ig_dict['V_GERM_LENGTH_IMGT'] = ig_dict['V_GERM_LENGTH_VDJ'] + gapcount
# print("seq_imgt keys() check after gapping in Vgap()", ig_dict.keys(), file=sys.stderr)
return ig_dict
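# Hedged sketch (added; not in the original script): gapV() copies the '.'
# gaps of the IMGT-gapped germline into the query so that downstream positions
# are IMGT-numbered. The toy allele name and sequences below are made up.
def _demo_gapV():
    ig = {
        'SEQUENCE_VDJ': 'ACGTACGT',
        'V_ALLELE': 'demoV*01',
        'V_GERM_START_VDJ': 1,
        'V_GERM_LENGTH_VDJ': 8,
    }
    repo = {'demoV*01': 'ACG...TACGT'}   # germline with a 3-nt IMGT gap
    assert gapV(ig, repo)['SEQUENCE_IMGT'] == 'ACG...TACGT'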
def readOneIgBlastResult(block):
"""
Parse a single IgBlast query result
Arguments:
block = itertools groupby object of single result
Returns:
None if no results, otherwise list of DataFrames for each result block
"""
results = list()
i = 0
for match, subblock in groupby(block, lambda l: l=='\n'):
if not match:
# Strip whitespace and comments, and clonal parts
sub = [s.strip() for s in subblock if not s.startswith((
'#', 'Total queries', 'Total identifiable CDR3',
'Total unique clonotypes'))]
# Continue on empty block
if not sub: continue
else: i += 1
# Split by tabs
sub = [s.split('\t') for s in sub]
# Append list for "V-(D)-J rearrangement summary" (i == 1)
# And "V-(D)-J junction details" (i == 2)
# Otherwise append DataFrame of subblock
if i == 1 or i == 2:
results.append(sub[0])
# skip query result if no CDR3 info
elif i == 3 and sub[0][0] == 'CDR3':
results.append(sub[0])
else:
df = pd.DataFrame(sub)
if not df.empty: results.append(df)
return results if results else None
def writeIgBlastDb(IgBlast_output, IgBlast_db, seq_file, repo_dict, subset, Primer_Jlen, Dupstream=None):
'''
Main for IgBlast aligned sample sequences, a parsed file in succinct fashion
will be generated for each output file.
Arguments:
IgBlast_output = IgBlast output file to process
IgBlast_db = IgBlast database file to write
seq_file = Query sequence file to read
repo_dict = dictionary of IMGT gapped germline sequences
Returns:
None
'''
# Define ordered outputs
ordered_fields = ['SEQUENCE_ID',
"V_GENE", "V_ALLELE", "D_ALLELE", "J_ALLELE",
"STOP", "IN_FRAME", "PRODUCTIVE", "CHAIN_TYPE", "STRAND",
"V_END", "V_D_JUNCTION", "D_REGION",
"D_J_JUNCTION", "J_START", "V_J_JUNCTION",
"V_SCORE", "V_ALIGNMENT", "V_MISMATCH", "V_MUTATION",
"INDEL", "V_IDENTITY", "V_COVERAGE",
"V_BTOP", "V_GENE_LEN", "V_GENE_GAP_LEN",
"D_SCORE", "D_ALIGNMENT",
"J_SCORE", "J_ALIGNMENT", "J_MUTATION", "J_MUTATION_NOPRIMER",
"CDR1_SEQ", "CDR1_PEPTIDE",
"CDR2_SEQ", "CDR2_PEPTIDE",
"CDR3_SEQ", "CDR3_PEPTIDE",
"V_CALL", "D_CALL", "J_CALL",
'D_SEQ_START', 'D_SEQ_LENGTH', 'D_GERM_START', 'D_GERM_LENGTH',
'J_SEQ_START', 'J_SEQ_LENGTH', 'J_GERM_START', 'J_GERM_LENGTH',
"NP1_LENGTH", "NP2_LENGTH",
"V_SEQ_START", "V_SEQ_LENGTH",
"V_GERM_START_VDJ", "V_GERM_END_VDJ",
"V_GERM_START_IMGT", "V_GERM_LENGTH_IMGT",
"V_ALLELE_NUC", "GERMLINE_IMGT_D_MASK",
"SEQUENCE_INPUT", "SEQUENCE_VDJ", "SEQUENCE_IMGT"]
if Dupstream:
ordered_fields.append('D_UPSTREAM_MATCH_D_GENE')
ordered_fields.append('D_UPSTREAM_STITCH_D_GENE')
# Initialize IgBlast db
db_handle = open(IgBlast_db, 'wt')
db_writer = csv.writer(db_handle, delimiter = "\t")
# Yyx changed back 2021-06-02, bottleneck is not output
db_writer.writerow(ordered_fields)
# print('\t'.join(ordered_fields), file=db_handle)
# Get input sequence dictionary
seq_dict = getInputSeq(seq_file)
# Yyx changed below on 2021-06-02
def parseOneBlock(block):
# block = list(block)
# if k1: continue
# Initialize db_gen
db_gen = {}
for item in ordered_fields:
db_gen[item] = "-"
# Extract sequence ID
query_name = ' '.join(block[0].strip().split(' ')[2:])
# Parse db_gen to have ID and input sequence
db_gen['SEQUENCE_ID'] = query_name
# Yyx add 2021-04-15, should check strand, reverse complement if query is minus strand
should_reverse_complement = any(('your query represents the minus strand' in x) for x in block)
# Parse further sub-blocks
block_list = readOneIgBlastResult(block)
# Skip read without alignment or V alignment
if block_list is None: return
if block_list[0][0] == 'N/A': return
# Penultimate has to be dataframe of FR&CDR position
if isinstance(block_list[-2], list): return
# Parse quality information
db_gen['STRAND'] = block_list[0][-1]
db_gen['SEQUENCE_INPUT'] = seq_dict[query_name]
if db_gen['STRAND'] == '-':
if Bio_Alphabet:
db_gen['SEQUENCE_INPUT'] = str(Seq(db_gen['SEQUENCE_INPUT'],
IUPAC.ambiguous_dna).reverse_complement())
else:
db_gen['SEQUENCE_INPUT'] = str(Seq(db_gen['SEQUENCE_INPUT']).reverse_complement())
if block_list[0][-2] == 'Yes': db_gen['PRODUCTIVE'] = 'T'
if block_list[0][-2] == 'No': db_gen['PRODUCTIVE'] = 'F'
if block_list[0][-3] == 'In-frame': db_gen['IN_FRAME'] = 'T'
if block_list[0][-3] == 'Out-of-frame': db_gen['IN_FRAME'] = 'F'
if block_list[0][-4] == 'Yes': db_gen['STOP'] = 'T'
if block_list[0][-4] == 'No': db_gen['STOP'] = 'F'
db_gen['CHAIN_TYPE'] = block_list[0][-5]
# Parse J call
if block_list[0][-6] != 'N/A': db_gen['J_CALL'] = block_list[0][-6]
db_gen['J_ALLELE'] = db_gen['J_CALL'].split(',')[0]
# Parse D call
if block_list[0][3] == 'VH':
if block_list[0][1] != 'N/A': db_gen['D_CALL'] = block_list[0][1]
db_gen['D_ALLELE'] = db_gen['D_CALL'].split(',')[0]
# Parse V call
if block_list[0][0] != 'N/A': db_gen['V_CALL'] = block_list[0][0]
db_gen['V_ALLELE'] = db_gen['V_CALL'].split(',')[0]
db_gen['V_GENE'] = db_gen['V_ALLELE'].split('*')[0]
# Parse junction sequence
if len(block_list[1]) >= 5:
# ALTERNATIVELY FILTER BY CHAIN_TYPE
# if db_gen['CHAIN_TYPE'] == 'VH':
if block_list[1][0] != 'N/A': db_gen['V_END'] = block_list[1][0]
if block_list[1][1] != 'N/A': db_gen['V_D_JUNCTION'] = block_list[1][1]
if block_list[1][2] != 'N/A': db_gen['D_REGION'] = block_list[1][2]
if block_list[1][3] != 'N/A': db_gen['D_J_JUNCTION'] = block_list[1][3]
if block_list[1][4] != 'N/A': db_gen['J_START'] = block_list[1][4]
# ALTERNATIVELY FILTER BY CHAIN_TYPE
# elif db_gen['CHAIN_TYPE'] == 'VK':
elif len(block_list[1]) == 3:
if block_list[1][0] != 'N/A': db_gen['V_END'] = block_list[1][0]
if block_list[1][1] != 'N/A': db_gen['V_J_JUNCTION'] = block_list[1][1]
if block_list[1][2] != 'N/A': db_gen['J_START'] = block_list[1][2]
# Parse CDR 1 & 2 sequence, should get reverse complement before extract
# if subset == 'unjoinR2':
# inputseq = seq_dict[query_name]
# else:
if Bio_Alphabet:
inputseq = Seq(seq_dict[query_name], IUPAC.ambiguous_dna)
else:
inputseq = Seq(seq_dict[query_name])
# Yyx add 2021-04-15, should check strand, reverse complement if query is minus strand
if should_reverse_complement:
inputseq = inputseq.reverse_complement()
inputseq = str(inputseq)
# CDR 1 & 2 block is dataframe
hit_regions = block_list[-2]
regions = list(hit_regions.loc[:,0])
# Parse CDR1/2 only when exist FR1/2
FR1_EXIST = 0
FR2_EXIST = 0
if 'FR1-IMGT' in regions or 'FR1' in regions: FR1_EXIST = 1
if 'FR2-IMGT' in regions or 'FR2' in regions: FR2_EXIST = 1
for key, row in hit_regions.iterrows():
if row[0].startswith('CDR1') and FR1_EXIST == 1:
db_gen['CDR1_SEQ'] = inputseq[int(row[1])-1: int(row[2])]
if Bio_Alphabet:
db_gen['CDR1_PEPTIDE'] = str(Seq(db_gen['CDR1_SEQ'],
generic_dna).translate())
else:
db_gen['CDR1_PEPTIDE'] = str(Seq(db_gen['CDR1_SEQ']).translate())
if row[0].startswith('CDR2') and FR2_EXIST == 1:
db_gen['CDR2_SEQ'] = inputseq[int(row[1])-1: int(row[2])]
if Bio_Alphabet:
db_gen['CDR2_PEPTIDE'] = str(Seq(db_gen['CDR2_SEQ'],
generic_dna).translate())
else:
db_gen['CDR2_PEPTIDE'] = str(Seq(db_gen['CDR2_SEQ']).translate())
# CDR 1 & 2 block is list
if isinstance(block_list[2], list) and len(block_list[2]) > 2:
db_gen['CDR3_SEQ'] = block_list[2][1]
db_gen['CDR3_PEPTIDE'] = block_list[2][2]
# Parse segment start and stop positions
hit_df = block_list[-1]
seq_vdj = ''
v_align = hit_df[hit_df[0] == 'V'].iloc[0]
# Alignment length and mismatch
db_gen['V_IDENTITY'] = '%.3f' % (float(v_align[3]) / 100.0)
db_gen['V_ALIGNMENT'] = v_align[4]
db_gen['V_MISMATCH'] = v_align[5]
db_gen['INDEL'] = v_align[6]
db_gen['V_SEQ_START'] = int(v_align[8])
db_gen['V_SEQ_LENGTH'] = int(v_align[9]) - db_gen['V_SEQ_START'] + 1
db_gen['V_GERM_START_VDJ']= int(v_align[10])
db_gen['V_GERM_END_VDJ']= int(v_align[11])
db_gen['V_GERM_LENGTH_VDJ'] = int(v_align[11]) - db_gen['V_GERM_START_VDJ'] + 1
db_gen['V_SCORE'] = v_align[13]
db_gen['V_GENE_LEN'] = int(v_align[15])
db_gen['V_COVERAGE'] = '%.3f' % (float(v_align[4])/float(v_align[15]))
db_gen['V_BTOP'] = v_align[16]
# Update VDJ sequence, removing insertions
start = 0
for m in re.finditer(r'-', v_align[18]):
ins = m.start()
seq_vdj += v_align[17][start:ins]
start = ins + 1
seq_vdj += v_align[17][start:]
# D alignment results
if db_gen['D_CALL'] != "-":
d_align = hit_df[hit_df[0] == 'D'].iloc[0]
db_gen['D_ALIGNMENT'] = d_align[4]
db_gen['D_SCORE'] = d_align[13]
# Determine N-region length and amount of J overlap with V or D alignment
overlap = 0
np1_len = int(d_align[8]) - (db_gen['V_SEQ_START'] + db_gen['V_SEQ_LENGTH'])
if np1_len < 0:
db_gen['NP1_LENGTH'] = 0
overlap = abs(np1_len)
else:
db_gen['NP1_LENGTH'] = np1_len
n1_start = (db_gen['V_SEQ_START'] + db_gen['V_SEQ_LENGTH']-1)
n1_end = int(d_align[8])-1
seq_vdj += db_gen['SEQUENCE_INPUT'][n1_start:n1_end]
# Query sequence positions
db_gen['D_SEQ_START'] = int(d_align[8]) + overlap
db_gen['D_SEQ_LENGTH'] = max(int(d_align[9]) - db_gen['D_SEQ_START'] + 1, 0)
# Germline positions
db_gen['D_GERM_START'] = int(d_align[10]) + overlap
db_gen['D_GERM_LENGTH'] = max(int(d_align[11]) - db_gen['D_GERM_START'] + 1, 0)
# Update VDJ sequence, removing insertions
start = overlap
for m in re.finditer(r'-', d_align[18]):
ins = m.start()
seq_vdj += d_align[17][start:ins]
start = ins + 1
seq_vdj += d_align[17][start:]
# J alignment results
if db_gen['J_CALL'] != "-":
j_align = hit_df[hit_df[0] == 'J'].iloc[0]
db_gen['J_ALIGNMENT'] = j_align[4]
db_gen['J_MUTATION'] = sum(1 for a, b in zip(j_align[17], j_align[18]) if (a != b and a !='N'))
vl = Primer_Jlen
db_gen['J_MUTATION_NOPRIMER'] = sum(1 for a, b in zip(j_align[17][0:-vl], j_align[18][0:-vl])
if (a != b and a !='N'))
db_gen['J_SCORE'] =j_align[13]
# Determine N-region length and amount of J overlap with V or D alignment
overlap = 0
if db_gen['D_CALL'] != "-":
np2_len = int(j_align[8]) - (db_gen['D_SEQ_START'] + db_gen['D_SEQ_LENGTH'])
if np2_len < 0:
db_gen['NP2_LENGTH'] = 0
overlap = abs(np2_len)
else:
db_gen['NP2_LENGTH'] = np2_len
n2_start = (db_gen['D_SEQ_START']+db_gen['D_SEQ_LENGTH']-1)
n2_end = int(j_align[8])-1
seq_vdj += db_gen['SEQUENCE_INPUT'][n2_start:n2_end]
elif db_gen['V_CALL'] != "-":
np1_len = int(j_align[8]) - (db_gen['V_SEQ_START'] + db_gen['V_SEQ_LENGTH'])
if np1_len < 0:
db_gen['NP1_LENGTH'] = 0
overlap = abs(np1_len)
else:
db_gen['NP1_LENGTH'] = np1_len
n1_start = (db_gen['V_SEQ_START']+db_gen['V_SEQ_LENGTH']-1)
n1_end = int(j_align[8])-1
seq_vdj += db_gen['SEQUENCE_INPUT'][n1_start:n1_end]
else:
db_gen['NP1_LENGTH'] = 0
# Query positions
db_gen['J_SEQ_START'] = int(j_align[8]) + overlap
db_gen['J_SEQ_LENGTH'] = max(int(j_align[9]) - db_gen['J_SEQ_START'] + 1, 0)
# Germline positions
db_gen['J_GERM_START'] = int(j_align[10]) + overlap
db_gen['J_GERM_LENGTH'] = max(int(j_align[11]) - db_gen['J_GERM_START'] + 1, 0)
# Update VDJ sequence, removing insertions
start = overlap
for m in re.finditer(r'-', j_align[18]):
ins = m.start()
seq_vdj += j_align[17][start:ins]
start = ins + 1
seq_vdj += j_align[17][start:]
db_gen['SEQUENCE_VDJ'] = seq_vdj
# Create IMGT-gapped sequence and infer IMGT junction
if not db_gen['V_ALLELE'].endswith('_DS'):
# print("perform gapV: is db_gen['SEQUENCE_IMGT'] present before gapV?", db_gen['SEQUENCE_IMGT'], file=sys.stderr)
# "-" as expected
db_gen = gapV(db_gen, repo_dict)
### initialized upstream: repo_dict = getInputSeq(args.Vgapseq)
#print("perform gapV: is db_gen['SEQUENCE_IMGT'] present after gapV()?", db_gen['SEQUENCE_IMGT'], file=sys.stderr)
# "-" empty afterwards
db_gen = dmask(db_gen, repo_dict)
db_gen = mutV(db_gen)
# Update two unique columns for D upstream
elif Dupstream:
db_gen['D_UPSTREAM_STITCH_D_GENE'] = 'F'
if db_gen['V_GENE'].replace('_DS', '') in db_gen['D_CALL'].split(','):
db_gen['D_UPSTREAM_MATCH_D_GENE'] = 'T'
if db_gen['V_GENE_LEN'] == db_gen['V_GERM_END_VDJ']:
db_gen['D_UPSTREAM_STITCH_D_GENE'] = 'T'
else:
db_gen['D_UPSTREAM_MATCH_D_GENE'] = 'F'
# Yyx changed back 2021-06-02, bottleneck is not output
db_writer.writerow([db_gen[f] for f in ordered_fields])
# print('\t'.join([str(db_gen[f]) for f in ordered_fields]), file=db_handle)
IGBLASTN_pattern = re.compile('# IGBLASTN')
with open(IgBlast_output) as f:
# Iterate over individual results (separated by # IgBlastN)
# Yyx changed back on 2021-06-02, the bottleneck is not input
for k1, block in groupby(f, lambda x: IGBLASTN_pattern.match(x)):
if k1: continue
parseOneBlock(list(block))
# k1 = ''
# block = []
# for x in f:
# if IGBLASTN_pattern.match(x):
# if k1 != '':
# parseOneBlock(block)
# k1 = x
# block = []
# else:
# block.append(x)
# if k1 != '':
# parseOneBlock(block)
db_handle.close()
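# Hedged usage sketch (added; not in the original script); all file paths and
# the J-primer length below are illustrative placeholders.
def _demo_writeIgBlastDb():
    repo_dict = getInputSeq('database/IMGT_gapped_V.fa')   # hypothetical path
    writeIgBlastDb(
        IgBlast_output='sample1_join.IgBlast',
        IgBlast_db='sample1_join.IgBlast.db',
        seq_file='sample1_join.fa',
        repo_dict=repo_dict,
        subset='join',
        Primer_Jlen=20,
        Dupstream=None,
    )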
def run_one_IgBlast(sample, args):
''' run IgBlast agaist one sample
'''
# run IgBlast
if args.VDJdatabase.startswith('IG'):
seqtype = 'Ig'
if args.VDJdatabase.startswith('TR'):
seqtype = 'TCR'
for subset in args.readtypes:
eachdir = '%s/%s' % (args.outdir, sample)
seq_file = '%s/%s_%s.fa' % (eachdir, sample, subset)
IgBlast_output = '%s/%s_%s.IgBlast' % (eachdir, sample, subset)
cmdline = '%s/external_software/igblastn -query %s -organism %s' \
' -germline_db_V %s -germline_db_D %s -germline_db_J %s' \
' -auxiliary_data %s -ig_seqtype %s -domain_system %s' \
' -outfmt "7 std qlen slen btop qseq sseq" -out %s' \
' -num_clonotype 0 -num_threads 2' % (
args.scriptdir, seq_file, args.organism,
args.Vdb, args.Ddb, args.Jdb,
args.auxiliary_data, seqtype, args.domain_system,
IgBlast_output)
os.environ['IGDATA'] = '%s/database/' % args.scriptdir
os.environ['BLASTDB'] = '%s/database/' % args.scriptdir
loggingRun(cmdline)
def _write_arguments(logfile, args):
with open(logfile.replace('.log', '.param'), 'w') as param_file:
writer = csv.writer(param_file, delimiter="\t")
for key in args.__dict__:
writer.writerow([key, args.__dict__[key]])
def run_IgBlast(args):
''' Run IgBlast search for each sample
'''
logging.info('Runing IgBlast......')
# perform IgBlast search
if args.skipIgBlast is False:
# for sample in metadict: run_one_IgBlast(sample, args)
pool = multiprocessing.Pool(processes = args.nproc)
for sample in args.metadict:
pool.apply_async(run_one_IgBlast, (sample, args, ))
pool.close()
pool.join()
# clean up files if no need to do IgBlast search
# will never be run if skipIgBlast
else:
for sample in args.metadict:
if os.path.exists('%s/%s/IgBlast_raw' % (args.outdir, sample)):
os.system('mv {0}/{1}/IgBlast_raw/* {0}/{1}/'.format(args.outdir, sample))
if os.path.exists('%s/%s/IgBlast_results' % (args.outdir, sample)):
os.system('mv {0}/{1}/IgBlast_results/* {0}/{1}/'.format(args.outdir, sample))
if os.path.exists('%s/%s/reads_fasta/' % (args.outdir, sample)):
os.system('mv {0}/{1}/reads_fasta/* {0}/{1}/'.format(args.outdir, sample))
# 04262021 JH added -q
os.system('gunzip -q %s/%s/*.gz' % (args.outdir, sample))
def parse_one_Record(row, args):
"""
Parse a row of one join read
Arguments:
row = One record in IgBlast dataframe
Returns:
A list of tags
"""
# Fail tags in join reads
fail_tags = ['V_NO_ALIGNMENT', 'V_LOW_SCORE', 'V_LOW_IDENTITY',
'V_LOW_COVERAGE', 'V_SHORT_ALIGNMENT', 'V_ALIGN_DIFF_GENE',
'J_NO_ALIGNMENT', 'J_SHORT_ALIGNMENT', 'J_NOT_MATCH_JGENE',
'NO_PRODCTIVE_INFO', 'D_STITCH_UPSTREAM_SHORT_ALIGNMENT',
'D_UPSTREAM_SHORT_ALIGNMENT', 'R1_R2_V_NO_MATCH']
tags = []
# Parse D upstream alignment for upstream alignment when required
if args.D_upstream and row['V_ALLELE'].endswith('_DS'):
if row['V_GERM_END_VDJ'] == row['V_GENE_LEN']:
if int(row['V_ALIGNMENT']) < args.D_upstream_stitch_length:
tags.append('D_STITCH_UPSTREAM_SHORT_ALIGNMENT')
else:
if int(row['V_ALIGNMENT']) < args.D_upstream_length:
tags.append('D_UPSTREAM_SHORT_ALIGNMENT')
# Parse V gene
if row['V_ALLELE'] == '-':
tags.append('V_NO_ALIGNMENT')
else:
if float(row['V_SCORE']) < args.V_score:
tags.append('V_LOW_SCORE')
if float(row['V_IDENTITY']) < args.V_identity:
tags.append('V_LOW_IDENTITY')
if float(row['V_COVERAGE']) < args.V_coverage:
tags.append('V_LOW_COVERAGE')
if int(row['V_ALIGNMENT']) < args.V_length:
tags.append('V_SHORT_ALIGNMENT')
vlist = [ v.split('*')[0] for v in row['V_CALL'].split(',') ]
if len(set(vlist)) > 1:
tags.append('V_ALIGN_DIFF_GENE')
if args.checkProductive and row['PRODUCTIVE'] == '-':
tags.append('NO_PRODCTIVE_INFO')
# Parse J gene
if not args.skipJAlignmentFilter:
if row['J_ALLELE'] == '-':
tags.append('J_NO_ALIGNMENT')
else:
if int(row['J_ALIGNMENT']) < args.J_length:
tags.append('J_SHORT_ALIGNMENT')
if args.J_gene and args.J_gene not in row['J_ALLELE']:
tags.append('J_NOT_MATCH_JGENE')
# Parse R1&R2 V allele
if 'V_ALLELE_R2' in row:
if row['V_ALLELE_R2'] != row['V_ALLELE']:
tags.append('R1_R2_V_NO_MATCH')
tags = [t for t in tags if t in fail_tags]
if len(tags) > 0:
return '|'.join(tags)
else:
return '-'
def parse_one_IgBlast(IgBlast_r1, args, IgBlast_r2=None):
"""
Parse a joined IgBlast (R1 only) or unjoined R1 & R2
Arguments:
IgBlast_r1 = IgBlast join database or R1 database (if R2 specified)
IgBlast_r2 = IgBlast R2 database (optional)
args = Input arguments
Returns:
A pd dataframe containing annotation tags
"""
if IgBlast_r2 is None:
# R1 = pd.read_csv(IgBlast_r1, sep="\t", low_memory=False)
R1 = getCSV(IgBlast_r1)
if len(R1) == 0:
return R1
R1['JOINED'] = 'T'
R1["NOTE"] = R1.apply(parse_one_Record, args=(args,), axis=1)
R1["PASS"] = R1["NOTE"].apply(lambda x: 'T' if x == '-' else 'F')
return R1
else:
# Fail tags in R1 & R2 reads
R1 = getCSV(IgBlast_r1)
R2 = getCSV(IgBlast_r2)
if len(R1) == 0 or len(R2) == 0:
return R1
R1['JOINED'] = 'F'
# Replace following fields in R2 to replace in R1
replace_fields = ['V_SCORE', 'V_ALIGNMENT', 'V_MISMATCH',
'INDEL', 'V_MUTATION',
'V_IDENTITY', 'V_COVERAGE', 'V_BTOP',
'V_GERM_START_VDJ', 'V_GERM_END_VDJ']
replace_CDR12 = ['CDR1_SEQ', 'CDR1_PEPTIDE', 'CDR2_SEQ', 'CDR2_PEPTIDE']
# Obtain paired reads
R1 = R1[R1['SEQUENCE_ID'].isin(R2['SEQUENCE_ID'])].sort_values('SEQUENCE_ID')
R2 = R2[R2['SEQUENCE_ID'].isin(R1['SEQUENCE_ID'])].sort_values('SEQUENCE_ID')
R1 = R1.reset_index(drop=True)
R2 = R2.reset_index(drop=True)
R1['V_SCORE_R2'] = R2['V_SCORE']
R1['V_ALLELE_R2'] = R2['V_ALLELE']
# Use R2 score if higher
R1[replace_CDR12] = R2[replace_CDR12]
idx = R1.loc[R1['V_SCORE']<R1['V_SCORE_R2']].index
R1.loc[idx,replace_fields] = R2.loc[idx,replace_fields]
R1["NOTE"] = R1.apply(parse_one_Record, args=(args,), axis=1)
R1["PASS"] = R1["NOTE"].apply(lambda x: 'T' if x == '-' else 'F')
R1.drop(['V_SCORE_R2', 'V_ALLELE_R2'], inplace=True, axis=1)
return R1
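# Hedged sketch (added; not in the original script): joined reads use a single
# parsed db file, while unjoined reads pass R1 and R2 so the better-scoring V
# alignment can be kept. `args` is assumed to be the parsed argument namespace.
def _demo_parse_one_IgBlast(args):
    joined = parse_one_IgBlast('sample1_join.IgBlast.db', args)
    paired = parse_one_IgBlast('sample1_R1.IgBlast.db', args,
                               IgBlast_r2='sample1_R2.IgBlast.db')
    return joined, paired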
def summarize_one_IgBlast(sample, records, args):
''' Summarize the V usage in one IgBlast task
'''
# Prepare V annotation file
if args.Vannotation:
annofile = args.Vannotation
else:
annofile = '%s/database/annotation/%s_%s_anno.txt' % (
args.scriptdir, args.organism, args.VDJdatabase)
if args.mousestrain in ['B6']:
annofile = '%s/database/annotation/mouse_%s_B6_anno.txt' % (
args.scriptdir, args.VDJdatabase)
# with no specific organism and VDJdatabase, /usr/pipelines/HTGTSrep_pipeline/database/annotation/mouse_IGH_anno.txt is the default
# /usr/pipelines/HTGTSrep_pipeline/database/annotation/mouse_IGH_anno.txt has both B6 and mm129
# Summarize one IgBlast output
gene_count = {}
for key, group in records.groupby('V_GENE'):
gene = group["V_GENE"].unique()[0]
gene_pass = len(group)
gene_pass_productive = len(group.loc[group['PRODUCTIVE']=='T'])
gene_pass_non_productive = len(group.loc[group['PRODUCTIVE']=='F'])
gene_count[gene] = [gene_pass_productive, gene_pass_non_productive, gene_pass]
# Do stat summarize
# First four cols are from db file and used for stat
nameslist = ["V_GENE", "LOCUS", "FUNCTIONAL",
"PRODUCTIVE", "NON_PRODCTIVE", "TOTAL"]
if os.path.exists(annofile):
V_stat = pd.read_csv(annofile, names=nameslist, sep="\t").fillna(0)
# Filter out non-mutated reads and drop V_MUTATION col
for gene in gene_count:
# update from ix to loc
gene_isin = V_stat.loc[V_stat["V_GENE"].isin([gene]), ]
if len(gene_isin) > 0:
index = gene_isin.index[0]
# update from ix to iloc
V_stat.iloc[index, 3:6] = gene_count[gene]
else:
newrow = [gene, "-", "-"] + gene_count[gene]
# update from ix to iloc
V_stat.loc[len(V_stat)+1] = newrow
print(newrow, file=sys.stderr)
else:
V_stat = | pd.DataFrame(columns=nameslist) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "<NAME>"
__contact__ = "gambrosio[at]uma.es"
__copyright__ = "Copyright 2021, <NAME>"
__date__ = "2021/07/27"
__license__ = "MIT"
import sys
import datetime as dt
import sqlite3
import os
import cv2
import numpy as np
import pandas as pd
import mxnet as mx
from mxnet import image
from mxnet import context
import gluoncv as gcv
from gluoncv import model_zoo, data, utils
# import fiftyone as fo
# from matplotlib import pyplot as plt
# import helpers
import robotathome as rh
# import fire
class RobotAtHome():
"""RobotAtHome class with methods for Robot@Home dataset v2.x.y
The RobotAtHome class encapsulates methods to access the RobotAtHome
database. <https://doi.org/10.5281/zenodo.4530453>
Attributes:
rh_path (str, optional):
root path for robotathome database, usually rh.db
wspc_path (str, optional):
workspace path where temporary files are stored
db_filename (str, optional):
default database name
rgbd_path (str, optional):
path that completes rh_path, where rgbd files are stored
scene_path (str, optional):
path that coompletes rh_path, where scene files are stored
"""
def __init__(self,
rh_path='.',
wspc_path='.',
db_filename='rh.db',
rgbd_path='files/rgbd',
scene_path='files/scene'):
""" RobotAtHome constructor method """
self.__rh_path = rh_path
self.__wspc_path = wspc_path
self.__db_filename = db_filename
self.__rgbd_path = rgbd_path
self.__scene_path = scene_path
self.__con = None
self.__rgbd_views = []
# Initialization functions
self.__open_dataset()
self.__create_temp_views()
def __del__(self):
""" Robot@Home destructor method"""
def __open_dataset(self):
"""
This function makes the connection with the database and calls the
initialization functions, e.g. create temporal views
"""
db_full_path = os.path.join(self.__rh_path, self.__db_filename)
rh.logger.debug("db_full_path: {}", db_full_path)
try:
self.__con = sqlite3.connect(db_full_path)
rh.logger.info("Connection is established: {}", self.__db_filename)
except NameError:
rh.logger.error("Error while trying to open database: {}", NameError)
def __close_dataset(self):
"""
This function closes the connection with the database
"""
self.__con.close()
rh.logger.info("The connection with the database has been successfully closed")
def __create_temp_views(self):
"""
This function creates temporary views to work on the class environment
"""
sql_str = '''
begin transaction;
drop view if exists rh_temp_lblrgbd;
create temp view rh_temp_lblrgbd as
select
rh_lblrgbd.id,
rh_lblrgbd.home_session_id as hs_id,
rh_home_sessions.name as hs_name,
rh_lblrgbd.home_subsession_id as hss_id,
rh_lblrgbd.home_id as h_id,
rh_homes.name as h_name,
rh_lblrgbd.room_id as r_id,
rh_rooms.name as r_name,
rh_lblrgbd.sensor_id as s_id,
rh_sensors.name as s_name,
rh_lblrgbd.time_stamp as t,
rh_lblrgbd.sensor_pose_x as s_px,
rh_lblrgbd.sensor_pose_y as s_py,
rh_lblrgbd.sensor_pose_z as s_pz,
rh_lblrgbd.sensor_pose_yaw as s_pya,
rh_lblrgbd.sensor_pose_pitch as s_ppi,
rh_lblrgbd.sensor_pose_roll as s_pro,
rh2_old2new_rgbd_files.new_file_1 as f1,
rh2_old2new_rgbd_files.new_file_2 as f2,
rh2_old2new_rgbd_files.new_file_3 as f3,
rh2_old2new_rgbd_files.new_path as pth
from rh_lblrgbd
inner join rh_home_sessions on home_session_id = rh_home_sessions.id
inner join rh_homes on rh_lblrgbd.home_id = rh_homes.id
inner join rh_rooms on rh_lblrgbd.room_id = rh_rooms.id
inner join rh_sensors on rh_lblrgbd.sensor_id = rh_sensors.id
inner join rh2_old2new_rgbd_files on rh2_old2new_rgbd_files.id = rh_lblrgbd.id;
commit;
'''
# Get a cursor to execute SQLite statements
cur = self.__con.cursor()
cur.executescript(sql_str)
self.__rgbd_views.append("rh_temp_lblrgbd")
rh.logger.trace("The view rh_temp_lblrgbd has been created")
def get_con(self):
"""
This function returns the sql connection variable
"""
return self.__con
def select_column(self, column_name, table_name):
'''
Returns a dataframe with grouped column values
(without repetition)
'''
# Get a cursor to execute SQLite statements
cur = self.__con.cursor()
# Build the query
# sql_str = ("select " + column_name + " from " + table_name + " group by " + column_name + ";")
# rows = cur.execute(sql_str)
# rh.logger.debug(rows)
# for row in rows:
# print(row)
# rh.logger.debug(rows2list(rows))
sql_str = (f"select {column_name} from {table_name} group by {column_name};")
df_rows = | pd.read_sql_query(sql_str, self.__con) | pandas.read_sql_query |
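# Hedged usage sketch for the RobotAtHome class above (added; not in the
# original module). The database path is a placeholder; rh_temp_lblrgbd is the
# temporary view created in __create_temp_views().
def _demo_robotathome():
    rh_obj = RobotAtHome(rh_path='/data/robotathome', db_filename='rh.db')
    con = rh_obj.get_con()   # raw sqlite3 connection for custom queries
    return pd.read_sql_query('select * from rh_temp_lblrgbd limit 5;', con)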
import os
import logging
from notion.client import NotionClient
import numpy as np
import pandas as pd
import yfinance as yf
from datetime import datetime
import matplotlib.pyplot as plt
import seaborn as sns
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
from telegram import ParseMode, ReplyKeyboardMarkup, KeyboardButton
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# settings
TELEGRAM_TOKEN = os.environ['TELEGRAM_TOKEN']
NOTION_TOKEN = os.environ['NOTION_TOKEN']
LIMIT = os.environ['CREDIT_LIMIT']
POWER_USER_ID = int(os.environ['POWER_USER_ID'])
POWER_USER_NAME = os.environ['POWER_USER_NAME']
notion_balance = "https://www.notion.so/chenchiks/2062899533a048579f572a7e3d40182f?v=1fb6c93b1a5045af9ea3a83b4aa90dd0"
notion_transactions = "https://www.notion.so/chenchiks/1604cc3bb0614273a690710f17b138ca?v=8f278effcac4457d803aeb5cc0a1c93e"
credit_limit = int(LIMIT)
recalculate = "Recalculate"
newlink = "Update numbers"
recalculate_keyboard = KeyboardButton(text=recalculate)
link_keyboard = KeyboardButton(text=newlink)
custom_keyboard = [[recalculate_keyboard, link_keyboard]]
reply_markup = ReplyKeyboardMarkup(custom_keyboard, resize_keyboard=True)
newlink_filter = Filters.text([newlink]) & (
Filters.user(user_id=POWER_USER_ID) | Filters.user(username=POWER_USER_NAME))
recalculate_filter = Filters.text([recalculate]) & (
Filters.user(user_id=POWER_USER_ID) | Filters.user(username=POWER_USER_NAME))
in_known_filters = newlink_filter | recalculate_filter | Filters.command('start')
def start(update, context):
context.bot.send_message(chat_id=update.effective_chat.id, text="I'm a bot, please talk to me!",
reply_markup=reply_markup)
def unknown(update, context):
context.bot.send_message(chat_id=update.effective_chat.id, text="I'm sorry Dave I'm afraid I can't do that.",
reply_markup=reply_markup)
def daily_status(day, date, planned_month, daily):
return (
planned_month[planned_month["transaction_time"] <= date][
"transaction_amount"
].sum()
- (day + 1) * daily
)
def transactions_left(date, planned_month):
return planned_month[planned_month["transaction_time"] > date][
"transaction_amount"
].sum()
def transactions_made(date, planned_month):
return planned_month[planned_month["transaction_time"] <= date][
"transaction_amount"
].sum()
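# Hedged numeric sketch (added; not in the original bot) of the budget helpers
# above: with a 3000/month plan the daily allowance is 100, so after the 10th
# day (day index 9) with 1250 already booked the status is 1250 - 10*100 = 250.
def _demo_budget_math():
    daily = 3000 / 30
    planned = pd.DataFrame({
        "transaction_time": pd.to_datetime(["2022-01-05", "2022-01-09", "2022-01-20"]),
        "transaction_amount": [500, 750, 900],
    })
    date = pd.Timestamp("2022-01-10")
    assert transactions_made(date, planned) == 1250
    assert transactions_left(date, planned) == 900
    assert daily_status(9, date, planned, daily) == 250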
def generate_link(update, context):
context.bot.send_message(chat_id=update.effective_chat.id, text="5 sec")
now = pd.Timestamp(datetime.now().timestamp(), unit="s").to_period(freq="M")
month = now
client = NotionClient(token_v2=NOTION_TOKEN)
cv = client.get_collection_view(notion_balance)
notion_data = [[row.id, row.date.start, row.credit, row.cash, row.usd] for row in cv.collection.get_rows()]
balance = | pd.DataFrame(notion_data, columns=['id', 'balance_time', 'Credit', 'Cash', 'USD']) | pandas.DataFrame |
from functools import lru_cache
from pyiso import client_factory
from datetime import datetime, timedelta
from funcy import compose, identity, retry
from itertools import repeat
from urllib.error import HTTPError
import pandas as pd
import numpy as np
from app.model import RENEWABLES, NON_RENEWABLES
from app.util import full_hour_series
@retry(5, errors=HTTPError)
@lru_cache()
def raw_generation(ba_name, control_area, start, end):
entso = client_factory(ba_name)
data = entso.get_generation(
latest=True, control_area=control_area, start_at=start, end_at=end
)
return pd.DataFrame(data)
def deduplicate(raw):
"""Sum generation readings that have the same fuel_name"""
by_fuel = raw.groupby(["fuel_name", "timestamp", "freq"])
return by_fuel.agg({"gen_MW": np.sum}).reset_index()
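# Hedged mini-example (added; not in the original module): two readings for
# the same fuel and timestamp are summed into a single row.
def _demo_deduplicate():
    raw = pd.DataFrame({
        "fuel_name": ["wind", "wind", "solar"],
        "timestamp": pd.to_datetime(["2022-01-01 00:15"] * 3),
        "freq": ["15m"] * 3,
        "gen_MW": [10.0, 5.0, 2.0],
    })
    out = deduplicate(raw)
    assert out.loc[out["fuel_name"] == "wind", "gen_MW"].item() == 15.0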
def add_missing_megawatts(raw):
"""Add 0 generation reading for missing timestamps"""
by_fuel = raw.groupby(["fuel_name"])
return pd.concat(
[_group_without_missing_data(g) for _, g in by_fuel], ignore_index=True
)
def _group_without_missing_data(group):
source = group.iloc[0]
source_zero_mw = pd.DataFrame([source])
source_zero_mw.at[source_zero_mw.index[0], "gen_MW"] = 0
# in case first 15 minutes are missing: make sure series starts at 15 minutes past
start = source["timestamp"].replace(minute=15)
end = group.iloc[-1].timestamp
series = full_hour_series(start, end, "15min")
def filler(t):
filler = source_zero_mw.copy()
filler.at[filler.index[0], "timestamp"] = t
return filler
def df_for_timestamp(t):
df = group[group["timestamp"] == t]
return df if len(df) > 0 else filler(t)
return pd.concat([df_for_timestamp(t) for t in series], ignore_index=True)
def downsample(raw):
"""Downsample to 1 hour (original reports use 15m interval)."""
assert raw[raw["freq"] != "15m"].empty
# The last generation report of the day has a timestamp that is
# day+1 at 00:00 (each report contains data of the previous 15 minutes).
# Adjust timestamp a little to get all generation reports within the
# boundary of one day.
raw["timestamp_adjusted"] = raw["timestamp"] - pd.Timedelta("1s")
raw["date"] = raw["timestamp_adjusted"].dt.date
raw["hour"] = raw["timestamp_adjusted"].dt.hour
data = (
raw.groupby([lambda i: i // 4, "fuel_name", "date", "hour"])
.agg({"gen_MW": np.mean})
.reset_index()
.drop(["level_0"], axis=1)
)
timestamps = data.apply(
lambda x: | pd.to_datetime(x["hour"], unit="h", origin=x["date"]) | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 22 09:23:26 2022
@author: <NAME> willi
"""
#%% Packages
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model, metrics
import pandas as pd
from sklearn.model_selection import train_test_split
#%% Loading data (induvidual fund)
y_p = pd.read_csv(r".../Data/Clean/passive_returns_m_df.csv", index_col=0)
y_p = y_p.iloc[:,0:3].dropna()
y_a = pd.read_csv(r".../Data/Clean/active_returns_m_df.csv", index_col=0)
#x = pd.read_csv(r".../Data/Clean/x_mon_df.csv")
x = pd.read_csv(r".../Data/Clean/x_df_2.csv")
#x.drop('USREC', inplace=True, axis=1)
#%% Creating X and y (individual fund)
df = pd.merge(y_p,x, on=['year', 'month'], how = "inner")
df = df.assign(day = 1)
df.index = pd.to_datetime(df[['year', 'month', 'day']])
df.drop(['year', 'month', 'day'], inplace=True, axis=1)
y = df.iloc[:,0]
X = df.iloc[:,1:]
#%% Spliting data (induvidual fund)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
#%% Linear regression (induvidual fund)
model = linear_model.LinearRegression()
model.fit(X_train, y_train)
# regression coefficients
print('Coefficients: ', model.coef_)
# variance score: 1 means perfect prediction
print('Variance score: {}'.format(model.score(X_test, y_test)))
# plot for residual error
plt.style.use('fivethirtyeight')
plt.scatter(model.predict(X_train), model.predict(X_train) - y_train,
color = "green", s = 10, label = 'Train data')
plt.scatter(model.predict(X_test), model.predict(X_test) - y_test,
color = "blue", s = 10, label = 'Test data')
plt.hlines(y = 0, xmin = -0.05, xmax = 0.05, linewidth = 2)
plt.legend(loc = 'upper right')
plt.title("Residual errors")
plt.show()
predictions = model.predict(X_test)
y_bar = y_test.mean()
on = sum((y_test-y_bar)**2)/y_test.shape[0]          # total variance of the test target
vn = sum((y_test-predictions)**2)/y_test.shape[0]    # residual (unexplained) variance
sn = on - vn                                         # explained variance
r2 = sn/on                                           # out-of-sample R^2
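#%% Sanity check (added sketch, not in the original analysis): the manual
# decomposition above is the usual out-of-sample R^2 = 1 - SS_res/SS_tot,
# i.e. the same number sklearn reports.
assert np.isclose(r2, 1 - vn / on)
assert np.isclose(r2, metrics.r2_score(y_test, predictions))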
# =============================================================================
#%% Aggregate y
# =============================================================================
#Loading data (agg_y)
df = pd.read_csv(r".../Data/Clean/X_time_fix_mon.csv")
#%% Creating X and y (agg_y)
X = df.iloc[:,1:]
y = df.iloc[:,0]
#%% Spliting data (agg_y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
#%% Linear regression (agg_y)
model = linear_model.LinearRegression()
model.fit(X_train, y_train)
# regression coefficients
print('Coefficients: ', model.coef_)
# variance score: 1 means perfect prediction
print('Variance score: {}'.format(model.score(X_test, y_test)))
# plot for residual error
plt.style.use('fivethirtyeight')
plt.scatter(model.predict(X_train), model.predict(X_train) - y_train,
color = "green", s = 10, label = 'Train data')
plt.scatter(model.predict(X_test), model.predict(X_test) - y_test,
color = "blue", s = 10, label = 'Test data')
plt.hlines(y = 0, xmin = -0.05, xmax = 0.05, linewidth = 2)
plt.legend(loc = 'upper right')
plt.title("Residual errors")
plt.show()
predictions = model.predict(X_test)
y_bar = y_test.mean()
on = sum((y_test-y_bar)**2)/y_test.shape[0]
vn = sum((y_test-predictions)**2)/y_test.shape[0]
sn = on - vn
r2 = sn/on
# =============================================================================
#%% Regular in loop
# =============================================================================
# Loading data
y_p = pd.read_csv(r".../Data/Clean/passive_returns_m_df.csv", index_col=0)
y_a = pd.read_csv(r".../Data/Clean/active_returns_m_df.csv", index_col=0)
x = pd.read_csv(r".../Data/Clean/x_df_2.csv")
#df = df.assign(day = 1)
#df.index = pd.to_datetime(df[['year', 'month', 'day']])
#df.drop(['year', 'month', 'day'], inplace=True, axis=1)
def num_obs(df):
obs = np.zeros(shape = (df.shape[1]-2,1))
for i in range (df.shape[1]-2):
obs[i] = df.value_counts(subset=df.columns[i+2]).shape[0]
return(obs)
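# A minimal sketch of what num_obs computes (illustrative only; the helper below is
# never called by the script): for each fund column it counts the distinct non-null
# return values, which is used as a proxy for the number of available observations.
def _demo_num_obs():
    demo = pd.DataFrame({'year': [2020, 2020, 2021],
                         'month': [1, 2, 1],
                         'fund_a': [0.01, 0.02, None],
                         'fund_b': [0.03, 0.03, 0.01]})
    return num_obs(demo)  # expected: array([[2.], [2.]])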
n_obs_p = pd.DataFrame(num_obs(y_p))
n_obs_p.index = y_p.columns[2:]
n_obs_a = pd.DataFrame(num_obs(y_a))
n_obs_a.index = y_a.columns[2:]
n_obs_few_p = n_obs_p[n_obs_p>=24].dropna()
n_obs_few_a = n_obs_a[n_obs_a>=24].dropna()
sel_p = n_obs_few_p.index
sel_a = n_obs_few_a.index
y_2_p = y_p[sel_p]
y_2_p.insert(0,'month',y_p['month'])
y_2_p.insert(0,'year',y_p['year'])
y_2_a = y_a[sel_a]
y_2_a.insert(0,'month',y_a['month'])
y_2_a.insert(0,'year',y_a['year'])
# NOTE: the merge below originally referenced an undefined name `y_2`; the passive
# table y_2_p is used here (swap in y_2_a to run the same loop for the active funds).
df = pd.merge(y_2_p, x, on=['year', 'month'], how="inner")
def lin_reg(df,y):
r2 = np.zeros(shape =(y.shape[1]-2,1))
for i in range(y.shape[1]-2):
df_temp = df.dropna(axis = 0, how = 'any', subset=df.columns[i+2])
y_temp = df_temp.iloc[:,i+2]
        X_temp = df_temp.iloc[:, y.shape[1]:]
        X_temp.insert(0, 'const', 1)  # Read that adding a constant would help, but it did not improve the r2
X_train, X_test, y_train, y_test = train_test_split(X_temp, y_temp, test_size=0.3,
random_state=1)
model = linear_model.LinearRegression()
model.fit(X_train, y_train)
r2[i] = model.score(X_test, y_test)
return (r2)
r2_test = lin_reg(df, y_2_p)
r2_test = | pd.DataFrame(r2_test) | pandas.DataFrame |
# import libraries
import os, os.path
import numpy as np
import pandas as pd
# import geopandas as gpd
import sys
from IPython.display import Image
# from shapely.geometry import Point, Polygon
from math import factorial
import scipy
from statsmodels.sandbox.regression.predstd import wls_prediction_std
from sklearn.linear_model import LinearRegression
from patsy import cr
from datetime import date
import datetime
import time
from pprint import pprint
import matplotlib.pyplot as plt
import seaborn as sb
################################################################
#####
##### Function definitions
#####
################################################################
def fill_missing_doi_for_8DayEVI_Landsat_from_human_system_time(land_df):
"""
    input: land_df: a dataframe that comes from the 8-day EVI Landsat data set.
           It includes human_system_start_time, a timestamp column in
           year-month-day form.
    output: the same dataframe, with missing values in the doy column
            filled by converting the timestamp into day-of-year.
"""
# find index of missing values in doy column:
missing_doy_idx = land_df[land_df['doy'].isnull()].index.tolist()
new_DoY = land_df['human_system_start_time'].dt.dayofyear.values
# replace the 366 day with 0
# this works for now. (Feb 16, 2021).
# if in the future we have problem with 365 we can replace that as well.
new_DoY[new_DoY == 366] = 0
missing_doy_values = new_DoY[missing_doy_idx]
land_df.loc[missing_doy_idx, 'doy'] = missing_doy_values
return (land_df)
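# Hedged usage sketch (synthetic two-row frame, defined for illustration only and
# never called by the rest of the script): one missing doy value gets filled in
# from the corresponding timestamp.
def _demo_fill_missing_doy():
    demo = pd.DataFrame({
        'human_system_start_time': pd.to_datetime(['2017-05-01', '2017-05-09']),
        'doy': [121.0, np.nan]})
    return fill_missing_doi_for_8DayEVI_Landsat_from_human_system_time(demo)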
def generate_training_set_important_counties(data_dir, an_f_name, double_poten_dt,
perc_of_fields_to_pick = 10,
NASS_out = True, non_Irr_out = True,
perennials_out = True, last_survey = True):
needed_features = ['ID', 'Acres', 'county', 'CropTyp', 'DataSrc',
'Irrigtn', 'LstSrvD', 'Notes']
#
# initialize training dataframe
#
training_set = pd.DataFrame(data=None, index=None, columns=needed_features, dtype=None, copy=False)
#
# read the data
#
a_dataTS = pd.read_csv(data_dir + an_f_name, low_memory=False)
a_dataTS['CropTyp'] = a_dataTS['CropTyp'].str.lower()
#
# just pick the needed columns
#
a_dataTS = a_dataTS[needed_features]
#
# drop duplicate rows. We are just after field IDs
#
# print ("shape of a_dataTS is [%(nrow)d]." % {"nrow":a_dataTS.shape[0]})
a_dataTS.drop_duplicates(inplace=True)
# print ("shape of a_dataTS is [%(nrow)d]." % {"nrow":a_dataTS.shape[0]})
# print (len(a_dataTS.ID.unique()))
#
# pick eastern counties
#
eastern_counties = ["Okanogan", "Chelan", "Kittitas", "Yakima", "Klickitat", "Douglas",
"Grant", "Benton", "Ferry", "Lincoln", "Adams", "Franklin", "Walla Walla",
"<NAME>", "Stevens", "Spokane", "Whitman", "Garfield", "Columbia", "Asotin"]
a_dataTS = a_dataTS[a_dataTS.county.isin(eastern_counties)]
#
# pick important counties
#
important_counties = ["Grant", "Franklin", "Yakima", "Walla Walla", "Adams", "Benton", "Whitman"]
a_dataTS = a_dataTS[a_dataTS.county.isin(important_counties)]
#
# Filter NASS, last survey date, annuals, irrigated fields.
#
if NASS_out == True:
a_dataTS = filter_out_NASS(a_dataTS)
NASS_name = "_NASSOut_"
else:
NASS_name = "_NASSin_"
if non_Irr_out == True:
a_dataTS = filter_out_nonIrrigated(a_dataTS)
non_Irr_name = "JustIrr_"
else:
non_Irr_name = "BothIrr_"
if perennials_out == True:
a_dataTS = a_dataTS[a_dataTS.CropTyp.isin(double_poten_dt['Crop_Type'])]
Pere_name = "PereOut_"
else:
Pere_name = "PereIn_"
# print ("________________________________")
# print ("after filtering NASS stuff")
# print ("shape of a_dataTS is [%(nrow)d]." % {"nrow":a_dataTS.shape[0]})
# print (len(a_dataTS.ID.unique()))
#
# break the name and pick the year we are looking at.
#
proper_year = an_f_name.split("_")[2]
proper_year = proper_year.split(".")[0]
if last_survey == True:
a_dataTS = a_dataTS[a_dataTS['LstSrvD'].str.contains(proper_year)]
last_survey_name = "LastSurveyFiltered"
else:
last_survey_name = "LastSurveyNotFiltered"
# print ("________________________________")
# print ("after filtering last_survey_name")
# print ("shape of a_dataTS is [%(nrow)d]." % {"nrow":a_dataTS.shape[0]})
# print (len(a_dataTS.ID.unique()))
#
#
#
counties = a_dataTS.county.unique()
# a_county = counties[0]
total_unique_fields_count = 0
total_randomly_chosen_fields_count = 0
for a_county in counties:
a_countys_DT = a_dataTS[a_dataTS.county == a_county]
cultivars = a_countys_DT.CropTyp.unique()
# a_cultivar = cultivars[0]
for a_cultivar in cultivars:
a_cult_in_a_county = a_countys_DT[a_countys_DT.CropTyp == a_cultivar]
unique_fields = a_cult_in_a_county.ID.unique()
number_of_unique_fields = len(unique_fields)
total_unique_fields_count = total_unique_fields_count + number_of_unique_fields
# print ("________________________________________________________________________________")
# print ("number_of_unique_fields is [%(nrow)d]." % {"nrow":number_of_unique_fields})
number_of_fields_to_pick = int(np.ceil(number_of_unique_fields * (perc_of_fields_to_pick/100)))
# print ("number_of_fields_to_pick is [%(nrow)d]." % {"nrow":number_of_fields_to_pick})
# randomly choose from unique fields
randomly_chosen_fields = list(np.random.choice(unique_fields, number_of_fields_to_pick, replace=False))
total_randomly_chosen_fields_count = total_randomly_chosen_fields_count + len(randomly_chosen_fields)
"""
##########
##########
"""
# randomly_chosen_fields_DT = a_cult_in_a_county[a_cult_in_a_county.ID.isin(randomly_chosen_fields)]
randomly_chosen_fields_DT = pd.DataFrame(data=None, index=None,
columns=needed_features, dtype=None, copy=False)
for anID in randomly_chosen_fields:
curr_F = a_cult_in_a_county[a_cult_in_a_county.ID == anID].copy().reset_index(drop=True)
# print ("this should be one " + str(len(curr_F.ID.unique())))
randomly_chosen_fields_DT = | pd.concat([randomly_chosen_fields_DT, curr_F]) | pandas.concat |
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
import requests
import time
from datetime import datetime
import pandas as pd
from urllib import parse
from config import ENV_VARIABLE
from os.path import getsize
fold_path = "./crawler_data/"
page_Max = 100
def stripID(url, wantStrip):
loc = url.find(wantStrip)
length = len(wantStrip)
return url[loc+length:]
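# Illustrative example with a hypothetical URL (not one the crawler actually visits):
# stripID("https://example.com/products/abc123", "/products/") returns "abc123".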
def Kklee():
shop_id = 13
name = 'kklee'
    options = Options()  # launch headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
df = | pd.DataFrame() | pandas.DataFrame |
import os
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset, DataLoader
# from sklearn.preprocessing import StandardScaler
from utils.tools import StandardScaler
from utils.timefeatures import time_features
import warnings
warnings.filterwarnings('ignore')
class Dataset_ETT_hour(Dataset):
def __init__(self, root_path, flag='train', size=None,
features='S', data_path='ETTh1.csv',
target='OT', scale=True, inverse=False, timeenc=0, freq='h', cols=None):
# size [seq_len, label_len, pred_len]
# info
if size == None:
self.seq_len = 24*4*4
self.label_len = 24*4
self.pred_len = 24*4
else:
self.seq_len = size[0]
self.label_len = size[1]
self.pred_len = size[2]
# init
assert flag in ['train', 'test', 'val']
type_map = {'train':0, 'val':1, 'test':2}
self.set_type = type_map[flag]
self.features = features
self.target = target
self.scale = scale
self.inverse = inverse
self.timeenc = timeenc
self.freq = freq
self.flag = flag
self.root_path = root_path
self.data_path = data_path
self.__read_data__()
def __read_data__(self):
self.scaler = StandardScaler()
df_raw = pd.read_csv(os.path.join(self.root_path,
self.data_path))
self.df_raw = df_raw
        # take len() of the dataset and split it by ratio (here 15% train / 80% test)
        num_train = int(len(df_raw)*0.15)
        num_test = int(len(df_raw)*0.80)
        # "vali" is the validation split: whatever is left over after train and test
num_vali = len(df_raw) - num_train - num_test
border1s = [0, num_train-self.seq_len, len(df_raw)-num_test-self.seq_len]
border2s = [num_train, num_train+num_vali, len(df_raw)]
border1 = border1s[self.set_type]
border2 = border2s[self.set_type]
if self.features=='M' or self.features=='MS':
            # Take the column headers and drop the date column. Contents e.g.: Index(['open', 'close', 'TT'], dtype='object')
            cols_data = df_raw.columns[1:]
            # keep every column except date
df_data = df_raw[cols_data]
elif self.features=='S':
df_data = df_raw[[self.target]]
if self.scale:
            # the slice of data used for training
            train_data = df_data[border1s[0]:border2s[0]]
            # compute the mean and std in preparation for scaling
            self.scaler.fit(train_data.values)
            # scale the data
            data = self.scaler.transform(df_data.values)
else:
data = df_data.values
df_stamp = df_raw[['date']][border1:border2]
df_stamp['date'] = pd.to_datetime(df_stamp.date)
data_stamp = time_features(df_stamp, timeenc=self.timeenc, freq=self.freq)
self.data_x = data[border1:border2]
if self.inverse:
self.data_y = df_data.values[border1:border2]
else:
self.data_y = data[border1:border2]
self.data_stamp = data_stamp
        print('Table ' + self.flag + ': \n', df_data[border1:border2])
def __getitem__(self, index):
s_begin = index
s_end = s_begin + self.seq_len
r_begin = s_end - self.label_len
r_end = r_begin + self.label_len + self.pred_len
seq_x = self.data_x[s_begin:s_end]
if self.inverse:
seq_y = np.concatenate([self.data_x[r_begin:r_begin+self.label_len], self.data_y[r_begin+self.label_len:r_end]], 0)
else:
seq_y = self.data_y[r_begin:r_end]
seq_x_mark = self.data_stamp[s_begin:s_end]
seq_y_mark = self.data_stamp[r_begin:r_end]
return seq_x, seq_y, seq_x_mark, seq_y_mark
def __len__(self):
return len(self.data_x) - self.seq_len- self.pred_len + 1
def length(self):
print (len(self.data_x) - self.seq_len- self.pred_len + 1, ' / ', len(self.df_raw) )
def inverse_transform(self, data):
return self.scaler.inverse_transform(data)
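# Hedged usage sketch: the file name, window sizes and batch size below are
# placeholders rather than the original configuration; the helper is defined but
# never called here.
def _demo_ett_hour_loader(root_path='./data', data_path='ETTh1.csv'):
    ds = Dataset_ETT_hour(root_path=root_path, data_path=data_path, flag='train',
                          size=[96, 48, 24], features='M', target='OT')
    return DataLoader(ds, batch_size=32, shuffle=True, drop_last=True)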
class Dataset_ETT_minute(Dataset):
def __init__(self, root_path, flag='train', size=None,
features='S', data_path='ETTm1.csv',
target='OT', scale=True, inverse=False, timeenc=0, freq='t', cols=None):
# size [seq_len, label_len, pred_len]
# info
if size == None:
self.seq_len = 24*4*4
self.label_len = 24*4
self.pred_len = 24*4
else:
self.seq_len = size[0]
self.label_len = size[1]
self.pred_len = size[2]
# init
assert flag in ['train', 'test', 'val']
type_map = {'train':0, 'val':1, 'test':2}
self.set_type = type_map[flag]
self.features = features
self.target = target
self.scale = scale
self.inverse = inverse
self.timeenc = timeenc
self.freq = freq
self.root_path = root_path
self.data_path = data_path
self.__read_data__()
def __read_data__(self):
self.scaler = StandardScaler()
df_raw = pd.read_csv(os.path.join(self.root_path,
self.data_path))
        # take len() of the dataset and split it by ratio (here 97% train / ~2% test)
        num_train = int(len(df_raw)*0.97)
        num_test = int(len(df_raw)*0.0215)
        # "vali" is the validation split: whatever is left over after train and test
num_vali = len(df_raw) - num_train - num_test
border1s = [0, num_train-self.seq_len, len(df_raw)-num_test-self.seq_len]
border2s = [num_train, num_train+num_vali, len(df_raw)]
border1 = border1s[self.set_type]
border2 = border2s[self.set_type]
if self.features=='M' or self.features=='MS':
cols_data = df_raw.columns[1:]
df_data = df_raw[cols_data]
elif self.features=='S':
df_data = df_raw[[self.target]]
if self.scale:
train_data = df_data[border1s[0]:border2s[0]]
self.scaler.fit(train_data.values)
data = self.scaler.transform(df_data.values)
else:
data = df_data.values
df_stamp = df_raw[['date']][border1:border2]
df_stamp['date'] = | pd.to_datetime(df_stamp.date) | pandas.to_datetime |
import numpy as np
import pandas as pd
import logging
logging.getLogger(__name__).addHandler(logging.NullHandler())
logger = logging.getLogger(__name__)
try:
from sklearn.base import TransformerMixin, BaseEstimator
except ImportError:
msg = "scikit-learn not installed"
logger.warning(msg)
try:
from fancyimpute import IterativeImputer, SoftImpute
except ImportError:
msg = "fancyimpute not installed"
logger.warning(msg)
class MultipleImputer(BaseEstimator, TransformerMixin):
"""
Multiple Imputation via fancyimpute.IterativeImputer.
"""
def __init__(self, multiple=5, n_iter=10, groupby=None, *args, **kwargs):
self.multiple = multiple
self.n_iter = n_iter
self.args = args
self.kwargs = kwargs
self.groupby = groupby
def transform(self, X, *args, **kwargs):
assert isinstance(X, pd.DataFrame)
df = pd.DataFrame(columns=X.columns, index=X.index)
if isinstance(self.imputers, dict):
for c, d in self.imputers.items():
mask = d["mask"]
imputers = d["impute"]
imputed_data = np.array([imp.transform(X[mask, :]) for imp in imputers])
mean = np.mean(imputed_data, axis=0)
df.loc[mask, ~ | pd.isnull(X[mask, :]) | pandas.isnull |
from datetime import datetime, timedelta
import unittest
from pandas.core.datetools import (
bday, BDay, BQuarterEnd, BMonthEnd, BYearEnd, MonthEnd,
DateOffset, Week, YearBegin, YearEnd, Hour, Minute, Second,
format, ole2datetime, to_datetime, normalize_date,
getOffset, getOffsetName, inferTimeRule, hasOffsetName)
from nose.tools import assert_raises
####
## Misc function tests
####
def test_format():
actual = format(datetime(2008, 1, 15))
assert actual == '20080115'
def test_ole2datetime():
actual = ole2datetime(60000)
assert actual == datetime(2064, 4, 8)
assert_raises(Exception, ole2datetime, 60)
def test_to_datetime1():
actual = to_datetime(datetime(2008, 1, 15))
assert actual == datetime(2008, 1, 15)
actual = to_datetime('20080115')
assert actual == datetime(2008, 1, 15)
# unparseable
s = 'Month 1, 1999'
assert to_datetime(s) == s
def test_normalize_date():
actual = normalize_date(datetime(2007, 10, 1, 1, 12, 5, 10))
assert actual == datetime(2007, 10, 1)
#####
### DateOffset Tests
#####
class TestDateOffset(object):
def setUp(self):
self.d = datetime(2008, 1, 2)
def test_repr(self):
repr(DateOffset())
repr(DateOffset(2))
repr(2 * DateOffset())
repr(2 * DateOffset(months=2))
def test_mul(self):
assert DateOffset(2) == 2 * DateOffset(1)
assert DateOffset(2) == DateOffset(1) * 2
def test_constructor(self):
assert((self.d + DateOffset(months=2)) == datetime(2008, 3, 2))
assert((self.d - DateOffset(months=2)) == datetime(2007, 11, 2))
assert((self.d + DateOffset(2)) == datetime(2008, 1, 4))
assert not DateOffset(2).isAnchored()
assert DateOffset(1).isAnchored()
d = datetime(2008, 1, 31)
assert((d + DateOffset(months=1)) == datetime(2008, 2, 29))
def test_copy(self):
assert(DateOffset(months=2).copy() == DateOffset(months=2))
class TestBusinessDay(unittest.TestCase):
def setUp(self):
self.d = datetime(2008, 1, 1)
self.offset = BDay()
self.offset2 = BDay(2)
def test_repr(self):
assert repr(self.offset) == '<1 BusinessDay>'
assert repr(self.offset2) == '<2 BusinessDays>'
expected = '<1 BusinessDay: offset=datetime.timedelta(1)>'
assert repr(self.offset + timedelta(1)) == expected
def test_with_offset(self):
offset = self.offset + timedelta(hours=2)
assert (self.d + offset) == datetime(2008, 1, 2, 2)
def testEQ(self):
self.assertEqual(self.offset2, self.offset2)
def test_mul(self):
pass
def test_hash(self):
self.assertEqual(hash(self.offset2), hash(self.offset2))
def testCall(self):
self.assertEqual(self.offset2(self.d), datetime(2008, 1, 3))
def testRAdd(self):
self.assertEqual(self.d + self.offset2, self.offset2 + self.d)
def testSub(self):
off = self.offset2
self.assertRaises(Exception, off.__sub__, self.d)
self.assertEqual(2 * off - off, off)
self.assertEqual(self.d - self.offset2, self.d + BDay(-2))
def testRSub(self):
self.assertEqual(self.d - self.offset2, (-self.offset2).apply(self.d))
def testMult1(self):
self.assertEqual(self.d + 10*self.offset, self.d + BDay(10))
def testMult2(self):
self.assertEqual(self.d + (-5*BDay(-10)),
self.d + BDay(50))
def testRollback1(self):
self.assertEqual(BDay(10).rollback(self.d), self.d)
def testRollback2(self):
self.assertEqual(BDay(10).rollback(datetime(2008, 1, 5)), datetime(2008, 1, 4))
def testRollforward1(self):
self.assertEqual(BDay(10).rollforward(self.d), self.d)
def testRollforward2(self):
self.assertEqual(BDay(10).rollforward(datetime(2008, 1, 5)), datetime(2008, 1, 7))
def test_onOffset(self):
tests = [(BDay(), datetime(2008, 1, 1), True),
(BDay(), datetime(2008, 1, 5), False)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
def test_apply(self):
tests = []
tests.append((bday,
{datetime(2008, 1, 1): datetime(2008, 1, 2),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 8)}))
tests.append((2*bday,
{datetime(2008, 1, 1): datetime(2008, 1, 3),
datetime(2008, 1, 4): datetime(2008, 1, 8),
datetime(2008, 1, 5): datetime(2008, 1, 8),
datetime(2008, 1, 6): datetime(2008, 1, 8),
datetime(2008, 1, 7): datetime(2008, 1, 9)}))
tests.append((-bday,
{datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 3),
datetime(2008, 1, 5): datetime(2008, 1, 4),
datetime(2008, 1, 6): datetime(2008, 1, 4),
datetime(2008, 1, 7): datetime(2008, 1, 4),
datetime(2008, 1, 8): datetime(2008, 1, 7)}))
tests.append((-2*bday,
{datetime(2008, 1, 1): datetime(2007, 12, 28),
datetime(2008, 1, 4): datetime(2008, 1, 2),
datetime(2008, 1, 5): datetime(2008, 1, 3),
datetime(2008, 1, 6): datetime(2008, 1, 3),
datetime(2008, 1, 7): datetime(2008, 1, 3),
datetime(2008, 1, 8): datetime(2008, 1, 4),
datetime(2008, 1, 9): datetime(2008, 1, 7)}))
tests.append((BDay(0),
{datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 4): datetime(2008, 1, 4),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_apply_corner(self):
self.assertRaises(Exception, BDay().apply, BMonthEnd())
def assertOnOffset(offset, date, expected):
actual = offset.onOffset(date)
assert actual == expected
class TestWeek(unittest.TestCase):
def test_corner(self):
self.assertRaises(Exception, Week, weekday=7)
self.assertRaises(Exception, Week, weekday=-1)
def test_isAnchored(self):
self.assert_(Week(weekday=0).isAnchored())
self.assert_(not Week().isAnchored())
self.assert_(not Week(2, weekday=2).isAnchored())
self.assert_(not Week(2).isAnchored())
def test_offset(self):
tests = []
tests.append((Week(), # not business week
{datetime(2008, 1, 1): datetime(2008, 1, 8),
datetime(2008, 1, 4): datetime(2008, 1, 11),
datetime(2008, 1, 5): datetime(2008, 1, 12),
datetime(2008, 1, 6): datetime(2008, 1, 13),
datetime(2008, 1, 7): datetime(2008, 1, 14)}))
tests.append((Week(weekday=0), # Mon
{datetime(2007, 12, 31): datetime(2008, 1, 7),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 14)}))
tests.append((Week(0, weekday=0), # n=0 -> roll forward. Mon
{datetime(2007, 12, 31): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
        tests.append((Week(-2, weekday=1), # n=-2 -> roll back two weeks. Tue
{datetime(2010, 4, 6): datetime(2010, 3, 23),
datetime(2010, 4, 8): datetime(2010, 3, 30),
datetime(2010, 4, 5): datetime(2010, 3, 23)}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_onOffset(self):
tests = [(Week(weekday=0), datetime(2008, 1, 1), False),
(Week(weekday=0), datetime(2008, 1, 2), False),
(Week(weekday=0), datetime(2008, 1, 3), False),
(Week(weekday=0), datetime(2008, 1, 4), False),
(Week(weekday=0), datetime(2008, 1, 5), False),
(Week(weekday=0), datetime(2008, 1, 6), False),
(Week(weekday=0), datetime(2008, 1, 7), True),
(Week(weekday=1), datetime(2008, 1, 1), True),
(Week(weekday=1), datetime(2008, 1, 2), False),
(Week(weekday=1), datetime(2008, 1, 3), False),
(Week(weekday=1), datetime(2008, 1, 4), False),
(Week(weekday=1), datetime(2008, 1, 5), False),
(Week(weekday=1), datetime(2008, 1, 6), False),
(Week(weekday=1), datetime(2008, 1, 7), False),
(Week(weekday=2), datetime(2008, 1, 1), False),
(Week(weekday=2), datetime(2008, 1, 2), True),
(Week(weekday=2), datetime(2008, 1, 3), False),
(Week(weekday=2), datetime(2008, 1, 4), False),
(Week(weekday=2), datetime(2008, 1, 5), False),
(Week(weekday=2), datetime(2008, 1, 6), False),
(Week(weekday=2), datetime(2008, 1, 7), False),
(Week(weekday=3), datetime(2008, 1, 1), False),
(Week(weekday=3), datetime(2008, 1, 2), False),
(Week(weekday=3), datetime(2008, 1, 3), True),
(Week(weekday=3), datetime(2008, 1, 4), False),
(Week(weekday=3), datetime(2008, 1, 5), False),
(Week(weekday=3), datetime(2008, 1, 6), False),
(Week(weekday=3), datetime(2008, 1, 7), False),
(Week(weekday=4), datetime(2008, 1, 1), False),
(Week(weekday=4), datetime(2008, 1, 2), False),
(Week(weekday=4), datetime(2008, 1, 3), False),
(Week(weekday=4), datetime(2008, 1, 4), True),
(Week(weekday=4), datetime(2008, 1, 5), False),
(Week(weekday=4), datetime(2008, 1, 6), False),
(Week(weekday=4), datetime(2008, 1, 7), False),
(Week(weekday=5), datetime(2008, 1, 1), False),
(Week(weekday=5), datetime(2008, 1, 2), False),
(Week(weekday=5), datetime(2008, 1, 3), False),
(Week(weekday=5), datetime(2008, 1, 4), False),
(Week(weekday=5), datetime(2008, 1, 5), True),
(Week(weekday=5), datetime(2008, 1, 6), False),
(Week(weekday=5), datetime(2008, 1, 7), False),
(Week(weekday=6), datetime(2008, 1, 1), False),
(Week(weekday=6), datetime(2008, 1, 2), False),
(Week(weekday=6), datetime(2008, 1, 3), False),
(Week(weekday=6), datetime(2008, 1, 4), False),
(Week(weekday=6), datetime(2008, 1, 5), False),
(Week(weekday=6), datetime(2008, 1, 6), True),
(Week(weekday=6), datetime(2008, 1, 7), False),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestBMonthEnd(unittest.TestCase):
def test_offset(self):
tests = []
tests.append((BMonthEnd(),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2006, 12, 29): datetime(2007, 1, 31),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
datetime(2006, 12, 1): datetime(2006, 12, 29)}))
tests.append((BMonthEnd(0),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 29),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31)}))
tests.append((BMonthEnd(2),
{datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 3, 31),
datetime(2006, 12, 29): datetime(2007, 2, 28),
datetime(2006, 12, 31): datetime(2007, 2, 28),
datetime(2007, 1, 1): datetime(2007, 2, 28),
datetime(2006, 11, 1): datetime(2006, 12, 29)}))
tests.append((BMonthEnd(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 29),
datetime(2008, 6, 30): datetime(2008, 5, 30),
datetime(2008, 12, 31): datetime(2008, 11, 28),
datetime(2006, 12, 29): datetime(2006, 11, 30),
datetime(2006, 12, 30): datetime(2006, 12, 29),
datetime(2007, 1, 1): datetime(2006, 12, 29)}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_onOffset(self):
tests = [(BMonthEnd(), datetime(2007, 12, 31), True),
(BMonthEnd(), datetime(2008, 1, 1), False)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestMonthEnd(unittest.TestCase):
def test_offset(self):
tests = []
tests.append((MonthEnd(),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
datetime(2006, 12, 1): datetime(2006, 12, 31)}))
tests.append((MonthEnd(0),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2006, 12, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31)}))
tests.append((MonthEnd(2),
{datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 3, 31),
datetime(2006, 12, 29): datetime(2007, 1, 31),
datetime(2006, 12, 31): datetime(2007, 2, 28),
datetime(2007, 1, 1): datetime(2007, 2, 28),
datetime(2006, 11, 1): datetime(2006, 12, 31)}))
tests.append((MonthEnd(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2008, 5, 31),
datetime(2008, 12, 31): datetime(2008, 11, 30),
datetime(2006, 12, 29): datetime(2006, 11, 30),
datetime(2006, 12, 30): datetime(2006, 11, 30),
datetime(2007, 1, 1): datetime(2006, 12, 31)}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_onOffset(self):
tests = [(MonthEnd(), datetime(2007, 12, 31), True),
(MonthEnd(), datetime(2008, 1, 1), False)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestBQuarterEnd(unittest.TestCase):
def test_corner(self):
self.assertRaises(Exception, BQuarterEnd, startingMonth=4)
self.assertRaises(Exception, BQuarterEnd, startingMonth=-1)
def test_isAnchored(self):
self.assert_(BQuarterEnd(startingMonth=1).isAnchored())
self.assert_(BQuarterEnd().isAnchored())
self.assert_(not BQuarterEnd(2, startingMonth=1).isAnchored())
def test_offset(self):
tests = []
tests.append((BQuarterEnd(startingMonth=1),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 4, 30),
datetime(2008, 2, 15): datetime(2008, 4, 30),
datetime(2008, 2, 29): datetime(2008, 4, 30),
datetime(2008, 3, 15): datetime(2008, 4, 30),
datetime(2008, 3, 31): datetime(2008, 4, 30),
datetime(2008, 4, 15): datetime(2008, 4, 30),
datetime(2008, 4, 30): datetime(2008, 7, 31),}))
tests.append((BQuarterEnd(startingMonth=2),
{datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2008, 2, 15): datetime(2008, 2, 29),
datetime(2008, 2, 29): datetime(2008, 5, 30),
datetime(2008, 3, 15): datetime(2008, 5, 30),
datetime(2008, 3, 31): datetime(2008, 5, 30),
datetime(2008, 4, 15): datetime(2008, 5, 30),
datetime(2008, 4, 30): datetime(2008, 5, 30),}))
tests.append((BQuarterEnd(startingMonth=1, n=0),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2008, 2, 15): datetime(2008, 4, 30),
datetime(2008, 2, 29): datetime(2008, 4, 30),
datetime(2008, 3, 15): datetime(2008, 4, 30),
datetime(2008, 3, 31): datetime(2008, 4, 30),
datetime(2008, 4, 15): datetime(2008, 4, 30),
datetime(2008, 4, 30): datetime(2008, 4, 30),}))
tests.append((BQuarterEnd(startingMonth=1, n=-1),
{datetime(2008, 1, 1): datetime(2007, 10, 31),
datetime(2008, 1, 31): datetime(2007, 10, 31),
datetime(2008, 2, 15): datetime(2008, 1, 31),
datetime(2008, 2, 29): datetime(2008, 1, 31),
datetime(2008, 3, 15): datetime(2008, 1, 31),
datetime(2008, 3, 31): datetime(2008, 1, 31),
datetime(2008, 4, 15): datetime(2008, 1, 31),
datetime(2008, 4, 30): datetime(2008, 1, 31),}))
tests.append((BQuarterEnd(startingMonth=1, n=2),
{datetime(2008, 1, 31): datetime(2008, 7, 31),
datetime(2008, 2, 15): datetime(2008, 7, 31),
datetime(2008, 2, 29): datetime(2008, 7, 31),
datetime(2008, 3, 15): datetime(2008, 7, 31),
datetime(2008, 3, 31): datetime(2008, 7, 31),
datetime(2008, 4, 15): datetime(2008, 7, 31),
datetime(2008, 4, 30): datetime(2008, 10, 31),}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
# corner
offset = BQuarterEnd(n=-1, startingMonth=1)
self.assertEqual(datetime(2010, 1, 31) + offset, datetime(2010, 1, 29))
def test_onOffset(self):
tests = [(BQuarterEnd(1, startingMonth=1), datetime(2008, 1, 31), True),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 12, 31), False),
(BQuarterEnd(1, startingMonth=1), datetime(2008, 2, 29), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 3, 30), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 3, 31), False),
(BQuarterEnd(1, startingMonth=1), datetime(2008, 4, 30), True),
(BQuarterEnd(1, startingMonth=1), datetime(2008, 5, 30), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 6, 29), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 6, 30), False),
(BQuarterEnd(1, startingMonth=2), datetime(2008, 1, 31), False),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 12, 31), False),
(BQuarterEnd(1, startingMonth=2), datetime(2008, 2, 29), True),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 3, 30), False),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 3, 31), False),
( | BQuarterEnd(1, startingMonth=2) | pandas.core.datetools.BQuarterEnd |
# To add data (bills, elevator, etc.) as input, type append(root_out) in the Python console, where root_out is the path (including the file name) of the csv file your inputs will be saved to.
# To divide expenses, type execute_all_division_funcs(root_in, root_out, root_info) in the Python console.
# To get a balance for specific units over a specified time period, type balance(root_in) in the Python console.
# To obtain a csv file with the transaction history for a specific time period, type transaction_history(root_in) in the Python console.
# To get the percent share of total expenses held by each category or subcategory, type portion_by_category(root_in) or portion_by_subcategory(root_in) in the Python console.
# To get the percent share of total expenses held by each unit, type portion_by_unit(root_in) in the Python console.
# To get a cumulative sum by unit, type cumulative_sum_for_units(root_in); for a cumulative sum by subcategory, type cumulative_sum_for_subcategories(root_in) in the Python console.
# To check the status of the building's total balance, type negative_balance_error(root_in) in the Python console.
# To get an estimate of next year's monthly expenses for each unit, type next_year_expenditure_estimation(root_in, root_info) in the Python console.
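# Hedged example of the console workflow described above (every path below is a
# placeholder; substitute the actual locations of your csv/xlsx files):
# >>> append("data/expense_inputs.csv")
# >>> execute_all_division_funcs("data/expense_inputs.xlsx",
# ...                            "data/transactions.csv",
# ...                            "data/resident_info.xlsx")
# >>> balance("data/transactions.csv")
# >>> portion_by_category("data/transactions.csv")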
def append(root_out: str):
""" This function accepts inputs from the user. The root_out variable is
the path and the name of the csv file that you want to save your inputs into. """
import pandas as pd
import datetime as dt
d = {'amount': [], 'time':[], 'category': [] , 'subcategory': [],
'responsible unit': [], 'related unit': [[]],
'div': [], 'description': []}
amount = int(input('amount:'))
d['amount'].append(amount)
time = input('time( Example: 1399/09/21 ) : ')
d['time'].append(dt.date(int(time[0:4]),int(time[5:7]), int(time[8:])))
category = input("category: 1) bill 2) cleaning 3) elevator 4) parking 5) repairs 6) charge 7) other [1/2/3/4/5/6/7] :")
if category == '1':
d['category'].append('bill')
elif category == '2':
d['category'].append('cleaning')
elif category == '3':
d['category'].append('elevator')
elif category == '4':
d['category'].append('parking')
elif category == '5':
d['category'].append('repairs')
elif category == '6':
d['category'].append('charge')
elif category == '7':
d['category'].append('other')
if category == '1':
subcategory = input('subcategory: 1) water 2) gas 3) electricity 4) tax [1/2/3/4] :')
if subcategory == '1':
subcategory = 'water'
elif subcategory == '2':
subcategory = 'gas'
elif subcategory == '3':
subcategory = 'electricity'
elif subcategory == '4':
subcategory = 'tax'
else:
        subcategory = 'undefined'
d['subcategory'].append(subcategory)
responsible_unit = input('responsible unit:')
d['responsible unit'].append(responsible_unit)
    related_unit = input('related unit: (enter the related units as a comma-separated list, e.g. 1,3,4; to include all units, enter every unit number)').split(',')
for e in related_unit:
d['related unit'][0].append(eval(e))
div = input('div: 1) -e 2) -r 3) -d 4) -a 5) -p [1/2/3/4/5] :(Note that if you have selected charge as a category, -d must be chosen as the division type.)')
if div == '1':
div = 'equal'
d['div'].append(div)
elif div == '2':
div = 'number'
d['div'].append(div)
elif div == '3':
div = 'default'
d['div'].append(div)
elif div == '4':
div = 'area'
d['div'].append(div)
elif div == '5':
div = 'parking'
d['div'].append(div)
description = input('description:')
d['description'].append(description)
i = input('Is there anything left? A)yes B)no [A/B] :')
if i == 'B':
pd.DataFrame(d).to_csv(root_out, mode = 'a', header= False, index = False)
return
else:
pd.DataFrame(d).to_csv(root_out, mode = 'a', header = False, index = False)
append(root_out)
############# expense division functions
# for dividing expenses please type execute_all_division_funcs(root_in, root_out, root_info) in python console
def equal(root_in: str, root_out: str):
""" This function divides expenses evenly between units. The root_in
variable is the path and the name of your user input data excel file.
The root_out variable is the path and the name of the csv file that
you want to save your transactions after running the function. """
import pandas as pd
import numpy as np
user_input_df = pd.read_excel(root_in, names=['amount','time','category','subcategory','related unit','div'],index_col =False)
user_input_df = user_input_df[user_input_df['div'] == 'equal'][['amount','time','category','subcategory','related unit']]
# A series of operations for changing the related unit's class from object to a list. Useful when executing the explode method
user_input_df['related unit'] = user_input_df['related unit'].str.replace('[','')
user_input_df['related unit'] = user_input_df['related unit'].str.replace(']','')
user_input_df['related unit'] = user_input_df['related unit'].str.replace(' ','')
user_input_df['related unit'] = list(user_input_df['related unit'].str.split(','))
costs_for_each_unit = []
for i in range(len(user_input_df['related unit'])):
costs_for_each_unit.append(user_input_df.iloc[i]['amount'] // len(user_input_df.iloc[i]['related unit']))
user_input_df['cost for each unit'] = np.array(costs_for_each_unit)
user_input_df = user_input_df.explode('related unit')
user_input_df.to_csv(root_out, mode = 'a', header = False, index = False)
return
def number(root_in: str, root_out: str, root_info: str):
""" This function divides expenses according to the number of
living in each apartment. The root_in variable is the path and the name
of your user input data excel file. The root_out variable is the path
and the name of the csv file that you want to save your transactions
after running the function. The root_info variable is the path and the
name your building residents' information excel file. """
import pandas as pd
import numpy as np
user_input_df = pd.read_excel(root_in, names=['amount','time','category','subcategory','related unit','div'],index_col=False)
resident_info = pd.read_excel(root_info)
# Changing the column name and making it the same as the other dataframe. Useful when performing merge
resident_info = resident_info.rename(columns = {'number': 'related unit'})
user_input_df = user_input_df[user_input_df['div'] == 'number'][['amount','time','category','subcategory','related unit']]
# A series of operations for changing the related unit's class from object to a list. Useful when executing the explode method
user_input_df['related unit'] = user_input_df['related unit'].str.replace('[','')
user_input_df['related unit'] = user_input_df['related unit'].str.replace(']','')
user_input_df['related unit'] = user_input_df['related unit'].str.replace(' ','')
user_input_df['related unit'] = list(user_input_df['related unit'].str.split(','))
# Calculating the total residents that each row's related units have and adding them as a new column to user_input_df.
resident_info['related unit'] = resident_info['related unit'].astype(str)
total_residents = []
for i in range(len(user_input_df['related unit'])):
total = resident_info[resident_info['related unit'].isin(user_input_df.iloc[i]['related unit'])]['residents'].sum()
total_residents.append(total)
user_input_df['total resident'] = np.array(total_residents)
user_input_df = user_input_df.explode('related unit',ignore_index = True)
# Adding the related residents of each unit after the explosion method
user_input_df = resident_info[['residents','related unit']].merge(user_input_df, on = 'related unit')
user_input_df['cost for each unit'] = (user_input_df['amount'] * user_input_df['residents']) // user_input_df['total resident']
del user_input_df['total resident']
del user_input_df['residents']
user_input_df.to_csv(root_out, mode = 'a', header = False, index = False)
return
def area(root_in: str, root_out: str, root_info: str):
""" This function divides expenses according to the area of each apartment.
The root_in variable is the path and the name of your user input data
excel file. The root_out variable is the path and the name of the csv
file that you want to save your transactions after running the function.
The root_info variable is the path and the name your building residents'
information excel file. """
import pandas as pd
import numpy as np
user_input_df = pd.read_excel(root_in, names=['amount','time','category','subcategory','related unit','div'],index_col=False)
resident_info = | pd.read_excel(root_info) | pandas.read_excel |
import os
import time
import torch
import argparse
import scipy.io
import warnings
from torch.autograd import Variable
from torchvision import datasets, transforms
import dataset
from darknet import Darknet
from utils import *
from MeshPly import MeshPly
import argparse
import pandas as pd
# Create new directory
def makedirs_(path):
if not os.path.exists( path ):
os.makedirs( path )
def truths_length(truths, max_num_gt=50):
for i in range(max_num_gt):
if truths[i][1] == 0:
return i
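# truths_length() returns the number of real ground-truth objects in a zero-padded
# target tensor: it scans at most max_num_gt rows and stops at the first padding row
# (detected by its first coordinate entry being 0); if every row is populated it
# implicitly returns None.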
def valid(datacfg, cfgfile, weightfile, outfile):
# def valid(datacfg, modelcfg, weightfile):
# Parse configuration files
data_options = read_data_cfg(datacfg)
valid_images = data_options['valid']
meshname = data_options['mesh']
backupdir = data_options['backup']
name = data_options['name']
gpus = data_options['gpus']
fx = float(data_options['fx'])
fy = float(data_options['fy'])
u0 = float(data_options['u0'])
v0 = float(data_options['v0'])
im_width = int(data_options['width'])
im_height = int(data_options['height'])
if not os.path.exists(backupdir):
makedirs_(backupdir)
# Parameters
seed = int(time.time())
os.environ['CUDA_VISIBLE_DEVICES'] = gpus
torch.cuda.manual_seed(seed)
save = False
testtime = True
num_classes = 1
testing_samples = 0.0
if save:
makedirs_(backupdir + '/test')
makedirs_(backupdir + '/test/gt')
makedirs_(backupdir + '/test/pr')
# To save
testing_error_trans = 0.0
testing_error_angle = 0.0
testing_error_pixel = 0.0
errs_2d = []
errs_3d = []
errs_trans = []
errs_angle = []
errs_corner2D = []
preds_trans = []
preds_rot = []
preds_corners2D = []
gts_trans = []
gts_rot = []
gts_corners2D = []
# Read object model information, get 3D bounding box corners
mesh = MeshPly(meshname)
vertices = np.c_[np.array(mesh.vertices), np.ones((len(mesh.vertices), 1))].transpose()
corners3D = get_3D_corners(vertices)
try:
        diam = float(data_options['diam'])  # fixed: the original referenced the undefined name 'options'
except:
diam = calc_pts_diameter(np.array(mesh.vertices))
# Read intrinsic camera parameters
intrinsic_calibration = get_camera_intrinsic(u0, v0, fx, fy)
# Get validation file names
with open(valid_images) as fp:
tmp_files = fp.readlines()
valid_files = [item.rstrip() for item in tmp_files]
# Specicy model, load pretrained weights, pass to GPU and set the module in evaluation mode
model = Darknet(cfgfile, distiling=distiling)
model.print_network()
model.load_weights(weightfile)
model.cuda()
model.eval()
test_width = model.test_width
test_height = model.test_height
num_keypoints = model.num_keypoints
num_labels = num_keypoints * 2 + 3
# Get the parser for the test dataset
valid_dataset = dataset.listDataset(valid_images,
shape=(test_width, test_height),
shuffle=False,
transform=transforms.Compose([transforms.ToTensor(),]))
# Specify the number of workers for multiple processing, get the dataloader for the test dataset
kwargs = {'num_workers': 4, 'pin_memory': True}
test_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=1, shuffle=False, **kwargs)
logging(" Testing {}...".format(name))
logging(" Number of test samples: %d" % len(test_loader.dataset))
# Iterate through test batches (Batch size for test data is 1)
count = 0
for batch_idx, (data, target) in enumerate(test_loader):
t1 = time.time()
# Pass data to GPU
data = data.cuda()
target = target.cuda()
# Wrap tensors in Variable class, set volatile=True for inference mode and to use minimal memory during inference
data = Variable(data, volatile=True)
t2 = time.time()
# Forward pass
output = model(data).data
t3 = time.time()
# Using confidence threshold, eliminate low-confidence predictions
all_boxes = get_region_boxes(output, num_classes, num_keypoints)
t4 = time.time()
# Evaluation
# Iterate through all batch elements
for box_pr, target in zip([all_boxes], [target[0]]):
# For each image, get all the targets (for multiple object pose estimation, there might be more than 1 target per image)
truths = target.view(-1, num_keypoints*2+3)
# Get how many objects are present in the scene
num_gts = truths_length(truths)
# Iterate through each ground-truth object
for k in range(num_gts):
box_gt = list()
for j in range(1, 2*num_keypoints+1):
box_gt.append(truths[k][j])
box_gt.extend([1.0, 1.0])
box_gt.append(truths[k][0])
# Denormalize the corner predictions
corners2D_gt = np.array(np.reshape(box_gt[:18], [9, 2]), dtype='float32')
corners2D_pr = np.array(np.reshape(box_pr[:18], [9, 2]), dtype='float32')
corners2D_gt[:, 0] = corners2D_gt[:, 0] * im_width
corners2D_gt[:, 1] = corners2D_gt[:, 1] * im_height
corners2D_pr[:, 0] = corners2D_pr[:, 0] * im_width
corners2D_pr[:, 1] = corners2D_pr[:, 1] * im_height
preds_corners2D.append(corners2D_pr)
gts_corners2D.append(corners2D_gt)
# Compute corner prediction error
corner_norm = np.linalg.norm(corners2D_gt - corners2D_pr, axis=1)
corner_dist = np.mean(corner_norm)
errs_corner2D.append(corner_dist)
# Compute [R|t] by pnp
R_gt, t_gt = pnp(np.array(np.transpose(np.concatenate((np.zeros((3, 1)), corners3D[:3, :]), axis=1)), dtype='float32'), corners2D_gt, np.array(intrinsic_calibration, dtype='float32'))
R_pr, t_pr = pnp(np.array(np.transpose(np.concatenate((np.zeros((3, 1)), corners3D[:3, :]), axis=1)), dtype='float32'), corners2D_pr, np.array(intrinsic_calibration, dtype='float32'))
# Compute translation error
trans_dist = np.sqrt(np.sum(np.square(t_gt - t_pr)))
errs_trans.append(trans_dist)
# Compute angle error
angle_dist = calcAngularDistance(R_gt, R_pr)
errs_angle.append(angle_dist)
# Compute pixel error
Rt_gt = np.concatenate((R_gt, t_gt), axis=1)
Rt_pr = np.concatenate((R_pr, t_pr), axis=1)
proj_2d_gt = compute_projection(vertices, Rt_gt, intrinsic_calibration)
proj_2d_pred = compute_projection(vertices, Rt_pr, intrinsic_calibration)
norm = np.linalg.norm(proj_2d_gt - proj_2d_pred, axis=0)
pixel_dist = np.mean(norm)
errs_2d.append(pixel_dist)
# Compute 3D distances
transform_3d_gt = compute_transformation(vertices, Rt_gt)
transform_3d_pred = compute_transformation(vertices, Rt_pr)
norm3d = np.linalg.norm(transform_3d_gt - transform_3d_pred, axis=0)
vertex_dist = np.mean(norm3d)
errs_3d.append(vertex_dist)
# Sum errors
testing_error_trans += trans_dist
testing_error_angle += angle_dist
testing_error_pixel += pixel_dist
testing_samples += 1
count = count + 1
if save:
preds_trans.append(t_pr)
gts_trans.append(t_gt)
preds_rot.append(R_pr)
gts_rot.append(R_gt)
np.savetxt(backupdir + '/test/gt/R_' + valid_files[count][-8:-3] + 'txt', np.array(R_gt, dtype='float32'))
np.savetxt(backupdir + '/test/gt/t_' + valid_files[count][-8:-3] + 'txt', np.array(t_gt, dtype='float32'))
np.savetxt(backupdir + '/test/pr/R_' + valid_files[count][-8:-3] + 'txt', np.array(R_pr, dtype='float32'))
np.savetxt(backupdir + '/test/pr/t_' + valid_files[count][-8:-3] + 'txt', np.array(t_pr, dtype='float32'))
np.savetxt(backupdir + '/test/gt/corners_' + valid_files[count][-8:-3] + 'txt', np.array(corners2D_gt, dtype='float32'))
np.savetxt(backupdir + '/test/pr/corners_' + valid_files[count][-8:-3] + 'txt', np.array(corners2D_pr, dtype='float32'))
t5 = time.time()
# Compute 2D projection error, 6D pose error, 5cm5degree error
px_threshold = 5 # 5 pixel threshold for 2D reprojection error is standard in recent sota 6D object pose estimation works
eps = 1e-5
acc = len(np.where(np.array(errs_2d) <= px_threshold)[0]) * 100. / (len(errs_2d)+eps)
acc5cm5deg = len(np.where((np.array(errs_trans) <= 0.05) & (np.array(errs_angle) <= 5))[0]) * 100. / (len(errs_trans)+eps)
acc3d10 = len(np.where(np.array(errs_3d) <= diam * 0.1)[0]) * 100. / (len(errs_3d)+eps)
corner_acc = len(np.where(np.array(errs_corner2D) <= px_threshold)[0]) * 100. / (len(errs_corner2D)+eps)
mean_err_2d = np.mean(errs_2d)
mean_corner_err_2d = np.mean(errs_corner2D)
nts = float(testing_samples)
if testtime:
print('-----------------------------------')
print(' tensor to cuda : %f' % (t2 - t1))
print(' forward pass : %f' % (t3 - t2))
print('get_region_boxes : %f' % (t4 - t3))
print(' prediction time : %f' % (t4 - t1))
print(' eval : %f' % (t5 - t4))
print('-----------------------------------')
# Print test statistics
logging('Results of {}'.format(name))
logging(' Acc using {} px 2D Projection = {:.2f}%'.format(px_threshold, acc))
logging(' Acc using 10% threshold - {} vx 3D Transformation = {:.2f}%'.format(diam * 0.1, acc3d10))
logging(' Acc using 5 cm 5 degree metric = {:.2f}%'.format(acc5cm5deg))
logging(" Mean 2D pixel error is %f, Mean vertex error is %f, mean corner error is %f" % (mean_err_2d, np.mean(errs_3d), mean_corner_err_2d))
logging(' Translation error: %f m, angle error: %f degree, pixel error: % f pix' % (testing_error_trans/nts, testing_error_angle/nts, testing_error_pixel/nts) )
result_data = {
'model': modelcfg[23:-4],
'object': datacfg[14:-5],
'2d_projection': acc,
'3d_transformation': acc3d10,
}
csv_output_name = 'valid_metrics_distilling.csv' if distiling else 'valid_metrics.csv'
try:
df = | pd.read_csv(csv_output_name) | pandas.read_csv |
import inspect
import os
import datetime
from collections import OrderedDict
import numpy as np
from numpy import nan, array
import pandas as pd
import pytest
from pandas.util.testing import assert_series_equal, assert_frame_equal
from numpy.testing import assert_allclose
from pvlib import tmy
from pvlib import pvsystem
from pvlib import clearsky
from pvlib import irradiance
from pvlib import atmosphere
from pvlib import solarposition
from pvlib.location import Location
from conftest import needs_numpy_1_10, requires_scipy
latitude = 32.2
longitude = -111
tus = Location(latitude, longitude, 'US/Arizona', 700, 'Tucson')
times = pd.date_range(start=datetime.datetime(2014,1,1),
end=datetime.datetime(2014,1,2), freq='1Min')
ephem_data = solarposition.get_solarposition(times,
latitude=latitude,
longitude=longitude,
method='nrel_numpy')
am = atmosphere.relativeairmass(ephem_data.apparent_zenith)
irrad_data = clearsky.ineichen(ephem_data['apparent_zenith'], am,
linke_turbidity=3)
aoi = irradiance.aoi(0, 0, ephem_data['apparent_zenith'],
ephem_data['azimuth'])
meta = {'latitude': 37.8,
'longitude': -122.3,
'altitude': 10,
'Name': 'Oakland',
'State': 'CA',
'TZ': -8}
pvlib_abspath = os.path.dirname(os.path.abspath(inspect.getfile(tmy)))
tmy3_testfile = os.path.join(pvlib_abspath, 'data', '703165TY.csv')
tmy2_testfile = os.path.join(pvlib_abspath, 'data', '12839.tm2')
tmy3_data, tmy3_metadata = tmy.readtmy3(tmy3_testfile)
tmy2_data, tmy2_metadata = tmy.readtmy2(tmy2_testfile)
def test_systemdef_tmy3():
expected = {'tz': -9.0,
'albedo': 0.1,
'altitude': 7.0,
'latitude': 55.317,
'longitude': -160.517,
'name': '"SAND POINT"',
'strings_per_inverter': 5,
'modules_per_string': 5,
'surface_azimuth': 0,
'surface_tilt': 0}
assert expected == pvsystem.systemdef(tmy3_metadata, 0, 0, .1, 5, 5)
def test_systemdef_tmy2():
expected = {'tz': -5,
'albedo': 0.1,
'altitude': 2.0,
'latitude': 25.8,
'longitude': -80.26666666666667,
'name': 'MIAMI',
'strings_per_inverter': 5,
'modules_per_string': 5,
'surface_azimuth': 0,
'surface_tilt': 0}
assert expected == pvsystem.systemdef(tmy2_metadata, 0, 0, .1, 5, 5)
def test_systemdef_dict():
expected = {'tz': -8, ## Note that TZ is float, but Location sets tz as string
'albedo': 0.1,
'altitude': 10,
'latitude': 37.8,
'longitude': -122.3,
'name': 'Oakland',
'strings_per_inverter': 5,
'modules_per_string': 5,
'surface_azimuth': 0,
'surface_tilt': 5}
assert expected == pvsystem.systemdef(meta, 5, 0, .1, 5, 5)
@needs_numpy_1_10
def test_ashraeiam():
thetas = np.linspace(-90, 90, 9)
iam = pvsystem.ashraeiam(thetas, .05)
expected = np.array([ nan, 0.9193437 , 0.97928932, 0.99588039, 1. ,
0.99588039, 0.97928932, 0.9193437 , nan])
assert_allclose(iam, expected, equal_nan=True)
@needs_numpy_1_10
def test_PVSystem_ashraeiam():
module_parameters = pd.Series({'b': 0.05})
system = pvsystem.PVSystem(module_parameters=module_parameters)
thetas = np.linspace(-90, 90, 9)
iam = system.ashraeiam(thetas)
expected = np.array([ nan, 0.9193437 , 0.97928932, 0.99588039, 1. ,
0.99588039, 0.97928932, 0.9193437 , nan])
assert_allclose(iam, expected, equal_nan=True)
@needs_numpy_1_10
def test_physicaliam():
thetas = np.linspace(-90, 90, 9)
iam = pvsystem.physicaliam(thetas, 1.526, 0.002, 4)
expected = np.array([ nan, 0.8893998 , 0.98797788, 0.99926198, nan,
0.99926198, 0.98797788, 0.8893998 , nan])
assert_allclose(iam, expected, equal_nan=True)
@needs_numpy_1_10
def test_PVSystem_physicaliam():
module_parameters = pd.Series({'K': 4, 'L': 0.002, 'n': 1.526})
system = pvsystem.PVSystem(module_parameters=module_parameters)
thetas = np.linspace(-90, 90, 9)
iam = system.physicaliam(thetas)
expected = np.array([ nan, 0.8893998 , 0.98797788, 0.99926198, nan,
0.99926198, 0.98797788, 0.8893998 , nan])
assert_allclose(iam, expected, equal_nan=True)
# if this completes successfully we'll be able to do more tests below.
@pytest.fixture(scope="session")
def sam_data():
data = {}
data['cecmod'] = pvsystem.retrieve_sam('cecmod')
data['sandiamod'] = pvsystem.retrieve_sam('sandiamod')
data['cecinverter'] = pvsystem.retrieve_sam('cecinverter')
return data
@pytest.fixture(scope="session")
def sapm_module_params(sam_data):
modules = sam_data['sandiamod']
module = 'Canadian_Solar_CS5P_220M___2009_'
module_parameters = modules[module]
return module_parameters
@pytest.fixture(scope="session")
def cec_module_params(sam_data):
modules = sam_data['cecmod']
module = 'Example_Module'
module_parameters = modules[module]
return module_parameters
def test_sapm(sapm_module_params):
times = pd.DatetimeIndex(start='2015-01-01', periods=5, freq='12H')
effective_irradiance = pd.Series([-1, 0.5, 1.1, np.nan, 1], index=times)
temp_cell = pd.Series([10, 25, 50, 25, np.nan], index=times)
out = pvsystem.sapm(effective_irradiance, temp_cell, sapm_module_params)
expected = pd.DataFrame(np.array(
[[ -5.0608322 , -4.65037767, nan, nan,
nan, -4.91119927, -4.15367716],
[ 2.545575 , 2.28773882, 56.86182059, 47.21121608,
108.00693168, 2.48357383, 1.71782772],
[ 5.65584763, 5.01709903, 54.1943277 , 42.51861718,
213.32011294, 5.52987899, 3.48660728],
[ nan, nan, nan, nan,
nan, nan, nan],
[ nan, nan, nan, nan,
nan, nan, nan]]),
columns=['i_sc', 'i_mp', 'v_oc', 'v_mp', 'p_mp', 'i_x', 'i_xx'],
index=times)
assert_frame_equal(out, expected, check_less_precise=4)
out = pvsystem.sapm(1, 25, sapm_module_params)
expected = OrderedDict()
expected['i_sc'] = 5.09115
expected['i_mp'] = 4.5462909092579995
expected['v_oc'] = 59.260800000000003
expected['v_mp'] = 48.315600000000003
expected['p_mp'] = 219.65677305534581
expected['i_x'] = 4.9759899999999995
expected['i_xx'] = 3.1880204359100004
for k, v in expected.items():
assert_allclose(out[k], v, atol=1e-4)
# just make sure it works with a dict input
pvsystem.sapm(effective_irradiance, temp_cell,
sapm_module_params.to_dict())
def test_PVSystem_sapm(sapm_module_params):
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
times = pd.DatetimeIndex(start='2015-01-01', periods=5, freq='12H')
effective_irradiance = pd.Series([-1, 0.5, 1.1, np.nan, 1], index=times)
temp_cell = pd.Series([10, 25, 50, 25, np.nan], index=times)
out = system.sapm(effective_irradiance, temp_cell)
@pytest.mark.parametrize('airmass,expected', [
(1.5, 1.00028714375),
(np.array([[10, np.nan]]), np.array([[0.999535, 0]])),
(pd.Series([5]), pd.Series([1.0387675]))
])
def test_sapm_spectral_loss(sapm_module_params, airmass, expected):
out = pvsystem.sapm_spectral_loss(airmass, sapm_module_params)
if isinstance(airmass, pd.Series):
assert_series_equal(out, expected, check_less_precise=4)
else:
assert_allclose(out, expected, atol=1e-4)
def test_PVSystem_sapm_spectral_loss(sapm_module_params):
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
times = pd.DatetimeIndex(start='2015-01-01', periods=2, freq='12H')
airmass = pd.Series([1, 10], index=times)
out = system.sapm_spectral_loss(airmass)
@pytest.mark.parametrize('aoi,expected', [
(45, 0.9975036250000002),
(np.array([[-30, 30, 100, np.nan]]),
np.array([[np.nan, 1.007572, 0, np.nan]])),
(pd.Series([80]), pd.Series([0.597472]))
])
def test_sapm_aoi_loss(sapm_module_params, aoi, expected):
out = pvsystem.sapm_aoi_loss(aoi, sapm_module_params)
if isinstance(aoi, pd.Series):
assert_series_equal(out, expected, check_less_precise=4)
else:
assert_allclose(out, expected, atol=1e-4)
def test_sapm_aoi_loss_limits():
module_parameters = {'B0': 5, 'B1': 0, 'B2': 0, 'B3': 0, 'B4': 0, 'B5': 0}
assert pvsystem.sapm_aoi_loss(1, module_parameters) == 5
module_parameters = {'B0': 5, 'B1': 0, 'B2': 0, 'B3': 0, 'B4': 0, 'B5': 0}
assert pvsystem.sapm_aoi_loss(1, module_parameters, upper=1) == 1
module_parameters = {'B0': -5, 'B1': 0, 'B2': 0, 'B3': 0, 'B4': 0, 'B5': 0}
assert pvsystem.sapm_aoi_loss(1, module_parameters) == 0
def test_PVSystem_sapm_aoi_loss(sapm_module_params):
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
times = pd.DatetimeIndex(start='2015-01-01', periods=2, freq='12H')
aoi = pd.Series([45, 10], index=times)
out = system.sapm_aoi_loss(aoi)
@pytest.mark.parametrize('test_input,expected', [
([1000, 100, 5, 45, 1000], 1.1400510967821877),
([np.array([np.nan, 1000, 1000]),
np.array([100, np.nan, 100]),
np.array([1.1, 1.1, 1.1]),
np.array([10, 10, 10]),
1000],
np.array([np.nan, np.nan, 1.081157])),
([pd.Series([1000]), pd.Series([100]), pd.Series([1.1]),
pd.Series([10]), 1370],
pd.Series([0.789166]))
])
def test_sapm_effective_irradiance(sapm_module_params, test_input, expected):
try:
kwargs = {'reference_irradiance': test_input[4]}
test_input = test_input[:-1]
except IndexError:
kwargs = {}
test_input.append(sapm_module_params)
out = pvsystem.sapm_effective_irradiance(*test_input, **kwargs)
    if isinstance(expected, pd.Series):
assert_series_equal(out, expected, check_less_precise=4)
else:
assert_allclose(out, expected, atol=1e-4)
def test_PVSystem_sapm_effective_irradiance(sapm_module_params):
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
poa_direct = np.array([np.nan, 1000, 1000])
poa_diffuse = np.array([100, np.nan, 100])
airmass_absolute = np.array([1.1, 1.1, 1.1])
aoi = np.array([10, 10, 10])
reference_irradiance = 1000
out = system.sapm_effective_irradiance(
poa_direct, poa_diffuse, airmass_absolute,
aoi, reference_irradiance=reference_irradiance)
def test_calcparams_desoto(cec_module_params):
times = pd.DatetimeIndex(start='2015-01-01', periods=2, freq='12H')
poa_data = pd.Series([0, 800], index=times)
IL, I0, Rs, Rsh, nNsVth = pvsystem.calcparams_desoto(
poa_data,
temp_cell=25,
alpha_isc=cec_module_params['alpha_sc'],
module_parameters=cec_module_params,
EgRef=1.121,
dEgdT=-0.0002677)
assert_series_equal(np.round(IL, 3), pd.Series([0.0, 6.036], index=times))
assert_allclose(I0, 1.943e-9)
assert_allclose(Rs, 0.094)
assert_series_equal(np.round(Rsh, 3), pd.Series([np.inf, 19.65], index=times))
assert_allclose(nNsVth, 0.473)
def test_PVSystem_calcparams_desoto(cec_module_params):
module_parameters = cec_module_params.copy()
module_parameters['EgRef'] = 1.121
module_parameters['dEgdT'] = -0.0002677
system = pvsystem.PVSystem(module_parameters=module_parameters)
times = pd.DatetimeIndex(start='2015-01-01', periods=2, freq='12H')
poa_data = pd.Series([0, 800], index=times)
temp_cell = 25
IL, I0, Rs, Rsh, nNsVth = system.calcparams_desoto(poa_data, temp_cell)
assert_series_equal(np.round(IL, 3), pd.Series([0.0, 6.036], index=times))
assert_allclose(I0, 1.943e-9)
assert_allclose(Rs, 0.094)
assert_series_equal(np.round(Rsh, 3), pd.Series([np.inf, 19.65], index=times))
assert_allclose(nNsVth, 0.473)
@requires_scipy
def test_v_from_i():
output = pvsystem.v_from_i(20, .1, .5, 3, 6e-7, 7)
assert_allclose(7.5049875193450521, output, atol=1e-5)
@requires_scipy
def test_v_from_i_big():
output = pvsystem.v_from_i(500, 10, 4.06, 0, 6e-10, 1.2)
assert_allclose(86.320000493521079, output, atol=1e-5)
@requires_scipy
def test_v_from_i_bigger():
# 1000 W/m^2 on a Canadian Solar 220M with 20 C ambient temp
# github issue 225
output = pvsystem.v_from_i(190, 1.065, 2.89, 0, 7.05196029e-08, 10.491262)
assert_allclose(54.303958833791455, output, atol=1e-5)
@requires_scipy
def test_i_from_v():
output = pvsystem.i_from_v(20, .1, .5, 40, 6e-7, 7)
assert_allclose(-299.746389916, output, atol=1e-5)
@requires_scipy
def test_PVSystem_i_from_v():
system = pvsystem.PVSystem()
output = system.i_from_v(20, .1, .5, 40, 6e-7, 7)
assert_allclose(-299.746389916, output, atol=1e-5)
@requires_scipy
def test_singlediode_series(cec_module_params):
times = pd.DatetimeIndex(start='2015-01-01', periods=2, freq='12H')
poa_data = pd.Series([0, 800], index=times)
IL, I0, Rs, Rsh, nNsVth = pvsystem.calcparams_desoto(
poa_data,
temp_cell=25,
alpha_isc=cec_module_params['alpha_sc'],
module_parameters=cec_module_params,
EgRef=1.121,
dEgdT=-0.0002677)
out = pvsystem.singlediode(IL, I0, Rs, Rsh, nNsVth)
assert isinstance(out, pd.DataFrame)
@requires_scipy
def test_singlediode_array():
# github issue 221
photocurrent = np.linspace(0, 10, 11)
resistance_shunt = 16
resistance_series = 0.094
nNsVth = 0.473
saturation_current = 1.943e-09
sd = pvsystem.singlediode(photocurrent, saturation_current,
resistance_series, resistance_shunt, nNsVth)
expected = np.array([
0. , 0.54538398, 1.43273966, 2.36328163, 3.29255606,
4.23101358, 5.16177031, 6.09368251, 7.02197553, 7.96846051,
8.88220557])
assert_allclose(sd['i_mp'], expected, atol=0.01)
@requires_scipy
def test_singlediode_floats(sam_data):
module = 'Example_Module'
module_parameters = sam_data['cecmod'][module]
out = pvsystem.singlediode(7, 6e-7, .1, 20, .5)
expected = {'i_xx': 4.2685798754011426,
'i_mp': 6.1390251797935704,
'v_oc': 8.1063001465863085,
'p_mp': 38.194165464983037,
'i_x': 6.7556075876880621,
'i_sc': 6.9646747613963198,
'v_mp': 6.221535886625464,
'i': None,
'v': None}
assert isinstance(out, dict)
for k, v in out.items():
if k in ['i', 'v']:
assert v is None
else:
assert_allclose(expected[k], v, atol=3)
@requires_scipy
def test_singlediode_floats_ivcurve():
out = pvsystem.singlediode(7, 6e-7, .1, 20, .5, ivcurve_pnts=3)
expected = {'i_xx': 4.2685798754011426,
'i_mp': 6.1390251797935704,
'v_oc': 8.1063001465863085,
'p_mp': 38.194165464983037,
'i_x': 6.7556075876880621,
'i_sc': 6.9646747613963198,
'v_mp': 6.221535886625464,
'i': np.array([6.965172e+00, 6.755882e+00, 2.575717e-14]),
'v': np.array([0. , 4.05315, 8.1063])}
assert isinstance(out, dict)
for k, v in out.items():
assert_allclose(expected[k], v, atol=3)
@requires_scipy
def test_singlediode_series_ivcurve(cec_module_params):
times = pd.DatetimeIndex(start='2015-06-01', periods=3, freq='6H')
poa_data = pd.Series([0, 400, 800], index=times)
IL, I0, Rs, Rsh, nNsVth = pvsystem.calcparams_desoto(
poa_data,
temp_cell=25,
alpha_isc=cec_module_params['alpha_sc'],
module_parameters=cec_module_params,
EgRef=1.121,
dEgdT=-0.0002677)
out = pvsystem.singlediode(IL, I0, Rs, Rsh, nNsVth, ivcurve_pnts=3)
expected = OrderedDict([('i_sc', array([ nan, 3.01054475, 6.00675648])),
('v_oc', array([ nan, 9.96886962, 10.29530483])),
('i_mp', array([ nan, 2.65191983, 5.28594672])),
('v_mp', array([ nan, 8.33392491, 8.4159707 ])),
('p_mp', array([ nan, 22.10090078, 44.48637274])),
('i_x', array([ nan, 2.88414114, 5.74622046])),
('i_xx', array([ nan, 2.04340914, 3.90007956])),
('v',
array([[ nan, nan, nan],
[ 0. , 4.98443481, 9.96886962],
[ 0. , 5.14765242, 10.29530483]])),
('i',
array([[ nan, nan, nan],
[ 3.01079860e+00, 2.88414114e+00, 3.10862447e-14],
[ 6.00726296e+00, 5.74622046e+00, 0.00000000e+00]]))])
for k, v in out.items():
assert_allclose(expected[k], v, atol=1e-2)
def test_scale_voltage_current_power(sam_data):
data = pd.DataFrame(
np.array([[2, 1.5, 10, 8, 12, 0.5, 1.5]]),
columns=['i_sc', 'i_mp', 'v_oc', 'v_mp', 'p_mp', 'i_x', 'i_xx'],
index=[0])
expected = pd.DataFrame(
np.array([[6, 4.5, 20, 16, 72, 1.5, 4.5]]),
columns=['i_sc', 'i_mp', 'v_oc', 'v_mp', 'p_mp', 'i_x', 'i_xx'],
index=[0])
out = pvsystem.scale_voltage_current_power(data, voltage=2, current=3)
def test_PVSystem_scale_voltage_current_power():
data = pd.DataFrame(
np.array([[2, 1.5, 10, 8, 12, 0.5, 1.5]]),
columns=['i_sc', 'i_mp', 'v_oc', 'v_mp', 'p_mp', 'i_x', 'i_xx'],
index=[0])
expected = pd.DataFrame(
np.array([[6, 4.5, 20, 16, 72, 1.5, 4.5]]),
columns=['i_sc', 'i_mp', 'v_oc', 'v_mp', 'p_mp', 'i_x', 'i_xx'],
index=[0])
system = pvsystem.PVSystem(modules_per_string=2, strings_per_inverter=3)
out = system.scale_voltage_current_power(data)
def test_sapm_celltemp():
default = pvsystem.sapm_celltemp(900, 5, 20)
assert_allclose(43.509, default.ix[0, 'temp_cell'], 3)
assert_allclose(40.809, default.ix[0, 'temp_module'], 3)
assert_frame_equal(default, pvsystem.sapm_celltemp(900, 5, 20,
[-3.47, -.0594, 3]))
def test_sapm_celltemp_dict_like():
default = pvsystem.sapm_celltemp(900, 5, 20)
assert_allclose(43.509, default.ix[0, 'temp_cell'], 3)
assert_allclose(40.809, default.ix[0, 'temp_module'], 3)
model = {'a':-3.47, 'b':-.0594, 'deltaT':3}
assert_frame_equal(default, pvsystem.sapm_celltemp(900, 5, 20, model))
model = pd.Series(model)
assert_frame_equal(default, pvsystem.sapm_celltemp(900, 5, 20, model))
def test_sapm_celltemp_with_index():
times = pd.DatetimeIndex(start='2015-01-01', end='2015-01-02', freq='12H')
temps = pd.Series([0, 10, 5], index=times)
irrads = pd.Series([0, 500, 0], index=times)
winds = pd.Series([10, 5, 0], index=times)
pvtemps = pvsystem.sapm_celltemp(irrads, winds, temps)
expected = pd.DataFrame({'temp_cell':[0., 23.06066166, 5.],
'temp_module':[0., 21.56066166, 5.]},
index=times)
assert_frame_equal(expected, pvtemps)
def test_PVSystem_sapm_celltemp():
system = pvsystem.PVSystem(racking_model='roof_mount_cell_glassback')
times = pd.DatetimeIndex(start='2015-01-01', end='2015-01-02', freq='12H')
temps = pd.Series([0, 10, 5], index=times)
irrads = pd.Series([0, 500, 0], index=times)
winds = pd.Series([10, 5, 0], index=times)
pvtemps = system.sapm_celltemp(irrads, winds, temps)
expected = pd.DataFrame({'temp_cell':[0., 30.56763059, 5.],
'temp_module':[0., 30.06763059, 5.]},
index=times)
assert_frame_equal(expected, pvtemps)
def test_snlinverter(sam_data):
inverters = sam_data['cecinverter']
testinv = 'ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_'
vdcs = pd.Series(np.linspace(0,50,3))
idcs = pd.Series(np.linspace(0,11,3))
pdcs = idcs * vdcs
pacs = pvsystem.snlinverter(vdcs, pdcs, inverters[testinv])
assert_series_equal(pacs, pd.Series([-0.020000, 132.004308, 250.000000]))
def test_PVSystem_snlinverter(sam_data):
inverters = sam_data['cecinverter']
testinv = 'ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_'
system = pvsystem.PVSystem(inverter=testinv,
inverter_parameters=inverters[testinv])
vdcs = pd.Series(np.linspace(0,50,3))
idcs = pd.Series(np.linspace(0,11,3))
pdcs = idcs * vdcs
pacs = system.snlinverter(vdcs, pdcs)
assert_series_equal(pacs, pd.Series([-0.020000, 132.004308, 250.000000]))
def test_snlinverter_float(sam_data):
inverters = sam_data['cecinverter']
testinv = 'ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_'
vdcs = 25.
idcs = 5.5
pdcs = idcs * vdcs
pacs = pvsystem.snlinverter(vdcs, pdcs, inverters[testinv])
assert_allclose(pacs, 132.004278, 5)
def test_snlinverter_Pnt_micro(sam_data):
inverters = sam_data['cecinverter']
testinv = 'Enphase_Energy__M250_60_2LL_S2x___ZC____NA__208V_208V__CEC_2013_'
vdcs = pd.Series(np.linspace(0,50,3))
idcs = pd.Series(np.linspace(0,11,3))
pdcs = idcs * vdcs
pacs = pvsystem.snlinverter(vdcs, pdcs, inverters[testinv])
assert_series_equal(pacs, pd.Series([-0.043000, 132.545914746, 240.000000]))
def test_PVSystem_creation():
pv_system = pvsystem.PVSystem(module='blah', inverter='blarg')
def test_PVSystem_get_aoi():
system = pvsystem.PVSystem(surface_tilt=32, surface_azimuth=135)
aoi = system.get_aoi(30, 225)
assert np.round(aoi, 4) == 42.7408
def test_PVSystem_get_irradiance():
system = pvsystem.PVSystem(surface_tilt=32, surface_azimuth=135)
times = pd.DatetimeIndex(start='20160101 1200-0700',
end='20160101 1800-0700', freq='6H')
location = Location(latitude=32, longitude=-111)
solar_position = location.get_solarposition(times)
irrads = pd.DataFrame({'dni':[900,0], 'ghi':[600,0], 'dhi':[100,0]},
index=times)
irradiance = system.get_irradiance(solar_position['apparent_zenith'],
solar_position['azimuth'],
irrads['dni'],
irrads['ghi'],
irrads['dhi'])
expected = pd.DataFrame(data=np.array(
[[ 883.65494055, 745.86141676, 137.79352379, 126.397131 ,
11.39639279],
[ 0. , -0. , 0. , 0. , 0. ]]),
columns=['poa_global', 'poa_direct',
'poa_diffuse', 'poa_sky_diffuse',
'poa_ground_diffuse'],
index=times)
assert_frame_equal(irradiance, expected, check_less_precise=2)
def test_PVSystem_localize_with_location():
system = pvsystem.PVSystem(module='blah', inverter='blarg')
location = Location(latitude=32, longitude=-111)
localized_system = system.localize(location=location)
assert localized_system.module == 'blah'
assert localized_system.inverter == 'blarg'
assert localized_system.latitude == 32
assert localized_system.longitude == -111
def test_PVSystem_localize_with_latlon():
system = pvsystem.PVSystem(module='blah', inverter='blarg')
localized_system = system.localize(latitude=32, longitude=-111)
assert localized_system.module == 'blah'
assert localized_system.inverter == 'blarg'
assert localized_system.latitude == 32
assert localized_system.longitude == -111
def test_PVSystem___repr__():
system = pvsystem.PVSystem(module='blah', inverter='blarg')
assert system.__repr__()==('PVSystem with tilt:0 and azimuth:'+
' 180 with Module: blah and Inverter: blarg')
def test_PVSystem_localize___repr__():
system = pvsystem.PVSystem(module='blah', inverter='blarg')
localized_system = system.localize(latitude=32, longitude=-111)
assert localized_system.__repr__()==('LocalizedPVSystem with tilt:0 and'+
' azimuth: 180 with Module: blah and Inverter: blarg at '+
'Latitude: 32 and Longitude: -111')
# we could retest each of the models tested above
# when they are attached to LocalizedPVSystem, but
# that's probably not necessary at this point.
def test_LocalizedPVSystem_creation():
localized_system = pvsystem.LocalizedPVSystem(latitude=32,
longitude=-111,
module='blah',
inverter='blarg')
assert localized_system.module == 'blah'
assert localized_system.inverter == 'blarg'
assert localized_system.latitude == 32
assert localized_system.longitude == -111
def test_LocalizedPVSystem___repr__():
localized_system = pvsystem.LocalizedPVSystem(latitude=32,
longitude=-111,
module='blah',
inverter='blarg')
assert localized_system.__repr__()==('LocalizedPVSystem with tilt:0 and'+
' azimuth: 180 with Module: blah and Inverter: blarg at Latitude: 32 ' +
'and Longitude: -111')
def test_pvwatts_dc_scalars():
expected = 88.65
out = pvsystem.pvwatts_dc(900, 30, 100, -0.003)
assert_allclose(expected, out)
@needs_numpy_1_10
def test_pvwatts_dc_arrays():
irrad_trans = np.array([np.nan, 900, 900])
temp_cell = np.array([30, np.nan, 30])
irrad_trans, temp_cell = np.meshgrid(irrad_trans, temp_cell)
expected = np.array([[ nan, 88.65, 88.65],
[ nan, nan, nan],
[ nan, 88.65, 88.65]])
out = pvsystem.pvwatts_dc(irrad_trans, temp_cell, 100, -0.003)
assert_allclose(expected, out, equal_nan=True)
def test_pvwatts_dc_series():
irrad_trans = pd.Series([np.nan, 900, 900])
temp_cell = pd.Series([30, np.nan, 30])
expected = pd.Series(np.array([ nan, nan, 88.65]))
out = pvsystem.pvwatts_dc(irrad_trans, temp_cell, 100, -0.003)
assert_series_equal(expected, out)
def test_pvwatts_ac_scalars():
expected = 85.58556604752516
out = pvsystem.pvwatts_ac(90, 100, 0.95)
assert_allclose(expected, out)
@needs_numpy_1_10
def test_pvwatts_ac_arrays():
pdc = np.array([[np.nan], [50], [100]])
pdc0 = 100
expected = np.array([[ nan],
[ 47.60843624],
[ 95. ]])
out = pvsystem.pvwatts_ac(pdc, pdc0, 0.95)
assert_allclose(expected, out, equal_nan=True)
def test_pvwatts_ac_series():
pdc = pd.Series([np.nan, 50, 100])
pdc0 = 100
expected = pd.Series(np.array([ nan, 47.608436, 95. ]))
out = pvsystem.pvwatts_ac(pdc, pdc0, 0.95)
assert_series_equal(expected, out)
def test_pvwatts_losses_default():
expected = 14.075660688264469
out = pvsystem.pvwatts_losses()
assert_allclose(expected, out)
@needs_numpy_1_10
def test_pvwatts_losses_arrays():
expected = np.array([nan, 14.934904])
age = np.array([nan, 1])
out = pvsystem.pvwatts_losses(age=age)
assert_allclose(expected, out)
def test_pvwatts_losses_series():
expected = pd.Series([nan, 14.934904])
age = pd.Series([nan, 1])
out = pvsystem.pvwatts_losses(age=age)
assert_series_equal(expected, out)
def make_pvwatts_system():
module_parameters = {'pdc0': 100, 'gamma_pdc': -0.003}
inverter_parameters = {'eta_inv_nom': 0.95}
system = pvsystem.PVSystem(module_parameters=module_parameters,
inverter_parameters=inverter_parameters)
return system
def test_PVSystem_pvwatts_dc():
system = make_pvwatts_system()
irrad_trans = pd.Series([np.nan, 900, 900])
temp_cell = pd.Series([30, np.nan, 30])
expected = pd.Series(np.array([ nan, nan, 88.65]))
out = system.pvwatts_dc(irrad_trans, temp_cell)
assert_series_equal(expected, out)
def test_PVSystem_pvwatts_losses():
system = make_pvwatts_system()
expected = pd.Series([nan, 14.934904])
age = pd.Series([nan, 1])
out = system.pvwatts_losses(age=age)
assert_series_equal(expected, out)
def test_PVSystem_pvwatts_ac():
system = make_pvwatts_system()
pdc = | pd.Series([np.nan, 50, 100]) | pandas.Series |
'''
Author: <NAME>
File: composite_frame
Trello: Goal 1
'''
from typing import List
import numpy as np
import pandas as pd
class Composite_Frame(object):
'''
The Composite_Frame class takes a pandas data frame containing network flow
    information and splits it into a list of frames, each representing the telemetry
of the network at a given time interval.
Dataset used: BoT IoT Dataset (10 best features CSV)
'''
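    # Illustrative usage (a sketch; `flow_df` is a placeholder for the BoT IoT
    # flow frame and the interval units are whatever _split_frame expects):
    #   frames = Composite_Frame(flow_df, interval=60).items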
def __init__(self, frame: pd.DataFrame, interval: int, max_frames: int = -1):
'''
Instance variables:
@self.items -> The list of dataframes derived from the parent frame
@self._interval -> The time interval to split the parent frame on
@self.max_frames -> The maximum number of time frames to include in
the composite frame. Default is set to 10,000
'''
if max_frames < 0:
self.max_frames = 10000
else:
self.max_frames = max_frames
self.items: List[pd.DataFrame] = self._split_frame(frame, interval)
self._interval = interval
def _insert_row(self, row_number: int, df: pd.DataFrame, row_value):
'''
Borrowed this function from the following Geeks for Geeks
article: https://www.geeksforgeeks.org/insert-row-at-given-position-in-pandas-dataframe/
'''
# Split old dataframe
df1 = df[0:row_number]
df2 = df[row_number:]
# Add new row to first subframe
df1.loc[row_number] = row_value
# Create new dataframe from two subframes
df_result = | pd.concat([df1, df2]) | pandas.concat |
#!/usr/bin/env python
import networkx as nx, pandas as pd, sys, csv
from argparse import ArgumentParser
def rowsplit(s): return s.rstrip(";").split(";")
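# e.g. rowsplit("PF00001;PF00002;") -> ["PF00001", "PF00002"]; the rstrip keeps a
# trailing ";" from producing an empty annotation.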
def tab_to_dataframe(infile,families):
df = pd.DataFrame(columns=['Node1','Node2'])
j = 0
with open(infile, 'r') as fh:
for i,row in enumerate(csv.reader(fh,delimiter="\t")):
if i==0: continue
[gene,pf,tigr,cog] = row
pfams = rowsplit(pf)
tigrfams = rowsplit(tigr)
cogs = rowsplit(cog)
store = pfams+tigrfams+cogs
if families: store = list(set(store).intersection(set(families)))
elif len(store)==1:
tmp = pd.DataFrame(data=[store[0],""],index=["Node1","Node2"],columns=[j]).T
df = | pd.concat([df,tmp]) | pandas.concat |
"""
Testing that functions from rpy work as expected
"""
import pandas as pd
import numpy as np
import unittest
import nose
import pandas.util.testing as tm
try:
import pandas.rpy.common as com
from rpy2.robjects import r
import rpy2.robjects as robj
except ImportError:
raise nose.SkipTest('R not installed')
class TestCommon(unittest.TestCase):
def test_convert_list(self):
obj = r('list(a=1, b=2, c=3)')
converted = com.convert_robj(obj)
expected = {'a': [1], 'b': [2], 'c': [3]}
tm.assert_dict_equal(converted, expected)
def test_convert_nested_list(self):
obj = r('list(a=list(foo=1, bar=2))')
converted = com.convert_robj(obj)
expected = {'a': {'foo': [1], 'bar': [2]}}
tm.assert_dict_equal(converted, expected)
def test_convert_frame(self):
# built-in dataset
df = r['faithful']
converted = com.convert_robj(df)
assert np.array_equal(converted.columns, ['eruptions', 'waiting'])
assert np.array_equal(converted.index, np.arange(1, 273))
def _test_matrix(self):
r('mat <- matrix(rnorm(9), ncol=3)')
r('colnames(mat) <- c("one", "two", "three")')
r('rownames(mat) <- c("a", "b", "c")')
return r['mat']
def test_convert_matrix(self):
mat = self._test_matrix()
converted = com.convert_robj(mat)
assert np.array_equal(converted.index, ['a', 'b', 'c'])
assert np.array_equal(converted.columns, ['one', 'two', 'three'])
def test_convert_r_dataframe(self):
is_na = robj.baseenv.get("is.na")
seriesd = | tm.getSeriesData() | pandas.util.testing.getSeriesData |
import os, codecs
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
PATH = '../input/'
# Shared-bike trajectory data
bike_track = pd.concat([
pd.read_csv(PATH + 'gxdc_gj20201221.csv'),
pd.read_csv(PATH + 'gxdc_gj20201222.csv'),
pd.read_csv(PATH + 'gxdc_gj20201223.csv'),
pd.read_csv(PATH + 'gxdc_gj20201224.csv'),
pd.read_csv(PATH + 'gxdc_gj20201225.csv')
])
# Sort by bike ID and locating time
bike_track = bike_track.sort_values(['BICYCLE_ID', 'LOCATING_TIME'])
import folium
m = folium.Map(location=[24.482426, 118.157606], zoom_start=12)
my_PolyLine=folium.PolyLine(locations=bike_track[bike_track['BICYCLE_ID'] == '000152773681a23a7f2d9af8e8902703'][['LATITUDE', 'LONGITUDE']].values,weight=5)
m.add_children(my_PolyLine)
def bike_fence_format(s):
s = s.replace('[', '').replace(']', '').split(',')
s = np.array(s).astype(float).reshape(5, -1)
return s
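# Each FENCE_LOC string looks like "[[lng,lat],[lng,lat],...]" with five vertices
# (the last one closes the ring), hence reshape(5, -1); the [::-1] slices used below
# flip the pairs into (lat, lng) order for the geodesic and geohash calls.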
# Shared-bike parking spot (electronic fence) data
bike_fence = pd.read_csv(PATH + 'gxdc_tcd.csv')
bike_fence['FENCE_LOC'] = bike_fence['FENCE_LOC'].apply(bike_fence_format)
import folium
m = folium.Map(location=[24.482426, 118.157606], zoom_start=12)
for data in bike_fence['FENCE_LOC'].values[:100]:
folium.Marker(
data[0, ::-1]
).add_to(m)
m
# Shared-bike order data
bike_order = pd.read_csv(PATH + 'gxdc_dd.csv')
bike_order = bike_order.sort_values(['BICYCLE_ID', 'UPDATE_TIME'])
import folium
m = folium.Map(location=[24.482426, 118.157606], zoom_start=12)
my_PolyLine=folium.PolyLine(locations=bike_order[bike_order['BICYCLE_ID'] == '0000ff105fd5f9099b866bccd157dc50'][['LATITUDE', 'LONGITUDE']].values,weight=5)
m.add_children(my_PolyLine)
# Rail station inbound passenger-flow data
rail_inflow = pd.read_csv(PATH + 'gdzdtjsj_jzkl.csv')
rail_inflow = rail_inflow.drop(0)
# Rail station outbound passenger-flow data
rail_outflow = pd.read_csv(PATH + 'gdzdtjsj_czkl.csv')
rail_outflow = rail_outflow.drop(0)
# Rail station gate (turnstile) device code data
rail_device = pd.read_csv(PATH + 'gdzdkltj_zjbh.csv')
rail_device.columns = [
'LINE_NO', 'STATION_NO', 'STATION_NAME',
'A_IN_MANCHINE', 'A_OUT_MANCHINE',
'B_IN_MANCHINE', 'B_OUT_MANCHINE'
]
rail_device = rail_device.drop(0)
# Get the LATITUDE range of each parking spot
bike_fence['MIN_LATITUDE'] = bike_fence['FENCE_LOC'].apply(lambda x: np.min(x[:, 1]))
bike_fence['MAX_LATITUDE'] = bike_fence['FENCE_LOC'].apply(lambda x: np.max(x[:, 1]))
# Get the LONGITUDE range of each parking spot
bike_fence['MIN_LONGITUDE'] = bike_fence['FENCE_LOC'].apply(lambda x: np.min(x[:, 0]))
bike_fence['MAX_LONGITUDE'] = bike_fence['FENCE_LOC'].apply(lambda x: np.max(x[:, 0]))
from geopy.distance import geodesic
# Approximate each spot's size as the geodesic distance (in metres) between the corners of its bounding range
bike_fence['FENCE_AREA'] = bike_fence.apply(lambda x: geodesic(
(x['MIN_LATITUDE'], x['MIN_LONGITUDE']), (x['MAX_LATITUDE'], x['MAX_LONGITUDE'])
).meters, axis=1)
# Compute each parking spot's center latitude/longitude
bike_fence['FENCE_CENTER'] = bike_fence['FENCE_LOC'].apply(
lambda x: np.mean(x[:-1, ::-1], 0)
)
import geohash
bike_order['geohash'] = bike_order.apply(
lambda x: geohash.encode(x['LATITUDE'], x['LONGITUDE'], precision=6),
axis=1)
bike_fence['geohash'] = bike_fence['FENCE_CENTER'].apply(
lambda x: geohash.encode(x[0], x[1], precision=6)
)
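# geohash.encode(lat, lng, precision=6) maps a point to a ~1.2 km x 0.6 km cell id
# (strings like 'wsk593' used below), so orders and fences can be matched by cell.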
bike_order['UPDATE_TIME'] = pd.to_datetime(bike_order['UPDATE_TIME'])
bike_order['DAY'] = bike_order['UPDATE_TIME'].dt.day.astype(object)
bike_order['DAY'] = bike_order['DAY'].apply(str)
bike_order['HOUR'] = bike_order['UPDATE_TIME'].dt.hour.astype(object)
bike_order['HOUR'] = bike_order['HOUR'].apply(str)
bike_order['HOUR'] = bike_order['HOUR'].str.pad(width=2,side='left',fillchar='0')
# Concatenate day and hour, e.g. day '21' + hour '09' -> '2109'
bike_order['DAY_HOUR'] = bike_order['DAY'] + bike_order['HOUR']
bike_inflow = pd.pivot_table(bike_order[bike_order['LOCK_STATUS'] == 1],
values='LOCK_STATUS', index=['geohash'],
columns=['DAY_HOUR'], aggfunc='count', fill_value=0
)
bike_outflow = pd.pivot_table(bike_order[bike_order['LOCK_STATUS'] == 0],
values='LOCK_STATUS', index=['geohash'],
columns=['DAY_HOUR'], aggfunc='count', fill_value=0
)
bike_inflow.loc['wsk593'].plot()
bike_outflow.loc['wsk593'].plot()
plt.xticks(list(range(bike_inflow.shape[1])), bike_inflow.columns, rotation=40)
plt.legend(['Inflow', 'Outflow'])
bike_inflow.loc['wsk52r'].plot()
bike_outflow.loc['wsk52r'].plot()
plt.xticks(list(range(bike_inflow.shape[1])), bike_inflow.columns, rotation=40)
plt.legend(['Inflow', 'Outflow'])
bike_inflow = pd.pivot_table(bike_order[bike_order['LOCK_STATUS'] == 1],
values='LOCK_STATUS', index=['geohash'],
columns=['DAY'], aggfunc='count', fill_value=0
)
bike_outflow = pd.pivot_table(bike_order[bike_order['LOCK_STATUS'] == 0],
values='LOCK_STATUS', index=['geohash'],
columns=['DAY'], aggfunc='count', fill_value=0
)
bike_remain = (bike_inflow - bike_outflow).fillna(0)
# The number of bikes ridden away can exceed the number that arrived
bike_remain[bike_remain < 0] = 0
# Aggregate the remainders across days
bike_remain = bike_remain.sum(1)
# There are 993 streets in total
bike_fence['STREET'] = bike_fence['FENCE_ID'].apply(lambda x: x.split('_')[0])
# Remaining bikes / total parking area per street gives the density
bike_density = bike_fence.groupby(['STREET'])['geohash'].unique().apply(
lambda hs: np.sum([bike_remain[x] for x in hs])
) / bike_fence.groupby(['STREET'])['FENCE_AREA'].sum()
# Sort by density in descending order
bike_density = bike_density.sort_values(ascending=False).reset_index()
from sklearn.neighbors import NearestNeighbors
# https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.DistanceMetric.html
knn = NearestNeighbors(metric = "haversine", n_jobs=-1, algorithm='brute')
knn.fit(np.stack(bike_fence['FENCE_CENTER'].values))
# Takes roughly 11 seconds
dist, index = knn.kneighbors(bike_order[['LATITUDE','LONGITUDE']].values[:20000], n_neighbors=1)
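# Note: scikit-learn's haversine metric expects [lat, lon] in radians; the raw degree
# values passed here make this nearest-fence lookup only a rough approximation.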
import hnswlib
import numpy as np
p = hnswlib.Index(space='l2', dim=2)
p.init_index(max_elements=300000, ef_construction=1000, M=32)
p.set_ef(1024)
p.set_num_threads(14)
p.add_items(np.stack(bike_fence['FENCE_CENTER'].values))
index, dist = p.knn_query(bike_order[['LATITUDE','LONGITUDE']].values[:], k=1)
bike_order['fence'] = bike_fence.iloc[index.flatten()]['FENCE_ID'].values
bike_inflow = pd.pivot_table(bike_order[bike_order['LOCK_STATUS'] == 1],
values='LOCK_STATUS', index=['fence'],
columns=['DAY'], aggfunc='count', fill_value=0
)
bike_outflow = pd.pivot_table(bike_order[bike_order['LOCK_STATUS'] == 0],
values='LOCK_STATUS', index=['fence'],
columns=['DAY'], aggfunc='count', fill_value=0
)
bike_remain = (bike_inflow - bike_outflow).fillna(0)
bike_remain[bike_remain < 0] = 0
bike_remain = bike_remain.sum(1)
bike_density = bike_remain / bike_fence.set_index('FENCE_ID')['FENCE_AREA']
bike_density = bike_density.sort_values(ascending=False).reset_index()
bike_density = bike_density.fillna(0)
bike_order['UPDATE_TIME'] = | pd.to_datetime(bike_order['UPDATE_TIME']) | pandas.to_datetime |
import pandas as pd
import sys
import os
import numpy as np
import signatureanalyzer as sa
from typing import Union
import nimfa
from tqdm import tqdm
import sklearn
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from qtl.norm import deseq2_size_factors
import warnings
warnings.filterwarnings("ignore")
def bnmf(df: pd.DataFrame, K: int = 4, n_run: int = 10, **kwargs):
"""
Binary matrix factorization wrapper.
----------------------
Args:
* pd.DataFrame: (features x samples)
Returns:
* H: pd.Dataframe (n_samples x K)
* W: pd.DataFrame (K x N_features)
"""
bmf = nimfa.Bmf(df.values, rank=K, n_run=n_run, **kwargs)
bmf_fit = bmf()
W = | pd.DataFrame(bmf_fit.fit.W, index=df.index) | pandas.DataFrame |
from sklearn.metrics import confusion_matrix, classification_report
from matplotlib.colors import LinearSegmentedColormap
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.pyplot import figure
import os
import warnings
warnings.filterwarnings("ignore")
def heatconmat(y_true, y_pred, homepath, mode="binary"):
cmap_reds = plt.get_cmap("Reds")
num_colors = 50
colors = ["white", "grey"] + [cmap_reds(i / num_colors) for i in range(2, num_colors)]
cmap2 = LinearSegmentedColormap.from_list('', colors, num_colors)
sns.set_context('talk')
df = pd.Series(y_true)
if(mode == "binary"):
plt.figure(figsize=(4,4))
else:
plt.figure(figsize=(18,12))
data = confusion_matrix(df,y_pred)
sns.heatmap(data,
annot=True,
fmt='d',
vmin=0, vmax=num_colors,
cbar=False,
# mask = data <= 0,
#cmap='gist_earth_r',
cmap=cmap2,
yticklabels=sorted(df.unique()))
# plt.imshow(data, interpolation='none')
# plt.colorbar()
# plt.xticks(sorted(df.unique()),fontsize=12)
# plt.yticks(sorted(df.unique()),fontsize=12)
# plt.grid(True)
# plt.show()
print(classification_report(df,y_pred))
# Directory
directory = "plots"
# Parent Directory path
parent_dir = homepath
# Path
path = os.path.join(parent_dir, directory)
if not os.path.exists(path):
os.mkdir(path)
    plt.savefig(homepath+"/plots/plot.png")
    plt.show()
def classification(homepath, classifier, mode="binary", save_model=""):
'''
Parameters
----------
`homepath` (str):
Path where you want to save all the generated files
and folders
    `classifier` (sklearn's classification model):
Provide the classification model's instance you want
to use. For example: RandomForestClassifier(n_estimators=100).
    `classifier` (str):
If you want to use "Neural Network" then just type
"NN". For example: classifier = "NN"
`mode` (str):
        There are two modes: 1) binary, 2) multi. Use "binary"
for binary classification & "multi" for multiclass
classification. (default: mode = "binary")
`save_model` (str):
Optional parameter. Use it only if you want to save
the model. For example: save_model = "your_model_name"
Return:
-------
None
Outputs:
--------
Classification report such as precision, recall, f1-score, mcc, accuracy.
Confusion matrix as plot saved in ~/plots/plot.png. Models saved in
    ~/model_save/your_model_name_<mode>.sav or ~/model_save/your_model_name_<mode>.h5
'''
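    # Illustrative call (a sketch; the path and estimator below are placeholders):
    #   from sklearn.ensemble import RandomForestClassifier
    #   classification("/home/user/project", RandomForestClassifier(n_estimators=100),
    #                  mode="binary", save_model="rf_binary")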
if(mode == "binary"):
import io
import numpy as np
import pandas as pd
from sklearn.metrics import matthews_corrcoef
import os
import joblib
import tensorflow as tf
# from keras.models import Sequential
# from keras.layers import Dense
#train data load
Cancer = pd.read_csv(homepath+"/train_data/bin_Cancer.txt.bz2",header=None, delimiter = "\t")
Normal = pd.read_csv(homepath+"/train_data/bin_Normal.txt.bz2",header=None, delimiter = "\t")
Cancer['Target'] = 1
Normal['Target'] = 0
#Normal = Normal.drop(Normal.index[0])
frame = [Cancer,Normal]
Data = pd.concat(frame,axis=0)
X_train = Data.iloc[:,:len(Data.columns)-1]
y_train = Data.iloc[:,len(Data.columns)-1]
#test data load
Cancer = pd.read_csv(homepath+"/test_data/bin_Cancer.txt.bz2",header=None, delimiter = "\t")
Normal = pd.read_csv(homepath+"/test_data/bin_Normal.txt.bz2",header=None, delimiter = "\t")
Cancer['Target'] = 1
Normal['Target'] = 0
#Normal = Normal.drop(Normal.index[0])
frame = [Cancer,Normal]
Data = pd.concat(frame,axis=0)
Data = Data.drop(Data.index[0])
X_test = Data.iloc[:,:len(Data.columns)-1]
y_test = Data.iloc[:,len(Data.columns)-1]
if(type(classifier) != str ):
clf = classifier
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
heatconmat(y_test, y_pred, homepath, mode)
print("MCC Score: ",matthews_corrcoef(y_test, y_pred))
if(len(save_model) != 0):
# Directory
directory = "model_save"
# Parent Directory path
parent_dir = homepath
# Path
path = os.path.join(parent_dir, directory)
if not os.path.exists(path):
os.mkdir(path)
joblib.dump(clf, homepath+"/model_save/"+save_model+"_"+mode+".sav")
elif(type(classifier) == str and classifier == "NN"):
# define the keras model
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(1024, input_dim=len(Data.columns)-1, activation='relu'))
model.add(tf.keras.layers.Dense(512, activation='relu'))
model.add(tf.keras.layers.Dense(256, activation='relu'))
model.add(tf.keras.layers.Dense(128, activation='relu'))
model.add(tf.keras.layers.Dense(64, activation='relu'))
model.add(tf.keras.layers.Dense(32, activation='relu'))
model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
# compile the keras model
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# fit the keras model on the dataset
model.fit(X_train, y_train, epochs=100, batch_size=100)
y_pred = model.predict_classes(X_test)
y_pred_seris = pd.Series(y_pred.flatten())
heatconmat(y_test, y_pred_seris, homepath, mode)
print("MCC Score: ",matthews_corrcoef(y_test, y_pred))
if(len(save_model) != 0):
# Directory
directory = "model_save"
# Parent Directory path
parent_dir = homepath
# Path
path = os.path.join(parent_dir, directory)
if not os.path.exists(path):
os.mkdir(path)
model.save(homepath+"/model_save/"+save_model+"_"+mode+".h5")
else:
print("classifier error. please check your 'classifier' parameter")
return
elif(mode == "multi"):
import io
import numpy as np
import pandas as pd
from sklearn.metrics import matthews_corrcoef
import os
import joblib
from keras.models import Sequential
from keras.layers import Dense
from sklearn.preprocessing import LabelEncoder
from keras.utils import np_utils
import tensorflow as tf
#train load
Cancer = pd.read_csv(homepath+"/train_data/mul_Cancer.txt.bz2",header=None, delimiter = "\t")
Normal = | pd.read_csv(homepath+"/train_data/mul_Normal.txt.bz2",header=None, delimiter = "\t") | pandas.read_csv |
import seaborn as sns
import pandas as pd
import geopandas as gpd
import numpy as np
import matplotlib.pyplot as plt
from pandas.io.json import json_normalize
from pysal.lib import weights
from sklearn import cluster
from shapely.geometry import Point
# # # # # PET DATA # # # # #
# filename = "pets.json"
# with open(filename, 'r') as f:
# objects = ijson.items
# austin dangerous dog api
urlD = 'https://data.austintexas.gov/resource/ykw4-j3aj.json'
# austin stray dog data
urlS = 'https://data.austintexas.gov/resource/hye6-gvq2.json'
# found_df / austin found pets pandas data frame constructor
pets_df = pd.read_json(urlS, orient='records')
location_df = json_normalize(pets_df['location'])
concat_df = pd.concat([pets_df, location_df], axis=1)
found_df = concat_df.drop(concat_df.columns[0:7], axis=1)
found_df = found_df.drop(found_df.columns[[2, 4, 6, 10]], axis=1)
address_df = pd.DataFrame(columns=['address', 'city', 'zip_code'])
for i, row in location_df.iterrows():
rowStr = row['human_address']
splitRow = rowStr.split('\"')
address = splitRow[3]
city = splitRow[7]
zipCode = splitRow[15]
address_df = address_df.append({'address': address, 'city': city, 'zip_code': zipCode}, ignore_index=True)
found_df = pd.concat([found_df, address_df], axis=1)
# formatting address correctly
for i, row in found_df.iterrows():
rowStr = row['city']
splitRow = rowStr.split(' ')
# ADD MORE LOCALITIES HERE IF NEEDED IN DATASET
if splitRow[0] not in ('AUSTIN', 'PFLUGERVILLE', 'LAKEWAY', ''):
for j in splitRow:
if j in ('AUSTIN', 'PFLUGERVILLE', 'LAKEWAY'):
found_df.at[i, 'city'] = j
else:
found_df.at[i, 'city'] = ''
found_df.at[i, 'address'] = ''
# danger_df austin dangerous dogs pandas data frame constructor
danger_df = | pd.read_json(urlD) | pandas.read_json |
import os
import cv2
import json
import dlib
import shutil
import joblib
import exifread
import warnings
import numpy as np
import pandas as pd
import face_recognition
from pathlib import Path
from joblib import Parallel, delayed
def get_model(cfg):
from tensorflow.keras import applications
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import SGD, Adam
base_model = getattr(applications, cfg.model.model_name)(
include_top=False,
input_shape=(cfg.model.img_size, cfg.model.img_size, 3),
pooling="avg"
)
features = base_model.output
pred_gender = Dense(units=2, activation="softmax", name="pred_gender")(features)
pred_age = Dense(units=101, activation="softmax", name="pred_age")(features)
model = Model(inputs=base_model.input, outputs=[pred_gender, pred_age])
return model
detector = dlib.get_frontal_face_detector()
def overwrite(dir):
if os.path.exists(dir):
shutil.rmtree(dir)
os.mkdir(dir)
def extract(source_dir, age_gender=False, exif=False):
from omegaconf import OmegaConf
from tensorflow.keras.utils import get_file
global output_dir, network_dir, face_dir, detector, model
output_dir=os.path.join(Path(source_dir), "Face Network/")
network_dir=os.path.join(output_dir, "Data/")
face_dir=os.path.join(output_dir, "Faces/")
overwrite(output_dir)
overwrite(network_dir)
overwrite(face_dir)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
pretrained_model = "https://github.com/yu4u/age-gender-estimation/releases/download/v0.6/EfficientNetB3_224_weights.11-3.44.hdf5"
modhash = '6d7f7b7ced093a8b3ef6399163da6ece'
weight_file = get_file("EfficientNetB3_224_weights.11-3.44.hdf5", pretrained_model, cache_subdir="pretrained_models",
file_hash=modhash, cache_dir=str(Path(__file__).resolve().parent))
# load model and weights
model_name, img_size = Path(weight_file).stem.split("_")[:2]
img_size = int(img_size)
cfg = OmegaConf.from_dotlist([f"model.model_name={model_name}", f"model.img_size={img_size}"])
model = get_model(cfg)
model.load_weights(weight_file)
img_list=makelist('.jpg', source_dir=source_dir)
all_images=pd.DataFrame()
count=len(img_list)
print("Analyzing {} images".format(count))
cpus=joblib.cpu_count()-1
    rows=Parallel(n_jobs=cpus)(delayed(crop_face)(a, face_dir, model, age_gender) for a in img_list)
all_images=pd.concat(rows)
all_images.to_hdf(network_dir+'FaceDatabase.h5', 'index', 'w',complevel=9)
print("Face images stored in:", network_dir)
return all_images
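# Illustrative end-to-end usage of this module (the photo directory is a placeholder):
#   faces = extract("/path/to/photos", age_gender=True)
#   face_db = cluster("/path/to/photos", algorithm='DBSCAN', initial_eps=0.44, iterations=3)
#   network("/path/to/photos", scale=10)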
def makelist(extension, source_dir):
templist=[]
for subdir, dirs, files in os.walk(source_dir):
dirs[:] = [d for d in dirs if d not in 'Faces']
for file in files:
if extension in os.path.join(subdir, file):
f=os.path.join(subdir, file)
templist.append(f)
return templist
def crop_face(image_path, face_dir, model, age_gender=False, exif=False):
import face_recognition
img_name=image_path.split('/')[-1]
img = cv2.imread(str(image_path), 1)
if img is not None:
h, w, _ = img.shape
r = 1080 / max(w, h)
img=cv2.resize(img, (int(w * r), int(h * r)), interpolation = cv2.INTER_AREA)
input_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img_h, img_w, _ = np.shape(input_img)
# detect faces using dlib detector
detected = detector(input_img, 1)
faces = np.empty((len(detected), 224, 224, 3))
rows=pd.DataFrame()
if len(detected) > 0:
for i, d in enumerate(detected):
margin=0.4
face_img_name="face{}_{}".format(str(i+1), img_name)
x1, y1, x2, y2, w, h = d.left(), d.top(), d.right() + 1, d.bottom() + 1, d.width(), d.height()
xw1 = max(int(x1 - margin * w), 0)
yw1 = max(int(y1 - margin * h), 0)
xw2 = min(int(x2 + margin * w), img_w - 1)
yw2 = min(int(y2 + margin * h), img_h - 1)
crop_face=img[yw1:yw2 + 1, xw1:xw2 + 1]
encoding = face_recognition.face_encodings(crop_face)
faces[i] = cv2.resize(img[yw1:yw2 + 1, xw1:xw2 + 1], (224, 224), interpolation = cv2.INTER_AREA)
if len(encoding)==0:
break
if age_gender:
# predict ages and genders of the detected faces
results = model.predict(faces)
predicted_genders = results[0]
ages = np.arange(0, 101).reshape(101, 1)
predicted_ages = results[1].dot(ages).flatten()
age=int(predicted_ages[i])
gender=predicted_genders[i][0]
img_row = dict({
'img_path':image_path,
'img_name':img_name,
'face_name':face_img_name,
'encoding': encoding[0],
'age':age,
'gender':gender
})
else:
img_row = dict({
'img_path':image_path,
'img_name':img_name,
'face_name':face_img_name,
'encoding': encoding[0]
})
rows=rows.append(img_row, ignore_index=True)
cv2.imwrite(face_dir+face_img_name, crop_face)
return rows
def match(row, results, core=False):
# To assess the quality of the clusters, this function calculates the cosine distance between facial encodings within the same cluster.
if row['cluster']>=0:
#get the facial encoding and cluster ID of the reference face
face=row['encoding']
cluster=row['cluster']
# Get the face that is most similar to the other faces in the cluster (the "best" face)
if core:
sub=results[results['cluster']==cluster].sort_values(by='cluster_distance',ascending=True).iloc[0]
sub_encoding=[np.array(sub['encoding'])]
# Isolate faces in the same cluster as the reference face
else:
sub=results[results['cluster']==cluster]
sub_encoding=list(sub['encoding'])
# calculate the mean cosine distance between the reference face and all the other faces in this cluster
        # if core=True, calculate the cosine distance between the reference face and the "best" face in the cluster
matches = face_recognition.face_distance(face, sub_encoding)
mean_score=int(np.mean(matches)*100)
else:
mean_score=np.NaN
return mean_score
def cluster(source_dir, algorithm='DBSCAN', initial_eps=0.44, iterations=1, max_distance=50, mosaic=True, plot=False):
from sklearn.cluster import DBSCAN
from sklearn.cluster import OPTICS
from sklearn.cluster import AgglomerativeClustering
from sklearn import preprocessing
global network_dir, face_db, cluster_dir, output_dir
output_dir=os.path.join(source_dir, "Face Network/")
network_dir=os.path.join(output_dir, "Data/")
face_db=pd.read_hdf(network_dir+"FaceDatabase.h5")
cluster_dir=os.path.join(output_dir, "Clusters/")
face_dir=os.path.join(output_dir, "Faces/")
if algorithm=='chinese_whispers':
final_results=chinese_whispers(source_dir, threshold=initial_eps, mosaic=mosaic, plot=plot)
return final_results
# Create empty df to store results
final_results=pd.DataFrame()
exit=False
for i in range(1,iterations+1):
print('Iteration {}, Algorithm:{}, EPS: {}'.format(i,algorithm,initial_eps))
encodings=list(face_db['encoding'])
face_names=list(face_db['face_name'])
img_names=list(face_db['img_name'])
if algorithm=='OPTICS':
clt = OPTICS()
clt.fit(encodings)
exit=True
if algorithm=='DBSCAN':
# Decrease EPS by 0.01 each iteration
eps=initial_eps-(i/100)
clt = DBSCAN(eps=eps, min_samples=3, n_jobs=-1, metric='euclidean', algorithm='kd_tree')
clt.fit(encodings)
if algorithm=='AHC':
eps=3-.2
clt = AgglomerativeClustering(distance_threshold=eps, compute_full_tree=True, n_clusters=None)
# Conduct clustering and save results to a dataframe
model=clt.fit(encodings)
clt.labels_=clt.labels_#+1
#plot_dendrogram(model, img_names)
results=pd.DataFrame({'face_name':face_names, 'img_name':img_names, 'cluster':clt.labels_, 'encoding':encodings})
def parallel_apply(chunk, df, core=False):
if core:
chunk['cluster_distance_core']=chunk.apply(lambda x: match(x, df, core=True), axis=1)
else:
chunk['cluster_distance']=chunk.apply(lambda x: match(x, df), axis=1)
return chunk
cpus=joblib.cpu_count()-1
df_split = np.array_split(results, cpus)
rows=Parallel(n_jobs=cpus)(delayed(parallel_apply)(chunk, results) for chunk in df_split)
results=pd.concat(rows)
rows=Parallel(n_jobs=cpus)(delayed(parallel_apply)(chunk, results, core=True) for chunk in df_split)
results=pd.concat(rows)
# Small clusters and faces with high cosine distance (bad matches) are assigned to a bin cluster with ID -2
results['cluster']=np.where(results['cluster_distance_core']>max_distance+10,-2,results['cluster'])
counts=results.groupby('cluster')['face_name'].count().reset_index().rename(columns={'face_name':'count'})
results=results.merge(counts, how='left',on='cluster')
results['cluster']=np.where(results['count']<5,-2,results['cluster'])
results=results.drop(columns='count')
# Calculate the median cosine distance and percentage of outliers for each cluster.
outliers=results.groupby('cluster')[['cluster_distance_core']].agg({'cluster_distance_core':'median'}).reset_index().rename(columns={'cluster_distance_core':'cluster_distance_mean'})
results=results.merge(outliers, how='left',on='cluster')
# Assign clusters with a high average cosine distance and those in the bin clusters (-1, -2) to face_db for reanalysis
# Add faces in clusters with low average cosine distance (<40) to final output
face_db=results[(results['cluster_distance_mean']>max_distance) | (results['cluster']<0)]
results=results[(results['cluster_distance_mean']<=max_distance) & (results['cluster']>=0)]
# Count the number of images in each cluster
counts=results.groupby('cluster')['face_name'].count().reset_index().rename(columns={'face_name':'count'})
results=results.merge(counts, how='left',on='cluster')
# Generate a cluster code; the first four numbers indicate the number of the iteration, followed by the cluster ID.
results['cluster']=results['cluster'].apply(lambda x: int((str(i)*4 )+ str(x)))
final_results=final_results.append(results)
print("Matched: ", len(final_results),"(+{})".format(len(results)))
print("Unmatched: ", len(face_db))
#exit=True
# When no new matches are found, switch to a more flexible clustering algorithm for the final pass.
# OPTICS allows for clusters of varying densities.
if i>(iterations-1)/2:
algorithm='DBSCAN'
#if (len(results) ==0 or i==iterations-1):
# algorithm='OPTICS'
if (len(results) ==0 or len(face_db)==0):
exit=True
if exit:
break
face_db['cluster']=-2
final_results=final_results.append(face_db).sort_values(by='count',ascending=False)
le=preprocessing.LabelEncoder()
le.fit(final_results['cluster'])
final_results['cluster']=le.transform(final_results['cluster'])
final_results.reset_index(inplace=False)
final_results.to_hdf(network_dir+'FaceDatabase.h5', 'index', 'w',complevel=9)
if mosaic:
# build a mosaic of face tiles for each cluster
overwrite(cluster_dir)
clusters=final_results['cluster'].unique().tolist()
clusters = [ elem for elem in clusters if elem > 0]
cpus=joblib.cpu_count()-1
rows=Parallel(n_jobs=cpus)(delayed(build_mosaic)(cluster,final_results,face_dir,cluster_dir) for cluster in clusters)
return final_results
def chinese_whispers(source_dir, threshold=0.55, iterations=20, mosaic=True, plot=False):
""" Chinese Whispers Algorithm
Modified from <NAME>' implementation,
http://alexloveless.co.uk/data/chinese-whispers-graph-clustering-in-python/
Inputs:
encoding_list: a list of facial encodings from face_recognition
        threshold: facial match threshold, default 0.55
iterations: since chinese whispers is an iterative algorithm, number of times to iterate
Outputs:
sorted_clusters: a list of clusters, a cluster being a list of imagepaths,
sorted by largest cluster to smallest
"""
output_dir=os.path.join(source_dir, "Face Network/")
network_dir=os.path.join(output_dir, "Data/")
face_db=pd.read_hdf(network_dir+"FaceDatabase.h5")
cluster_dir=os.path.join(output_dir, "Clusters/")
face_dir=os.path.join(output_dir, "Faces/")
encodings= list(face_db['encoding'])
image_paths=list(face_db['face_name'])
from random import shuffle
import networkx as nx
# Create graph
nodes = []
edges = []
if len(encodings) <= 1:
print ("No enough encodings to cluster!")
return []
for idx, face_encoding_to_check in enumerate(encodings):
# Adding node of facial encoding
node_id = idx+1
# Initialize 'cluster' to unique value (cluster of itself)
node = (node_id, {'cluster': image_paths[idx], 'path': image_paths[idx]})
nodes.append(node)
# Facial encodings to compare
if (idx+1) >= len(encodings):
# Node is last element, don't create edge
break
compare_encodings = encodings[idx+1:]
distances = face_recognition.face_distance(compare_encodings, face_encoding_to_check)
encoding_edges = []
for i, distance in enumerate(distances):
if distance < threshold:
# Add edge if facial match
edge_id = idx+i+2
encoding_edges.append((node_id, edge_id, {'weight': distance}))
edges = edges + encoding_edges
G = nx.Graph()
G.add_nodes_from(nodes)
G.add_edges_from(edges)
# Iterate
for _ in range(0, iterations):
cluster_nodes = G.nodes()
#shuffle(cluster_nodes)
for node in cluster_nodes:
neighbors = G[node]
clusters = {}
for ne in neighbors:
if isinstance(ne, int):
if G.nodes[ne]['cluster'] in clusters:
clusters[G.nodes[ne]['cluster']] += G[node][ne]['weight']
else:
clusters[G.nodes[ne]['cluster']] = G[node][ne]['weight']
# find the class with the highest edge weight sum
edge_weight_sum = 0
max_cluster = 0
#use the max sum of neighbor weights class as current node's class
for cluster in clusters:
if clusters[cluster] > edge_weight_sum:
edge_weight_sum = clusters[cluster]
max_cluster = cluster
# set the class of target node to the winning local class
G.nodes[node]['cluster'] = max_cluster
if plot:
print(G.nodes)
import matplotlib.pyplot as plt
nx.draw(G, node_size=2, edge_color='grey')
plt.show()
clusters = {}
# Prepare cluster output
for (_, data) in G.nodes.items():
cluster = data['cluster']
path = data['path']
#print(cluster, path)
if cluster:
if cluster not in clusters:
clusters[cluster] = []
clusters[cluster].append(path)
# Sort cluster output
sorted_clusters = sorted(clusters.values(), key=len, reverse=True)
length=[]
cluster_master=pd.DataFrame()
count=0
for cluster in sorted_clusters:
count+=1
cluster_df=pd.DataFrame({'cluster':count,'face_name':cluster})
cluster_master=cluster_master.append(cluster_df)
if 'cluster' in face_db:
face_db=face_db.drop(columns=['cluster'])
if 'count' in face_db:
face_db=face_db.drop(columns=['count'])
face_db=face_db.merge(cluster_master, on='face_name',how='left')
face_db['cluster']=face_db['cluster'].fillna(-1).astype(int)
counts=face_db.groupby('cluster')['face_name'].count().reset_index().rename(columns={'face_name':'count'})
face_db=face_db.merge(counts, how='left',on='cluster')
print(face_db)
face_db.to_hdf(network_dir+'FaceDatabase.h5', 'index', 'w',complevel=9)
if mosaic:
# build a mosaic of face tiles for each cluster
overwrite(cluster_dir)
clusters=face_db['cluster'].unique().tolist()
clusters = [ elem for elem in clusters if elem > 0]
cpus=joblib.cpu_count()-1
rows=Parallel(n_jobs=cpus)(delayed(build_mosaic)(cluster,face_db,face_dir,cluster_dir) for cluster in clusters)
return face_db
def network(source_dir, scale=10):
from pyvis.network import Network
import networkx as nx
global network_dir, face_db, face_dir, output_dir
output_dir=os.path.join(source_dir, "Face Network/")
face_dir=os.path.join(output_dir, "Faces/")
network_dir=os.path.join(output_dir, "Data/")
face_db= | pd.read_hdf(network_dir+"FaceDatabase.h5") | pandas.read_hdf |
# pylint: disable-msg=W0612,E1101,W0141
import nose
from numpy.random import randn
import numpy as np
from pandas.core.index import Index, MultiIndex
from pandas import Panel, DataFrame, Series, notnull, isnull
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.core.common as com
import pandas.util.testing as tm
from pandas.compat import (range, lrange, StringIO, lzip, u, cPickle,
product as cart_product, zip)
import pandas as pd
import pandas.index as _index
class TestMultiLevel(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.frame = DataFrame(np.random.randn(10, 3), index=index,
columns=Index(['A', 'B', 'C'], name='exp'))
self.single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]],
names=['first'])
# create test series object
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
s[3] = np.NaN
self.series = s
tm.N = 100
self.tdf = tm.makeTimeDataFrame()
self.ymd = self.tdf.groupby([lambda x: x.year, lambda x: x.month,
lambda x: x.day]).sum()
# use Int64Index, to make sure things work
self.ymd.index.set_levels([lev.astype('i8')
for lev in self.ymd.index.levels],
inplace=True)
self.ymd.index.set_names(['year', 'month', 'day'],
inplace=True)
def test_append(self):
a, b = self.frame[:5], self.frame[5:]
result = a.append(b)
tm.assert_frame_equal(result, self.frame)
result = a['A'].append(b['A'])
tm.assert_series_equal(result, self.frame['A'])
def test_dataframe_constructor(self):
multi = DataFrame(np.random.randn(4, 4),
index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
self.assertNotIsInstance(multi.columns, MultiIndex)
multi = DataFrame(np.random.randn(4, 4),
columns=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.columns, MultiIndex)
def test_series_constructor(self):
multi = Series(1., index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(1., index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(lrange(4), index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
def test_reindex_level(self):
# axis=0
month_sums = self.ymd.sum(level='month')
result = month_sums.reindex(self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum)
assert_frame_equal(result, expected)
# Series
result = month_sums['A'].reindex(self.ymd.index, level=1)
expected = self.ymd['A'].groupby(level='month').transform(np.sum)
assert_series_equal(result, expected)
# axis=1
month_sums = self.ymd.T.sum(axis=1, level='month')
result = month_sums.reindex(columns=self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum).T
assert_frame_equal(result, expected)
def test_binops_level(self):
def _check_op(opname):
op = getattr(DataFrame, opname)
month_sums = self.ymd.sum(level='month')
result = op(self.ymd, month_sums, level='month')
broadcasted = self.ymd.groupby(level='month').transform(np.sum)
expected = op(self.ymd, broadcasted)
assert_frame_equal(result, expected)
# Series
op = getattr(Series, opname)
result = op(self.ymd['A'], month_sums['A'], level='month')
broadcasted = self.ymd['A'].groupby(
level='month').transform(np.sum)
expected = op(self.ymd['A'], broadcasted)
assert_series_equal(result, expected)
_check_op('sub')
_check_op('add')
_check_op('mul')
_check_op('div')
def test_pickle(self):
def _test_roundtrip(frame):
pickled = cPickle.dumps(frame)
unpickled = cPickle.loads(pickled)
assert_frame_equal(frame, unpickled)
_test_roundtrip(self.frame)
_test_roundtrip(self.frame.T)
_test_roundtrip(self.ymd)
_test_roundtrip(self.ymd.T)
def test_reindex(self):
reindexed = self.frame.ix[[('foo', 'one'), ('bar', 'one')]]
expected = self.frame.ix[[0, 3]]
assert_frame_equal(reindexed, expected)
def test_reindex_preserve_levels(self):
new_index = self.ymd.index[::10]
chunk = self.ymd.reindex(new_index)
self.assertIs(chunk.index, new_index)
chunk = self.ymd.ix[new_index]
self.assertIs(chunk.index, new_index)
ymdT = self.ymd.T
chunk = ymdT.reindex(columns=new_index)
self.assertIs(chunk.columns, new_index)
chunk = ymdT.ix[:, new_index]
self.assertIs(chunk.columns, new_index)
def test_sort_index_preserve_levels(self):
result = self.frame.sort_index()
self.assertEquals(result.index.names, self.frame.index.names)
def test_repr_to_string(self):
repr(self.frame)
repr(self.ymd)
repr(self.frame.T)
repr(self.ymd.T)
buf = StringIO()
self.frame.to_string(buf=buf)
self.ymd.to_string(buf=buf)
self.frame.T.to_string(buf=buf)
self.ymd.T.to_string(buf=buf)
def test_repr_name_coincide(self):
index = MultiIndex.from_tuples([('a', 0, 'foo'), ('b', 1, 'bar')],
names=['a', 'b', 'c'])
df = DataFrame({'value': [0, 1]}, index=index)
lines = repr(df).split('\n')
self.assert_(lines[2].startswith('a 0 foo'))
def test_getitem_simple(self):
df = self.frame.T
col = df['foo', 'one']
assert_almost_equal(col.values, df.values[:, 0])
self.assertRaises(KeyError, df.__getitem__, ('foo', 'four'))
self.assertRaises(KeyError, df.__getitem__, 'foobar')
def test_series_getitem(self):
s = self.ymd['A']
result = s[2000, 3]
result2 = s.ix[2000, 3]
expected = s.reindex(s.index[42:65])
expected.index = expected.index.droplevel(0).droplevel(0)
assert_series_equal(result, expected)
result = s[2000, 3, 10]
expected = s[49]
self.assertEquals(result, expected)
# fancy
result = s.ix[[(2000, 3, 10), (2000, 3, 13)]]
expected = s.reindex(s.index[49:51])
assert_series_equal(result, expected)
# key error
self.assertRaises(KeyError, s.__getitem__, (2000, 3, 4))
def test_series_getitem_corner(self):
s = self.ymd['A']
# don't segfault, GH #495
# out of bounds access
self.assertRaises(IndexError, s.__getitem__, len(self.ymd))
# generator
result = s[(x > 0 for x in s)]
expected = s[s > 0]
assert_series_equal(result, expected)
def test_series_setitem(self):
s = self.ymd['A']
s[2000, 3] = np.nan
self.assert_(isnull(s.values[42:65]).all())
self.assert_(notnull(s.values[:42]).all())
self.assert_(notnull(s.values[65:]).all())
s[2000, 3, 10] = np.nan
self.assert_(isnull(s[49]))
def test_series_slice_partial(self):
pass
def test_frame_getitem_setitem_boolean(self):
df = self.frame.T.copy()
values = df.values
result = df[df > 0]
expected = df.where(df > 0)
assert_frame_equal(result, expected)
df[df > 0] = 5
values[values > 0] = 5
assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
assert_almost_equal(df.values, values)
with assertRaisesRegexp(TypeError, 'boolean values only'):
df[df * 0] = 2
def test_frame_getitem_setitem_slice(self):
# getitem
result = self.frame.ix[:4]
expected = self.frame[:4]
assert_frame_equal(result, expected)
# setitem
cp = self.frame.copy()
cp.ix[:4] = 0
self.assert_((cp.values[:4] == 0).all())
self.assert_((cp.values[4:] != 0).all())
def test_frame_getitem_setitem_multislice(self):
levels = [['t1', 't2'], ['a', 'b', 'c']]
labels = [[0, 0, 0, 1, 1], [0, 1, 2, 0, 1]]
midx = MultiIndex(labels=labels, levels=levels, names=[None, 'id'])
df = DataFrame({'value': [1, 2, 3, 7, 8]}, index=midx)
result = df.ix[:, 'value']
assert_series_equal(df['value'], result)
result = df.ix[1:3, 'value']
assert_series_equal(df['value'][1:3], result)
result = df.ix[:, :]
assert_frame_equal(df, result)
result = df
df.ix[:, 'value'] = 10
result['value'] = 10
assert_frame_equal(df, result)
df.ix[:, :] = 10
assert_frame_equal(df, result)
def test_frame_getitem_multicolumn_empty_level(self):
f = DataFrame({'a': ['1', '2', '3'],
'b': ['2', '3', '4']})
f.columns = [['level1 item1', 'level1 item2'],
['', 'level2 item2'],
['level3 item1', 'level3 item2']]
result = f['level1 item1']
expected = DataFrame([['1'], ['2'], ['3']], index=f.index,
columns=['level3 item1'])
assert_frame_equal(result, expected)
def test_frame_setitem_multi_column(self):
df = DataFrame(randn(10, 4), columns=[['a', 'a', 'b', 'b'],
[0, 1, 0, 1]])
cp = df.copy()
cp['a'] = cp['b']
assert_frame_equal(cp['a'], cp['b'])
# set with ndarray
cp = df.copy()
cp['a'] = cp['b'].values
assert_frame_equal(cp['a'], cp['b'])
#----------------------------------------
# #1803
columns = MultiIndex.from_tuples([('A', '1'), ('A', '2'), ('B', '1')])
df = DataFrame(index=[1, 3, 5], columns=columns)
# Works, but adds a column instead of updating the two existing ones
df['A'] = 0.0 # Doesn't work
self.assertTrue((df['A'].values == 0).all())
# it broadcasts
df['B', '1'] = [1, 2, 3]
df['A'] = df['B', '1']
assert_series_equal(df['A', '1'], df['B', '1'])
assert_series_equal(df['A', '2'], df['B', '1'])
def test_getitem_tuple_plus_slice(self):
# GH #671
df = DataFrame({'a': lrange(10),
'b': lrange(10),
'c': np.random.randn(10),
'd': np.random.randn(10)})
idf = df.set_index(['a', 'b'])
result = idf.ix[(0, 0), :]
expected = idf.ix[0, 0]
expected2 = idf.xs((0, 0))
assert_series_equal(result, expected)
assert_series_equal(result, expected2)
def test_getitem_setitem_tuple_plus_columns(self):
# GH #1013
df = self.ymd[:5]
result = df.ix[(2000, 1, 6), ['A', 'B', 'C']]
expected = df.ix[2000, 1, 6][['A', 'B', 'C']]
assert_series_equal(result, expected)
def test_getitem_multilevel_index_tuple_unsorted(self):
index_columns = list("abc")
df = DataFrame([[0, 1, 0, "x"], [0, 0, 1, "y"]],
columns=index_columns + ["data"])
df = df.set_index(index_columns)
query_index = df.index[:1]
rs = df.ix[query_index, "data"]
xp = Series(['x'], index=MultiIndex.from_tuples([(0, 1, 0)]))
assert_series_equal(rs, xp)
def test_xs(self):
xs = self.frame.xs(('bar', 'two'))
xs2 = self.frame.ix[('bar', 'two')]
assert_series_equal(xs, xs2)
assert_almost_equal(xs.values, self.frame.values[4])
# GH 6574
        # missing values in the returned index should be preserved
acc = [
('a','abcde',1),
('b','bbcde',2),
('y','yzcde',25),
('z','xbcde',24),
('z',None,26),
('z','zbcde',25),
('z','ybcde',26),
]
df = DataFrame(acc, columns=['a1','a2','cnt']).set_index(['a1','a2'])
expected = DataFrame({ 'cnt' : [24,26,25,26] }, index=Index(['xbcde',np.nan,'zbcde','ybcde'],name='a2'))
result = df.xs('z',level='a1')
assert_frame_equal(result, expected)
def test_xs_partial(self):
result = self.frame.xs('foo')
result2 = self.frame.ix['foo']
expected = self.frame.T['foo'].T
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
result = self.ymd.xs((2000, 4))
expected = self.ymd.ix[2000, 4]
assert_frame_equal(result, expected)
# ex from #1796
index = MultiIndex(levels=[['foo', 'bar'], ['one', 'two'], [-1, 1]],
labels=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(8, 4), index=index,
columns=list('abcd'))
result = df.xs(['foo', 'one'])
expected = df.ix['foo', 'one']
assert_frame_equal(result, expected)
def test_xs_level(self):
result = self.frame.xs('two', level='second')
expected = self.frame[self.frame.index.get_level_values(1) == 'two']
expected.index = expected.index.droplevel(1)
assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([('x', 'y', 'z'), ('a', 'b', 'c'),
('p', 'q', 'r')])
df = DataFrame(np.random.randn(3, 5), index=index)
result = df.xs('c', level=2)
expected = df[1:2]
expected.index = expected.index.droplevel(2)
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = self.frame.xs('two', level='second')
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
def test_xs_level_multiple(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
        df = read_table(StringIO(text), sep=r'\s+', engine='python')
result = df.xs(('a', 4), level=['one', 'four'])
expected = df.xs('a').xs(4, level='four')
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = df.xs(('a', 4), level=['one', 'four'])
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
# GH2107
dates = lrange(20111201, 20111205)
ids = 'abcde'
idx = MultiIndex.from_tuples([x for x in cart_product(dates, ids)])
idx.names = ['date', 'secid']
df = DataFrame(np.random.randn(len(idx), 3), idx, ['X', 'Y', 'Z'])
rs = df.xs(20111201, level='date')
xp = df.ix[20111201, :]
assert_frame_equal(rs, xp)
def test_xs_level0(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
        df = read_table(StringIO(text), sep=r'\s+', engine='python')
result = df.xs('a', level=0)
expected = df.xs('a')
self.assertEqual(len(result), 2)
assert_frame_equal(result, expected)
def test_xs_level_series(self):
s = self.frame['A']
result = s[:, 'two']
expected = self.frame.xs('two', level=1)['A']
assert_series_equal(result, expected)
s = self.ymd['A']
result = s[2000, 5]
expected = self.ymd.ix[2000, 5]['A']
assert_series_equal(result, expected)
# not implementing this for now
self.assertRaises(TypeError, s.__getitem__, (2000, slice(3, 4)))
# result = s[2000, 3:4]
# lv =s.index.get_level_values(1)
# expected = s[(lv == 3) | (lv == 4)]
# expected.index = expected.index.droplevel(0)
# assert_series_equal(result, expected)
# can do this though
def test_get_loc_single_level(self):
s = Series(np.random.randn(len(self.single_level)),
index=self.single_level)
for k in self.single_level.values:
s[k]
def test_getitem_toplevel(self):
df = self.frame.T
result = df['foo']
expected = df.reindex(columns=df.columns[:3])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
result = df['bar']
result2 = df.ix[:, 'bar']
expected = df.reindex(columns=df.columns[3:5])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
def test_getitem_setitem_slice_integers(self):
index = MultiIndex(levels=[[0, 1, 2], [0, 2]],
labels=[[0, 0, 1, 1, 2, 2],
[0, 1, 0, 1, 0, 1]])
frame = DataFrame(np.random.randn(len(index), 4), index=index,
columns=['a', 'b', 'c', 'd'])
res = frame.ix[1:2]
exp = frame.reindex(frame.index[2:])
assert_frame_equal(res, exp)
frame.ix[1:2] = 7
self.assert_((frame.ix[1:2] == 7).values.all())
series = Series(np.random.randn(len(index)), index=index)
res = series.ix[1:2]
exp = series.reindex(series.index[2:])
assert_series_equal(res, exp)
series.ix[1:2] = 7
self.assert_((series.ix[1:2] == 7).values.all())
def test_getitem_int(self):
levels = [[0, 1], [0, 1, 2]]
labels = [[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]]
index = MultiIndex(levels=levels, labels=labels)
frame = DataFrame(np.random.randn(6, 2), index=index)
result = frame.ix[1]
expected = frame[-3:]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
# raises exception
self.assertRaises(KeyError, frame.ix.__getitem__, 3)
# however this will work
result = self.frame.ix[2]
expected = self.frame.xs(self.frame.index[2])
assert_series_equal(result, expected)
def test_getitem_partial(self):
ymd = self.ymd.T
result = ymd[2000, 2]
expected = ymd.reindex(columns=ymd.columns[ymd.columns.labels[1] == 1])
expected.columns = expected.columns.droplevel(0).droplevel(0)
assert_frame_equal(result, expected)
def test_getitem_slice_not_sorted(self):
df = self.frame.sortlevel(1).T
# buglet with int typechecking
result = df.ix[:, :np.int32(3)]
expected = df.reindex(columns=df.columns[:3])
assert_frame_equal(result, expected)
def test_setitem_change_dtype(self):
dft = self.frame.T
s = dft['foo', 'two']
dft['foo', 'two'] = s > s.median()
assert_series_equal(dft['foo', 'two'], s > s.median())
# tm.assert_isinstance(dft._data.blocks[1].items, MultiIndex)
reindexed = dft.reindex(columns=[('foo', 'two')])
assert_series_equal(reindexed['foo', 'two'], s > s.median())
def test_frame_setitem_ix(self):
self.frame.ix[('bar', 'two'), 'B'] = 5
self.assertEquals(self.frame.ix[('bar', 'two'), 'B'], 5)
# with integer labels
df = self.frame.copy()
df.columns = lrange(3)
df.ix[('bar', 'two'), 1] = 7
self.assertEquals(df.ix[('bar', 'two'), 1], 7)
def test_fancy_slice_partial(self):
result = self.frame.ix['bar':'baz']
expected = self.frame[3:7]
assert_frame_equal(result, expected)
result = self.ymd.ix[(2000, 2):(2000, 4)]
lev = self.ymd.index.labels[1]
expected = self.ymd[(lev >= 1) & (lev <= 3)]
assert_frame_equal(result, expected)
def test_getitem_partial_column_select(self):
idx = MultiIndex(labels=[[0, 0, 0], [0, 1, 1], [1, 0, 1]],
levels=[['a', 'b'], ['x', 'y'], ['p', 'q']])
df = DataFrame(np.random.rand(3, 2), index=idx)
result = df.ix[('a', 'y'), :]
expected = df.ix[('a', 'y')]
assert_frame_equal(result, expected)
result = df.ix[('a', 'y'), [1, 0]]
expected = df.ix[('a', 'y')][[1, 0]]
assert_frame_equal(result, expected)
self.assertRaises(KeyError, df.ix.__getitem__,
(('a', 'foo'), slice(None, None)))
def test_sortlevel(self):
df = self.frame.copy()
df.index = np.arange(len(df))
assertRaisesRegexp(TypeError, 'hierarchical index', df.sortlevel, 0)
# axis=1
# series
a_sorted = self.frame['A'].sortlevel(0)
with assertRaisesRegexp(TypeError, 'hierarchical index'):
self.frame.reset_index()['A'].sortlevel()
# preserve names
self.assertEquals(a_sorted.index.names, self.frame.index.names)
# inplace
rs = self.frame.copy()
rs.sortlevel(0, inplace=True)
assert_frame_equal(rs, self.frame.sortlevel(0))
def test_sortlevel_large_cardinality(self):
# #2684 (int64)
index = MultiIndex.from_arrays([np.arange(4000)]*3)
df = DataFrame(np.random.randn(4000), index=index, dtype = np.int64)
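        # 3 levels of 4000 distinct labels give a 4000**3 ~ 6.4e10 key space,
        # past the int32 range, so lexsorting has to fall back to 64-bit keys.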
# it works!
result = df.sortlevel(0)
self.assertTrue(result.index.lexsort_depth == 3)
# #2684 (int32)
index = MultiIndex.from_arrays([np.arange(4000)]*3)
df = DataFrame(np.random.randn(4000), index=index, dtype = np.int32)
# it works!
result = df.sortlevel(0)
self.assert_((result.dtypes.values == df.dtypes.values).all() == True)
self.assertTrue(result.index.lexsort_depth == 3)
def test_delevel_infer_dtype(self):
tuples = [tuple for tuple in cart_product(['foo', 'bar'],
[10, 20], [1.0, 1.1])]
index = MultiIndex.from_tuples(tuples,
names=['prm0', 'prm1', 'prm2'])
df = DataFrame(np.random.randn(8, 3), columns=['A', 'B', 'C'],
index=index)
deleveled = df.reset_index()
self.assert_(com.is_integer_dtype(deleveled['prm1']))
self.assert_(com.is_float_dtype(deleveled['prm2']))
def test_reset_index_with_drop(self):
deleveled = self.ymd.reset_index(drop=True)
self.assertEquals(len(deleveled.columns), len(self.ymd.columns))
deleveled = self.series.reset_index()
tm.assert_isinstance(deleveled, DataFrame)
self.assertEqual(len(deleveled.columns),
len(self.series.index.levels) + 1)
deleveled = self.series.reset_index(drop=True)
tm.assert_isinstance(deleveled, Series)
def test_sortlevel_by_name(self):
self.frame.index.names = ['first', 'second']
result = self.frame.sortlevel(level='second')
expected = self.frame.sortlevel(level=1)
assert_frame_equal(result, expected)
def test_sortlevel_mixed(self):
sorted_before = self.frame.sortlevel(1)
df = self.frame.copy()
df['foo'] = 'bar'
sorted_after = df.sortlevel(1)
assert_frame_equal(sorted_before, sorted_after.drop(['foo'], axis=1))
dft = self.frame.T
sorted_before = dft.sortlevel(1, axis=1)
dft['foo', 'three'] = 'bar'
sorted_after = dft.sortlevel(1, axis=1)
assert_frame_equal(sorted_before.drop([('foo', 'three')], axis=1),
sorted_after.drop([('foo', 'three')], axis=1))
def test_count_level(self):
def _check_counts(frame, axis=0):
index = frame._get_axis(axis)
for i in range(index.nlevels):
result = frame.count(axis=axis, level=i)
expected = frame.groupby(axis=axis, level=i).count(axis=axis)
expected = expected.reindex_like(result).astype('i8')
assert_frame_equal(result, expected)
self.frame.ix[1, [1, 2]] = np.nan
self.frame.ix[7, [0, 1]] = np.nan
self.ymd.ix[1, [1, 2]] = np.nan
self.ymd.ix[7, [0, 1]] = np.nan
_check_counts(self.frame)
_check_counts(self.ymd)
_check_counts(self.frame.T, axis=1)
_check_counts(self.ymd.T, axis=1)
# can't call with level on regular DataFrame
df = tm.makeTimeDataFrame()
assertRaisesRegexp(TypeError, 'hierarchical', df.count, level=0)
self.frame['D'] = 'foo'
result = self.frame.count(level=0, numeric_only=True)
assert_almost_equal(result.columns, ['A', 'B', 'C'])
def test_count_level_series(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz'],
['one', 'two', 'three', 'four']],
labels=[[0, 0, 0, 2, 2],
[2, 0, 1, 1, 2]])
s = Series(np.random.randn(len(index)), index=index)
result = s.count(level=0)
expected = s.groupby(level=0).count()
assert_series_equal(result.astype('f8'),
expected.reindex(result.index).fillna(0))
result = s.count(level=1)
expected = s.groupby(level=1).count()
assert_series_equal(result.astype('f8'),
expected.reindex(result.index).fillna(0))
def test_count_level_corner(self):
s = self.frame['A'][:0]
result = s.count(level=0)
expected = Series(0, index=s.index.levels[0])
assert_series_equal(result, expected)
df = self.frame[:0]
result = df.count(level=0)
expected = DataFrame({}, index=s.index.levels[0],
columns=df.columns).fillna(0).astype(np.int64)
assert_frame_equal(result, expected)
def test_unstack(self):
# just check that it works for now
unstacked = self.ymd.unstack()
unstacked2 = unstacked.unstack()
# test that ints work
unstacked = self.ymd.astype(int).unstack()
# test that int32 work
unstacked = self.ymd.astype(np.int32).unstack()
def test_unstack_multiple_no_empty_columns(self):
index = MultiIndex.from_tuples([(0, 'foo', 0), (0, 'bar', 0),
(1, 'baz', 1), (1, 'qux', 1)])
s = Series(np.random.randn(4), index=index)
unstacked = s.unstack([1, 2])
expected = unstacked.dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected)
def test_stack(self):
# regular roundtrip
unstacked = self.ymd.unstack()
restacked = unstacked.stack()
assert_frame_equal(restacked, self.ymd)
unlexsorted = self.ymd.sortlevel(2)
unstacked = unlexsorted.unstack(2)
restacked = unstacked.stack()
assert_frame_equal(restacked.sortlevel(0), self.ymd)
unlexsorted = unlexsorted[::-1]
unstacked = unlexsorted.unstack(1)
restacked = unstacked.stack().swaplevel(1, 2)
assert_frame_equal(restacked.sortlevel(0), self.ymd)
unlexsorted = unlexsorted.swaplevel(0, 1)
unstacked = unlexsorted.unstack(0).swaplevel(0, 1, axis=1)
restacked = unstacked.stack(0).swaplevel(1, 2)
assert_frame_equal(restacked.sortlevel(0), self.ymd)
# columns unsorted
unstacked = self.ymd.unstack()
unstacked = unstacked.sort(axis=1, ascending=False)
restacked = unstacked.stack()
assert_frame_equal(restacked, self.ymd)
# more than 2 levels in the columns
unstacked = self.ymd.unstack(1).unstack(1)
result = unstacked.stack(1)
expected = self.ymd.unstack()
assert_frame_equal(result, expected)
result = unstacked.stack(2)
expected = self.ymd.unstack(1)
assert_frame_equal(result, expected)
result = unstacked.stack(0)
expected = self.ymd.stack().unstack(1).unstack(1)
assert_frame_equal(result, expected)
# not all levels present in each echelon
unstacked = self.ymd.unstack(2).ix[:, ::3]
stacked = unstacked.stack().stack()
ymd_stacked = self.ymd.stack()
assert_series_equal(stacked, ymd_stacked.reindex(stacked.index))
# stack with negative number
result = self.ymd.unstack(0).stack(-2)
        expected = self.ymd.unstack(0).stack(0)
        assert_frame_equal(result, expected)
def test_unstack_odd_failure(self):
data = """day,time,smoker,sum,len
Fri,Dinner,No,8.25,3.
Fri,Dinner,Yes,27.03,9
Fri,Lunch,No,3.0,1
Fri,Lunch,Yes,13.68,6
Sat,Dinner,No,139.63,45
Sat,Dinner,Yes,120.77,42
Sun,Dinner,No,180.57,57
Sun,Dinner,Yes,66.82,19
Thur,Dinner,No,3.0,1
Thur,Lunch,No,117.32,44
Thur,Lunch,Yes,51.51,17"""
df = pd.read_csv(StringIO(data)).set_index(['day', 'time', 'smoker'])
# it works, #2100
result = df.unstack(2)
recons = result.stack()
assert_frame_equal(recons, df)
def test_stack_mixed_dtype(self):
df = self.frame.T
df['foo', 'four'] = 'foo'
df = df.sortlevel(1, axis=1)
stacked = df.stack()
assert_series_equal(stacked['foo'], df['foo'].stack())
self.assertEqual(stacked['bar'].dtype, np.float_)
def test_unstack_bug(self):
df = DataFrame({'state': ['naive', 'naive', 'naive',
'activ', 'activ', 'activ'],
'exp': ['a', 'b', 'b', 'b', 'a', 'a'],
'barcode': [1, 2, 3, 4, 1, 3],
'v': ['hi', 'hi', 'bye', 'bye', 'bye', 'peace'],
'extra': np.arange(6.)})
result = df.groupby(['state', 'exp', 'barcode', 'v']).apply(len)
unstacked = result.unstack()
restacked = unstacked.stack()
assert_series_equal(restacked,
result.reindex(restacked.index).astype(float))
def test_stack_unstack_preserve_names(self):
unstacked = self.frame.unstack()
self.assertEquals(unstacked.index.name, 'first')
self.assertEquals(unstacked.columns.names, ['exp', 'second'])
restacked = unstacked.stack()
self.assertEquals(restacked.index.names, self.frame.index.names)
def test_unstack_level_name(self):
result = self.frame.unstack('second')
expected = self.frame.unstack(level=1)
assert_frame_equal(result, expected)
def test_stack_level_name(self):
unstacked = self.frame.unstack('second')
result = unstacked.stack('exp')
expected = self.frame.unstack().stack(0)
assert_frame_equal(result, expected)
result = self.frame.stack('exp')
expected = self.frame.stack()
assert_series_equal(result, expected)
def test_stack_unstack_multiple(self):
unstacked = self.ymd.unstack(['year', 'month'])
expected = self.ymd.unstack('year').unstack('month')
assert_frame_equal(unstacked, expected)
self.assertEquals(unstacked.columns.names,
expected.columns.names)
# series
s = self.ymd['A']
s_unstacked = s.unstack(['year', 'month'])
assert_frame_equal(s_unstacked, expected['A'])
restacked = unstacked.stack(['year', 'month'])
restacked = restacked.swaplevel(0, 1).swaplevel(1, 2)
restacked = restacked.sortlevel(0)
assert_frame_equal(restacked, self.ymd)
self.assertEquals(restacked.index.names, self.ymd.index.names)
# GH #451
unstacked = self.ymd.unstack([1, 2])
expected = self.ymd.unstack(1).unstack(1).dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected)
unstacked = self.ymd.unstack([2, 1])
expected = self.ymd.unstack(2).unstack(1).dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected.ix[:, unstacked.columns])
def test_unstack_period_series(self):
# GH 4342
idx1 = pd.PeriodIndex(['2013-01', '2013-01', '2013-02', '2013-02',
'2013-03', '2013-03'], freq='M', name='period')
idx2 = Index(['A', 'B'] * 3, name='str')
value = [1, 2, 3, 4, 5, 6]
idx = MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(['2013-01', '2013-02', '2013-03'], freq='M', name='period')
expected = DataFrame({'A': [1, 3, 5], 'B': [2, 4, 6]}, index=e_idx,
columns=['A', 'B'])
expected.columns.name = 'str'
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(result3, expected.T)
idx1 = pd.PeriodIndex(['2013-01', '2013-01', '2013-02', '2013-02',
'2013-03', '2013-03'], freq='M', name='period1')
idx2 = pd.PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07'], freq='M', name='period2')
idx = pd.MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(['2013-01', '2013-02', '2013-03'], freq='M', name='period1')
e_cols = pd.PeriodIndex(['2013-07', '2013-08', '2013-09', '2013-10',
'2013-11', '2013-12'], freq='M', name='period2')
expected = DataFrame([[np.nan, np.nan, np.nan, np.nan, 2, 1],
[np.nan, np.nan, 4, 3, np.nan, np.nan],
[6, 5, np.nan, np.nan, np.nan, np.nan]],
index=e_idx, columns=e_cols)
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(result3, expected.T)
def test_unstack_period_frame(self):
# GH 4342
idx1 = pd.PeriodIndex(['2014-01', '2014-02', '2014-02', '2014-02', '2014-01', '2014-01'],
freq='M', name='period1')
idx2 = pd.PeriodIndex(['2013-12', '2013-12', '2014-02', '2013-10', '2013-10', '2014-02'],
freq='M', name='period2')
value = {'A': [1, 2, 3, 4, 5, 6], 'B': [6, 5, 4, 3, 2, 1]}
idx = pd.MultiIndex.from_arrays([idx1, idx2])
df = pd.DataFrame(value, index=idx)
result1 = df.unstack()
result2 = df.unstack(level=1)
result3 = df.unstack(level=0)
e_1 = pd.PeriodIndex(['2014-01', '2014-02'], freq='M', name='period1')
e_2 = pd.PeriodIndex(['2013-10', '2013-12', '2014-02', '2013-10',
'2013-12', '2014-02'], freq='M', name='period2')
e_cols = pd.MultiIndex.from_arrays(['A A A B B B'.split(), e_2])
expected = DataFrame([[5, 1, 6, 2, 6, 1], [4, 2, 3, 3, 5, 4]],
index=e_1, columns=e_cols)
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
e_1 = pd.PeriodIndex(['2014-01', '2014-02', '2014-01',
'2014-02'], freq='M', name='period1')
e_2 = pd.PeriodIndex(['2013-10', '2013-12', '2014-02'], freq='M', name='period2')
e_cols = pd.MultiIndex.from_arrays(['A A B B'.split(), e_1])
expected = DataFrame([[5, 4, 2, 3], [1, 2, 6, 5], [6, 3, 1, 4]],
index=e_2, columns=e_cols)
assert_frame_equal(result3, expected)
def test_stack_multiple_bug(self):
""" bug when some uniques are not present in the data #3170"""
id_col = ([1] * 3) + ([2] * 3)
name = (['a'] * 3) + (['b'] * 3)
date = pd.to_datetime(['2013-01-03', '2013-01-04', '2013-01-05'] * 2)
var1 = np.random.randint(0, 100, 6)
df = DataFrame(dict(ID=id_col, NAME=name, DATE=date, VAR1=var1))
multi = df.set_index(['DATE', 'ID'])
multi.columns.name = 'Params'
unst = multi.unstack('ID')
down = unst.resample('W-THU')
rs = down.stack('ID')
xp = unst.ix[:, ['VAR1']].resample('W-THU').stack('ID')
xp.columns.name = 'Params'
assert_frame_equal(rs, xp)
def test_stack_dropna(self):
# GH #3997
df = pd.DataFrame({'A': ['a1', 'a2'],
'B': ['b1', 'b2'],
'C': [1, 1]})
df = df.set_index(['A', 'B'])
stacked = df.unstack().stack(dropna=False)
self.assertTrue(len(stacked) > len(stacked.dropna()))
stacked = df.unstack().stack(dropna=True)
assert_frame_equal(stacked, stacked.dropna())
def test_unstack_multiple_hierarchical(self):
df = DataFrame(index=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1]],
columns=[[0, 0, 1, 1], [0, 1, 0, 1]])
df.index.names = ['a', 'b', 'c']
df.columns.names = ['d', 'e']
# it works!
df.unstack(['b', 'c'])
def test_groupby_transform(self):
s = self.frame['A']
grouper = s.index.get_level_values(0)
grouped = s.groupby(grouper)
applied = grouped.apply(lambda x: x * 2)
expected = grouped.transform(lambda x: x * 2)
assert_series_equal(applied.reindex(expected.index), expected)
def test_unstack_sparse_keyspace(self):
# memory problems with naive impl #2278
# Generate Long File & Test Pivot
NUM_ROWS = 1000
df = DataFrame({'A': np.random.randint(100, size=NUM_ROWS),
'B': np.random.randint(300, size=NUM_ROWS),
'C': np.random.randint(-7, 7, size=NUM_ROWS),
'D': np.random.randint(-19, 19, size=NUM_ROWS),
'E': np.random.randint(3000, size=NUM_ROWS),
'F': np.random.randn(NUM_ROWS)})
idf = df.set_index(['A', 'B', 'C', 'D', 'E'])
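        # The full cross-product of the five index levels is roughly
        # 100*300*14*38*3000 ~ 5e10 combinations; unstacking only 'E' must avoid
        # materialising that naive key space (hence "sparse keyspace").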
# it works! is sufficient
idf.unstack('E')
def test_unstack_unobserved_keys(self):
# related to #2278 refactoring
levels = [[0, 1], [0, 1, 2, 3]]
labels = [[0, 0, 1, 1], [0, 2, 0, 2]]
index = MultiIndex(levels, labels)
df = DataFrame(np.random.randn(4, 2), index=index)
result = df.unstack()
self.assertEquals(len(result.columns), 4)
recons = result.stack()
assert_frame_equal(recons, df)
def test_groupby_corner(self):
midx = MultiIndex(levels=[['foo'], ['bar'], ['baz']],
labels=[[0], [0], [0]], names=['one', 'two', 'three'])
df = DataFrame([np.random.rand(4)], columns=['a', 'b', 'c', 'd'],
index=midx)
# should work
df.groupby(level='three')
def test_groupby_level_no_obs(self):
# #1697
midx = MultiIndex.from_tuples([('f1', 's1'), ('f1', 's2'),
('f2', 's1'), ('f2', 's2'),
('f3', 's1'), ('f3', 's2')])
df = DataFrame(
[[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]], columns=midx)
df1 = df.select(lambda u: u[0] in ['f2', 'f3'], axis=1)
grouped = df1.groupby(axis=1, level=0)
result = grouped.sum()
self.assert_((result.columns == ['f2', 'f3']).all())
def test_join(self):
a = self.frame.ix[:5, ['A']]
b = self.frame.ix[2:, ['B', 'C']]
joined = a.join(b, how='outer').reindex(self.frame.index)
expected = self.frame.copy()
expected.values[np.isnan(joined.values)] = np.nan
self.assert_(not np.isnan(joined.values).all())
assert_frame_equal(joined, expected, check_names=False) # TODO what should join do with names ?
def test_swaplevel(self):
swapped = self.frame['A'].swaplevel(0, 1)
swapped2 = self.frame['A'].swaplevel('first', 'second')
self.assert_(not swapped.index.equals(self.frame.index))
assert_series_equal(swapped, swapped2)
back = swapped.swaplevel(0, 1)
back2 = swapped.swaplevel('second', 'first')
self.assert_(back.index.equals(self.frame.index))
assert_series_equal(back, back2)
ft = self.frame.T
swapped = ft.swaplevel('first', 'second', axis=1)
exp = self.frame.swaplevel('first', 'second').T
assert_frame_equal(swapped, exp)
def test_swaplevel_panel(self):
panel = Panel({'ItemA': self.frame,
'ItemB': self.frame * 2})
result = panel.swaplevel(0, 1, axis='major')
expected = panel.copy()
expected.major_axis = expected.major_axis.swaplevel(0, 1)
tm.assert_panel_equal(result, expected)
def test_reorder_levels(self):
result = self.ymd.reorder_levels(['month', 'day', 'year'])
expected = self.ymd.swaplevel(0, 1).swaplevel(1, 2)
assert_frame_equal(result, expected)
result = self.ymd['A'].reorder_levels(['month', 'day', 'year'])
expected = self.ymd['A'].swaplevel(0, 1).swaplevel(1, 2)
assert_series_equal(result, expected)
result = self.ymd.T.reorder_levels(['month', 'day', 'year'], axis=1)
expected = self.ymd.T.swaplevel(0, 1, axis=1).swaplevel(1, 2, axis=1)
assert_frame_equal(result, expected)
with assertRaisesRegexp(TypeError, 'hierarchical axis'):
self.ymd.reorder_levels([1, 2], axis=1)
with assertRaisesRegexp(IndexError, 'Too many levels'):
self.ymd.index.reorder_levels([1, 2, 3])
def test_insert_index(self):
df = self.ymd[:5].T
df[2000, 1, 10] = df[2000, 1, 7]
tm.assert_isinstance(df.columns, MultiIndex)
self.assert_((df[2000, 1, 10] == df[2000, 1, 7]).all())
def test_alignment(self):
x = Series(data=[1, 2, 3],
index=MultiIndex.from_tuples([("A", 1), ("A", 2), ("B", 3)]))
y = Series(data=[4, 5, 6],
index=MultiIndex.from_tuples([("Z", 1), ("Z", 2), ("B", 3)]))
res = x - y
exp_index = x.index.union(y.index)
exp = x.reindex(exp_index) - y.reindex(exp_index)
assert_series_equal(res, exp)
# hit non-monotonic code path
res = x[::-1] - y[::-1]
exp_index = x.index.union(y.index)
exp = x.reindex(exp_index) - y.reindex(exp_index)
assert_series_equal(res, exp)
def test_is_lexsorted(self):
levels = [[0, 1], [0, 1, 2]]
index = MultiIndex(levels=levels,
labels=[[0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 1, 2]])
self.assert_(index.is_lexsorted())
index = MultiIndex(levels=levels,
labels=[[0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 2, 1]])
self.assert_(not index.is_lexsorted())
index = MultiIndex(levels=levels,
labels=[[0, 0, 1, 0, 1, 1],
[0, 1, 0, 2, 2, 1]])
self.assert_(not index.is_lexsorted())
self.assertEqual(index.lexsort_depth, 0)
def test_frame_getitem_view(self):
df = self.frame.T.copy()
# this works because we are modifying the underlying array
# really a no-no
df['foo'].values[:] = 0
self.assert_((df['foo'].values == 0).all())
# but not if it's mixed-type
df['foo', 'four'] = 'foo'
df = df.sortlevel(0, axis=1)
# this will work, but will raise/warn as its chained assignment
def f():
df['foo']['one'] = 2
return df
self.assertRaises(com.SettingWithCopyError, f)
try:
df = f()
except:
pass
self.assert_((df['foo', 'one'] == 0).all())
def test_frame_getitem_not_sorted(self):
df = self.frame.T
df['foo', 'four'] = 'foo'
arrays = [np.array(x) for x in zip(*df.columns._tuple_index)]
result = df['foo']
result2 = df.ix[:, 'foo']
expected = df.reindex(columns=df.columns[arrays[0] == 'foo'])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
df = df.T
result = df.xs('foo')
result2 = df.ix['foo']
expected = df.reindex(df.index[arrays[0] == 'foo'])
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
def test_series_getitem_not_sorted(self):
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
arrays = [np.array(x) for x in zip(*index._tuple_index)]
result = s['qux']
result2 = s.ix['qux']
expected = s[arrays[0] == 'qux']
expected.index = expected.index.droplevel(0)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_count(self):
frame = self.frame.copy()
frame.index.names = ['a', 'b']
result = frame.count(level='b')
expect = self.frame.count(level=1)
assert_frame_equal(result, expect, check_names=False)
result = frame.count(level='a')
expect = self.frame.count(level=0)
assert_frame_equal(result, expect, check_names=False)
series = self.series.copy()
series.index.names = ['a', 'b']
result = series.count(level='b')
expect = self.series.count(level=1)
assert_series_equal(result, expect)
result = series.count(level='a')
expect = self.series.count(level=0)
assert_series_equal(result, expect)
self.assertRaises(KeyError, series.count, 'x')
self.assertRaises(KeyError, frame.count, level='x')
AGG_FUNCTIONS = ['sum', 'prod', 'min', 'max', 'median', 'mean', 'skew',
'mad', 'std', 'var']
def test_series_group_min_max(self):
for op, level, skipna in cart_product(self.AGG_FUNCTIONS,
lrange(2),
[False, True]):
grouped = self.series.groupby(level=level)
aggf = lambda x: getattr(x, op)(skipna=skipna)
# skipna=True
leftside = grouped.agg(aggf)
rightside = getattr(self.series, op)(level=level, skipna=skipna)
assert_series_equal(leftside, rightside)
def test_frame_group_ops(self):
self.frame.ix[1, [1, 2]] = np.nan
self.frame.ix[7, [0, 1]] = np.nan
for op, level, axis, skipna in cart_product(self.AGG_FUNCTIONS,
lrange(2), lrange(2),
[False, True]):
if axis == 0:
frame = self.frame
else:
frame = self.frame.T
grouped = frame.groupby(level=level, axis=axis)
pieces = []
def aggf(x):
pieces.append(x)
return getattr(x, op)(skipna=skipna, axis=axis)
leftside = grouped.agg(aggf)
rightside = getattr(frame, op)(level=level, axis=axis,
skipna=skipna)
# for good measure, groupby detail
level_index = frame._get_axis(axis).levels[level]
self.assert_(leftside._get_axis(axis).equals(level_index))
self.assert_(rightside._get_axis(axis).equals(level_index))
assert_frame_equal(leftside, rightside)
def test_stat_op_corner(self):
obj = Series([10.0], index=MultiIndex.from_tuples([(2, 3)]))
result = obj.sum(level=0)
expected = Series([10.0], index=[2])
assert_series_equal(result, expected)
def test_frame_any_all_group(self):
df = DataFrame(
{'data': [False, False, True, False, True, False, True]},
index=[
['one', 'one', 'two', 'one', 'two', 'two', 'two'],
[0, 1, 0, 2, 1, 2, 3]])
result = df.any(level=0)
ex = DataFrame({'data': [False, True]}, index=['one', 'two'])
assert_frame_equal(result, ex)
result = df.all(level=0)
ex = DataFrame({'data': [False, False]}, index=['one', 'two'])
assert_frame_equal(result, ex)
def test_std_var_pass_ddof(self):
index = MultiIndex.from_arrays([np.arange(5).repeat(10),
np.tile(np.arange(10), 5)])
df = DataFrame(np.random.randn(len(index), 5), index=index)
for meth in ['var', 'std']:
ddof = 4
alt = lambda x: getattr(x, meth)(ddof=ddof)
result = getattr(df[0], meth)(level=0, ddof=ddof)
expected = df[0].groupby(level=0).agg(alt)
assert_series_equal(result, expected)
result = getattr(df, meth)(level=0, ddof=ddof)
expected = df.groupby(level=0).agg(alt)
assert_frame_equal(result, expected)
def test_frame_series_agg_multiple_levels(self):
result = self.ymd.sum(level=['year', 'month'])
expected = self.ymd.groupby(level=['year', 'month']).sum()
assert_frame_equal(result, expected)
result = self.ymd['A'].sum(level=['year', 'month'])
expected = self.ymd['A'].groupby(level=['year', 'month']).sum()
assert_series_equal(result, expected)
def test_groupby_multilevel(self):
result = self.ymd.groupby(level=[0, 1]).mean()
k1 = self.ymd.index.get_level_values(0)
k2 = self.ymd.index.get_level_values(1)
expected = self.ymd.groupby([k1, k2]).mean()
assert_frame_equal(result, expected, check_names=False) # TODO groupby with level_values drops names
self.assertEquals(result.index.names, self.ymd.index.names[:2])
result2 = self.ymd.groupby(level=self.ymd.index.names[:2]).mean()
assert_frame_equal(result, result2)
def test_groupby_multilevel_with_transform(self):
pass
def test_multilevel_consolidate(self):
index = MultiIndex.from_tuples([('foo', 'one'), ('foo', 'two'),
('bar', 'one'), ('bar', 'two')])
df = DataFrame(np.random.randn(4, 4), index=index, columns=index)
df['Totals', ''] = df.sum(1)
df = df.consolidate()
def test_ix_preserve_names(self):
result = self.ymd.ix[2000]
result2 = self.ymd['A'].ix[2000]
self.assertEquals(result.index.names, self.ymd.index.names[1:])
self.assertEquals(result2.index.names, self.ymd.index.names[1:])
result = self.ymd.ix[2000, 2]
result2 = self.ymd['A'].ix[2000, 2]
self.assertEquals(result.index.name, self.ymd.index.names[2])
self.assertEquals(result2.index.name, self.ymd.index.names[2])
def test_partial_set(self):
# GH #397
df = self.ymd.copy()
exp = self.ymd.copy()
df.ix[2000, 4] = 0
exp.ix[2000, 4].values[:] = 0
assert_frame_equal(df, exp)
df['A'].ix[2000, 4] = 1
exp['A'].ix[2000, 4].values[:] = 1
assert_frame_equal(df, exp)
df.ix[2000] = 5
exp.ix[2000].values[:] = 5
assert_frame_equal(df, exp)
# this works...for now
df['A'].ix[14] = 5
self.assertEquals(df['A'][14], 5)
def test_unstack_preserve_types(self):
# GH #403
self.ymd['E'] = 'foo'
self.ymd['F'] = 2
unstacked = self.ymd.unstack('month')
self.assertEqual(unstacked['A', 1].dtype, np.float64)
self.assertEqual(unstacked['E', 1].dtype, np.object_)
self.assertEqual(unstacked['F', 1].dtype, np.float64)
def test_unstack_group_index_overflow(self):
labels = np.tile(np.arange(500), 2)
level = np.arange(500)
index = MultiIndex(levels=[level] * 8 + [[0, 1]],
labels=[labels] * 8 + [np.arange(2).repeat(500)])
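        # 8 levels of cardinality 500 plus the binary level give 2*500**8 ~ 7.8e21
        # possible keys, far beyond int64, so a single flat group index would overflow.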
s = Series(np.arange(1000), index=index)
result = s.unstack()
self.assertEqual(result.shape, (500, 2))
# test roundtrip
stacked = result.stack()
assert_series_equal(s,
stacked.reindex(s.index))
# put it at beginning
index = MultiIndex(levels=[[0, 1]] + [level] * 8,
labels=[np.arange(2).repeat(500)] + [labels] * 8)
s = Series(np.arange(1000), index=index)
result = s.unstack(0)
self.assertEqual(result.shape, (500, 2))
# put it in middle
index = MultiIndex(levels=[level] * 4 + [[0, 1]] + [level] * 4,
labels=([labels] * 4 + [np.arange(2).repeat(500)]
+ [labels] * 4))
s = Series(np.arange(1000), index=index)
result = s.unstack(4)
self.assertEqual(result.shape, (500, 2))
def test_getitem_lowerdim_corner(self):
self.assertRaises(KeyError, self.frame.ix.__getitem__,
(('bar', 'three'), 'B'))
# in theory should be inserting in a sorted space????
self.frame.ix[('bar','three'),'B'] = 0
self.assertEqual(self.frame.sortlevel().ix[('bar','three'),'B'], 0)
#----------------------------------------------------------------------
# AMBIGUOUS CASES!
def test_partial_ix_missing(self):
raise nose.SkipTest("skipping for now")
result = self.ymd.ix[2000, 0]
expected = self.ymd.ix[2000]['A']
assert_series_equal(result, expected)
# need to put in some work here
# self.ymd.ix[2000, 0] = 0
# self.assert_((self.ymd.ix[2000]['A'] == 0).all())
# Pretty sure the second (and maybe even the first) is already wrong.
self.assertRaises(Exception, self.ymd.ix.__getitem__, (2000, 6))
self.assertRaises(Exception, self.ymd.ix.__getitem__, (2000, 6), 0)
#----------------------------------------------------------------------
def test_to_html(self):
self.ymd.columns.name = 'foo'
self.ymd.to_html()
self.ymd.T.to_html()
def test_level_with_tuples(self):
index = MultiIndex(levels=[[('foo', 'bar', 0), ('foo', 'baz', 0),
('foo', 'qux', 0)],
[0, 1]],
labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]])
series = Series(np.random.randn(6), index=index)
frame = DataFrame(np.random.randn(6, 4), index=index)
result = series[('foo', 'bar', 0)]
result2 = series.ix[('foo', 'bar', 0)]
expected = series[:2]
expected.index = expected.index.droplevel(0)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertRaises(KeyError, series.__getitem__, (('foo', 'bar', 0), 2))
result = frame.ix[('foo', 'bar', 0)]
result2 = frame.xs(('foo', 'bar', 0))
expected = frame[:2]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
index = MultiIndex(levels=[[('foo', 'bar'), ('foo', 'baz'),
('foo', 'qux')],
[0, 1]],
labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]])
series = Series(np.random.randn(6), index=index)
frame = DataFrame(np.random.randn(6, 4), index=index)
result = series[('foo', 'bar')]
result2 = series.ix[('foo', 'bar')]
expected = series[:2]
expected.index = expected.index.droplevel(0)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = frame.ix[('foo', 'bar')]
result2 = frame.xs(('foo', 'bar'))
expected = frame[:2]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
def test_int_series_slicing(self):
s = self.ymd['A']
result = s[5:]
expected = s.reindex(s.index[5:])
assert_series_equal(result, expected)
exp = self.ymd['A'].copy()
s[5:] = 0
exp.values[5:] = 0
self.assert_numpy_array_equal(s.values, exp.values)
result = self.ymd[5:]
expected = self.ymd.reindex(s.index[5:])
assert_frame_equal(result, expected)
def test_mixed_depth_get(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
result = df['a']
expected = df['a', '', '']
assert_series_equal(result, expected)
self.assertEquals(result.name, 'a')
result = df['routine1', 'result1']
expected = df['routine1', 'result1', '']
assert_series_equal(result, expected)
self.assertEquals(result.name, ('routine1', 'result1'))
def test_mixed_depth_insert(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
result = df.copy()
expected = df.copy()
result['b'] = [1, 2, 3, 4]
expected['b', '', ''] = [1, 2, 3, 4]
| assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
import etherscan as es
from pycoingecko import CoinGeckoAPI
import pandas as pd
import numpy as np
import datetime as dt
import time
from functools import reduce
import matplotlib.pyplot as plt
# Global variables
cg_api = CoinGeckoAPI()
coin_dict = pd.DataFrame(cg_api.get_coins_list())
class MarketData:
def __init__( self, token, currency ):
self.token_list = token
self.currency = currency
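    # Example (hedged): MarketData(['bitcoin', 'ethereum'], 'usd').market_data()
    # returns a DataFrame indexed by symbol with px_last/px_low/px_high/chg_24h/volume
    # columns, while MarketData('bitcoin', 'usd').px_last() returns a single float.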
def last_price( self ):
if type(self.token_list) == list:
px = {}
for tkn in self.token_list:
px[tkn] = cg_api.get_coin_by_id(tkn)['market_data']['current_price'][self.currency]
return px
elif type(self.token_list) == str:
px = cg_api.get_coin_by_id(self.token_list)['market_data']['current_price'][self.currency]
return px
def px_last( self ):
return self.last_price()
def high_24h( self ):
if type(self.token_list) == list:
px = {}
for tkn in self.token_list:
px[tkn] = cg_api.get_coin_by_id(tkn)['market_data']['high_24h'][self.currency]
return px
elif type(self.token_list) == str:
px = cg_api.get_coin_by_id(self.token_list)['market_data']['high_24h'][self.currency]
return px
def px_high( self ):
return self.high_24h()
def low_24h( self ):
if type(self.token_list) == list:
px = {}
for tkn in self.token_list:
px[tkn] = cg_api.get_coin_by_id(tkn)['market_data']['low_24h'][self.currency]
return px
elif type(self.token_list) == str:
px = cg_api.get_coin_by_id(self.token_list)['market_data']['low_24h'][self.currency]
return px
def px_low( self ):
return self.low_24h()
def price_change_24h_in_currency( self ):
if type(self.token_list) == list:
px = {}
for tkn in self.token_list:
px_change_24h = cg_api.get_coin_by_id(tkn)['market_data']['price_change_24h_in_currency'][self.currency]
px[tkn] = px_change_24h
return px
elif type(self.token_list) == str:
px_change_24h = cg_api.get_coin_by_id(self.token_list)['market_data']['price_change_24h_in_currency'][self.currency]
return px_change_24h
def px_change_24h( self ):
return self.price_change_24h_in_currency()
def total_volume( self ):
if type(self.token_list) == list:
px = {}
for tkn in self.token_list:
px[tkn] = cg_api.get_coin_by_id(tkn)['market_data']['total_volume'][self.currency]
return px
elif type(self.token_list) == str:
vol = cg_api.get_coin_by_id(self.token_list)['market_data']['total_volume'][self.currency]
return vol
def volume( self ):
return self.total_volume()
def market_data( self ):
if type(self.token_list) == list:
df = pd.DataFrame( index = self.token_list, columns = ['px_last','px_low','px_high','chg_24h','volume'])
df['px_low'] = pd.Series(self.px_low())
df['px_high'] = pd.Series(self.px_high())
df['px_last'] = pd.Series(self.px_last())
df['chg_24h'] = pd.Series(self.px_change_24h())
df['volume'] = pd.Series(self.volume())
final = coin_dict.loc[coin_dict.id.isin(self.token_list)]
elif type(self.token_list) == str:
df = pd.DataFrame(index = [self.token_list])
df['px_last'] = self.px_last()
df['px_low'] = self.px_low()
df['px_high'] = self.px_high()
df['chg_24h'] = self.px_change_24h()
df['volume'] = self.volume()
final = coin_dict.loc[coin_dict.id.isin([self.token_list])]
# Give Name and Symbol
final = final.set_index("id")
final = final.join(df,on="id")
final = final.set_index("symbol")
del final.index.name
return final
class HistoricalMarketData:
def __init__( self, token, currency, start_date, end_date ):
self.token_list = token
self.currency = currency
start_date = int(time.mktime(dt.datetime.strptime(start_date, "%Y-%m-%d").timetuple()))
self.start_date = start_date
end_date = int(time.mktime(dt.datetime.strptime(end_date, "%Y-%m-%d").timetuple()))
self.end_date = end_date
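        # Note: time.mktime interprets the parsed dates in the machine's local
        # timezone; the resulting unix-second bounds are what the CoinGecko
        # market_chart/range endpoint expects.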
def hist_mkt_data( self, feature ):
if type(self.token_list) == list:
hist_data = []
for tkn in self.token_list:
hist_tkn = cg_api.get_coin_market_chart_range_by_id(tkn,self.currency,self.start_date,self.end_date)[feature]
hist_tkn = pd.DataFrame(hist_tkn,columns=["date",tkn])
hist_tkn.date = pd.to_datetime(hist_tkn.date,unit='ms')
hist_tkn = hist_tkn.set_index("date",True)
hist_data.append(hist_tkn)
df_data = pd.concat(hist_data,axis=1)
del df_data.index.name
return df_data
elif type(self.token_list) == str:
hist_tkn = cg_api.get_coin_market_chart_range_by_id(self.token_list,self.currency,self.start_date,self.end_date)[feature]
hist_tkn = pd.DataFrame(hist_tkn,columns=["date",self.token_list])
hist_tkn.date = pd.to_datetime(hist_tkn.date,unit='ms')
hist_tkn = hist_tkn.set_index("date",True)
del hist_tkn.index.name
return hist_tkn
def price( self ):
return self.hist_mkt_data( "prices" )
def volume( self ):
return self.hist_mkt_data( "total_volumes" )
def market_cap( self ):
return self.hist_mkt_data( "market_caps" )
def all_data( self ):
if type(self.token_list) == list:
df1 = self.hist_mkt_data( "prices" )
df2 = self.hist_mkt_data( "total_volumes" )
df3 = self.hist_mkt_data( "market_caps" )
df4 = pd.concat([df1,df2,df3],axis=1)
cols = [("price",tkn) for tkn in self.token_list] + [("volume",tkn) for tkn in self.token_list] + [("market_cap",tkn) for tkn in self.token_list]
df4.columns = | pd.MultiIndex.from_tuples(cols) | pandas.MultiIndex.from_tuples |
import statsmodels.formula.api as smf
import numpy as np
import torch
from torch import nn
import pandas as pd
import scipy as sp
from tqdm.auto import tqdm
from boardlaw import sql, elos
import aljpy
from pavlov import stats, runs
import pandas as pd
from boardlaw import arena
# All Elos internally go as e^d; Elos in public are in base 10^(d/400)
ELO = 400/np.log(10)
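# A rating gap of d in the internal (natural-log) units equals d*ELO points on the
# familiar 400-per-decade scale, e.g. d = 1 -> ~173.7 Elo; this is only a change of
# units and does not alter win probabilities.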
GLOBAL_GAMES = 1024
@aljpy.autocache()
def _trial_elos(boardsize, counter):
# So in the paper we have two evaluation schemes: one where 1024 games are played between all agents,
# and another where >>64k games are played against the best agent. Both of the these evaluation schemes
# are saved in the same database, so to stop the 64k-results skewing everything, we grab the first 1000
# games played by each pair.
trials = (sql.trial_query(boardsize, 'bee/%')
.query('black_wins + white_wins >= 512')
.groupby(['black_agent', 'white_agent'])
.first().reset_index())
ws, gs = elos.symmetrize(trials)
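    # ws/gs: pairwise win and game counts with black/white roles folded together
    # (hedged reading of boardlaw.elos); solve() then fits one rating per agent.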
return elos.solve(ws, gs)
def trial_elos(boardsize):
counter = sql.file_change_counter()
return _trial_elos(boardsize, counter)
def load():
ags = sql.agent_query().query('test_c == 1/16')
es = []
for b in tqdm(ags.boardsize.unique()):
es.append(trial_elos(b))
es = pd.concat(es)
return ags.join(es, how='inner')
def with_times(ags):
rates = {}
for r in ags.run.unique():
arr = stats.array(r, 'count.samples')
s, t = arr['total'], arr['_time']
rates[r] = 1e6*(s.sum() - s[0])/(t[-1] - t[0]).astype(float)
rates = pd.Series(rates, name='sample_rate')
aug = pd.merge(ags, rates, left_on='run', right_index=True)
aug['train_time'] = aug.samples/aug.sample_rate
return aug
def interp_curves(g, x='train_flops', y='elo', group='run'):
xl, xr = g[x].pipe(np.log10).min(), g[x].pipe(np.log10).max()
xs = np.linspace(xl, xr, 101)
ys = {}
for run, gg in g.sort_values(x).groupby(group):
xp = gg[x].pipe(np.log10).values
yp = gg[y].values
ys[run] = np.interp(xs, xp, yp, np.nan, np.nan)
return pd.DataFrame(ys, index=10**xs)
def interp_frontier(g, x='train_flops', y='elo', **kwargs):
ys = interp_curves(g, x=x, y=y, **kwargs)
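    # The frontier is the best elo achieved by any run at each train_flops grid
    # point; ffill keeps a run's last value once its own curve ends.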
return ys.ffill().max(1).rename_axis(index=x).rename(y)
class Changepoint(nn.Module):
def __init__(self):
super().__init__()
# Boardsize, offset
self.plateau = nn.Parameter(torch.as_tensor([-1.5, 3.]))
# Flops, boardsize, offset
self.incline = nn.Parameter(torch.as_tensor([2., -2, -16]))
def forward(self, X):
X = torch.cat([X, torch.ones_like(X[:, :1])], -1)
plateau = X[:, 1:] @ self.plateau
incline = X @ self.incline
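        # Fitted form: elo_hat = min(0, max(incline, plateau)) -- a linear ramp in
        # log10(train_flops) that levels off at a boardsize-dependent plateau,
        # clamped to at most 0.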
return torch.maximum(incline, plateau).clamp(None, 0)
class Sigmoid(nn.Module):
def __init__(self):
super().__init__()
self.scale = nn.Parameter(torch.as_tensor([1/16., 0.]))
self.height = nn.Parameter(torch.as_tensor(1.3))
self.center = nn.Parameter(torch.as_tensor([.66, 9.]))
def forward(self, X):
X = torch.cat([X, torch.ones_like(X[:, :1])], -1)
hscale = X[:, 1:] @ self.scale
vscale = hscale * self.height
center = X[:, 1:] @ self.center
return vscale*(torch.sigmoid((X[:, 0] - center)/hscale) - 1)
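# Sigmoid is an alternative curve family (a scaled, shifted logistic in
# log10-flops) kept for comparison; fit_model below uses the Changepoint form.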
def model_inputs(df):
return torch.stack([
torch.as_tensor(df.train_flops.values).log10().float(),
torch.as_tensor(df.boardsize.values).float(),], -1)
def fit_model(df):
X = model_inputs(df)
y = torch.as_tensor(df.elo.values)
model = Changepoint()
optim = torch.optim.LBFGS(model.parameters(), line_search_fn='strong_wolfe', max_iter=100)
def closure():
yhat = model(X)
loss = (y - yhat).pow(2).mean()
optim.zero_grad()
loss.backward()
return loss
optim.step(closure)
return model
def apply_model(model, df):
X = model_inputs(df)
return pd.Series(model(X).detach().cpu().numpy(), df.index)
def perfect_play(model, target=-50):
perfect = {}
for b in range(3, 10):
f = lambda x: 400/np.log(10)*model(torch.as_tensor([[x, b]])).detach().numpy().squeeze() - target
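        # f crosses zero where the modelled curve sits `target` Elo points below
        # the cap; bisect searches log10(train_flops) over the bracket [1, 18].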
perfect[b] = sp.optimize.bisect(f, 1, 18)
return pd.Series(perfect, name='perfect')
def modelled_elos(ags):
df = (ags.query('test_nodes == 64')
.groupby('boardsize')
.apply(interp_frontier, 'train_flops')
.reset_index())
model = fit_model(df)
df['elohat'] = apply_model(model, df)
return df, model
def residual_vars(ags):
df = (ags.query('test_nodes == 64')
.groupby('boardsize')
.apply(interp_frontier, 'train_flops')
.reset_index())
yhats = {}
for b in range(4, 10):
model = fit_model(df[df.boardsize <= b])
yhats[b] = apply_model(model, df[df.boardsize >= b])
yhats = | pd.concat(yhats, 1) | pandas.concat |
""" configuration run result """
import pandas
from datetime import datetime
from decimal import Decimal
from .connection import get_connection
def _get_connection():
_cnxn = get_connection()
return _cnxn
def insert(result):
_cnxn = _get_connection()
cursor = _cnxn.cursor()
with cursor.execute("""
INSERT INTO [dbo].[wsrt_run_result]
(
[TotalNetProfit],
[GrossProfit],
[GrossLoss],
[ProfitFactor],
[ExpectedPayoff],
[AbsoluteDrawdown],
[MaximalDrawdown],
[RelativeDrawdown],
[TotalTrades],
[RunFinishDateTimeUtc]
)
VALUES
(
?,?,?,?,?,?,?,?,?,?
)
""",
        # keys assumed to mirror the column names listed above
        result['TotalNetProfit'],
        result['GrossProfit'],
        result['GrossLoss'],
        result['ProfitFactor'],
        result['ExpectedPayoff'],
        result['AbsoluteDrawdown'],
        result['MaximalDrawdown'],
        result['RelativeDrawdown'],
        result['TotalTrades'],
datetime.utcnow()):
pass
_cnxn.commit()
_cnxn.close()
def mark_as_processing(id):
_cnxn = _get_connection()
cursor = _cnxn.cursor()
tsql = "UPDATE [dbo].[wsrt_run_result] SET [RunStartDateTimeUtc] = ? WHERE [ResultId] = ?"
with cursor.execute(tsql, datetime.utcnow(), id):
pass
_cnxn.commit()
_cnxn.close()
def get_for_processing_by_run_id(run_id):
run_result = None
_cnxn = _get_connection()
cursor = _cnxn.cursor()
tsql = """
SELECT TOP 1
[ResultId],
[RunId],
[OptionId]
FROM
[dbo].[wsrt_run_result]
WHERE
[RunId] = ?
AND [RunStartDateTimeUtc] IS NULL
"""
with cursor.execute(tsql, run_id):
row = cursor.fetchone()
if row:
run_result = dict(zip([column[0] for column in cursor.description], row))
if run_result:
# mark result as being processed, so other terminals not to pick it
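        # Note: the SELECT above and this UPDATE run as separate statements, so two
        # terminals polling at the same moment could still claim the same row; an
        # atomic UPDATE ... OUTPUT (or explicit row locking) would close that gap.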
process_tsql = """
UPDATE [dbo].[wsrt_run_result] SET [RunStartDateTimeUtc] = ?
WHERE [ResultId] = ?
"""
with cursor.execute(process_tsql, datetime.utcnow(), run_result['ResultId']):
_cnxn.commit()
_cnxn.close()
return run_result
def get_completed_run_results_by_configuration_id(configuration_id):
tsql = """
SELECT rr.* from dbo.wsrt_run_result rr
WHERE
rr.RunId
IN (SELECT r.RunId FROM dbo.wsrt_run r WHERE r.ConfigurationId = ?)
AND rr.RunFinishDateTimeUtc IS NOT NULL
"""
cnxn = _get_connection()
cursor = cnxn.cursor()
cursor.execute(tsql, configuration_id)
rows = [dict(zip([column[0] for column in cursor.description], row)) for row in cursor.fetchall()]
cursor.close()
del cursor
cnxn.close()
return rows
def update_run_result_with_report(report):
""" """
_cnxn = _get_connection()
cursor = _cnxn.cursor()
tsql = """
UPDATE [dbo].[wsrt_run_result] SET
[TotalNetProfit] = ?,
[GrossProfit] = ?,
[GrossLoss] = ?,
[ProfitFactor] = ?,
[ExpectedPayoff] = ?,
[AbsoluteDrawdown] = ?,
[MaximalDrawdown] = ?,
[TotalTrades] = ?,
[RunFinishDateTimeUtc] = ?
WHERE
[ResultId] = ?
"""
cursor.execute(tsql,
Decimal(report['TotalNetProfit']) if report['TotalNetProfit'] is not None else None,
Decimal(report['GrossProfit']) if report['GrossProfit'] is not None else None,
Decimal(report['GrossLoss']) if report['GrossLoss'] is not None else None,
Decimal(report['ProfitFactor']) if report['ProfitFactor'] is not None else None,
Decimal(report['ExpectedPayoff']) if report['ExpectedPayoff'] is not None else None,
Decimal(report['AbsoluteDrawdown']) if report['AbsoluteDrawdown'] is not None else None,
Decimal(report['MaximalDrawdown']) if report['MaximalDrawdown'] is not None else None,
int(report['TotalTrades']) if report['TotalTrades'] is not None else None,
datetime.utcnow(),
report['ResultId'])
for trade in report['Trades']:
add_run_result_trade(cursor, report['ResultId'], trade)
cursor.close()
del cursor
_cnxn.commit()
_cnxn.close()
def add_run_result_trade(cursor, result_id, trade):
tsql = """
INSERT INTO [dbo].[wsrt_run_result_trade]
(
[ResultId],
[OpenTime],
[Type],
[CloseTime],
[Profit]
)
VALUES
(
?,?,?,?,?
)
"""
cursor.execute(tsql, result_id, trade['OpenTime'], trade['Type'], trade['CloseTime'], trade['Profit'])
def get_run_result_trades_by_result_id(result_id):
tsql = """
SELECT
rrt.*
FROM
dbo.wsrt_run_result_trade rrt
WHERE
rrt.ResultId = ?
ORDER BY
rrt.CloseTime ASC
"""
cnxn = _get_connection()
cursor = cnxn.cursor()
cursor.execute(tsql, result_id)
rows = [dict(zip([column[0] for column in cursor.description], row)) for row in cursor.fetchall()]
df = pandas.DataFrame(rows)
cursor.close()
del cursor
cnxn.close()
return df
def remove_run_result_trades_by_configuration_id(configuration_id):
tsql = """
DELETE FROM dbo.wsrt_run_result_trade
WHERE [ResultId] IN
(SELECT rr.[ResultId] FROM dbo.wsrt_run_result rr WHERE rr.[RunId] IN
(SELECT r.[RunId] FROM dbo.wsrt_run r WHERE r.[ConfigurationId] = ?)
)
"""
cnxn = _get_connection()
cursor = cnxn.cursor()
cursor.execute(tsql, configuration_id)
cursor.close()
del cursor
cnxn.commit()
cnxn.close()
def reset_run_results_by_configuration_id(configuration_id):
tsql = """
UPDATE
dbo.wsrt_run_result
SET
[RunStartDateTimeUtc] = NULL
WHERE
[RunId] IN
(SELECT r.[RunId] FROM dbo.wsrt_run r WHERE r.[ConfigurationId] = ?)
"""
cnxn = _get_connection()
cursor = cnxn.cursor()
cursor.execute(tsql, configuration_id)
cursor.close()
del cursor
cnxn.commit()
cnxn.close()
def get_run_result_trades_summary_by_configuration_id(configuration_id):
tsql = """
select
rr.RunId,
rrt.ResultId,
COUNT(rrt.ResultId) as NumTrades,
Max(rrt.CloseTime) as MaxCloseTime
from
dbo.wsrt_run_result_trade rrt
inner join dbo.wsrt_run_result rr on rrt.ResultId = rr.ResultId
where rr.RunId in (select r.RunId from dbo.wsrt_run r where r.ConfigurationId = ?)
group by rr.RunId, rrt.ResultId
order by rrt.ResultId
"""
cnxn = _get_connection()
cursor = cnxn.cursor()
cursor.execute(tsql, configuration_id)
rows = [dict(zip([column[0] for column in cursor.description], row)) for row in cursor.fetchall()]
df = | pandas.DataFrame(rows) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from datetime import datetime
import pandas as pd
from kavalkilu import LogWithInflux
from servertools import (
SlackWeatherNotification,
OpenWeather,
OWMLocation,
Plants,
Plant
)
# Initiate Log, including a suffix to the log name to denote which instance of log is running
log = LogWithInflux('plant_warn', log_dir='weather')
now = datetime.now()
# Instantiate plants
plants = Plants()
weather = OpenWeather(OWMLocation.ATX)
hours_df = weather.get_hourly_forecast()
# Convert date to datetime
hours_df['date'] = pd.to_datetime(hours_df['date'])
# Filter by column & get only the next 12 hours of forecasted weather
cols = ['date', 'temp-min']
hours_df = hours_df.loc[hours_df.date < (now + | pd.Timedelta(hours=24) | pandas.Timedelta |
# $Id$
# $HeadURL$
################################################################
# The contents of this file are subject to the BSD 3Clause (New)
# you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://directory.fsf.org/wiki/License:BSD_3Clause
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
# The Original Code is part of the PyRadi toolkit.
# The Initial Developer of the Original Code is <NAME>,
# Portions created by <NAME> are Copyright (C) 2006-2012
# All Rights Reserved.
# Contributor(s): <NAME>, <NAME>, <NAME>, <NAME>
################################################################
"""
This module provides functions for plotting cartesian and polar plots.
The Plotter class provides a basic plotting capability, with a minimum
number of lines. These are all wrapper functions,
based on existing functions in other Python classes.
Provision is made for combinations of linear and log scales, as well
as polar plots for two-dimensional graphs.
The Plotter class can save files to disk in a number of formats.
For more examples of use see:
https://github.com/NelisW/ComputationalRadiometry
See the __main__ function for examples of use.
This package was partly developed to provide additional material in support of students
and readers of the book Electro-Optical System Analysis and Design: A Radiometry
Perspective, <NAME>, ISBN 9780819495693, SPIE Monograph Volume
PM236, SPIE Press, 2013. http://spie.org/x648.html?product_id=2021423&origin_id=x646
"""
__version__ = "$Revision$"
__author__ = 'pyradi team'
__all__ = ['Plotter','cubehelixcmap', 'FilledMarker', 'Markers','ProcessImage',
'savePlot']
import numpy as np
import pandas as pd
import math
import sys
import itertools
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.font_manager import FontProperties
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.dates as mdates
from mpl_toolkits.axes_grid1 import make_axes_locatable
# following for the pie plots
from matplotlib.transforms import Affine2D
import mpl_toolkits.axisartist.floating_axes as floating_axes
import mpl_toolkits.axisartist.angle_helper as angle_helper
from matplotlib.projections import PolarAxes
from mpl_toolkits.axisartist.grid_finder import MaxNLocator
from matplotlib.ticker import FormatStrFormatter
from matplotlib.colors import LinearSegmentedColormap as LSC
# see if plotly is available
try:
__import__('plotly.tools')
imported_plotly = True
from plotly import tools
from plotly.offline import download_plotlyjs, offline
from plotly.graph_objs import Scatter, Layout, Figure,Scatter3d,Mesh3d,ColorBar,Contour
except ImportError:
imported_plotly = False
from datetime import datetime
####################################################################
##
class FilledMarker:
"""Filled marker user-settable values.
This class encapsulates a few variables describing a Filled marker.
Default values are provided that can be overridden in user plots.
Values relevant to filled makers are as follows:
| marker = ['o', 'v', '^', '<', '>', '8', 's', 'p', '*', 'h', 'H', 'D', 'd']
| fillstyle = ['full', 'left', 'right', 'bottom', 'top', 'none']
| colour names = http://www.w3schools.com/html/html_colornames.asp
"""
def __init__(self, markerfacecolor=None, markerfacecoloralt=None,
markeredgecolor=None, marker=None, markersize=None,
fillstyle=None):
"""Define marker default values.
Args:
| markerfacecolor (colour): main colour for marker (optional)
| markerfacecoloralt (colour): alternative colour for marker (optional)
| markeredgecolor (colour): edge colour for marker (optional)
| marker (string): string to specify the marker (optional)
| markersize (int)): size of the marker (optional)
| fillstyle (string): string to define fill style (optional)
Returns:
| Nothing. Creates the marker description for subsequent use.
Raises:
| No exception is raised.
"""
__all__ = ['__init__']
if markerfacecolor is None:
self.markerfacecolor = 'r'
else:
self.markerfacecolor = markerfacecolor
if markerfacecoloralt is None:
self.markerfacecoloralt = 'b'
else:
self.markerfacecoloralt = markerfacecoloralt
if markeredgecolor is None:
self.markeredgecolor = 'k'
else:
self.markeredgecolor = markeredgecolor
if marker is None:
self.marker = 'o'
else:
self.marker = marker
if markersize is None:
self.markersize = 20
else:
self.markersize = markersize
if fillstyle is None:
self.fillstyle = 'full'
else:
self.fillstyle = fillstyle
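# Usage sketch for FilledMarker: one instance with defaults and one with
# overrides (the colour, marker and size values are arbitrary illustrations).
def _demo_filled_marker():
    default_marker = FilledMarker()
    custom_marker = FilledMarker(markerfacecolor='g', marker='D',
                                 markersize=12, fillstyle='left')
    return default_marker, custom_marker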
###################################################################################
###################################################################################
class Markers:
"""Collect marker location and types and mark subplot.
Build a list of markers at plot locations with the specified marker.
"""
####################################################################
##
def __init__(self, markerfacecolor = None, markerfacecoloralt = None,
markeredgecolor = None, marker = None, markersize = None,
fillstyle = None):
"""Set default marker values for this collection
Specify default marker properties to be used for all markers
in this instance. If no marker properties are specified here,
the default FilledMarker marker properties will be used.
Args:
| markerfacecolor (colour): main colour for marker (optional)
| markerfacecoloralt (colour): alternative colour for marker (optional)
| markeredgecolor (colour): edge colour for marker (optional)
| marker (string): string to specify the marker (optional)
| markersize (int)): size of the marker (optional)
| fillstyle (string): string to define fill style (optional)
Returns:
| Nothing. Creates the marker collection for subsequent use.
Raises:
| No exception is raised.
"""
__all__ = ['__init__', 'add', 'plot']
if markerfacecolor is None:
self.markerfacecolor = None
else:
self.markerfacecolor = markerfacecolor
if markerfacecoloralt is None:
self.markerfacecoloralt = None
else:
self.markerfacecoloralt = markerfacecoloralt
if markeredgecolor is None:
self.markeredgecolor = None
else:
self.markeredgecolor = markeredgecolor
if marker is None:
self.marker = None
else:
self.marker = marker
if markersize is None:
self.markersize = markersize
else:
self.markersize = markersize
if fillstyle is None:
self.fillstyle = None
else:
self.fillstyle = fillstyle
#list of markers to be drawn
self.markers = []
####################################################################
##
def add(self,x,y,markerfacecolor = None, markerfacecoloralt = None,
markeredgecolor = None, marker = None, markersize = None,
fillstyle = None):
"""Add a marker to the list, overridding properties if necessary.
Specify location and any specific marker properties to be used.
The location can be (x,y) for cartesian plots or (theta,rad) for polars.
If no marker properties are specified, the current marker class
properties will be used. If the current marker instance does not
specify properties, the default marker properties will be used.
Args:
| x (float): the x/theta location for the marker
| y (float): the y/radial location for the marker
| markerfacecolor (colour): main colour for marker (optional)
| markerfacecoloralt (colour): alternative colour for marker (optional)
| markeredgecolor (colour): edge colour for marker (optional)
| marker (string): string to specify the marker (optional)
| markersize (int)): size of the marker (optional)
| fillstyle (string): string to define fill style (optional)
Returns:
| Nothing. Adds the marker to the list for subsequent plotting.
Raises:
| No exception is raised.
"""
if markerfacecolor is None:
if self.markerfacecolor is not None:
markerfacecolor = self.markerfacecolor
if markerfacecoloralt is None:
if self.markerfacecoloralt is not None:
markerfacecoloralt = self.markerfacecoloralt
if markeredgecolor is None:
if self.markeredgecolor is not None:
markeredgecolor = self.markeredgecolor
if marker is None:
if self.marker is not None:
marker = self.marker
if markersize is None:
if self.markersize is not None:
markersize = self.markersize
if fillstyle is None:
if self.fillstyle is not None:
fillstyle = self.fillstyle
marker = FilledMarker(markerfacecolor, markerfacecoloralt ,
markeredgecolor , marker, markersize , fillstyle)
self.markers.append((x,y,marker))
####################################################################
##
def plot(self,ax):
"""Plot the current list of markers on the given axes.
All the markers currently stored in the class will be
drawn.
Args:
| ax (axes): an axes handle for the plot
Returns:
| Nothing. Draws the stored markers on the supplied axes.
Raises:
| No exception is raised.
"""
usetex = plt.rcParams['text.usetex']
plt.rcParams['text.usetex'] = False # otherwise, '^' will cause trouble
for marker in self.markers:
ax.plot(marker[0], marker[1],
color = marker[2].markerfacecolor,
markerfacecoloralt = marker[2].markerfacecoloralt,
markeredgecolor = marker[2].markeredgecolor,
marker = marker[2].marker,
markersize = marker[2].markersize,
fillstyle = marker[2].fillstyle,
linewidth=0)
plt.rcParams['text.usetex'] = usetex
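# Usage sketch for Markers: collect a few points and draw them on a plain
# Matplotlib axes. Wrapped in a helper so importing the module has no plotting
# side effects; the coordinates and colours are arbitrary illustrations.
def _demo_markers():
    fig, ax = plt.subplots()
    mks = Markers(markerfacecolor='y', marker='^')
    mks.add(0.5, 0.5)
    mks.add(1.0, 2.0, markerfacecolor='c', markersize=30)
    mks.plot(ax)
    return fig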
###################################################################################
###################################################################################
class ProcessImage:
"""This class provides a functions to assist in the optimal display of images.
"""
#define the compression rule to be used in the equalisation function
compressSet = [
[lambda x : x , lambda x : x, 'Linear'],
[np.log, np.exp, 'Natural Log'],
[np.sqrt, np.square, 'Square Root']]
############################################################
def __init__(self):
"""Class constructor
Sets up some variables for use in this class
Args:
| None
Returns:
| Nothing
Raises:
| No exception is raised.
"""
__all__ = ['__init__', 'compressEqualizeImage', 'reprojectImageIntoPolar']
############################################################
def compressEqualizeImage(self, image, selectCompressSet=2, numCbarlevels=20,
cbarformat='.3f'):
"""Compress an image (and then inversely expand the color bar values),
prior to histogram equalisation to ensure that the two keep in step,
we store the compression function names as pairs, and invoke the
compression function as follows: linear, log. sqrt. Note that the
image is histogram equalised in all cases.
Args:
| image (np.ndarray): the image to be processed
| selectCompressSet (int): compression selection [0,1,2] (optional)
| numCbarlevels (int): number of labels in the colourbar (optional)
| cbarformat (string): colourbar label format, e.g., '10.3f', '.5e' (optional)
Returns:
| imgHEQ (np.ndarray): the equalised image array
| customticksz (zip(float, string)): colourbar levels and associated levels
Raises:
| No exception is raised.
"""
#compress the input image - rescale color bar tick to match below
#also collapse into single dimension
imgFlat = self.compressSet[selectCompressSet][0](image.flatten())
imgFlatSort = np.sort(imgFlat)
#cumulative distribution
cdf = imgFlatSort.cumsum()/imgFlatSort[-1]
#remap image values to achieve histogram equalisation
y=np.interp(imgFlat,imgFlatSort, cdf )
#and reshape to original image shape
imgHEQ = y.reshape(image.shape)
# #plot the histogram mapping
# minData = np.min(imgFlat)
# maxData = np.max(imgFlat)
# print('Image irradiance range minimum={0} maximum={1}'.format(minData, maxData))
# irradRange=np.linspace(minData, maxData, 100)
# normalRange = np.interp(irradRange,imgFlatSort, cdf )
# H = ryplot.Plotter(1, 1, 1,'Mapping Input Irradiance to Equalised Value',
# figsize=(10, 10))
# H.plot(1, "","Irradiance [W/(m$^2$)]", "Equalised value",irradRange,
# normalRange, powerLimits = [-4, 2, -10, 2])
# #H.getPlot().show()
# H.saveFig('cumhist{0}.png'.format(entry), dpi=300)
#prepare the color bar tick labels from image values (as plotted)
imgLevels = np.linspace(np.min(imgHEQ), np.max(imgHEQ), numCbarlevels)
#map back from image values to original values as read it (inverse to above)
irrLevels=np.interp(imgLevels,cdf, imgFlatSort)
#uncompress the tick labels - match with compression above
fstr = '{0:' + cbarformat + '}'
customticksz = list(zip(imgLevels, [fstr.format(self.compressSet[selectCompressSet][1](x)) for x in irrLevels]))
return imgHEQ, customticksz
##############################################################################
##
def reprojectImageIntoPolar(self, data, origin=None, framesFirst=True,cval=0.0):
"""Reprojects a 3D numpy array into a polar coordinate system, relative to some origin.
This function reprojects an image from cartesian to polar coordinates.
The origin of the new coordinate system defaults to the center of the image,
unless the user supplies a new origin.
The data format can be data.shape = (rows, cols, frames) or
data.shape = (frames, rows, cols), the format of which is indicated by the
framesFirst parameter.
The reprojectImageIntoPolar function maps radial to cartesian coords.
The radial image is however presented in a cartesian grid, the corners have no meaning.
The radial coordinates are mapped to the radius, not the corners.
This means that in order to map the corners, the frequency is scaled with sqrt(2).
The corners are filled with the value specified in cval.
Args:
| data (np.array): 3-D array to which transformation must be applied.
| origin ( (x-orig, y-orig) ): data-coordinates of where origin should be placed
| framesFirst (bool): True if data.shape is (frames, rows, cols), False if
data.shape is (rows, cols, frames)
| cval (float): the fill value to be used in coords outside the mapped range(optional)
Returns:
| output (float np.array): transformed images/array data in the same sequence as input sequence.
| r_i (np.array[N,]): radial values for returned image.
| theta_i (np.array[M,]): angular values for returned image.
Raises:
| No exception is raised.
original code by <NAME>
https://stackoverflow.com/questions/3798333/image-information-along-a-polar-coordinate-system
"""
import pyradi.ryutils as ryutils
# import scipy as sp
import scipy.ndimage as spndi
if framesFirst:
data = ryutils.framesLast(data)
ny, nx = data.shape[:2]
if origin is None:
origin = (nx//2, ny//2)
# Determine what the min and max r and theta coords will be
x, y = ryutils.index_coords(data, origin=origin, framesFirst=framesFirst )
r, theta = ryutils.cart2polar(x, y)
# Make a regular (in polar space) grid based on the min and max r & theta
r_i = np.linspace(r.min(), r.max(), nx)
theta_i = np.linspace(theta.min(), theta.max(), ny)
theta_grid, r_grid = np.meshgrid(theta_i, r_i)
# Project the r and theta grid back into pixel coordinates
xi, yi = ryutils.polar2cart(r_grid, theta_grid)
xi += origin[0] # We need to shift the origin back to
yi += origin[1] # back to the lower-left corner...
xi, yi = xi.flatten(), yi.flatten()
coords = np.vstack((xi, yi)) # (map_coordinates requires a 2xn array)
# Reproject each band individually and the restack
# (uses less memory than reprojection the 3-dimensional array in one step)
bands = []
for band in data.T:
zi = spndi.map_coordinates(band, coords, order=1,cval=cval)
bands.append(zi.reshape((nx, ny)))
output = np.dstack(bands)
if framesFirst:
output = ryutils.framesFirst(output)
return output, r_i, theta_i
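# Usage sketch for ProcessImage.compressEqualizeImage on a synthetic gradient
# image (the image contents and the chosen compression set are illustrative).
def _demo_compress_equalise():
    pim = ProcessImage()
    img = np.outer(np.linspace(1.0, 100.0, 64), np.linspace(1.0, 100.0, 64))
    imgHEQ, customticks = pim.compressEqualizeImage(img, selectCompressSet=1,
                                                    numCbarlevels=5)
    return imgHEQ, customticks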
###################################################################################
###################################################################################
class Plotter:
""" Encapsulates a plotting environment, optimized for compact code.
This class provides a wrapper around Matplotlib to provide a plotting
environment specialised towards typical pyradi visualisation.
These functions were developed to provide sophisticated plots by entering
the various plot options on a few lines, instead of typing many commands.
Provision is made for plots containing subplots (i.e., multiple plots on
the same figure), linear scale and log scale plots, images, and cartesian,
3-D and polar plots.
"""
############################################################
##
def __init__(self,fignumber=0,subpltnrow=1,subpltncol=1,\
figuretitle=None, figsize=(9,9), titlefontsize=14,
useplotly = False,doWarning=True):
"""Class constructor
The constructor defines the number for this figure, allowing future reference
to this figure. The number of subplot rows and columns allow the user to define
the subplot configuration. The user can also provide a title to be
used for the figure (centred on top) and finally, the size of the figure in inches
can be specified to scale the text relative to the figure.
Args:
| fignumber (int): the plt figure number, must be supplied
| subpltnrow (int): subplot number of rows
| subpltncol (int): subplot number of columns
| figuretitle (string): the overall heading for the figure
| figsize ((w,h)): the figure size in inches
| titlefontsize (int): the figure title size in points
| useplotly (bool): Plotly activation parameter
| doWarning (bool): print warning messages to the screen
Returns:
| Nothing. Creates the figure for subsequent use.
Raises:
| No exception is raised.
"""
__all__ = ['__init__', 'saveFig', 'getPlot', 'plot', 'logLog', 'semilogX',
'semilogY', 'polar', 'showImage', 'plot3d', 'buildPlotCol',
'getSubPlot', 'meshContour', 'nextPlotCol', 'plotArray',
'polarMesh', 'resetPlotCol', 'mesh3D', 'polar3d', 'labelSubplot',
'emptyPlot','setup_pie_axes','pie']
version=mpl.__version__.split('.')
vnum=float(version[0]+'.'+version[1])
if vnum<1.1:
print('Install Matplotlib 1.1 or later')
print('current version is {0}'.format(vnum))
sys.exit(-1)
self.figurenumber = fignumber
self.fig = plt.figure(self.figurenumber)
self.fig.set_size_inches(figsize[0], figsize[1])
self.fig.clear()
self.figuretitle = figuretitle
self.doWarning = doWarning
#Plotly variables initialization
self.useplotly = useplotly
if self.useplotly:
self.Plotlyfig = []
self.Plotlydata = []
self.Plotlylayout = []
self.PlotlyXaxisTitles = []
self.PlotlyYaxisTitles = []
self.PlotlySubPlotTitles = []
self.PlotlySubPlotLabels = []
self.PlotlySubPlotNumbers = []
self.PlotlyPlotCalls = 0
self.PLcolor=''
self.PLwidth=0
self.PLdash=''
self.PLmultiAxisTitle=''
self.PLmultipleYAxis=False
self.PLyAxisSide=''
self.PLyAxisOverlaying=''
self.PLmultipleXAxis=False
self.PLxAxisSide=''
self.PLxAxisOverlaying=''
self.PLIs3D=False
self.PLType=''
self.nrow=subpltnrow
self.ncol=subpltncol
# width reserved for space between subplots
self.fig.subplots_adjust(wspace=0.25)
#height reserved for space between subplots
self.fig.subplots_adjust(hspace=0.4)
#height reserved for top of the subplots of the figure
self.fig.subplots_adjust(top=0.88)
# define the default line colour and style
self.buildPlotCol(plotCol=None, n=None)
self.bbox_extra_artists = []
self.subplots = {}
self.gridSpecsOuter = {}
self.arrayRows = {}
self.gridSpecsInner = {}
if figuretitle:
self.figtitle=plt.gcf().text(.5,.95,figuretitle,\
horizontalalignment='center',\
fontproperties=FontProperties(size=titlefontsize))
self.bbox_extra_artists.append(self.figtitle)
############################################################
##
def buildPlotCol(self, plotCol=None, n=None):
"""Set a sequence of default colour styles of
appropriate length.
The constructor provides a sequence with length
14 pre-defined plot styles.
The user can define a new sequence if required.
This function modulus-folds either sequence, in
case longer sequences are required.
Colours can be one of the basic colours:
['b', 'g', 'r', 'c', 'm', 'y', 'k']
or it can be a gray shade float value between 0 and 1,
such as '0.75', or it can be in hex format '#eeefff'
or it can be one of the legal html colours.
See http://html-color-codes.info/ and
http://www.computerhope.com/htmcolor.htm.
http://latexcolor.com/
Args:
| plotCol ([strings]): User-supplied list
| of plotting styles(can be empty []).
| n (int): Length of required sequence.
Returns:
| A list with sequence of plot styles, of required length.
Raises:
| No exception is raised.
"""
# assemble the list as requested, use default if not specified
if plotCol is None:
plotCol = ['b', 'g', 'r', 'c', 'm', 'y', 'k',
'#5D8AA8','#E52B50','#FF7E00','#9966CC','#CD9575','#915C83',
'#008000','#4B5320','#B2BEB5','#A1CAF1','#FE6F5E','#333399',
'#DE5D83','#800020','#1E4D2B','#00BFFF','#007BA7','#FFBCD9']
if n is None:
n = len(plotCol)
self.plotCol = [plotCol[i % len(plotCol)] for i in range(n)]
# copy this to circular list as well
self.plotColCirc = itertools.cycle(self.plotCol)
return self.plotCol
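# Usage sketch (illustrative colours): override the default colour sequence
# before plotting, then fetch colours one at a time:
#   p = Plotter(1, 1, 1)
#   p.buildPlotCol(['k', '0.5', '#5D8AA8'], n=6)  # 6 styles, modulus-folded
#   col = p.nextPlotCol()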
############################################################
##
def nextPlotCol(self):
"""Returns the next entry in a sequence of default
plot line colour styles in circular list.
One day I want to do this with a generator....
Args:
| None
Returns:
| The next plot colour in the sequence.
Raises:
| No exception is raised.
"""
col = next(self.plotColCirc)
return col
############################################################
##
def resetPlotCol(self):
"""Resets the plot colours to start at the beginning of
the cycle.
Args:
| None
Returns:
| None.
Raises:
| No exception is raised.
"""
self.plotColCirc = itertools.cycle(self.plotCol)
############################################################
##
def saveFig(self, filename='mpl.png',dpi=300,bbox_inches='tight',\
pad_inches=0.1, useTrueType = True):
"""Save the plot to a disk file, using filename, dpi specification and bounding box limits.
One of matplotlib's design choices is a bounding box strategy which may result in a bounding box
that is smaller than the size of all the objects on the page. It took a while to figure this out,
but the current default values for bbox_inches and pad_inches seem to create meaningful
bounding boxes. These are however larger than the true bounding box. You still need a
tool such as epstools or Adobe Acrobat to trim eps files to the true bounding box.
The type of file written is picked up in the filename.
Most backends support png, pdf, ps, eps and svg.
Args:
| filename (string): output filename to write plot, file ext
| dpi (int): the resolution of the graph in dots per inch
| bbox_inches: see matplotlib docs for more detail.
| pad_inches: see matplotlib docs for more detail.
| useTrueType: if True, truetype fonts are used in eps/pdf files, otherwise Type3
Returns:
| Nothing. Saves a file to disk.
Raises:
| No exception is raised.
"""
# http://matplotlib.1069221.n5.nabble.com/TrueType-font-embedding-in-eps-problem-td12691.html
# http://stackoverflow.com/questions/5956182/cannot-edit-text-in-chart-exported-by-matplotlib-and-opened-in-illustrator
# http://newsgroups.derkeiler.com/Archive/Comp/comp.soft-sys.matlab/2008-07/msg02038.html
if useTrueType:
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['ps.fonttype'] = 42
#http://stackoverflow.com/questions/15341757/how-to-check-that-pylab-backend-of-matplotlib-runs-inline/17826459#17826459
# print(mpl.get_backend())
if 'inline' in mpl.get_backend() and self.doWarning:
print('**** If saveFig does not work inside the notebook please comment out the line "%matplotlib inline" ')
print('To disable ryplot warnings, set doWarning=False')
# return
if len(filename)>0:
if self.bbox_extra_artists:
self.fig.savefig(filename, dpi=dpi, bbox_inches=bbox_inches,
pad_inches=pad_inches,\
bbox_extra_artists= self.bbox_extra_artists);
else:
self.fig.savefig(filename, dpi=dpi, bbox_inches=bbox_inches,
pad_inches=pad_inches);
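# Usage sketch (illustrative file name and resolution):
#   p = Plotter(1, 1, 1, 'demo figure')
#   p.plot(1, np.linspace(0, 1, 10), np.linspace(0, 1, 10) ** 2)
#   p.saveFig('demo.png', dpi=150)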
############################################################
##
def getPlot(self):
"""Returns a handle to the current figure
Args:
| None
Returns:
| A handle to the current figure.
Raises:
| No exception is raised.
"""
return self.fig
############################################################
##
def labelSubplot(self, spax, ptitle=None, xlabel=None, ylabel=None, zlabel=None,
titlefsize=10, labelfsize=10, ):
"""Set the sub-figure title and axes labels (cartesian plots only).
Args:
| spax (handle): subplot axis handle where labels must be drawn
| ptitle (string): plot title (optional)
| xlabel (string): x-axis label (optional)
| ylabel (string): y-axis label (optional)
| zlabel (string): z axis label (optional)
| titlefsize (float): title fontsize (optional)
| labelfsize (float): x,y,z label fontsize (optional)
Returns:
| None.
Raises:
| No exception is raised.
"""
if xlabel is not None:
spax.set_xlabel(xlabel,fontsize=labelfsize)
if ylabel is not None:
spax.set_ylabel(ylabel,fontsize=labelfsize)
if zlabel is not None:
spax.set_ylabel(zlabel,fontsize=labelfsize)
if ptitle is not None:
spax.set_title(ptitle,fontsize=titlefsize)
############################################################
##
def getSubPlot(self, subplotNum = 1):
"""Returns a handle to the subplot, as requested per subplot number.
Subplot numbers range from 1 upwards.
Args:
| subplotNum (int): number of the subplot
Returns:
| A handle to the requested subplot or None if not found.
Raises:
| No exception is raised.
"""
if (self.nrow,self.ncol, subplotNum) in list(self.subplots.keys()):
return self.subplots[(self.nrow,self.ncol, subplotNum)]
else:
return None
############################################################
##
def getXLim(self, subplotNum = 1):
"""Returns the x limits of the current subplot.
Subplot numbers range from 1 upwards.
Args:
| subplotNum (int): number of the subplot
Returns:
| An array with the two limits
Raises:
| No exception is raised.
"""
if (self.nrow,self.ncol, subplotNum) in list(self.subplots.keys()):
return np.asarray(self.subplots[(self.nrow,self.ncol, subplotNum)].get_xlim())
else:
return None
############################################################
##
def getYLim(self, subplotNum = 1):
"""Returns the y limits of the current subplot.
Subplot numbers range from 1 upwards.
Args:
| subplotNum (int): number of the subplot
Returns:
| An array with the two limits
Raises:
| No exception is raised.
"""
if (self.nrow,self.ncol, subplotNum) in list(self.subplots.keys()):
return np.asarray(self.subplots[(self.nrow,self.ncol, subplotNum)].get_ylim())
else:
return None
############################################################
##
def verticalLineCoords(self,subplotNum=1,x=0):
"""Returns two arrays for vertical line at x in the specific subplot.
The line is drawn at specified x, with current y limits in subplot.
Subplot numbers range from 1 upwards.
Use as follows to draw a vertical line in plot:
p.plot(1,*p.verticalLineCoords(subplotNum=1,x=freq),plotCol=['k'])
Args:
| subplotNum (int): number of the subplot
| x (double): horizontal value used for line
Returns:
| A tuple with two arrays for line (x-coords,y-coords)
Raises:
| No exception is raised.
"""
if (self.nrow,self.ncol, subplotNum) in list(self.subplots.keys()):
handle = self.subplots[(self.nrow,self.ncol, subplotNum)]
x = np.asarray((x,x))
y = self.getYLim(subplotNum)
return x,y
else:
return None
############################################################
##
def horizontalLineCoords(self,subplotNum=1,y=0):
"""Returns two arrays for horizontal line at y in the specific subplot.
The line is drawn at specified y, with current x limits in subplot.
Subplot numbers range from 1 upwards.
Use as follows to draw a horizontal line in plot:
p.plot(1,*p.horizontalLineCoords(subplotNum=1,y=value),plotCol=['k'])
Args:
| subplotNum (int): number of the subplot
| y (double): y value at which the horizontal line is drawn
Returns:
| A tuple with two arrays for line (x-coords,y-coords)
Raises:
| No exception is raised.
"""
if (self.nrow,self.ncol, subplotNum) in list(self.subplots.keys()):
handle = self.subplots[(self.nrow,self.ncol, subplotNum)]
y = np.asarray((y,y))
x = self.getXLim(subplotNum)
return x,y
else:
return None
############################################################
##
def plot(self, plotnum, x, y, ptitle=None, xlabel=None, ylabel=None,
plotCol=[], linewidths=None, label=[], legendAlpha=0.0,
legendLoc='best',
pltaxis=None, maxNX=10, maxNY=10, linestyle=None,
powerLimits = [-4, 2, -4, 2], titlefsize = 12,
xylabelfsize = 12, xytickfsize = 10, labelfsize=10,
xScientific=False, yScientific=False,
yInvert=False, xInvert=False, drawGrid=True,xIsDate=False,
xTicks=None, xtickRotation=0,
markers=[], markevery=None, markerfacecolor=True,markeredgecolor=True,
zorders=None, clip_on=True,axesequal=False,
xAxisFmt=None, yAxisFmt=None,
PLcolor=None,
PLwidth=None, PLdash=None, PLyAxisSide=None, PLyAxisOverlaying=None,
PLmultipleYAxis=False, PLmultiAxisTitle=None, PLxAxisSide=None,
PLxAxisOverlaying=None, PLmultipleXAxis=False ): #Plotly initialization parameters
"""Cartesian plot on linear scales for abscissa and ordinates.
Given an existing figure, this function plots in a specified subplot position.
The function arguments are described below in some detail. Note that the y-values
or ordinates can be more than one column, each column representing a different
line in the plot. This is convenient if large arrays of data must be plotted. If more
than one column is present, the label argument can contain the legend labels for
each of the columns/lines. The pltaxis argument defines the min/max scale values
for the x and y axes.
Args:
| plotnum (int): subplot number, 1-based index
| x (np.array[N,] or [N,1]): abscissa
| y (np.array[N,] or [N,M]): ordinates - could be M columns
| ptitle (string): plot title (optional)
| xlabel (string): x-axis label (optional)
| ylabel (string): y-axis label (optional)
| plotCol ([strings]): plot colour and line style, list with M entries, use default if [] (optional)
| linewidths ([float]): plot line width in points, list with M entries, use default if None (optional)
| label ([strings]): legend label for ordinate, list with M entries (optional)
| legendAlpha (float): transparency for legend box (optional)
| legendLoc (string): location for legend box (optional)
| pltaxis ([xmin, xmax, ymin,ymax]): scale for x,y axes. Let Matplotlib decide if None. (optional)
| maxNX (int): draw maxNX+1 tick labels on x axis (optional)
| maxNY (int): draw maxNY+1 tick labels on y axis (optional)
| linestyle (string): linestyle for this plot (optional)
| powerLimits[float]: scientific tick label power limits [x-low, x-high, y-low, y-high] (optional)
| titlefsize (int): title font size, default 12pt (optional)
| xylabelfsize (int): x-axis, y-axis label font size, default 12pt (optional)
| xytickfsize (int): x-axis, y-axis tick font size, default 10pt (optional)
| labelfsize (int): label/legend font size, default 10pt (optional)
| xScientific (bool): use scientific notation on x axis (optional)
| yScientific (bool): use scientific notation on y axis (optional)
| drawGrid (bool): draw the grid on the plot (optional)
| yInvert (bool): invert the y-axis (optional)
| xInvert (bool): invert the x-axis (optional)
| xIsDate (bool): convert the datetime x-values to dates (optional)
| xTicks ({tick:label}): dict of x-axis tick locations and associated labels (optional)
| xtickRotation (float) x-axis tick label rotation angle (optional)
| markers ([string]) markers to be used for plotting data points (optional)
| markevery (int | (startind, stride)) subsample when using markers (optional)
| markerfacecolor (True|None|str) if True same as plotCol, if None empty, otherwise str is colour (optional)
| markeredgecolor (True|None|str) if True same as plotCol, if None empty, otherwise str is colour (optional)
| zorders ([int]) list of zorder for drawing sequence, highest is last (optional)
| clip_on (bool) clips objects to drawing axes (optional)
| axesequal (bool) force scaling on x and y axes to be equal (optional)
| xAxisFmt (string) x-axis format string, e.g., '%.2f', default None (optional)
| yAxisFmt (string) y-axis format string, e.g., '%.2e', default None (optional)
| PLcolor (string): graph color scheme. Format 'rgb(r,g,b)'
| PLwidth (float): graph line width (Plotly)
| PLdash (string): line style (Plotly)
| PLyAxisSide (string): Sets the location of the y-axis (left/right)
| PLyAxisOverlaying (string): Sets the overlaying
| PLmultipleYAxis (bool): Indicates presence of multiple axis
| PLmultiAxisTitle (string): Sets the title of the multiple axis
| PLxAxisSide (string): Sets the location of the x-axis (top/bottom)
| PLxAxisOverlaying (string): Sets the overlaying
| PLmultipleXAxis (bool): Indicates presence of multiple axis
Returns:
| the axis object for the plot
Raises:
| No exception is raised.
"""
#Plotly variables initialization
self.PLcolor=PLcolor
self.PLwidth=PLwidth
self.PLdash=PLdash
self.PLmultipleYAxis=PLmultipleYAxis
self.PLmultiAxisTitle=PLmultiAxisTitle
self.PLyAxisSide=PLyAxisSide
self.PLyAxisOverlaying=PLyAxisOverlaying
self.PLmultipleXAxis=PLmultipleXAxis
self.PLxAxisSide=PLxAxisSide
self.PLxAxisOverlaying=PLxAxisOverlaying
## see self.MyPlot for parameter details.
pkey = (self.nrow, self.ncol, plotnum)
if pkey not in list(self.subplots.keys()):
self.subplots[pkey] = \
self.fig.add_subplot(self.nrow,self.ncol, plotnum)
ax = self.subplots[pkey]
self.myPlot(ax.plot, plotnum, x, y, ptitle, xlabel, ylabel,
plotCol, linewidths, label,legendAlpha, legendLoc,
pltaxis, maxNX, maxNY, linestyle,
powerLimits,titlefsize,
xylabelfsize, xytickfsize,
labelfsize, drawGrid,
xScientific, yScientific,
yInvert, xInvert, xIsDate,
xTicks, xtickRotation,
markers, markevery, markerfacecolor,markeredgecolor,
zorders, clip_on,axesequal,
xAxisFmt,yAxisFmt)
return ax
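# Usage sketch for plot(): one abscissa with two ordinate columns and legend
# labels (all data values are arbitrary illustrations):
#   xv = np.linspace(0, 10, 50)
#   yv = np.vstack((np.sin(xv), np.cos(xv))).T
#   p = Plotter(1, 1, 1, 'Sine and cosine')
#   p.plot(1, xv, yv, 'demo', 'x', 'y', label=['sin', 'cos'], legendAlpha=0.5)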
############################################################
##
def logLog(self, plotnum, x, y, ptitle=None, xlabel=None, ylabel=None,
plotCol=[], linewidths=None, label=[],legendAlpha=0.0,
legendLoc='best',
pltaxis=None, maxNX=10, maxNY=10, linestyle=None,
powerLimits = [-4, 2, -4, 2], titlefsize = 12,
xylabelfsize = 12, xytickfsize = 10,labelfsize=10,
xScientific=False, yScientific=False,
yInvert=False, xInvert=False, drawGrid=True,xIsDate=False,
xTicks=None, xtickRotation=0,
markers=[], markevery=None, markerfacecolor=True,markeredgecolor=True,
zorders=None, clip_on=True,axesequal=False,
xAxisFmt=None, yAxisFmt=None,
PLcolor=None,
PLwidth=None, PLdash=None, PLyAxisSide=None, PLyAxisOverlaying=None,
PLmultipleYAxis=False, PLmultiAxisTitle=None):
"""Plot data on logarithmic scales for abscissa and ordinates.
Given an existing figure, this function plots in a specified subplot position.
The function arguments are described below in some detail. Note that the y-values
or ordinates can be more than one column, each column representing a different
line in the plot. This is convenient if large arrays of data must be plotted. If more
than one column is present, the label argument can contain the legend labels for
each of the columns/lines. The pltaxis argument defines the min/max scale values
for the x and y axes.
Args:
| plotnum (int): subplot number, 1-based index
| x (np.array[N,] or [N,1]): abscissa
| y (np.array[N,] or [N,M]): ordinates - could be M columns
| ptitle (string): plot title (optional)
| xlabel (string): x-axis label (optional)
| ylabel (string): y-axis label (optional)
| plotCol ([strings]): plot colour and line style, list with M entries, use default if [] (optional)
| linewidths ([float]): plot line width in points, list with M entries, use default if None (optional)
| label ([strings]): legend label for ordinate, list with M entries (optional)
| legendAlpha (float): transparency for legend box (optional)
| legendLoc (string): location for legend box (optional)
| pltaxis ([xmin, xmax, ymin,ymax]): scale for x,y axes. Let Matplotlib decide if None. (optional)
| maxNX (int): draw maxNX+1 tick labels on x axis (optional)
| maxNY (int): draw maxNY+1 tick labels on y axis (optional)
| linestyle (string): linestyle for this plot (optional)
| powerLimits ([float]): scientific tick label power limits [x-low, x-high, y-low, y-high] (optional)
| titlefsize (int): title font size, default 12pt (optional)
| xylabelfsize (int): x-axis, y-axis label font size, default 12pt (optional)
| xytickfsize (int): x-axis, y-axis tick font size, default 10pt (optional)
| labelfsize (int): label/legend font size, default 10pt (optional)
| xScientific (bool): use scientific notation on x axis (optional)
| yScientific (bool): use scientific notation on y axis (optional)
| drawGrid (bool): draw the grid on the plot (optional)
| yInvert (bool): invert the y-axis (optional)
| xInvert (bool): invert the x-axis (optional)
| xIsDate (bool): convert the datetime x-values to dates (optional)
| xTicks ({tick:label}): dict of x-axis tick locations and associated labels (optional)
| xtickRotation (float) x-axis tick label rotation angle (optional)
| markers ([string]) markers to be used for plotting data points (optional)
| markevery (int | (startind, stride)) subsample when using markers (optional)
| markerfacecolor (True|None|str) if True same as plotCol, if None empty, otherwise str is colour (optional)
| markeredgecolor (True|None|str) if True same as plotCol, if None empty, otherwise str is colour (optional)
| zorders ([int]) list of zorder for drawing sequence, highest is last (optional)
| clip_on (bool) clips objects to drawing axes (optional)
| axesequal (bool) force scaling on x and y axes to be equal (optional)
| xAxisFmt (string) x-axis format string, e.g., '%.2f', default None (optional)
| yAxisFmt (string) y-axis format string, e.g., '%.2e', default None (optional)
| PLcolor (string): graph color scheme. Format 'rgb(r,g,b)'
| PLwidth (float): graph line width (Plotly)
| PLdash (string): line style (Plotly)
| PLyAxisSide (string): Sets the location of the y-axis (left/right)
| PLyAxisOverlaying (string): Sets the overlaying
| PLmultipleYAxis (bool): Indicates presence of multiple axis
| PLmultiAxisTitle (string): Sets the title of the multiple axis
Returns:
| the axis object for the plot
Raises:
| No exception is raised.
"""
# Plotly variables initialization
self.PLcolor=PLcolor
self.PLwidth=PLwidth
self.PLdash=PLdash
self.PLmultipleYAxis=PLmultipleYAxis
self.PLmultiAxisTitle=PLmultiAxisTitle
self.PLyAxisSide=PLyAxisSide
self.PLyAxisOverlaying=PLyAxisOverlaying
## see self.MyPlot for parameter details.
pkey = (self.nrow, self.ncol, plotnum)
if pkey not in list(self.subplots.keys()):
self.subplots[pkey] = \
self.fig.add_subplot(self.nrow,self.ncol, plotnum)
ax = self.subplots[pkey]
# self.myPlot(ax.loglog, plotnum, x, y, ptitle, xlabel,ylabel,\
# plotCol, label,legendAlpha, pltaxis, \
# maxNX, maxNY, linestyle, powerLimits,titlefsize,xylabelfsize,
# xytickfsize,labelfsize, drawGrid
# xTicks, xtickRotation,
# markers=markers)
self.myPlot(ax.loglog, plotnum, x, y, ptitle, xlabel, ylabel,
plotCol, linewidths, label,legendAlpha, legendLoc,
pltaxis, maxNX, maxNY, linestyle,
powerLimits,titlefsize,
xylabelfsize, xytickfsize,
labelfsize, drawGrid,
xScientific, yScientific,
yInvert, xInvert, xIsDate,
xTicks, xtickRotation,
markers, markevery, markerfacecolor,markeredgecolor,
zorders, clip_on,axesequal,
xAxisFmt,yAxisFmt)
return ax
############################################################
##
def semilogX(self, plotnum, x, y, ptitle=None, xlabel=None, ylabel=None,
plotCol=[], linewidths=None, label=[],legendAlpha=0.0,
legendLoc='best',
pltaxis=None, maxNX=10, maxNY=10, linestyle=None,
powerLimits = [-4, 2, -4, 2], titlefsize = 12,
xylabelfsize = 12, xytickfsize = 10,labelfsize=10,
xScientific=False, yScientific=False,
yInvert=False, xInvert=False, drawGrid=True,xIsDate=False,
xTicks=None, xtickRotation=0,
markers=[], markevery=None, markerfacecolor=True,markeredgecolor=True,
zorders=None, clip_on=True, axesequal=False,
xAxisFmt=None, yAxisFmt=None,
PLcolor=None,
PLwidth=None, PLdash=None, PLyAxisSide=None, PLyAxisOverlaying=None,
PLmultipleYAxis=False, PLmultiAxisTitle=None):
"""Plot data on logarithmic scales for abscissa and linear scale for ordinates.
Given an existing figure, this function plots in a specified subplot position.
The function arguments are described below in some detail. Note that the y-values
or ordinates can be more than one column, each column representing a different
line in the plot. This is convenient if large arrays of data must be plotted. If more
than one column is present, the label argument can contain the legend labels for
each of the columns/lines. The pltaxis argument defines the min/max scale values
for the x and y axes.
Args:
| plotnum (int): subplot number, 1-based index
| x (np.array[N,] or [N,1]): abscissa
| y (np.array[N,] or [N,M]): ordinates - could be M columns
| ptitle (string): plot title (optional)
| xlabel (string): x-axis label (optional)
| ylabel (string): y-axis label (optional)
| plotCol ([strings]): plot colour and line style, list with M entries, use default if [] (optional)
| linewidths ([float]): plot line width in points, list with M entries, use default if None (optional)
| label ([strings]): legend label for ordinate, list with M entries (optional)
| legendAlpha (float): transparency for legend box (optional)
| legendLoc (string): location for legend box (optional)
| pltaxis ([xmin, xmax, ymin,ymax]): scale for x,y axes. Let Matplotlib decide if None. (optional)
| maxNX (int): draw maxNX+1 tick labels on x axis (optional)
| maxNY (int): draw maxNY+1 tick labels on y axis (optional)
| linestyle (string): linestyle for this plot (optional)
| powerLimits ([float]): scientific tick label power limits [x-low, x-high, y-low, y-high] (optional)
| titlefsize (int): title font size, default 12pt (optional)
| xylabelfsize (int): x-axis, y-axis label font size, default 12pt (optional)
| xytickfsize (int): x-axis, y-axis tick font size, default 10pt (optional)
| labelfsize (int): label/legend font size, default 10pt (optional)
| xScientific (bool): use scientific notation on x axis (optional)
| yScientific (bool): use scientific notation on y axis (optional)
| drawGrid (bool): draw the grid on the plot (optional)
| yInvert (bool): invert the y-axis (optional)
| xInvert (bool): invert the x-axis (optional)
| xIsDate (bool): convert the datetime x-values to dates (optional)
| xTicks ({tick:label}): dict of x-axis tick locations and associated labels (optional)
| xtickRotation (float) x-axis tick label rotation angle (optional)
| markers ([string]) markers to be used for plotting data points (optional)
| markevery (int | (startind, stride)) subsample when using markers (optional)
| markerfacecolor (True|None|str) if True same as plotCol, if None empty, otherwise str is colour (optional)
| markeredgecolor (True|None|str) if True same as plotCol, if None empty, otherwise str is colour (optional)
| zorders ([int]) list of zorder for drawing sequence, highest is last (optional)
| clip_on (bool) clips objects to drawing axes (optional)
| axesequal (bool) force scaling on x and y axes to be equal (optional)
| xAxisFmt (string) x-axis format string, e.g., '%.2f', default None (optional)
| yAxisFmt (string) y-axis format string, e.g., '%.2e', default None (optional)
| PLcolor (string): graph color scheme. Format 'rgb(r,g,b)'
| PLwidth (float): graph line width (Plotly)
| PLdash (string): line style (Plotly)
| PLyAxisSide (string): Sets the location of the y-axis (left/right)
| PLyAxisOverlaying (string): Sets the overlaying
| PLmultipleYAxis (bool): Indicates presence of multiple axis
| PLmultiAxisTitle (string): Sets the title of the multiple axis
Returns:
| the axis object for the plot
Raises:
| No exception is raised.
"""
#Plotly variables initialization
self.PLcolor=PLcolor
self.PLwidth=PLwidth
self.PLdash=PLdash
self.PLmultipleYAxis=PLmultipleYAxis
self.PLmultiAxisTitle=PLmultiAxisTitle
self.PLyAxisSide=PLyAxisSide
self.PLyAxisOverlaying=PLyAxisOverlaying
## see self.MyPlot for parameter details.
pkey = (self.nrow, self.ncol, plotnum)
if pkey not in list(self.subplots.keys()):
self.subplots[pkey] = \
self.fig.add_subplot(self.nrow,self.ncol, plotnum)
ax = self.subplots[pkey]
self.myPlot(ax.semilogx, plotnum, x, y, ptitle, xlabel, ylabel,\
plotCol, linewidths, label,legendAlpha, legendLoc,
pltaxis, maxNX, maxNY, linestyle,
powerLimits, titlefsize,
xylabelfsize, xytickfsize,
labelfsize, drawGrid,
xScientific, yScientific,
yInvert, xInvert, xIsDate,
xTicks, xtickRotation,
markers, markevery, markerfacecolor,markeredgecolor,
zorders, clip_on,axesequal,
xAxisFmt,yAxisFmt)
return ax
############################################################
##
def semilogY(self, plotnum, x, y, ptitle=None, xlabel=None, ylabel=None,
plotCol=[], linewidths=None, label=[],legendAlpha=0.0,
legendLoc='best',
pltaxis=None, maxNX=10, maxNY=10, linestyle=None,
powerLimits = [-4, 2, -4, 2], titlefsize = 12,
xylabelfsize = 12, xytickfsize = 10, labelfsize=10,
xScientific=False, yScientific=False,
yInvert=False, xInvert=False, drawGrid=True,xIsDate=False,
xTicks=None, xtickRotation=0,
markers=[], markevery=None, markerfacecolor=True,markeredgecolor=True,
zorders=None, clip_on=True,axesequal=False,
xAxisFmt=None, yAxisFmt=None,
PLcolor=None,
PLwidth=None, PLdash=None, PLyAxisSide=None, PLyAxisOverlaying=None,
PLmultipleYAxis=False, PLmultiAxisTitle=None):
"""Plot data on linear scales for abscissa and logarithmic scale for ordinates.
Given an existing figure, this function plots in a specified subplot position.
The function arguments are described below in some detail. Note that the y-values
or ordinates can be more than one column, each column representing a different
line in the plot. This is convenient if large arrays of data must be plotted. If more
than one column is present, the label argument can contain the legend labels for
each of the columns/lines. The pltaxis argument defines the min/max scale values
for the x and y axes.
Args:
| plotnum (int): subplot number, 1-based index
| x (np.array[N,] or [N,1]): abscissa
| y (np.array[N,] or [N,M]): ordinates - could be M columns
| ptitle (string): plot title (optional)
| xlabel (string): x-axis label (optional)
| ylabel (string): y-axis label (optional)
| plotCol ([strings]): plot colour and line style, list with M entries, use default if [] (optional)
| linewidths ([float]): plot line width in points, list with M entries, use default if None (optional)
| label ([strings]): legend label for ordinate, list with M entries (optional)
| legendAlpha (float): transparency for legend box (optional)
| legendLoc (string): location for legend box (optional)
| pltaxis ([xmin, xmax, ymin,ymax]): scale for x,y axes. Let Matplotlib decide if None. (optional)
| maxNX (int): draw maxNX+1 tick labels on x axis (optional)
| maxNY (int): draw maxNY+1 tick labels on y axis (optional)
| linestyle (string): linestyle for this plot (optional)
| powerLimits ([float]): scientific tick label power limits [x-low, x-high, y-low, y-high] (optional)
| titlefsize (int): title font size, default 12pt (optional)
| xylabelfsize (int): x-axis, y-axis label font size, default 12pt (optional)
| xytickfsize (int): x-axis, y-axis tick font size, default 10pt (optional)
| labelfsize (int): label/legend font size, default 10pt (optional)
| xScientific (bool): use scientific notation on x axis (optional)
| yScientific (bool): use scientific notation on y axis (optional)
| drawGrid (bool): draw the grid on the plot (optional)
| yInvert (bool): invert the y-axis (optional)
| xInvert (bool): invert the x-axis (optional)
| xIsDate (bool): convert the datetime x-values to dates (optional)
| xTicks ({tick:label}): dict of x-axis tick locations and associated labels (optional)
| xtickRotation (float) x-axis tick label rotation angle (optional)
| markers ([string]) markers to be used for plotting data points (optional)
| markevery (int | (startind, stride)) subsample when using markers (optional)
| markerfacecolor (True|None|str) if True same as plotCol, if None empty, otherwise str is colour (optional)
| markeredgecolor (True|None|str) if True same as plotCol, if None empty, otherwise str is colour (optional)
| zorders ([int]) list of zorder for drawing sequence, highest is last (optional)
| clip_on (bool) clips objects to drawing axes (optional)
| axesequal (bool) force scaling on x and y axes to be equal (optional)
| xAxisFmt (string) x-axis format string, e.g., '%.2f', default None (optional)
| yAxisFmt (string) y-axis format string, e.g., '%.2e', default None (optional)
| PLcolor (string): graph color scheme. Format 'rgb(r,g,b)'
| PLwidth (float): graph line width (Plotly)
| PLdash (string): line style (Plotly)
| PLyAxisSide (string): Sets the location of the y-axis (left/right)
| PLyAxisOverlaying (string): Sets the overlaying
| PLmultipleYAxis (bool): Indicates presence of multiple axis
| PLmultiAxisTitle (string): Sets the title of the multiple axis
Returns:
| the axis object for the plot
Raises:
| No exception is raised.
"""
#Plotly variables initialization
self.PLcolor=PLcolor
self.PLwidth=PLwidth
self.PLdash=PLdash
self.PLmultipleYAxis=PLmultipleYAxis
self.PLmultiAxisTitle=PLmultiAxisTitle
self.PLyAxisSide=PLyAxisSide
self.PLyAxisOverlaying=PLyAxisOverlaying
## see self.MyPlot for parameter details.
pkey = (self.nrow, self.ncol, plotnum)
if pkey not in list(self.subplots.keys()):
self.subplots[pkey] = \
self.fig.add_subplot(self.nrow,self.ncol, plotnum)
ax = self.subplots[pkey]
self.myPlot(ax.semilogy, plotnum, x, y, ptitle,xlabel,ylabel,
plotCol, linewidths, label,legendAlpha, legendLoc,
pltaxis, maxNX, maxNY, linestyle,
powerLimits, titlefsize,
xylabelfsize, xytickfsize,
labelfsize, drawGrid,
xScientific, yScientific,
yInvert, xInvert, xIsDate,
xTicks, xtickRotation,
markers, markevery, markerfacecolor,markeredgecolor,
zorders, clip_on,
axesequal,xAxisFmt,yAxisFmt)
return ax
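# Usage sketch for semilogY(): linear abscissa, logarithmic ordinate
# (synthetic exponential data, illustrative only):
#   xv = np.linspace(0, 10, 50)
#   p = Plotter(1, 1, 1)
#   p.semilogY(1, xv, np.exp(xv), 'exponential growth', 'x', 'exp(x)')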
############################################################
##
def stackplot(self, plotnum, x, y, ptitle=None, xlabel=None, ylabel=None,
plotCol=[], linewidths=None, label=[],legendAlpha=0.0,
legendLoc='best',
pltaxis=None, maxNX=10, maxNY=10, linestyle=None,
powerLimits = [-4, 2, -4, 2], titlefsize = 12,
xylabelfsize = 12, xytickfsize = 10, labelfsize=10,
xScientific=False, yScientific=False,
yInvert=False, xInvert=False, drawGrid=True,xIsDate=False,
xTicks=None, xtickRotation=0,
markers=[], markevery=None, markerfacecolor=True,markeredgecolor=True,
zorders=None, clip_on=True, axesequal=False,
xAxisFmt=None, yAxisFmt=None,
PLcolor=None,
PLwidth=None, PLdash=None, PLyAxisSide=None, PLyAxisOverlaying=None,
PLmultipleYAxis=False, PLmultiAxisTitle=None):
"""Plot stacked data on linear scales for abscissa and ordinates.
Given an existing figure, this function plots in a specified subplot position.
The function arguments are described below in some detail. Note that the y-values
or ordinates can be more than one column, each column representing a different
line in the plot. If more
than one column is present, the label argument can contain the legend labels for
each of the columns/lines. The pltaxis argument defines the min/max scale values
for the x and y axes.
Args:
| plotnum (int): subplot number, 1-based index
| x (np.array[N,] or [N,1]): abscissa
| y (np.array[N,] or [N,M]): ordinates - could be M columns
| ptitle (string): plot title (optional)
| xlabel (string): x-axis label (optional)
| ylabel (string): y-axis label (optional)
| plotCol ([strings]): plot colour and line style, list with M entries, use default if [] (optional)
| linewidths ([float]): plot line width in points, list with M entries, use default if None (optional)
| label ([strings]): legend label for ordinate, list with M entries (optional)
| legendAlpha (float): transparency for legend box (optional)
| legendLoc (string): location for legend box (optional)
| pltaxis ([xmin, xmax, ymin,ymax]): scale for x,y axes. Let Matplotlib decide if None. (optional)
| maxNX (int): draw maxNX+1 tick labels on x axis (optional)
| maxNY (int): draw maxNY+1 tick labels on y axis (optional)
| linestyle (string): linestyle for this plot (optional)
| powerLimits ([float]): scientific tick label power limits [x-low, x-high, y-low, y-high] (optional)
| titlefsize (int): title font size, default 12pt (optional)
| xylabelfsize (int): x-axis, y-axis label font size, default 12pt (optional)
| xytickfsize (int): x-axis, y-axis tick font size, default 10pt (optional)
| labelfsize (int): label/legend font size, default 10pt (optional)
| xScientific (bool): use scientific notation on x axis (optional)
| yScientific (bool): use scientific notation on y axis (optional)
| drawGrid (bool): draw the grid on the plot (optional)
| yInvert (bool): invert the y-axis (optional)
| xInvert (bool): invert the x-axis (optional)
| xIsDate (bool): convert the datetime x-values to dates (optional)
| xTicks ({tick:label}): dict of x-axis tick locations and associated labels (optional)
| xtickRotation (float) x-axis tick label rotation angle (optional)
| markers ([string]) markers to be used for plotting data points (optional)
| markevery (int | (startind, stride)) subsample when using markers (optional)
| markerfacecolor (True|None|str) if True same as plotCol, if None empty, otherwise str is colour (optional)
| markeredgecolor (True|None|str) if True same as plotCol, if None empty, otherwise str is colour (optional)
| zorders ([int]) list of zorder for drawing sequence, highest is last (optional)
| clip_on (bool) clips objects to drawing axes (optional)
| axesequal (bool) force scaling on x and y axes to be equal (optional)
| xAxisFmt (string) x-axis format string, e.g., '%.2f', default None (optional)
| yAxisFmt (string) y-axis format string, e.g., '%.2e', default None (optional)
| PLcolor (string): graph color scheme. Format 'rgb(r,g,b)'
| PLwidth (float): graph line width (Plotly)
| PLdash (string): line style (Plotly)
| PLyAxisSide (string): Sets the location of the y-axis (left/right)
| PLyAxisOverlaying (string): Sets the overlaying
| PLmultipleYAxis (bool): Indicates presence of multiple axis
| PLmultiAxisTitle (string): Sets the title of the multiple axis
Returns:
| the axis object for the plot
Raises:
| No exception is raised.
"""
#Plotly variables initialization
self.PLcolor=PLcolor
self.PLwidth=PLwidth
self.PLdash=PLdash
self.PLmultipleYAxis=PLmultipleYAxis
self.PLmultiAxisTitle=PLmultiAxisTitle
self.PLyAxisSide=PLyAxisSide
self.PLyAxisOverlaying=PLyAxisOverlaying
## see self.MyPlot for parameter details.
pkey = (self.nrow, self.ncol, plotnum)
if pkey not in list(self.subplots.keys()):
self.subplots[pkey] = \
self.fig.add_subplot(self.nrow,self.ncol, plotnum)
ax = self.subplots[pkey]
self.myPlot(ax.stackplot, plotnum, x, y.T, ptitle,xlabel,ylabel,
plotCol, linewidths, label,legendAlpha, legendLoc,
pltaxis, maxNX, maxNY, linestyle,
powerLimits, titlefsize,
xylabelfsize, xytickfsize,
labelfsize, drawGrid,
xScientific, yScientific,
yInvert, xInvert, xIsDate,
xTicks, xtickRotation,
markers, markevery, markerfacecolor,markeredgecolor,
zorders, clip_on,
axesequal,xAxisFmt,yAxisFmt)
return ax
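# Usage sketch for stackplot(): the M columns of y are stacked on top of each
# other (synthetic data, illustrative only):
#   xv = np.linspace(0, 10, 50)
#   yv = np.vstack((np.ones(50), 2 * np.ones(50), 3 * np.ones(50))).T
#   p = Plotter(1, 1, 1)
#   p.stackplot(1, xv, yv, label=['a', 'b', 'c'])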
############################################################
##
def myPlot(self, plotcommand,plotnum, x, y, ptitle=None,xlabel=None, ylabel=None,
plotCol=[], linewidths=None, label=[], legendAlpha=0.0,
legendLoc='best',
pltaxis=None, maxNX=0, maxNY=0, linestyle=None,
powerLimits = [-4, 2, -4, 2], titlefsize = 12,
xylabelfsize = 12, xytickfsize = 10,
labelfsize=10, drawGrid=True,
xScientific=False, yScientific=False,
yInvert=False, xInvert=False, xIsDate=False,
xTicks=None, xtickRotation=0,
markers=[], markevery=None,
markerfacecolor=True,markeredgecolor=True,
zorders=None,clip_on=True,axesequal=False,
xAxisFmt=None,yAxisFmt=None,
PLyStatic=[0]
):
"""Low level helper function to create a subplot and plot the data as required.
This function does the actual plotting, labelling etc. It uses the plotting
function provided by its user functions.
lineStyles = {
'': '_draw_nothing',
' ': '_draw_nothing',
'None': '_draw_nothing',
'--': '_draw_dashed',
'-.': '_draw_dash_dot',
'-': '_draw_solid',
':': '_draw_dotted'}
Args:
| plotcommand: name of a MatplotLib plotting function
| plotnum (int): subplot number, 1-based index
| ptitle (string): plot title
| xlabel (string): x axis label
| ylabel (string): y axis label
| x (np.array[N,] or [N,1]): abscissa
| y (np.array[N,] or [N,M]): ordinates - could be M columns
| plotCol ([strings]): plot colour and line style, list with M entries, use default if []
| linewidths ([float]): plot line width in points, list with M entries, use default if None (optional)
| label ([strings]): legend label for ordinate, list with M entries
| legendAlpha (float): transparency for legend box
| legendLoc (string): location for legend box (optional)
| pltaxis ([xmin, xmax, ymin,ymax]): scale for x,y axes. Let Matplotlib decide if None.
| maxNX (int): draw maxNX+1 tick labels on x axis
| maxNY (int): draw maxNY+1 tick labels on y axis
| linestyle (string): linestyle for this plot (optional)
| powerLimits[float]: scientific tick label power limits [x-low, x-high, y-low, y-high] (optional)
| titlefsize (int): title font size, default 12pt (optional)
| xylabelfsize (int): x-axis, y-axis label font size, default 12pt (optional)
| xytickfsize (int): x-axis, y-axis tick font size, default 10pt (optional)
| labelfsize (int): label/legend font size, default 10pt (optional)
| drawGrid (bool): draw the grid on the plot (optional)
| xScientific (bool): use scientific notation on x axis (optional)
| yScientific (bool): use scientific notation on y axis (optional)
| yInvert (bool): invert the y-axis (optional)
| xInvert (bool): invert the x-axis (optional)
| xIsDate (bool): convert the datetime x-values to dates (optional)
| xTicks ({tick:label}): dict of x-axis tick locations and associated labels (optional)
| xtickRotation (float) x-axis tick label rotation angle (optional)
| markers ([string]) markers to be used for plotting data points (optional)
| markevery (int | (startind, stride)) subsample when using markers (optional)
| markerfacecolor (True|None|str) if True same as plotCol, if None empty, otherwise str is colour (optional)
| markeredgecolor (True|None|str) if True same as plotCol, if None empty, otherwise str is colour (optional)
| zorders ([int]) list of zorder for drawing sequence, highest is last (optional)
| clip_on (bool) clips objects to drawing axes (optional)
| axesequal (bool) force scaling on x and y axes to be equal (optional)
| xAxisFmt (string) x-axis format string, e.g., '%.2f', default None (optional)
| yAxisFmt (string) y-axis format string, e.g., '%.2e', default None (optional)
| PLyStatic ([int]): single-element list used as a running y-offset accumulator for Plotly stacked plots (mutable default, updated in place)
Returns:
| the axis object for the plot
Raises:
| No exception is raised.
"""
#Initialize plotlyPlot call when Plotly is activated
if self.useplotly:
self.PlotlyPlotCalls = self.PlotlyPlotCalls + 1
if x.ndim>1:
xx=x
else:
if type(x)==type(pd.Series()):
x = x.values
xx=x.reshape(-1, 1)
if y.ndim>1:
yy=y
else:
if type(y)==type(pd.Series()):
y = y.values
yy=y.reshape(-1, 1)
# plotCol = self.buildPlotCol(plotCol, yy.shape[1])
pkey = (self.nrow, self.ncol, plotnum)
ax = self.subplots[pkey]
if drawGrid:
ax.grid(True)
else:
ax.grid(False)
# use scientific format on axes
#yfm = sbp.yaxis.get_major_formatter()
#yfm.set_powerlimits([ -3, 3])
if xlabel is not None:
ax.set_xlabel(xlabel, fontsize=xylabelfsize)
if ylabel is not None:
ax.set_ylabel(ylabel, fontsize=xylabelfsize)
if xIsDate:
ax.xaxis_date()
ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
ax.xaxis.set_major_locator(mdates.DayLocator())
if maxNX >0:
ax.xaxis.set_major_locator(mpl.ticker.MaxNLocator(maxNX))
if maxNY >0:
ax.yaxis.set_major_locator(mpl.ticker.MaxNLocator(maxNY))
if xScientific:
# formx = plt.FormatStrFormatter('%.3e')
formx = plt.ScalarFormatter()
formx.set_powerlimits([powerLimits[0], powerLimits[1]])
formx.set_scientific(True)
ax.xaxis.set_major_formatter(formx)
# http://matplotlib.1069221.n5.nabble.com/ScalarFormatter-td28042.html
# http://matplotlib.org/api/ticker_api.html
# http://matplotlib.org/examples/pylab_examples/newscalarformatter_demo.html
# ax.xaxis.set_major_formatter( plt.FormatStrFormatter('%d'))
# http://matplotlib.org/1.3.1/api/axes_api.html#matplotlib.axes.Axes.ticklabel_format
# plt.ticklabel_format(style='sci', axis='x',
# scilimits=(powerLimits[0], powerLimits[1]))
if yScientific:
formy = plt.ScalarFormatter()
formy.set_powerlimits([powerLimits[2], powerLimits[3]])
formy.set_scientific(True)
ax.yaxis.set_major_formatter(formy)
# this user-defined format setting is given at the end of the function.
# # override the format with user defined
# if xAxisFmt is not None:
# ax.xaxis.set_major_formatter(FormatStrFormatter(xAxisFmt))
# if yAxisFmt is not None:
# ax.yaxis.set_major_formatter(FormatStrFormatter(yAxisFmt))
###############################stacked plot #######################
if plotcommand==ax.stackplot:
if not self.useplotly:
if not plotCol:
plotCol = [self.nextPlotCol() for col in range(0,yy.shape[0])]
ax.stackplot(xx.reshape(-1), yy, colors=plotCol)
ax.margins(0, 0) # Set margins to avoid "whitespace"
# creating the legend manually
ax.legend([mpl.patches.Patch(color=col) for col in plotCol], label,
loc=legendLoc, framealpha=legendAlpha)
else: #Plotly stacked plot
#Plotly stacked plot variables
PLXAxis = 0
PLYAxis = 0
for i in range(yy.shape[0]):
PLXAxis = dict(type='category',)
PLYAxis = dict(type='linear')
try:
if len(y[0,:]) > 1:
self.Plotlydata.append(Scatter(x=x, y=y[i,:]+PLyStatic[0],mode='lines', name = label,fill='tonexty',line = dict(color = self.PLcolor, width = self.PLwidth, dash = self.PLdash)))
PLyStatic[0] += y[i,:]
elif len(x[0,:]) > 1:
self.Plotlydata.append(Scatter(x=x[:,i], y=y,mode='lines', name = label,fill='tonexty',line = dict(color = self.PLcolor, width = self.PLwidth, dash = self.PLdash)))
except:
self.Plotlydata.append(Scatter(x=x, y=y, name = label,fill='tonexty',line = dict(color = self.PLcolor, width = self.PLwidth, dash = self.PLdash)))
###############################line plot #######################
else: # not a stacked plot
for i in range(yy.shape[1]):
#set up the line style, either given or next in sequence
mmrk = ''
if markers:
if i >= len(markers):
mmrk = markers[-1]
else:
mmrk = markers[i]
if plotCol:
if i >= len(plotCol):
col = plotCol[-1]
else:
col = plotCol[i]
else:
col = self.nextPlotCol()
if markerfacecolor==True:
markerfacecolor = col
elif markerfacecolor is None:
markerfacecolor='none'
else:
pass # keep as is
if markeredgecolor==True:
markeredgecolor = col
elif markeredgecolor is None:
markeredgecolor='none'
else:
pass # keep as is
if linestyle is None:
linestyleL = '-'
else:
if type(linestyle) == type([1]):
linestyleL = linestyle[i]
else:
linestyleL = linestyle
if zorders:
if len(zorders) > 1:
zorder = zorders[i]
else:
zorder = zorders[0]
else:
zorder = 2
if not self.useplotly:
if not label:
if linewidths is not None:
plotcommand(xx, yy[:, i], col, label=None, linestyle=linestyleL,
markerfacecolor=markerfacecolor,markeredgecolor=markeredgecolor,
marker=mmrk, markevery=markevery, linewidth=linewidths[i],
clip_on=clip_on, zorder=zorder)
else:
plotcommand(xx, yy[:, i], col, label=None, linestyle=linestyleL,
markerfacecolor=markerfacecolor,markeredgecolor=markeredgecolor,
marker=mmrk, markevery=markevery,
clip_on=clip_on, zorder=zorder)
else:
if linewidths is not None:
# print('***************',linewidths)
line, = plotcommand(xx,yy[:,i],col,#label=label[i],
linestyle=linestyleL,
markerfacecolor=markerfacecolor,markeredgecolor=markeredgecolor,
marker=mmrk, markevery=markevery, linewidth=linewidths[i],
clip_on=clip_on, zorder=zorder)
else:
line, = plotcommand(xx,yy[:,i],col,#label=label[i],
linestyle=linestyleL,
markerfacecolor=markerfacecolor,markeredgecolor=markeredgecolor,
marker=mmrk, markevery=markevery,
clip_on=clip_on, zorder=zorder)
line.set_label(label[i])
leg = ax.legend( loc=legendLoc, fancybox=True,fontsize=labelfsize)
leg.get_frame().set_alpha(legendAlpha)
# ax.legend()
self.bbox_extra_artists.append(leg)
else:#Plotly plots
if 'loglog' in str(plotcommand):
PLXAxis = dict(type='log',showgrid=drawGrid,zeroline=False,nticks=20,showline=True,title=xlabel,mirror='all')
PLYAxis = dict(type='log',showgrid=drawGrid,zeroline=False,nticks=20,showline=True,title=ylabel,mirror='all')
# Assuming that either y or x has only one column
try:
if len(x[0,:]) > 1:
self.Plotlydata.append(Scatter(x=x[:,i], y=y, name = label,line = dict(color = self.PLcolor, width = self.PLwidth, dash = self.PLdash)))
elif len(y[0,:]) > 1:
self.Plotlydata.append(Scatter(x=x[:,0], y=y[:,i], name = label,line = dict(color = self.PLcolor, width = self.PLwidth, dash = self.PLdash)))
except:
self.Plotlydata.append(Scatter(x=x, y=y, name = label,line = dict(color = self.PLcolor, width = self.PLwidth, dash = self.PLdash)))
# Append axis and plot titles
if self.ncol > 1:
self.PlotlySubPlotNumbers.append(plotnum)
self.PlotlyXaxisTitles.append(xlabel)
self.PlotlyYaxisTitles.append(ylabel)
elif self.nrow > 1 :
self.PlotlySubPlotNumbers.append(plotnum)
self.PlotlyXaxisTitles.append(xlabel)
self.PlotlyYaxisTitles.append(ylabel)
elif 'semilogx' in str(plotcommand):
PLXAxis = dict(type='log',showgrid=drawGrid,zeroline=False,nticks=20,showline=True,title=xlabel,mirror='all')
PLYAxis = dict(showgrid=drawGrid,zeroline=False,nticks=20,showline=True,title=ylabel,mirror='all')
# Assuming that either y or x has only one column
try:
if len(x[0,:]) > 1:
self.Plotlydata.append(Scatter(x=x[:,i], y=y, name = label,line = dict(color = self.PLcolor, width = self.PLwidth, dash = self.PLdash)))
elif len(y[0,:]) > 1:
self.Plotlydata.append(Scatter(x=x[:,0], y=y[:,i], name = label,line = dict(color = self.PLcolor, width = self.PLwidth, dash = self.PLdash)))
except:
self.Plotlydata.append(Scatter(x=x, y=y, name = label,line = dict(color = self.PLcolor, width = self.PLwidth, dash = self.PLdash)))
# Append axis and plot titles
if self.ncol > 1:
self.PlotlySubPlotNumbers.append(plotnum)
self.PlotlyXaxisTitles.append(xlabel)
self.PlotlyYaxisTitles.append(ylabel)
elif self.nrow > 1 :
self.PlotlySubPlotNumbers.append(plotnum)
self.PlotlyXaxisTitles.append(xlabel)
self.PlotlyYaxisTitles.append(ylabel)
elif 'semilogy' in str(plotcommand):
PLXAxis = dict(showgrid=drawGrid,zeroline=False,nticks=20,showline=True,title=xlabel,mirror='all')
PLYAxis = dict(type='log',showgrid=drawGrid,zeroline=False,nticks=20,showline=True,title=ylabel,mirror='all')
# Assuming that either y or x has only one column
try:
if len(x[0,:]) > 1:
self.Plotlydata.append(Scatter(x=x[:,i], y=y, name = label,line = dict(color = self.PLcolor, width = self.PLwidth, dash = self.PLdash)))
elif len(y[0,:]) > 1:
self.Plotlydata.append(Scatter(x=x[:,0], y=y[:,i], name = label,line = dict(color = self.PLcolor, width = self.PLwidth, dash = self.PLdash)))
except:
self.Plotlydata.append(Scatter(x=x, y=y, name = label,line = dict(color = self.PLcolor, width = self.PLwidth, dash = self.PLdash)))
# Append axis and plot titles
if self.ncol > 1:
self.PlotlySubPlotNumbers.append(plotnum)
self.PlotlyXaxisTitles.append(xlabel)
self.PlotlyYaxisTitles.append(ylabel)
elif self.nrow > 1 :
self.PlotlySubPlotNumbers.append(plotnum)
self.PlotlyXaxisTitles.append(xlabel)
self.PlotlyYaxisTitles.append(ylabel)
else:
PLXAxis = dict(showgrid=drawGrid,zeroline=False,nticks=20,showline=True,title=xlabel,mirror='all')
PLYAxis = dict(showgrid=drawGrid,zeroline=False,nticks=20,showline=True,title=ylabel,mirror='all')
# Assuming that either y or x has only one column
try:
if len(x[0,:]) > 1:
self.Plotlydata.append(Scatter(x=x[:,i], y=y, name = label,xaxis='x1',
line = dict(color = self.PLcolor, width = self.PLwidth, dash = self.PLdash)))
elif len(y[0,:]) > 1:
self.Plotlydata.append(Scatter(x=x[:,0], y=y[:,i], name = label,xaxis='x1',
line = dict(color = self.PLcolor, width = self.PLwidth, dash = self.PLdash)))
except:
self.Plotlydata.append(Scatter(x=x, y=y, name = label,xaxis='x1',
line = dict(color = self.PLcolor, width = self.PLwidth, dash = self.PLdash)))
# Append axis and plot titles
if self.ncol > 1:
self.PlotlySubPlotNumbers.append(plotnum)
self.PlotlyXaxisTitles.append(xlabel)
self.PlotlyYaxisTitles.append(ylabel)
elif self.nrow > 1 :
self.PlotlySubPlotNumbers.append(plotnum)
self.PlotlyXaxisTitles.append(xlabel)
self.PlotlyYaxisTitles.append(ylabel)
#Plotly plots setup
if self.useplotly:
if self.PLmultipleYAxis:
self.Plotlylayout.append(Layout(showlegend = True,title = ptitle,xaxis = PLXAxis,yaxis=PLYAxis,yaxis2=dict(title=self.PLmultiAxisTitle,side=self.PLyAxisSide,overlaying=self.PLyAxisOverlaying)))
elif self.PLmultipleXAxis:
self.Plotlylayout.append(Layout(showlegend = True,title = ptitle,yaxis=PLYAxis,xaxis = PLXAxis,xaxis2=dict(title=self.PLmultiAxisTitle,side=self.PLxAxisSide,overlaying=self.PLxAxisOverlaying)))
else:
self.Plotlylayout.append(Layout(showlegend = True,title = ptitle,xaxis = PLXAxis,yaxis=PLYAxis))
if self.ncol > 1:
self.PlotlySubPlotTitles.append(ptitle)
self.PlotlySubPlotLabels.append(label)
elif self.nrow > 1:
self.PlotlySubPlotTitles.append(ptitle)
self.PlotlySubPlotLabels.append(label)
if xIsDate:
plt.gcf().autofmt_xdate()
#scale the axes
if pltaxis is not None:
# ax.axis(pltaxis)
if not xIsDate:
ax.set_xlim(pltaxis[0],pltaxis[1])
ax.set_ylim(pltaxis[2],pltaxis[3])
if xTicks is not None:
ticks = ax.set_xticks(list(xTicks.keys()))
ax.set_xticklabels([xTicks[key] for key in xTicks],
rotation=xtickRotation, fontsize=xytickfsize)
if xTicks is None and xtickRotation is not None:
ticks = ax.get_xticks()
if xIsDate:
from datetime import date
ticks = [date.fromordinal(int(tick)).strftime('%Y-%m-%d') for tick in ticks]
ax.set_xticks(ticks) # this is workaround for bug in matplotlib
ax.set_xticklabels(ticks,
rotation=xtickRotation, fontsize=xytickfsize)
if(ptitle is not None):
ax.set_title(ptitle, fontsize=titlefsize)
# minor ticks are two points smaller than major
ax.tick_params(axis='both', which='major', labelsize=xytickfsize)
ax.tick_params(axis='both', which='minor', labelsize=xytickfsize-2)
if yInvert:
ax.set_ylim(ax.get_ylim()[::-1])
if xInvert:
ax.set_xlim(ax.get_xlim()[::-1])
if axesequal:
ax.axis('equal')
# override the format with user defined
if xAxisFmt is not None:
ax.xaxis.set_major_formatter(FormatStrFormatter(xAxisFmt))
if yAxisFmt is not None:
ax.yaxis.set_major_formatter(FormatStrFormatter(yAxisFmt))
return ax
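# --- Illustrative usage sketch (not part of the source; constructor and save-method
# names are assumed): this low-level helper is normally reached through a public
# wrapper (e.g. plot/logLog/semilogX) that passes the matching axes method in as
# `plotcommand`.
#   import numpy as np
#   x = np.linspace(0, 10, 200)
#   y = np.column_stack((np.sin(x), np.cos(x)))   # two ordinate columns
#   p = Plotter(1, 1, 1, figsize=(8, 4))          # assumed constructor signature
#   p.plot(1, x, y, ptitle='Demo', xlabel='x', ylabel='y', label=['sin', 'cos'])
#   p.saveFig('demo.png')                         # assumed save method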
############################################################
#Before this function is called, plot data is accumulated in runtime variables
#At the call of this function the Plotly plots are plotted using the accumulated data.
def plotlyPlot(self,filename=None,image=None,image_filename=None,auto_open=True):
if self.nrow == 1 and self.ncol == 1: #No subplots
fig = Figure(data=self.Plotlydata,layout=self.Plotlylayout[0])
fig['layout'].update(title=str(self.figuretitle))
else:
dataFormatCatch = 0
try:
len(self.Plotlydata[0].y[1,:])
dataFormatCatch = 0
except:
dataFormatCatch = 1
if self.PLIs3D:
specRow = []
specCol = []
for r in range(int(self.nrow)):
specRow.append({'is_3d': True})
for r in range(int(self.ncol)):
specCol.append({'is_3d': True})
fig = tools.make_subplots(rows=int(self.nrow), cols=int(self.ncol), specs=[specRow,specCol])#[[{'is_3d': True}, {'is_3d': True}], [{'is_3d': True}, {'is_3d': True}]])
else:
fig = tools.make_subplots(int(self.nrow), int(self.ncol), subplot_titles=self.PlotlySubPlotTitles)
# make row and column formats
rowFormat = []
colFormat = []
countRows = 1
rowCount = 1
colCount = 1
for tmp in range(int(self.nrow)*int(self.ncol)):
if int(self.nrow) == int(self.ncol):
if countRows == int(self.nrow):
rowFormat.append(rowCount)
rowCount = rowCount + 1
if rowCount > int(self.nrow):
rowCount = 1
countRows = 1
elif countRows < int(self.nrow) :
rowFormat.append(rowCount)
countRows = countRows + 1
if colCount == int(self.ncol):
colFormat.append(colCount)
colCount = 1
elif colCount < int(self.ncol):
colFormat.append(colCount)
colCount = colCount + 1
else:
if rowCount > int(self.nrow):
rowCount = 1
rowFormat.append(rowCount)
rowCount = rowCount + 1
else:
rowFormat.append(rowCount)
rowCount = rowCount + 1
if colCount > int(self.ncol):
colCount = 1
colFormat.append(colCount)
colCount = colCount + 1
else:
colFormat.append(colCount)
colCount = colCount + 1
if dataFormatCatch == 0:
for tmp in range(self.PlotlyPlotCalls):
if self.PLIs3D:
if str(self.PLType) == "plot3d":
# index with the loop variable tmp (i/rIndex/cIndex are not defined in this branch)
fig.append_trace(dict(type=self.Plotlydata[tmp].type,x=self.Plotlydata[tmp].x, y=self.Plotlydata[tmp].y, z=self.Plotlydata[tmp].z,name=self.Plotlydata[tmp].name,mode=self.Plotlydata[tmp].mode), rowFormat[tmp], colFormat[tmp])
elif str(self.PLType) == "mesh3D":
fig.append_trace(dict(type=self.Plotlydata[tmp].type,x=self.Plotlydata[tmp].x, y=self.Plotlydata[tmp].y, z=self.Plotlydata[tmp].z,name=self.Plotlydata[tmp].name,color=self.Plotlydata[tmp].color), rowFormat[tmp], colFormat[tmp])
else:
if str(self.PLType) == "meshContour":
fig.append_trace(dict(type=self.Plotlydata[tmp].type,x=self.Plotlydata[tmp].x, y=self.Plotlydata[tmp].y, z=self.Plotlydata[tmp].z,name=self.Plotlydata[tmp].name,PLcolorscale=self.Plotlydata[tmp].PLcolorscale), rowFormat[tmp], colFormat[tmp])
else:
fig.append_trace(self.Plotlydata[tmp], rowFormat[tmp], colFormat[tmp])
else:
rCntrl = 1
rIndex = 1
cIndex = 1
cCntrl = 1
rStpVal = int(len(self.Plotlydata)/len(rowFormat))
cStpVal = int(len(self.Plotlydata)/len(colFormat))
for i in range(len(self.Plotlydata)):
if rCntrl > rStpVal:
rCntrl = 1
rIndex = rIndex+1
if cCntrl > cStpVal:
cCntrl = 1
cIndex = cIndex+1
if self.PLIs3D:
if str(self.PLType) == "plot3d":
fig.append_trace(dict(type=self.Plotlydata[i].type,x=self.Plotlydata[i].x, y=self.Plotlydata[i].y, z=self.Plotlydata[i].z,name=self.Plotlydata[i].name,mode=self.Plotlydata[i].mode), rowFormat[rIndex-1], colFormat[cIndex-1])
elif str(self.PLType) == "mesh3D":
fig.append_trace(dict(type=self.Plotlydata[i].type,x=self.Plotlydata[i].x, y=self.Plotlydata[i].y, z=self.Plotlydata[i].z,name=self.Plotlydata[i].name,color=self.Plotlydata[i].color), rowFormat[rIndex-1], colFormat[cIndex-1])
else:
if str(self.PLType) == "meshContour":
fig.append_trace(dict(type=self.Plotlydata[i].type,x=self.Plotlydata[i].x, y=self.Plotlydata[i].y, z=self.Plotlydata[i].z,name=self.Plotlydata[i].name,PLcolorscale=self.Plotlydata[i].PLcolorscale), rowFormat[rIndex-1], colFormat[cIndex-1])
else:
if(len(self.Plotlydata) == len(rowFormat)):
fig.append_trace(self.Plotlydata[i], rowFormat[i], colFormat[i])
else:
fig.append_trace(self.Plotlydata[i], rowFormat[self.PlotlySubPlotNumbers[i]-1], colFormat[self.PlotlySubPlotNumbers[i]-1])
cCntrl = cCntrl + 1
else:
if self.PLIs3D:
if str(self.PLType) == "plot3d":
fig.append_trace(dict(type=self.Plotlydata[i].type,x=self.Plotlydata[i].x, y=self.Plotlydata[i].y, z=self.Plotlydata[i].z,name=self.Plotlydata[i].name,mode=self.Plotlydata[i].mode), rowFormat[rIndex-1], colFormat[cIndex-1])
elif str(self.PLType) == "mesh3D":
fig.append_trace(dict(type=self.Plotlydata[i].type,x=self.Plotlydata[i].x, y=self.Plotlydata[i].y, z=self.Plotlydata[i].z,name=self.Plotlydata[i].name,color=self.Plotlydata[i].color), rowFormat[rIndex-1], colFormat[cIndex-1])
else:
if str(self.PLType) == "meshContour":
fig.append_trace(dict(type=self.Plotlydata[i].type,x=self.Plotlydata[i].x, y=self.Plotlydata[i].y, z=self.Plotlydata[i].z,name=self.Plotlydata[i].name,PLcolorscale=self.Plotlydata[i].PLcolorscale), rowFormat[rIndex-1], colFormat[cIndex-1])
else:
fig.append_trace(self.Plotlydata[i], rowFormat[rIndex-1], colFormat[cIndex-1])
rCntrl = rCntrl + 1
elif cCntrl > cStpVal:
cCntrl = 1
cIndex = cIndex+1
if rCntrl > rStpVal:
rCntrl = 1
rIndex = rIndex+1
if self.PLIs3D:
if str(self.PLType) == "plot3d":
fig.append_trace(dict(type=self.Plotlydata[i].type,x=self.Plotlydata[i].x, y=self.Plotlydata[i].y, z=self.Plotlydata[i].z,name=self.Plotlydata[i].name,mode=self.Plotlydata[i].mode), rowFormat[rIndex-1], colFormat[cIndex-1])
elif str(self.PLType) == "mesh3D":
fig.append_trace(dict(type=self.Plotlydata[i].type,x=self.Plotlydata[i].x, y=self.Plotlydata[i].y, z=self.Plotlydata[i].z,name=self.Plotlydata[i].name,color=self.Plotlydata[i].color), rowFormat[rIndex-1], colFormat[cIndex-1])
else:
if str(self.PLType) == "meshContour":
fig.append_trace(dict(type=self.Plotlydata[i].type,x=self.Plotlydata[i].x, y=self.Plotlydata[i].y, z=self.Plotlydata[i].z,name=self.Plotlydata[i].name,PLcolorscale=self.Plotlydata[i].PLcolorscale), rowFormat[rIndex-1], colFormat[cIndex-1])
else:
fig.append_trace(self.Plotlydata[i], rowFormat[rIndex-1], colFormat[cIndex-1])
rCntrl = rCntrl + 1
else:
if self.PLIs3D:
if str(self.PLType) == "plot3d":
fig.append_trace(dict(type=self.Plotlydata[i].type,x=self.Plotlydata[i].x, y=self.Plotlydata[i].y, z=self.Plotlydata[i].z,name=self.Plotlydata[i].name,mode=self.Plotlydata[i].mode), rowFormat[rIndex-1], colFormat[cIndex-1])
elif str(self.PLType) == "mesh3D":
fig.append_trace(dict(type=self.Plotlydata[i].type,x=self.Plotlydata[i].x, y=self.Plotlydata[i].y, z=self.Plotlydata[i].z,name=self.Plotlydata[i].name,color=self.Plotlydata[i].color), rowFormat[rIndex-1], colFormat[cIndex-1])
else:
if str(self.PLType) == "meshContour":
fig.append_trace(dict(type=self.Plotlydata[i].type,x=self.Plotlydata[i].x, y=self.Plotlydata[i].y, z=self.Plotlydata[i].z,name=self.Plotlydata[i].name,PLcolorscale=self.Plotlydata[i].PLcolorscale), rowFormat[rIndex-1], colFormat[cIndex-1])
else:
fig.append_trace(self.Plotlydata[i], rowFormat[rIndex-1], colFormat[cIndex-1])
cCntrl = cCntrl + 1
else:
if self.PLIs3D:
if str(self.PLType) == "plot3d":
fig.append_trace(dict(type=self.Plotlydata[i].type,x=self.Plotlydata[i].x, y=self.Plotlydata[i].y, z=self.Plotlydata[i].z,name=self.Plotlydata[i].name,mode=self.Plotlydata[i].mode), rowFormat[rIndex-1], colFormat[cIndex-1])
elif str(self.PLType) == "mesh3D":
fig.append_trace(dict(type=self.Plotlydata[i].type,x=self.Plotlydata[i].x, y=self.Plotlydata[i].y, z=self.Plotlydata[i].z,name=self.Plotlydata[i].name,color=self.Plotlydata[i].color), rowFormat[rIndex-1], colFormat[cIndex-1])
else:
if str(self.PLType) == "meshContour":
fig.append_trace(dict(type=self.Plotlydata[i].type,x=self.Plotlydata[i].x, y=self.Plotlydata[i].y, z=self.Plotlydata[i].z,name=self.Plotlydata[i].name,PLcolorscale=self.Plotlydata[i].PLcolorscale), rowFormat[rIndex-1], colFormat[cIndex-1])
else:
fig.append_trace(self.Plotlydata[i], rowFormat[rIndex-1], colFormat[cIndex-1])
rCntrl = rCntrl + 1
cCntrl = cCntrl + 1
fig['layout'].update(title=str(self.figuretitle))
for j in range(self.PlotlyPlotCalls):
if j < len(self.PlotlyXaxisTitles):
fig['layout']['xaxis'+str(j+1)].update(title=self.PlotlyXaxisTitles[j],type=self.Plotlylayout[j].xaxis.type)
else:
fig['layout']['xaxis'+str(j+1)].update(type=self.Plotlylayout[j].xaxis.type)
if j < len(self.PlotlyYaxisTitles):
fig['layout']['yaxis'+str(j+1)].update(title=self.PlotlyYaxisTitles[j],type=self.Plotlylayout[j].yaxis.type)
else:
fig['layout']['yaxis'+str(j+1)].update(type=self.Plotlylayout[j].yaxis.type)
if filename:
offline.plot(fig,filename=filename)
elif image:
offline.plot(fig,image_filename=image_filename,image=image,auto_open=auto_open)
else:
offline.plot(fig)
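# --- Illustrative call sequence (assumption): plotlyPlot() is intended to be called
# once, after all the ordinary plot calls have run with self.useplotly enabled, so
# that Plotlydata/Plotlylayout are fully accumulated.
#   p = Plotter(1, 2, 2)                     # assumed constructor, Plotly mode enabled
#   ...                                      # the usual plot()/meshContour() calls
#   p.plotlyPlot(filename='report.html')     # writes an offline Plotly HTML file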
############################################################
##
def emptyPlot(self,plotnum,projection='rectilinear'):
"""Returns a handler to an empty plot.
This function does not do any plotting; the user must add plots using
the standard Matplotlib means.
Args:
| plotnum (int): subplot number, 1-based index
| projection (str): type of axes projection, from
['aitoff', 'hammer', 'lambert', 'mollweide', 'polar', 'rectilinear'].
Returns:
| the axis object for the plot
Raises:
| No exception is raised.
"""
pkey = (self.nrow, self.ncol, plotnum)
if pkey not in list(self.subplots.keys()):
self.subplots[pkey] = \
self.fig.add_subplot(self.nrow,self.ncol, plotnum,projection=projection)
ax = self.subplots[pkey]
return ax
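# --- Illustrative usage sketch (assumption): emptyPlot only creates the axes; the
# caller then draws on it with plain Matplotlib calls.
#   ax = p.emptyPlot(1, projection='polar')  # p is an existing Plotter instance
#   ax.plot(theta, r)                        # standard Matplotlib from here on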
############################################################
##
def meshContour(self, plotnum, xvals, yvals, zvals, levels=10,
ptitle=None, xlabel=None, ylabel=None, shading='flat',
plotCol=[], pltaxis=None, maxNX=0, maxNY=0,
xScientific=False, yScientific=False,
powerLimits=[-4, 2, -4, 2], titlefsize=12,
xylabelfsize=12, xytickfsize=10,
meshCmap=cm.rainbow, cbarshow=False, cbarorientation='vertical',
cbarcustomticks=[], cbarfontsize=12,
drawGrid=False, yInvert=False, xInvert=False,
contourFill=True, contourLine=True, logScale=False,
negativeSolid=False, zeroContourLine=None,
contLabel=False, contFmt='%.2f', contCol='k', contFonSz=8, contLinWid=0.5,
zorders=None, PLcolorscale='' ):
"""XY colour mesh countour plot for (xvals, yvals, zvals) input sets.
The data values must be given on a fixed mesh grid of three-dimensional
$(x,y,z)$ array input sets. The mesh grid is defined in $(x,y)$, while the height
of the mesh is the $z$ value.
Given an existing figure, this function plots in a specified subplot position.
Only one contour plot is drawn at a time. Future contours in the same subplot
will cover any previous contours.
The data set must have three two dimensional arrays, each for x, y, and z.
The data in x, y, and z arrays must have matching data points. The x and y arrays
each define the grid in terms of x and y values, i.e., the x array contains the
x values for the data set, while the y array contains the y values. The z array
contains the z values for the corresponding x and y values in the contour mesh.
Z-values can be plotted on a log scale, in which case the colourbar is adjusted
to show true values, but on the nonlinear scale.
The current version only saves png files, since there appears to be a problem
saving eps files.
The xvals and yvals vectors may have non-constant grid-intervals, i.e., they do not
have to be on regular intervals.
Args:
| plotnum (int): subplot number, 1-based index
| xvals (np.array[N,M]): array of x values
| yvals (np.array[N,M]): array of y values
| zvals (np.array[N,M]): values on a (x,y) grid
| levels (int or [float]): number of contour levels or a list of levels (optional)
| ptitle (string): plot title (optional)
| xlabel (string): x axis label (optional)
| ylabel (string): y axis label (optional)
| shading (string): not used currently (optional)
| plotCol ([strings]): plot colour and line style, list with M entries, use default if [] (optional)
| pltaxis ([xmin, xmax, ymin,ymax]): scale for x,y axes. Let Matplotlib decide if None. (optional)
| maxNX (int): draw maxNX+1 tick labels on x axis (optional)
| maxNY (int): draw maxNY+1 tick labels on y axis (optional)
| xScientific (bool): use scientific notation on x axis (optional)
| yScientific (bool): use scientific notation on y axis (optional)
| powerLimits[float]: scientific tick label power limits [x-low, x-high, y-low, y-high] (optional)
| titlefsize (int): title font size, default 12pt (optional)
| xylabelfsize (int): x-axis, y-axis label font size, default 12pt (optional)
| xytickfsize (int): x-axis, y-axis tick font size, default 10pt (optional)
| meshCmap (cm): colour map for the mesh (optional)
| cbarshow (bool): if true, the show a colour bar (optional)
| cbarorientation (string): 'vertical' (right) or 'horizontal' (below) (optional)
| cbarcustomticks zip([z values/float],[tick labels/string]): define custom colourbar tick locations for given z values (optional)
| cbarfontsize (int): font size for colour bar (optional)
| drawGrid (bool): draw the grid on the plot (optional)
| yInvert (bool): invert the y-axis. Flip the y-axis up-down (optional)
| xInvert (bool): invert the x-axis. Flip the x-axis left-right (optional)
| contourFill (bool): fill contours with colour (optional)
| contourLine (bool): draw a series of contour lines (optional)
| logScale (bool): do Z values on log scale, recompute colourbar values (optional)
| negativeSolid (bool): draw negative contours in solid lines, dashed otherwise (optional)
| zeroContourLine (double): draw a single contour at given value (optional)
| contLabel (bool): label the contours with values (optional)
| contFmt (string): contour label c-printf format (optional)
| contCol (string): contour label colour, e.g., 'k' (optional)
| contFonSz (float): contour label fontsize (optional)
| contLinWid (float): contour line width in points (optional)
| zorders ([int]) list of zorders for drawing sequence, highest is last (optional)
| PLcolorscale (string): Plotly colour scale name passed to the Contour trace (optional)
Returns:
| the axis object for the plot
Raises:
| No exception is raised.
"""
#to rank 2
xx=xvals.reshape(-1, 1)
yy=yvals.reshape(-1, 1)
#if this is a log scale plot
if logScale is True:
zvals = np.log10(zvals)
contour_negative_linestyle = plt.rcParams['contour.negative_linestyle']
if contourLine:
if negativeSolid:
plt.rcParams['contour.negative_linestyle'] = 'solid'
else:
plt.rcParams['contour.negative_linestyle'] = 'dashed'
#create subplot if not existing
if (self.nrow,self.ncol, plotnum) not in list(self.subplots.keys()):
self.subplots[(self.nrow,self.ncol, plotnum)] = \
self.fig.add_subplot(self.nrow,self.ncol, plotnum)
#get axis
ax = self.subplots[(self.nrow,self.ncol, plotnum)]
if drawGrid:
ax.grid(True)
else:
ax.grid(False)
if xlabel is not None:
ax.set_xlabel(xlabel, fontsize=xylabelfsize)
if xScientific:
formx = plt.ScalarFormatter()
formx.set_scientific(True)
formx.set_powerlimits([powerLimits[0], powerLimits[1]])
ax.xaxis.set_major_formatter(formx)
if ylabel is not None:
ax.set_ylabel(ylabel, fontsize=xylabelfsize)
if yScientific:
formy = plt.ScalarFormatter()
formy.set_powerlimits([powerLimits[2], powerLimits[3]])
formy.set_scientific(True)
ax.yaxis.set_major_formatter(formy)
if maxNX >0:
ax.xaxis.set_major_locator(mpl.ticker.MaxNLocator(maxNX))
if maxNY >0:
ax.yaxis.set_major_locator(mpl.ticker.MaxNLocator(maxNY))
if plotCol:
col = plotCol[0]
else:
col = self.nextPlotCol()
if zorders is not None:
if len(zorders) > 1:
zorder = zorders[0] # only one contour set is drawn here; avoids the undefined index i
else:
zorder = zorders[0]
else:
zorder = 2
if self.useplotly:
self.PlotlyPlotCalls = self.PlotlyPlotCalls + 1
self.PLType = "meshContour"
if cbarshow:
self.Plotlydata.append(Contour(x=list(itertools.chain.from_iterable(xvals)),
y=list(itertools.chain.from_iterable(yvals)),
z=list(itertools.chain.from_iterable(zvals)),
PLcolorscale=PLcolorscale))
#,color=color,colorbar = ColorBar(PLtickmode=PLtickmode,nticks=PLnticks,
# PLtick0=PLtick0,PLdtick=PLdtick,PLtickvals=PLtickvals,PLticktext=PLticktext),
# PLcolorscale = PLcolorScale,intensity = PLintensity))
else:
self.Plotlydata.append(Contour(x=list(itertools.chain.from_iterable(xvals)),
y=list(itertools.chain.from_iterable(yvals)),
z=list(itertools.chain.from_iterable(zvals)),PLcolorscale=PLcolorscale))
#,color=color))
# Append axis and plot titles
if self.ncol > 1:
self.PlotlySubPlotNumbers.append(plotnum)
elif self.nrow > 1 :
self.PlotlySubPlotNumbers.append(plotnum)
#do the plot
if contourFill:
pmplotcf = ax.contourf(xvals, yvals, zvals, levels,
cmap=meshCmap, zorder=zorder)
if contourLine:
pmplot = ax.contour(xvals, yvals, zvals, levels, cmap=None, linewidths=contLinWid,
colors=col, zorder=zorder)
if zeroContourLine:
pmplot = ax.contour(xvals, yvals, zvals, (zeroContourLine,), cmap=None, linewidths=contLinWid,
colors=col, zorder=zorder)
if contLabel: # and not contourFill:
plt.clabel(pmplot, fmt = contFmt, colors = contCol, fontsize=contFonSz) #, zorder=zorder)
if cbarshow and (contourFill):
#http://matplotlib.org/mpl_toolkits/axes_grid/users/overview.html#colorbar-whose-height-or-width-in-sync-with-the-master-axes
divider = make_axes_locatable(ax)
if cbarorientation == 'vertical':
cax = divider.append_axes("right", size="5%", pad=0.05)
else:
cax = divider.append_axes("bottom", size="5%", pad=0.1)
if not cbarcustomticks:
# cbar = self.fig.colorbar(pmplotcf,orientation=cbarorientation)
cbar = self.fig.colorbar(pmplotcf,cax=cax)
if logScale:
cbartickvals = cbar.ax.yaxis.get_ticklabels()
tickVals = []
# need this roundabout trick to handle minus sign in unicode
for item in cbartickvals:
valstr = float(item.get_text().replace(u'\N{MINUS SIGN}', '-').replace('$',''))
# valstr = item.get_text().replace('\u2212', '-').replace('$','')
val = 10**float(valstr)
if abs(val) < 1000:
str = '{0:f}'.format(val)
else:
str = '{0:e}'.format(val)
tickVals.append(str)
cbartickvals = cbar.ax.yaxis.set_ticklabels(tickVals)
else:
ticks, ticklabels = list(zip(*cbarcustomticks))
# cbar = self.fig.colorbar(pmplotcf,ticks=ticks, orientation=cbarorientation)
cbar = self.fig.colorbar(pmplotcf,ticks=ticks, cax=cax)
if cbarorientation == 'vertical':
cbar.ax.set_yticklabels(ticklabels)
else:
cbar.ax.set_xticklabels(ticklabels)
if cbarorientation == 'vertical':
for t in cbar.ax.get_yticklabels():
t.set_fontsize(cbarfontsize)
else:
for t in cbar.ax.get_xticklabels():
t.set_fontsize(cbarfontsize)
#scale the axes
if pltaxis is not None:
ax.axis(pltaxis)
if(ptitle is not None):
ax.set_title(ptitle, fontsize=titlefsize)
# minor ticks are two points smaller than major
ax.tick_params(axis='both', which='major', labelsize=xytickfsize)
ax.tick_params(axis='both', which='minor', labelsize=xytickfsize-2)
if yInvert:
ax.set_ylim(ax.get_ylim()[::-1])
if xInvert:
ax.set_xlim(ax.get_xlim()[::-1])
plt.rcParams['contour.negative_linestyle'] = contour_negative_linestyle
if self.useplotly:
if self.PLmultipleYAxis:
if yInvert:
self.Plotlylayout.append(Layout(title = ptitle,xaxis=dict(title=xlabel),yaxis=dict(title=ylabel,autorange='reversed')))
elif xInvert:
self.Plotlylayout.append(Layout(title = ptitle,xaxis=dict(title=xlabel,autorange='reversed'),yaxis=dict(title=ylabel)))
else:
self.Plotlylayout.append(Layout(title = ptitle,xaxis=dict(title=xlabel),yaxis=dict(title=ylabel)))#,font=dict(title=self.PLmultiAxisTitle,side=self.PLyAxisSide,overlaying=self.PLyAxisOverlaying)))
elif self.PLmultipleXAxis:
if yInvert:
self.Plotlylayout.append(Layout(title = ptitle,xaxis=dict(title=xlabel),yaxis=dict(title=ylabel,autorange='reversed')))
elif xInvert:
self.Plotlylayout.append(Layout(title = ptitle,xaxis=dict(title=xlabel,autorange='reversed'),yaxis=dict(title=ylabel)))
else:
self.Plotlylayout.append(Layout(title = ptitle,xaxis=dict(title=xlabel),yaxis=dict(title=ylabel)))#,yaxis=PLYAxis,xaxis = PLXAxis,xaxis2=dict(title=self.PLmultiAxisTitle,side=self.PLxAxisSide,overlaying=self.PLxAxisOverlaying)))
else:
if yInvert:
self.Plotlylayout.append(Layout(title = ptitle,xaxis=dict(title=xlabel),yaxis=dict(title=ylabel,autorange='reversed')))
elif xInvert:
self.Plotlylayout.append(Layout(title = ptitle,xaxis=dict(title=xlabel,autorange='reversed'),yaxis=dict(title=ylabel)))
else:
self.Plotlylayout.append(Layout(title = ptitle,xaxis=dict(title=xlabel),yaxis=dict(title=ylabel)))#,xaxis = PLXAxis,yaxis=PLYAxis))
if self.ncol > 1:
self.PlotlySubPlotTitles.append(ptitle)
self.PlotlyXaxisTitles.append(xlabel)
self.PlotlyYaxisTitles.append(ylabel)
elif self.nrow > 1:
self.PlotlySubPlotTitles.append(ptitle)
self.PlotlyXaxisTitles.append(xlabel)
self.PlotlyYaxisTitles.append(ylabel)
return ax
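# --- Illustrative usage sketch (assumption): the (x, y, z) inputs must be matching
# two-dimensional mesh arrays, e.g. as produced by numpy.meshgrid.
#   import numpy as np
#   xv, yv = np.meshgrid(np.linspace(-3, 3, 100), np.linspace(-3, 3, 100))
#   zv = np.exp(-(xv**2 + yv**2))
#   p.meshContour(1, xv, yv, zv, levels=15, cbarshow=True,
#                 ptitle='Gaussian', xlabel='x', ylabel='y')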
############################################################
##
def mesh3D(self, plotnum, xvals, yvals, zvals,
ptitle=None, xlabel=None, ylabel=None, zlabel=None,
rstride=1, cstride=1, linewidth=0,
plotCol=None, edgeCol=None, pltaxis=None, maxNX=0, maxNY=0, maxNZ=0,
xScientific=False, yScientific=False, zScientific=False,
powerLimits=[-4, 2, -4, 2, -2, 2], titlefsize=12,
xylabelfsize=12, xytickfsize=10, wireframe=False, surface=True,
cmap=cm.rainbow, cbarshow=False,
cbarorientation = 'vertical', cbarcustomticks=[], cbarfontsize = 12,
drawGrid=True, xInvert=False, yInvert=False, zInvert=False,
logScale=False, alpha=1, alphawire=1,
azim=45, elev=30, distance=10, zorders=None, clip_on=True,
PLcolor=None,
PLcolorScale=None, PLtickmode=None, PLnticks=None, PLtick0=None, PLdtick=None,
PLtickvals=None, PLticktext=None, PLintensity = None
):
"""XY colour mesh plot for (xvals, yvals, zvals) input sets.
Given an existing figure, this function plots in a specified subplot position.
Only one mesh is drawn at a time. Future meshes in the same subplot
will cover any previous meshes.
The mesh grid is defined in (x,y), while the height of the mesh is the z value.
The data set must have three two dimensional arrays, each for x, y, and z.
The data in x, y, and z arrays must have matching data points.
The x and y arrays each define the grid in terms of x and y values, i.e.,
the x array contains the x values for the data set, while the y array
contains the y values. The z array contains the z values for the
corresponding x and y values in the mesh.
Use wireframe=True to obtain a wireframe plot.
Use surface=True to obtain a surface plot with fill colours.
Z-values can be plotted on a log scale, in which case the colourbar is adjusted
to show true values, but on the nonlinear scale.
The xvals and yvals vectors may have non-constant grid-intervals, i.e.,
they do not have to be on regular intervals, but z array must correspond
to the (x,y) grid.
Args:
| plotnum (int): subplot number, 1-based index
| xvals (np.array[N,M]): array of x values, corresponding to (x,y) grid
| yvals (np.array[N,M]): array of y values, corresponding to (x,y) grid
| zvals (np.array[N,M]): array of z values, corresponding to (x,y) grid
| ptitle (string): plot title (optional)
| xlabel (string): x axis label (optional)
| ylabel (string): y axis label (optional)
| zlabel (string): z axis label (optional)
| rstride (int): mesh line row (y axis) stride, every rstride value along y axis (optional)
| cstride (int): mesh line column (x axis) stride, every cstride value along x axis (optional)
| linewidth (float): mesh line width in points (optional)
| plotCol ([strings]): fill colour, list with M=1 entries, use default if None (optional)
| edgeCol ([strings]): mesh line colour , list with M=1 entries, use default if None (optional)
| pltaxis ([xmin, xmax, ymin, ymax]): scale for x,y axes. z scale is not settable. Let Matplotlib decide if None (optional)
| maxNX (int): draw maxNX+1 tick labels on x axis (optional)
| maxNY (int): draw maxNY+1 tick labels on y axis (optional)
| maxNZ (int): draw maxNY+1 tick labels on z axis (optional)
| xScientific (bool): use scientific notation on x axis (optional)
| yScientific (bool): use scientific notation on y axis (optional)
| zScientific (bool): use scientific notation on z-axis (optional)
| powerLimits[float]: scientific tick label power limits [x-neg, x-pos, y-neg, y-pos, z-neg, z-pos] (optional)
| titlefsize (int): title font size, default 12pt (optional)
| xylabelfsize (int): x-axis, y-axis, z-axis label font size, default 12pt (optional)
| xytickfsize (int): x-axis, y-axis, z-axis tick font size, default 10pt (optional)
| wireframe (bool): If True, do a wireframe plot, (optional)
| surface (bool): If True, do a surface plot, (optional)
| cmap (cm): color map for the mesh (optional)
| cbarshow (bool): if true, the show a color bar (optional)
| cbarorientation (string): 'vertical' (right) or 'horizontal' (below) (optional)
| cbarcustomticks zip([z values/float],[tick labels/string]): define custom colourbar ticks locations for given z values(optional)
| cbarfontsize (int): font size for color bar (optional)
| drawGrid (bool): draw the grid on the plot (optional)
| xInvert (bool): invert the x-axis. Flip the x-axis left-right (optional)
| yInvert (bool): invert the y-axis. Flip the y-axis left-right (optional)
| zInvert (bool): invert the z-axis. Flip the z-axis up-down (optional)
| logScale (bool): do Z values on log scale, recompute colourbar vals (optional)
| alpha (float): surface transparency (optional)
| alphawire (float): mesh transparency (optional)
| azim (float): graph view azimuth angle [degrees] (optional)
| elev (float): graph view elevation angle [degrees] (optional)
| distance (float): distance between viewer and plot (optional)
| zorder ([int]) list of zorder for drawing sequence, highest is last (optional)
| clip_on (bool) clips objects to drawing axes (optional)
| PLcolor (string): graph colour, e.g. 'FFFFFF'
| PLcolorScale ([int,string]): colour scale for mesh graphs, e.g. [0, 'rgb(0, 0, 0)']
| PLtickmode (string): Plotly colourbar tick mode (optional)
| PLnticks (int): number of colourbar ticks (optional)
| PLtick0 (int): first tick value (optional)
| PLdtick (int): step between colourbar ticks (optional)
| PLtickvals [int]: explicit colourbar tick values (optional)
| PLticktext [string]: labels for the explicit tick values (optional)
| PLintensity: vertex intensity values passed to the Plotly mesh (optional)
Returns:
| the axis object for the plot
Raises:
| No exception is raised.
"""
from mpl_toolkits.mplot3d.axes3d import Axes3D
#if this is a log scale plot
if logScale is True:
zvals = np.log10(zvals)
#create subplot if not existing
if (self.nrow,self.ncol, plotnum) not in list(self.subplots.keys()):
self.subplots[(self.nrow,self.ncol, plotnum)] = \
self.fig.add_subplot(self.nrow,self.ncol, plotnum, projection='3d')
#get axis
ax = self.subplots[(self.nrow,self.ncol, plotnum)]
if drawGrid:
ax.grid(True)
else:
ax.grid(False)
if xlabel is not None:
ax.set_xlabel(xlabel, fontsize=xylabelfsize)
if xScientific:
formx = plt.ScalarFormatter()
formx.set_scientific(True)
formx.set_powerlimits([powerLimits[0], powerLimits[1]])
ax.xaxis.set_major_formatter(formx)
if ylabel is not None:
ax.set_ylabel(ylabel, fontsize=xylabelfsize)
if yScientific:
formy = plt.ScalarFormatter()
formy.set_powerlimits([powerLimits[2], powerLimits[3]])
formy.set_scientific(True)
ax.yaxis.set_major_formatter(formy)
if zlabel is not None:
ax.set_zlabel(zlabel, fontsize=xylabelfsize)
if zScientific:
formz = plt.ScalarFormatter()
formz.set_powerlimits([powerLimits[4], powerLimits[5]])
formz.set_scientific(True)
ax.zaxis.set_major_formatter(formz)
if maxNX >0:
ax.xaxis.set_major_locator(mpl.ticker.MaxNLocator(maxNX))
if maxNY >0:
ax.yaxis.set_major_locator(mpl.ticker.MaxNLocator(maxNY))
if maxNZ >0:
ax.zaxis.set_major_locator(mpl.ticker.MaxNLocator(maxNZ))
if plotCol:
col = plotCol[0]
else:
col = self.nextPlotCol()
if edgeCol:
edcol = edgeCol[0]
else:
edcol = self.nextPlotCol()
if zorders:
if len(zorders) > 1:
zorder = zorders[0] # only one mesh is drawn here; avoids the undefined index i
else:
zorder = zorders[0]
else:
zorder = 1
if self.useplotly:
self.PlotlyPlotCalls = self.PlotlyPlotCalls + 1
self.PLIs3D = True
self.PLType = "mesh3D"
if cbarshow:
self.Plotlydata.append(Mesh3d(x=list(itertools.chain.from_iterable(xvals)),
y=list(itertools.chain.from_iterable(yvals)),
z=list(itertools.chain.from_iterable(zvals)),color=PLcolor,
colorbar = ColorBar(PLtickmode=PLtickmode,nticks=PLnticks,
PLtick0=PLtick0,PLdtick=PLdtick,PLtickvals=PLtickvals,PLticktext=PLticktext),
PLcolorscale=PLcolorScale,intensity=PLintensity))
else:
self.Plotlydata.append(Mesh3d(x=list(itertools.chain.from_iterable(xvals)),
y=list(itertools.chain.from_iterable(yvals)),
z=list(itertools.chain.from_iterable(zvals)),color=PLcolor))
# Append axis and plot titles
if self.ncol > 1:
self.PlotlySubPlotNumbers.append(plotnum)
elif self.nrow > 1 :
self.PlotlySubPlotNumbers.append(plotnum)
#do the plot
if surface:
pmplot = ax.plot_surface(xvals, yvals, zvals, rstride=rstride, cstride=cstride,
edgecolor=edcol, cmap=cmap, linewidth=linewidth, alpha=alpha,
zorder=zorder, clip_on=clip_on)
if wireframe:
pmplot = ax.plot_wireframe(xvals, yvals, zvals, rstride=rstride, cstride=cstride,
color=col, edgecolor=edcol, linewidth=linewidth, alpha=alphawire,
zorder=zorder, clip_on=clip_on)
ax.view_init(azim=azim, elev=elev)
ax.dist = distance
if cbarshow is True and cmap is not None:
#http://matplotlib.org/mpl_toolkits/axes_grid/users/overview.html#colorbar-whose-height-or-width-in-sync-with-the-master-axes
# divider = make_axes_locatable(ax)
# if cbarorientation == 'vertical':
# cax = divider.append_axes("right", size="5%", pad=0.05)
# else:
# cax = divider.append_axes("bottom", size="5%", pad=0.1)
if not cbarcustomticks:
cbar = self.fig.colorbar(pmplot,orientation=cbarorientation)
# cbar = self.fig.colorbar(pmplot,cax=cax)
if logScale:
cbartickvals = cbar.ax.yaxis.get_ticklabels()
tickVals = []
# need this roundabout trick to handle minus sign in unicode
for item in cbartickvals:
valstr = item.get_text().replace('\u2212', '-').replace('$','')
val = 10**float(valstr)
if abs(val) < 1000:
str = '{0:f}'.format(val)
else:
str = '{0:e}'.format(val)
tickVals.append(str)
cbartickvals = cbar.ax.yaxis.set_ticklabels(tickVals)
else:
ticks, ticklabels = list(zip(*cbarcustomticks))
cbar = self.fig.colorbar(pmplot,ticks=ticks, orientation=cbarorientation)
# cbar = self.fig.colorbar(pmplot,ticks=ticks, cax=cax)
if cbarorientation == 'vertical':
cbar.ax.set_yticklabels(ticklabels)
else:
cbar.ax.set_xticklabels(ticklabels)
if cbarorientation == 'vertical':
for t in cbar.ax.get_yticklabels():
t.set_fontsize(cbarfontsize)
else:
for t in cbar.ax.get_xticklabels():
t.set_fontsize(cbarfontsize)
if(ptitle is not None):
plt.title(ptitle, fontsize=titlefsize)
#scale the axes
if pltaxis is not None:
# ax.axis(pltaxis)
ax.set_xlim(pltaxis[0], pltaxis[1])
ax.set_ylim(pltaxis[2], pltaxis[3])
ax.set_zlim(pltaxis[4], pltaxis[5])
if(ptitle is not None):
ax.set_title(ptitle, fontsize=titlefsize)
# minor ticks are two points smaller than major
ax.tick_params(axis='both', which='major', labelsize=xytickfsize)
ax.tick_params(axis='both', which='minor', labelsize=xytickfsize-2)
if xInvert:
ax.set_xlim(ax.get_xlim()[::-1])
if yInvert:
ax.set_ylim(ax.get_ylim()[::-1])
if zInvert:
ax.set_zlim(ax.get_zlim()[::-1])
if self.useplotly:
if self.PLmultipleYAxis:
self.Plotlylayout.append(Layout(title = ptitle))
elif self.PLmultipleXAxis:
self.Plotlylayout.append(Layout(title = ptitle))
else:
self.Plotlylayout.append(Layout(title = ptitle))
if self.ncol > 1:
self.PlotlySubPlotTitles.append(ptitle)
self.PlotlyXaxisTitles.append(xlabel)
self.PlotlyYaxisTitles.append(ylabel)
elif self.nrow > 1:
self.PlotlySubPlotTitles.append(ptitle)
self.PlotlyXaxisTitles.append(xlabel)
self.PlotlyYaxisTitles.append(ylabel)
return ax
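# --- Illustrative usage sketch (assumption): same meshgrid-style inputs as
# meshContour, rendered as a 3-D surface and/or wireframe.
#   xv, yv = np.meshgrid(np.linspace(-3, 3, 60), np.linspace(-3, 3, 60))
#   zv = np.sin(np.sqrt(xv**2 + yv**2))
#   p.mesh3D(1, xv, yv, zv, surface=True, wireframe=False,
#            cbarshow=True, azim=30, elev=25)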
############################################################
##
def polar(self, plotnum, theta, r, ptitle=None, \
plotCol=None, label=[],labelLocation=[-0.1, 0.1], \
highlightNegative=True, highlightCol='#ffff00', highlightWidth=4,\
legendAlpha=0.0, linestyle=None,\
rscale=None, rgrid=[0,5], thetagrid=[30], \
direction='counterclockwise', zerooffset=0, titlefsize=12, drawGrid=True,
zorders=None, clip_on=True, markers=[], markevery=None,
):
"""Create a subplot and plot the data in polar coordinates (linear radial orginates only).
Given an existing figure, this function plots in a specified subplot position.
The function arguments are described below in some detail. Note that the radial values
or ordinates can be more than one column, each column representing a different
line in the plot. This is convenient if large arrays of data must be plotted. If more
than one column is present, the label argument can contain the legend labels for
each of the columns/lines. The scale for the radial ordinates can be set with rscale.
The number of radial grid circles can be set with rgrid - this provides a somewhat
better control over the built-in radial grid in matplotlib. thetagrids defines the angular
grid interval. The angular rotation direction can be set to be clockwise or
counterclockwise. Likewise, the rotation offset where the plot zero angle must be,
is set with `zerooffset`.
For some obscure reason Matplotlib version 1.13 does not plot negative values on the
polar plot. We therefore force the plot by making the values positive and then highlighting them as negative.
Args:
| plotnum (int): subplot number, 1-based index
| theta (np.array[N,] or [N,1]): angular abscissa in radians
| r (np.array[N,] or [N,M]): radial ordinates - could be M columns
| ptitle (string): plot title (optional)
| plotCol ([strings]): plot colour and line style, list with M entries, use default if None (optional)
| label ([strings]): legend label, list with M entries (optional)
| labelLocation ([x,y]): where the legend should located (optional)
| highlightNegative (bool): indicate if negative data must be highlighted (optional)
| highlightCol (string): negative highlight colour string (optional)
| highlightWidth (int): negative highlight line width(optional)
| legendAlpha (float): transparency for legend box (optional)
| linestyle ([str]): line style to be used in plot
| rscale ([rmin, rmax]): radial plotting limits. use default setting if None.
If rmin is negative the zero is a circle and rmin is at the centre of the graph (optional)
| rgrid ([rinc, numinc]): radial grid, use default is [0,5].
If rgrid is None don't show. If rinc=0 then numinc is number of intervals.
If rinc is not zero then rinc is the increment and numinc is ignored (optional)
| thetagrid (float): theta grid interval [degrees], if None don't show (optional)
| direction (string): direction in increasing angle, 'counterclockwise' or 'clockwise' (optional)
| zerooffset (float): rotation offset where zero should be [rad]. Positive
zero-offset rotation is counterclockwise from 3'o'clock (optional)
| titlefsize (int): title font size, default 12pt (optional)
| drawGrid (bool): draw a grid on the graph (optional)
| zorder ([int]) list of zorder for drawing sequence, highest is last (optional)
| clip_on (bool) clips objects to drawing axes (optional)
| markers ([string]) markers to be used for plotting data points (optional)
| markevery (int | (startind, stride)) subsample when using markers (optional)
Returns:
| the axis object for the plot
Raises:
| No exception is raised.
"""
if theta.ndim>1:
tt=theta
else:
if type(theta)==type( | pd.Series() | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 30 09:33:17 2019
@author: WENDY
"""
import os
import csv
import pandas as pd
from jieba import analyse
from utils.config import EXPERIMENT_DIR, RAW_DATA
from src.graphviz.func import init
def textrank_extract(text, keyword_num=200):
"""
Extract keywords using TextRank
:param text: input text
:param keyword_num: number of keywords to extract
:return: keywords [list of (keyword, weight) pairs]
"""
textrank = analyse.textrank
keywords = textrank(text, keyword_num, withWeight=True)
# return the extracted keywords
return keywords
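# --- Illustrative usage (assumption): with withWeight=True jieba returns
# (keyword, weight) tuples already sorted by descending TextRank score.
#   kw = textrank_extract(some_text, keyword_num=20)   # some_text is a placeholder
#   # kw -> [('kw1', 1.0), ('kw2', 0.87), ...]          (illustrative values)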
def Comparetextrank(data1, data2):
"""
# Compare the change in TextRank values between parent and child
:param data1: TextRank result of the parent node
:param data2: TextRank result of the child node
:return:
data_all: comparison result
"""
dict1 = [name[0] for name in data1]
dict2 = [name[0] for name in data2]
dict_all = list(set(dict1).intersection(set(dict2)))
data_all = []
for name in dict_all:
ind1 = dict1.index(name)
ind2 = dict2.index(name)
value1 = data1[ind1][1]
value2 = data2[ind2][1]
value = value2 - value1
# multiply to use as the weight
# value_all = value + value1
new = (name, value)
data_all.append(new)
data_all.sort(key=lambda x: x[1], reverse=True)
return data_all
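# --- Worked example (values invented for illustration): only words present in both
# rankings are kept, ordered by the increase child - parent.
#   parent = [('park', 1.0), ('lake', 0.5)]
#   child = [('lake', 0.75), ('park', 0.5)]
#   Comparetextrank(parent, child)  # -> [('lake', 0.25), ('park', -0.5)]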
def Getallcom(POI_comment):
"""
Concatenate the comments of a POI together
:param POI_comment: the comments to be concatenated
:return:
all_com: the concatenated comments
"""
all_com = ''
for i in POI_comment:
all_com = all_com + i + '/'
return all_com
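# --- Design note (not in the source): for a non-empty list the loop above is
# equivalent to a join with a trailing separator.
#   all_com = '/'.join(POI_comment) + '/'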
def geti(dirname, r, com_name, com):
"""
For each document, get its keyword set
:param dirname: experiment sub-directory name
:param r: cluster index used in the '<r>-poi.csv' result file
:param com_name: list of POI names
:param com: list of POI comment documents
:return:
"""
print(dirname)
resultpath = os.path.join(EXPERIMENT_DIR, dirname, 'result', f'{r}-poi.csv')
if dirname == []:
keywords = []
else:
with open(resultpath, 'r', encoding='utf-8-sig') as csvfile:
name_sub = []
reader = csv.reader(csvfile)
for line in reader:
name_sub.append(line)
del name_sub[0]
name_sub = [name[1] for name in name_sub]
com_sub = []
for name_sub_ in name_sub:
ind = (com_name.index(name_sub_))
com_sub.append(com[ind])
print(len(com_sub))
# build the concatenated document data and get its keyword list
data = Getallcom(com_sub)
keywords = textrank_extract(data)
return keywords
def Getdirnext(dirname_list, f=0):
"""
Get the next level of folder directories
:param dirname_list: directories at the current level
:param f: flag, set to 1 if any next-level directory exists
:return:
"""
dirnext = []
for name in dirname_list:
for i in range(5):
if name != []:
newdir = name + os.path.sep + '%d' % i
print(newdir)
if os.path.exists(newdir):
f = 1
dirnext.append(newdir)
else:
dirnext.append([])
return dirnext, f
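# --- Illustrative behaviour (assumption about the on-disk layout): each current
# directory is probed for child folders named 0..4; existing children are appended,
# missing ones are recorded as [] placeholders, and f becomes 1 if anything was found.
#   dirnext, f = Getdirnext(['exp/0'])
#   # dirnext might be ['exp/0/0', [], 'exp/0/2', [], []] depending on the disk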
def Getcompare(dir_past, dir_next_sub):
"""
Based on the result of get_dir, compute the index of the comparison sequence
:param dir_past: directory list of the previous layer
:param dir_next_sub: directory entry of the current layer
:return:
"""
dir_next_sub_i = dir_next_sub[:-1]
index = dir_past.index(dir_next_sub_i)
dir_next_sub_ind = index * 5 + int(dir_next_sub[-1])
return dir_next_sub_ind
def textrank(nowdir):
# set the folder path for saving results
nowdir, _, savedir = init(nowdir)
print("[TextRank] 待生成TextRank结果的初始文件夹: {}".format(nowdir))
print("[TextRank] 生成的TextRank结果的保存文件夹: {} ".format(savedir))
# extract keywords from the documents before any filtering
with open(os.path.join(RAW_DATA, 'POI863_flag.txt'), 'r') as f:
com = f.read().split('\n')
del com[-1]
with open(os.path.join(RAW_DATA, 'POI_name863.txt'), 'r') as f:
com_name = f.read().split('\n')
del com_name[-1]
# get the keyword list keyword for the combined document
POI_all = ','.join(com)
keyword = textrank_extract(POI_all)
# K_csv is the overall keyword list
K_csv = [[name[0] for name in keyword[0:20]]]
dirnext = [nowdir]
dir_all = []
f = 1
while f:
dir_all.append(dirnext)
print(dirnext)
dirnext, f = Getdirnext(dirnext)
name_seq = [0, 1, 2, 3, 4]
# get the list data of all keywords
data = []
for dirlist in dir_all:
data_sub = []
for dirlist_ in dirlist:
if dirlist_ != []:
for i in name_seq:
data_sub.append(geti(dirlist_, i, com_name, com))
data.append(data_sub)
# get all folder directories
nowdir_text = nowdir.split('\\')
get_dir = []
for sub_dir in dir_all:
get_dir_sub = []
for sub_dir_ in sub_dir:
if sub_dir_ != []:
sub_dir_text = sub_dir_.split('\\')
get_dir_sub.append([i for i in sub_dir_text if i not in nowdir_text])
get_dir.append(get_dir_sub)
# generate the comparison sequences
for l in range(len(get_dir)):
print('Layer %d' % l)
get_dir_sub = get_dir[l]
print(get_dir_sub)
if get_dir_sub == [[]]:
K_csv_sub = []
for i in name_seq:
result = Comparetextrank(keyword, data[l][i])[0:20]
K_csv_sub.append([name[0] for name in result[0:20]])
K_csv.append(K_csv_sub)
else:
K_csv_sub_total = []
for n in range(len(get_dir_sub)):
# print(n)
get_dir_sub_ = get_dir_sub[n]
K_csv_sub = []
# get the index of the comparison list in the previous layer
dir_past_ind = Getcompare(get_dir[l - 1], get_dir_sub_)
# fetch the comparison lists
data_past = data[l - 1][dir_past_ind]
data_next = data[l][n * 5: (n + 1) * 5]
for j in name_seq:
result = Comparetextrank(data_past, data_next[j])[0:20]
K_csv_sub.append([name[0] for name in result[0:20]])
K_csv_sub_total.append(K_csv_sub)
K_csv.append(K_csv_sub_total)
# save all the comparison results
print("[TextRank] saving all comparison results")
write = pd.DataFrame({'feature_name': K_csv[0]})
write.to_csv(os.path.join(savedir, "top-feature.csv"), encoding="utf-8-sig")
for n in range(len(K_csv)):
kn = K_csv[n]
if n == 0:
write = | pd.DataFrame({'feature_name': kn}) | pandas.DataFrame |
import random
import pandas as pd
import copy
import math
from util import weight, check_maze, maze, order
def insert(block, map1, weight_map, count, area, df, flag, i, branch):
# print("반입 함수")
x_axis, y_axis = order.order(df)
minsize = 50
maxsize = 255
ran_num = random.randint(minsize, maxsize) # 블록 색깔
df = df
step = i
num = block.block_number # 블록 번호
pre_weight = 0
min_weight = 0
continue_block = None
# print('해당 블록 num', num)
# print("x_axis", x_axis)
x_index = x_axis.index(num) # x 축 인덱스 번호
y_index = y_axis[x_index] # y 축 값
x_axis_next = x_axis[x_index + 1:]
# print("해당 블록", num, " 출고 순서", y_index)
consider_list_2 = y_axis[:x_index] # 고려해야하는 y축 리스트
consider_list_4 = y_axis[x_index + 1:] # 고려해야하는 Y축 리스트
# print(consider_list_2)
# print(consider_list_4)
y_axis_list_2 = [y for y in consider_list_2 if y_index < y] # y values released later than this block
y_axis_list_4 = [y for y in consider_list_4 if y_index > y] # y values released earlier than this block
# print(num)
# print("upper left", y_axis_list_2)
# print("lower right", y_axis_list_4)
x_axis_block_index_2 = [p for p, q in enumerate(consider_list_2) if q in y_axis_list_2] # x-axis indices matching the weight list
x_axis_block_index_4 = [p for p, q in enumerate(consider_list_4) if q in y_axis_list_4] # x-axis indices matching the weight list
# asdf = [y_axis.index(i) for i in y_axis_list_4]
# asdf = [x_axis[i] for i in asdf]
x_axis_block_num_2 = [x_axis[i] for i in x_axis_block_index_2] # block numbers for those x-axis indices
x_axis_block_num_4 = [x_axis_next[i] for i in x_axis_block_index_4] # block numbers for those x-axis indices
# print("blocks released later", x_axis_block_num_2)
# print("blocks released earlier", x_axis_block_num_4)
if x_axis_block_num_4: # last block number in quadrant 4
continue_block = max(y_axis_list_4)
continue_block = y_axis.index(continue_block)
continue_block = x_axis[continue_block]
# print(continue_block)
pre_weight_list = [df[df.block_number == i]['weight_val'].values[0] for i in
x_axis_block_num_2] # weight_val values of those blocks
if pre_weight_list:
pre_weight = min(pre_weight_list)
block_list_4 = [[df[df.block_number == i]['width'], df[df.block_number == i]['height']] for i in x_axis_block_num_4]
pos_loc = [] # candidate positions
width = block.width # block width
height = block.height # block height
s = maze.Maze(map1.map, width, height)
start = s.find_start(flag) # entrances the block can enter through
# print("usable entrances", start)
# print(sorted(start))
if len(start) == 0: # no space to enter
# print("cannot insert!!")
if branch == 0:
count += 1
area += width * height
df.loc[df.block_number == num, 'position_x'] = None
df.loc[df.block_number == num, 'position_y'] = None
return count, area
for i in start:
pos_loc.extend(s.bfs(i))
if len(pos_loc) == 0: # only the entrance positions are available
pos_loc.extend(start)
# print(s.maze_map)
# print("only the entrance fits!!", pos_loc)
weight_list = weight.weight_cal(pos_loc, weight_map, height, width)
if pre_weight: # if there is a weight value to consider
weight_list = [i for i in weight_list if i <= pre_weight] # keep only values no higher than pre_weight
else:
weight_list = []
# print("weight to consider", pre_weight)
# print("weight list", weight_list)
if weight_list: # consider quadrant 2
max_weight = max(weight_list)
insert_loc = pos_loc[weight_list.index(max_weight)] # insert where the weight sum is highest
# choice_list = [i for i, value in enumerate(weight_list) if value == max_weight]
# choice = random.choice(choice_list)
# insert_loc = pos_loc[choice] # random insertion
else: # when the weight list is empty
insert_loc = random.choice(pos_loc)
max_weight = weight_map[insert_loc[0]:insert_loc[0] + height, insert_loc[1]:insert_loc[1] + width].mean()
############################
############################
# print(insert_loc)
# insert_loc = random.choice(pos_loc)
# print("stackable positions y,x = ", pos_loc)
# print("stacking position y,x = ", insert_loc)
df.loc[df.block_number == num, 'position_x'] = insert_loc[1]
df.loc[df.block_number == num, 'position_y'] = insert_loc[0]
df.loc[df.block_number == num, 'weight_val'] = max_weight
map1.map[insert_loc[0]:insert_loc[0] + height, insert_loc[1]:insert_loc[1] + width] = 1
map1.map_color[insert_loc[0]:insert_loc[0] + height, insert_loc[1]:insert_loc[1] + width] = ran_num
if continue_block is not None and branch == 0:
temp_x = insert_loc[1]
temp_y = insert_loc[0]
branch = 1
exit_point = continue_block
# print("자 드가자~!", exit_point)
copy_df = copy.deepcopy(df)
map2 = copy.deepcopy(map1)
curr = step + 1
quad_4_block_num = []
while True:
task = copy_df.loc[curr]
quad_4_block_num.append(task.block_number)
if task.type == 1:
_ = insert(task, map2, weight_map, 0, 0, copy_df, flag, curr, branch)
if task.type == 2:
_, copy_df = out(task, map2, 0, flag, copy_df, curr)
curr += 1
if task.block_number == exit_point:
break
#################################################################
# 미리한번 돌림
# min_weight = copy_df[copy_df.block_number == exit_point]['weight_val'].values[0] # 이건 마지막 거리 이용
min_weight_list = [copy_df[copy_df.block_number == i]['weight_val'].values[0] for i in quad_4_block_num]
min_weight = max(min_weight_list) # 4사분면 블록중 가장 높은 최단거리 (이보다 안에 있어야함)
# print(quad_4_block_num)
# print(min_weight_list)
# print(min_weight)
# print(min_weight)
# print(pre_weight)
# print(weight_list)
map1.map[insert_loc[0]:insert_loc[0] + height, insert_loc[1]:insert_loc[1] + width] = 0
map1.map_color[insert_loc[0]:insert_loc[0] + height, insert_loc[1]:insert_loc[1] + width] = 0
if min_weight:
weight_list = [i for i in weight_list if min_weight <= i if max_weight >= i]
# print(weight_list)
# print("change", weight_list)
if weight_list: # quadrant 4
max_weight = max(weight_list)
insert_loc = pos_loc[weight_list.index(max_weight)] # insert where the weight sum is highest
# choice_list = [i for i, value in enumerate(weight_list) if value == max_weight]
# choice = random.choice(choice_list)
# insert_loc = pos_loc[choice] # random insertion
else: # when the weight list is empty
insert_loc = random.choice(pos_loc)
min_weight = weight_map[insert_loc[0]:insert_loc[0] + height, insert_loc[1]:insert_loc[1] + width].mean()
df.loc[df.block_number == num, 'position_x'] = insert_loc[1]
df.loc[df.block_number == num, 'position_y'] = insert_loc[0]
df.loc[df.block_number == num, 'weight_val'] = min_weight
map1.map[insert_loc[0]:insert_loc[0] + height, insert_loc[1]:insert_loc[1] + width] = 1
map1.map_color[insert_loc[0]:insert_loc[0] + height, insert_loc[1]:insert_loc[1] + width] = ran_num
block_data = block.to_dict() # prepare the block data
block_data['position_x'] = insert_loc[1]
block_data['position_y'] = insert_loc[0]
block_data['weight_val'] = max_weight
map1.block_data(block_data) # add the block data to the map object
# print(map1.data)
# print(len(map1.data))
return count, area
# def out(block, map1, count, flag):
# if math.isnan(block.position_x):
# return count
# no_out = False
# # print("반출 함수")
# pos_loc = []
# entrance_list = [] # list of entrances available for shipping out
# width = block.width # horizontal length
# height = block.height # vertical length
# x = block.position_x
# y = block.position_y
# x = int(x)
# y = int(y)
# start = (y, x) # reversed (row, col) for numpy
# map1.map[y:y + height, x:x + width] = 0
# map1.map_color[y:y + height, x:x + width] = 0
# s = maze.Maze(map1.map, width, height)
# entrance_list.extend(s.find_start(flag))
# # print(entrance_list)
# pos_loc.extend(s.bfs(start))
# # print(pos_loc)
# for i in entrance_list:
# if i in pos_loc:
# # print("반출 가능")
# no_out = False
# break
# else:
# ################################################
# # write shipping-out constraints here
# # print("바로 출고 불가능")
# no_out = True
#
# # cost increase
# if no_out:
# # print("반출 불가")
# count += 1
# return count
def out(block, map1, count, flag, df, curr):
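# out(): ship a block out of the yard. If no exit path is available, find the
# interfering blocks and handle them before shipping (see the logic below).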
test_map = copy.deepcopy(map1)
if math.isnan(block.position_x):
# print("문제 발생!!!")
# print("반입 금지로 인한 반출X")
# print(map1.block_num_map())
# print(df)
# print(block)
# input()
return count, df
curr_block_index = None
num_map = test_map.block_num_map()
for index, j in enumerate(test_map.data): # find the block data to pop
if j['block_number'] == block.block_number:
curr_block_index = index
# print(block)
# print(num_map)
# print(df)
map1.data.pop(curr_block_index)
test_map.data.pop(curr_block_index) # temporarily excluded because of the calculation below
# width, height, x, y = trans_data(block)
no_out = out_check(block, test_map, flag, 1)
# cost increase
if no_out:
# print("드가자 ")
# print(num_map)
# before = test_map.cv2_map()
# before = cv2.resize(before, (600, 600), interpolation=cv2.INTER_NEAREST)
# cv2.namedWindow('before', cv2.WINDOW_NORMAL)
# cv2.imshow('before', before)
# cv2.waitKey(0)
obstruct_block_index = None
obstruct_block = find_out(test_map.data, block, flag, test_map, num_map)
if block.block_number in obstruct_block:
obstruct_block.remove(block.block_number)
# TODO decide how to handle the interfering blocks
for x in obstruct_block:
# # add to the dataframe
# print('현재 인덱스{}'.format(curr))
# print('간섭블록{}'.format(x))
# print(df)
# temp = df.loc[df.block_number == x]
# print(temp)
# temp = temp.iloc[-1]
# temp['date'] = df.loc[curr]['date']
# temp['type'] = 1
# temp1 = df[df.index <= curr]
# temp2 = df[df.index > curr]
# df = temp1.append(temp, ignore_index=True).append(temp2, ignore_index=True)
# df.loc[curr + 1] = temp
# print(df)
# print("자 드가자", x)
# print('현재 인덱스{}'.format(curr))
# print('간섭블록{}'.format(x))
# order.order(df)
# delete the data
for index, j in enumerate(map1.data): # find the block data to pop
if j['block_number'] == x:
obstruct_block_index = index
# print('현재 인덱스{}'.format(curr))
# print('간섭블록{}'.format(x))
temp = pd.DataFrame(map1.data[obstruct_block_index], index=[0])
erase = | pd.Series(map1.data[obstruct_block_index]) | pandas.Series |
import plotly.express as px
import plotly.graph_objects as go
import pandas as pd
# naming convention
'''
Figure helper naming convention:
render_<figurename>
e.g. render_annual_timeseries
Standardized color: #7febf5
'''
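# Hypothetical helper illustrating the render_<figurename> convention above.
# This is only a sketch: it assumes the aggregated frame built further below
# (columns 'Period' and 'Passenger Count_sum') and uses the standardized color.
def render_annual_timeseries(df):
    # line chart of total passengers per activity period
    fig = go.Figure(
        go.Scatter(
            x=df['Period'],
            y=df['Passenger Count_sum'],
            mode='lines',
            line=dict(color='#7febf5'),
        )
    )
    fig.update_layout(title='Passenger count per period')
    return fig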
def load_data() :
data = pd.read_csv('Air_Traffic_Passenger_Statistics.csv')
data = data.replace('United Airlines - Pre 07/01/2013', 'United Airlines')
data['Period'] = data['Activity Period'].astype('string')
data = data.drop_duplicates(keep='first')
data = data.drop(columns=['Activity Period'])
# all geo data
data['GEO Region'] = data['GEO Region'].replace('Canada', 'North America')
data['GEO Region'] = data['GEO Region'].replace('US', 'North America')
data['GEO Region'] = data['GEO Region'].replace('Australia / Oceania', 'Australia')
data['GEO Region'] = data['GEO Region'].replace('Middle East', 'Asia')
data['GEO Region'] = data['GEO Region'].replace('Central America', 'South America')
data['GEO Region'] = data['GEO Region'].replace('Mexico', 'South America')
return data
data = load_data()
passanger_count_group_period = data.groupby(['Period']).agg(**{'Passenger Count_sum': ('Passenger Count', 'sum')}).reset_index()
passanger_count_group_period['Period'] = | pd.to_datetime(passanger_count_group_period['Period'], format='%Y%m') | pandas.to_datetime |
"""
Evaluate Classifier Predictions
Modified from PDX PPTC Machine Learning Analysis
https://github.com/marislab/pdx-classification
Rokita et al. Cell Reports. 2019.
https://doi.org/10.1016/j.celrep.2019.09.071
<NAME>, 2018
Modified by <NAME> for OpenPBTA, 2020
This script evaluates the predictions made by the NF1 and TP53 classifiers in the input PPTC RNAseq data.
## Procedure
1. Load status matrices
* These files store the mutation status for TP53 and NF1 for the input samples (see 00-tp53-nf1-alterations.R for more information)
2. Align identifiers
* The identifiers matching the RNAseq data to the status matrix are not aligned.
* I use an intermediate dictionary to map common identifiers
3. Load predictions (see 01-apply-classifier.py for more details)
4. Evaluate predictions
* I visualize the distribution of predictions between wild-type and mutant samples for both classifiers
## Output
The output of this notebook are several evaluation figures demonstrating the predictive performance on the input data for the three classifiers.
"""
import os
import random
from decimal import Decimal
from scipy.stats import ttest_ind
import numpy as np
import pandas as pd
from sklearn.metrics import average_precision_score, roc_auc_score
from sklearn.metrics import roc_curve, precision_recall_curve
import seaborn as sns
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from optparse import OptionParser
parser = OptionParser(usage="usage: %prog [options] arguments")
parser.add_option(
"-s", "--statusfile", dest="status_file", help="TP53 and NF1 status file"
)
parser.add_option("-f", "--file", dest="filename", help="scores output file ")
parser.add_option(
"-c", "--clinical", dest="clinical", help="pbta-histologies.tsv clinical file"
)
parser.add_option(
"-o",
"--output_basename",
dest="outputfile",
help="output plots basename for TP53 and NF1 ROC curves",
)
(options, args) = parser.parse_args()
status_file = options.status_file
scores_file = options.filename
clinical = options.clinical
outputfilename = options.outputfile
np.random.seed(123)
# read TP53/NF1 alterations
full_status_df = | pd.read_table(status_file, low_memory=False) | pandas.read_table |
from strategy.rebalance import get_relative_to_expiry_rebalance_dates, \
get_fixed_frequency_rebalance_dates, \
get_relative_to_expiry_instrument_weights
from strategy.calendar import get_mtm_dates
import pandas as pd
import pytest
from pandas.util.testing import assert_index_equal, assert_frame_equal
def assert_dict_of_frames(dict1, dict2):
assert dict1.keys() == dict2.keys()
for key in dict1:
assert_frame_equal(dict1[key], dict2[key], check_names=False)
def test_tradeables_dates():
# no CME holidays in this date range
sd = pd.Timestamp("2015-01-02")
ed = pd.Timestamp("2015-03-23")
exchanges = ["CME"]
tradeable_dates = get_mtm_dates(sd, ed, exchanges)
exp_tradeable_dates = pd.date_range(
"2015-01-02", "2015-03-23", freq="B"
)
assert_index_equal(tradeable_dates, exp_tradeable_dates)
# with an adhoc holiday
holidays = [pd.Timestamp("2015-01-02")]
tradeable_dates = get_mtm_dates(sd, ed, exchanges, holidays=holidays)
exp_tradeable_dates = pd.date_range(
"2015-01-03", "2015-03-23", freq="B"
)
assert_index_equal(tradeable_dates, exp_tradeable_dates)
# with CME holiday (New Years day)
sd = pd.Timestamp("2015-01-01")
ed = pd.Timestamp("2015-01-02")
tradeable_dates = get_mtm_dates(sd, ed, exchanges)
exp_tradeable_dates = pd.DatetimeIndex([pd.Timestamp("2015-01-02")])
assert_index_equal(tradeable_dates, exp_tradeable_dates)
def test_relative_to_expiry_rebalance_dates():
# each contract rolling individually, same offset
# change to ES and TY
sd = pd.Timestamp("2015-01-02")
ed = pd.Timestamp("2015-03-23")
expiries = pd.DataFrame(
[["2015ESH", "2015-03-20", "2015-03-20"],
["2015ESM", "2015-06-19", "2015-06-19"],
["2015TYH", "2015-02-27", "2015-03-20"],
["2015TYM", "2015-05-29", "2015-06-19"]],
columns=["contract", "first_notice", "last_trade"]
)
offsets = -3
rebal_dates = get_relative_to_expiry_rebalance_dates(
sd, ed, expiries, offsets, all_monthly=False, holidays=None
)
exp_rebal_dates = pd.DatetimeIndex(
["2015-01-02", "2015-02-24", "2015-03-17"]
)
assert_index_equal(rebal_dates, exp_rebal_dates)
# rolling all monthly contracts together, same offset
rebal_dates = get_relative_to_expiry_rebalance_dates(
sd, ed, expiries, offsets, all_monthly=True, holidays=None
)
exp_rebal_dates = pd.DatetimeIndex(["2015-01-02", "2015-02-24"])
assert_index_equal(rebal_dates, exp_rebal_dates)
# rolling each contract individually, different offset
offsets = {"ES": -3, "TY": -4}
rebal_dates = get_relative_to_expiry_rebalance_dates(
sd, ed, expiries, offsets, all_monthly=False, holidays=None
)
exp_rebal_dates = pd.DatetimeIndex(
["2015-01-02", "2015-02-23", "2015-03-17"]
)
assert_index_equal(rebal_dates, exp_rebal_dates)
def test_relative_to_expiry_weights():
expiries = pd.DataFrame(
[["2015ESH", "2015-03-20", "2015-03-20"],
["2015ESM", "2015-06-19", "2015-06-19"],
["2015ESU", "2015-09-18", "2015-09-18"],
["2015TYH", "2015-03-16", "2015-03-20"],
["2015TYM", "2015-05-29", "2015-06-19"],
["2015TYU", "2015-08-31", "2015-09-21"]],
columns=["contract", "first_notice", "last_trade"]
)
# one generic and one product
dts = pd.date_range("2015-03-17", "2015-03-18", freq="B")
offsets = -3
root_gnrcs = {"ES": ["ES1"]}
wts = get_relative_to_expiry_instrument_weights(
dts, root_gnrcs, expiries, offsets
)
exp_wts = {
"ES": pd.DataFrame(
[1.0, 1.0],
index=pd.MultiIndex.from_tuples(
[(pd.Timestamp("2015-03-17"), "2015ESH"),
(pd.Timestamp("2015-03-18"), "2015ESM")],
names=("date", "contract")),
columns=["ES1"]
)
}
assert_dict_of_frames(wts, exp_wts)
# multiple products
dts = pd.date_range("2015-03-13", "2015-03-20", freq="B")
offsets = -1
root_gnrcs = {"ES": ["ES1"], "TY": ["TY1"]}
wts = get_relative_to_expiry_instrument_weights(
dts, root_gnrcs, expiries, offsets
)
exp_wts = {
"ES": pd.DataFrame([1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
index=pd.MultiIndex.from_tuples(
[(pd.Timestamp("2015-03-13"), "2015ESH"),
(pd.Timestamp("2015-03-16"), "2015ESH"),
(pd.Timestamp("2015-03-17"), "2015ESH"),
( | pd.Timestamp("2015-03-18") | pandas.Timestamp |
""" test fancy indexing & misc """
from datetime import datetime
import re
import weakref
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import (
is_float_dtype,
is_integer_dtype,
)
import pandas as pd
from pandas import (
DataFrame,
Index,
NaT,
Series,
date_range,
offsets,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.api import Float64Index
from pandas.tests.indexing.common import _mklbl
from pandas.tests.indexing.test_floats import gen_obj
# ------------------------------------------------------------------------
# Indexing test cases
class TestFancy:
"""pure get/set item & fancy indexing"""
def test_setitem_ndarray_1d(self):
# GH5508
# len of indexer vs length of the 1d ndarray
df = DataFrame(index=Index(np.arange(1, 11)))
df["foo"] = np.zeros(10, dtype=np.float64)
df["bar"] = np.zeros(10, dtype=complex)
# invalid
msg = "Must have equal len keys and value when setting with an iterable"
with pytest.raises(ValueError, match=msg):
df.loc[df.index[2:5], "bar"] = np.array([2.33j, 1.23 + 0.1j, 2.2, 1.0])
# valid
df.loc[df.index[2:6], "bar"] = np.array([2.33j, 1.23 + 0.1j, 2.2, 1.0])
result = df.loc[df.index[2:6], "bar"]
expected = Series(
[2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6], name="bar"
)
tm.assert_series_equal(result, expected)
def test_setitem_ndarray_1d_2(self):
# GH5508
# dtype getting changed?
df = DataFrame(index=Index(np.arange(1, 11)))
df["foo"] = np.zeros(10, dtype=np.float64)
df["bar"] = np.zeros(10, dtype=complex)
msg = "Must have equal len keys and value when setting with an iterable"
with pytest.raises(ValueError, match=msg):
df[2:5] = np.arange(1, 4) * 1j
def test_getitem_ndarray_3d(
self, index, frame_or_series, indexer_sli, using_array_manager
):
# GH 25567
obj = gen_obj(frame_or_series, index)
idxr = indexer_sli(obj)
nd3 = np.random.randint(5, size=(2, 2, 2))
msgs = []
if frame_or_series is Series and indexer_sli in [tm.setitem, tm.iloc]:
msgs.append(r"Wrong number of dimensions. values.ndim > ndim \[3 > 1\]")
if using_array_manager:
msgs.append("Passed array should be 1-dimensional")
if frame_or_series is Series or indexer_sli is tm.iloc:
msgs.append(r"Buffer has wrong number of dimensions \(expected 1, got 3\)")
if using_array_manager:
msgs.append("indexer should be 1-dimensional")
if indexer_sli is tm.loc or (
frame_or_series is Series and indexer_sli is tm.setitem
):
msgs.append("Cannot index with multidimensional key")
if frame_or_series is DataFrame and indexer_sli is tm.setitem:
msgs.append("Index data must be 1-dimensional")
if isinstance(index, pd.IntervalIndex) and indexer_sli is tm.iloc:
msgs.append("Index data must be 1-dimensional")
if isinstance(index, (pd.TimedeltaIndex, pd.DatetimeIndex, pd.PeriodIndex)):
msgs.append("Data must be 1-dimensional")
if len(index) == 0 or isinstance(index, pd.MultiIndex):
msgs.append("positional indexers are out-of-bounds")
msg = "|".join(msgs)
potential_errors = (IndexError, ValueError, NotImplementedError)
with pytest.raises(potential_errors, match=msg):
idxr[nd3]
def test_setitem_ndarray_3d(self, index, frame_or_series, indexer_sli):
# GH 25567
obj = gen_obj(frame_or_series, index)
idxr = indexer_sli(obj)
nd3 = np.random.randint(5, size=(2, 2, 2))
if indexer_sli is tm.iloc:
err = ValueError
msg = f"Cannot set values with ndim > {obj.ndim}"
else:
err = ValueError
msg = "|".join(
[
r"Buffer has wrong number of dimensions \(expected 1, got 3\)",
"Cannot set values with ndim > 1",
"Index data must be 1-dimensional",
"Data must be 1-dimensional",
"Array conditional must be same shape as self",
]
)
with pytest.raises(err, match=msg):
idxr[nd3] = 0
def test_getitem_ndarray_0d(self):
# GH#24924
key = np.array(0)
# dataframe __getitem__
df = DataFrame([[1, 2], [3, 4]])
result = df[key]
expected = Series([1, 3], name=0)
tm.assert_series_equal(result, expected)
# series __getitem__
ser = Series([1, 2])
result = ser[key]
assert result == 1
def test_inf_upcast(self):
# GH 16957
# We should be able to use np.inf as a key
# np.inf should cause an index to convert to float
# Test with np.inf in rows
df = DataFrame(columns=[0])
df.loc[1] = 1
df.loc[2] = 2
df.loc[np.inf] = 3
# make sure we can look up the value
assert df.loc[np.inf, 0] == 3
result = df.index
expected = Float64Index([1, 2, np.inf])
tm.assert_index_equal(result, expected)
def test_setitem_dtype_upcast(self):
# GH3216
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df["c"] = np.nan
assert df["c"].dtype == np.float64
df.loc[0, "c"] = "foo"
expected = DataFrame(
[{"a": 1, "b": np.nan, "c": "foo"}, {"a": 3, "b": 2, "c": np.nan}]
)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("val", [3.14, "wxyz"])
def test_setitem_dtype_upcast2(self, val):
# GH10280
df = DataFrame(
np.arange(6, dtype="int64").reshape(2, 3),
index=list("ab"),
columns=["foo", "bar", "baz"],
)
left = df.copy()
left.loc["a", "bar"] = val
right = DataFrame(
[[0, val, 2], [3, 4, 5]],
index=list("ab"),
columns=["foo", "bar", "baz"],
)
tm.assert_frame_equal(left, right)
assert is_integer_dtype(left["foo"])
assert is_integer_dtype(left["baz"])
def test_setitem_dtype_upcast3(self):
left = DataFrame(
np.arange(6, dtype="int64").reshape(2, 3) / 10.0,
index=list("ab"),
columns=["foo", "bar", "baz"],
)
left.loc["a", "bar"] = "wxyz"
right = DataFrame(
[[0, "wxyz", 0.2], [0.3, 0.4, 0.5]],
index=list("ab"),
columns=["foo", "bar", "baz"],
)
tm.assert_frame_equal(left, right)
assert is_float_dtype(left["foo"])
assert is_float_dtype(left["baz"])
def test_dups_fancy_indexing(self):
# GH 3455
df = tm.makeCustomDataframe(10, 3)
df.columns = ["a", "a", "b"]
result = df[["b", "a"]].columns
expected = Index(["b", "a", "a"])
tm.assert_index_equal(result, expected)
def test_dups_fancy_indexing_across_dtypes(self):
# across dtypes
df = DataFrame([[1, 2, 1.0, 2.0, 3.0, "foo", "bar"]], columns=list("aaaaaaa"))
df.head()
str(df)
result = DataFrame([[1, 2, 1.0, 2.0, 3.0, "foo", "bar"]])
result.columns = list("aaaaaaa")
# TODO(wesm): unused?
df_v = df.iloc[:, 4] # noqa
res_v = result.iloc[:, 4] # noqa
tm.assert_frame_equal(df, result)
def test_dups_fancy_indexing_not_in_order(self):
# GH 3561, dups not in selected order
df = DataFrame(
{"test": [5, 7, 9, 11], "test1": [4.0, 5, 6, 7], "other": list("abcd")},
index=["A", "A", "B", "C"],
)
rows = ["C", "B"]
expected = DataFrame(
{"test": [11, 9], "test1": [7.0, 6], "other": ["d", "c"]}, index=rows
)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
result = df.loc[Index(rows)]
tm.assert_frame_equal(result, expected)
rows = ["C", "B", "E"]
with pytest.raises(KeyError, match="not in index"):
df.loc[rows]
# see GH5553, make sure we use the right indexer
rows = ["F", "G", "H", "C", "B", "E"]
with pytest.raises(KeyError, match="not in index"):
df.loc[rows]
def test_dups_fancy_indexing_only_missing_label(self):
# List containing only missing label
dfnu = DataFrame(np.random.randn(5, 3), index=list("AABCD"))
with pytest.raises(
KeyError,
match=re.escape(
"\"None of [Index(['E'], dtype='object')] are in the [index]\""
),
):
dfnu.loc[["E"]]
# ToDo: check_index_type can be True after GH 11497
@pytest.mark.parametrize("vals", [[0, 1, 2], list("abc")])
def test_dups_fancy_indexing_missing_label(self, vals):
# GH 4619; duplicate indexer with missing label
df = DataFrame({"A": vals})
with pytest.raises(KeyError, match="not in index"):
df.loc[[0, 8, 0]]
def test_dups_fancy_indexing_non_unique(self):
# non unique with non unique selector
df = DataFrame({"test": [5, 7, 9, 11]}, index=["A", "A", "B", "C"])
with pytest.raises(KeyError, match="not in index"):
df.loc[["A", "A", "E"]]
def test_dups_fancy_indexing2(self):
# GH 5835
# dups on index and missing values
df = DataFrame(np.random.randn(5, 5), columns=["A", "B", "B", "B", "A"])
with pytest.raises(KeyError, match="not in index"):
df.loc[:, ["A", "B", "C"]]
def test_dups_fancy_indexing3(self):
# GH 6504, multi-axis indexing
df = DataFrame(
np.random.randn(9, 2), index=[1, 1, 1, 2, 2, 2, 3, 3, 3], columns=["a", "b"]
)
expected = df.iloc[0:6]
result = df.loc[[1, 2]]
tm.assert_frame_equal(result, expected)
expected = df
result = df.loc[:, ["a", "b"]]
tm.assert_frame_equal(result, expected)
expected = df.iloc[0:6, :]
result = df.loc[[1, 2], ["a", "b"]]
tm.assert_frame_equal(result, expected)
def test_duplicate_int_indexing(self, indexer_sl):
# GH 17347
ser = Series(range(3), index=[1, 1, 3])
expected = Series(range(2), index=[1, 1])
result = indexer_sl(ser)[[1]]
tm.assert_series_equal(result, expected)
def test_indexing_mixed_frame_bug(self):
# GH3492
df = DataFrame(
{"a": {1: "aaa", 2: "bbb", 3: "ccc"}, "b": {1: 111, 2: 222, 3: 333}}
)
# this works, new column is created correctly
df["test"] = df["a"].apply(lambda x: "_" if x == "aaa" else x)
# this does not work, ie column test is not changed
idx = df["test"] == "_"
temp = df.loc[idx, "a"].apply(lambda x: "-----" if x == "aaa" else x)
df.loc[idx, "test"] = temp
assert df.iloc[0, 2] == "-----"
def test_multitype_list_index_access(self):
# GH 10610
df = DataFrame(np.random.random((10, 5)), columns=["a"] + [20, 21, 22, 23])
with pytest.raises(KeyError, match=re.escape("'[26, -8] not in index'")):
df[[22, 26, -8]]
assert df[21].shape[0] == df.shape[0]
def test_set_index_nan(self):
# GH 3586
df = DataFrame(
{
"PRuid": {
17: "nonQC",
18: "nonQC",
19: "nonQC",
20: "10",
21: "11",
22: "12",
23: "13",
24: "24",
25: "35",
26: "46",
27: "47",
28: "48",
29: "59",
30: "10",
},
"QC": {
17: 0.0,
18: 0.0,
19: 0.0,
20: np.nan,
21: np.nan,
22: np.nan,
23: np.nan,
24: 1.0,
25: np.nan,
26: np.nan,
27: np.nan,
28: np.nan,
29: np.nan,
30: np.nan,
},
"data": {
17: 7.9544899999999998,
18: 8.0142609999999994,
19: 7.8591520000000008,
20: 0.86140349999999999,
21: 0.87853110000000001,
22: 0.8427041999999999,
23: 0.78587700000000005,
24: 0.73062459999999996,
25: 0.81668560000000001,
26: 0.81927080000000008,
27: 0.80705009999999999,
28: 0.81440240000000008,
29: 0.80140849999999997,
30: 0.81307740000000006,
},
"year": {
17: 2006,
18: 2007,
19: 2008,
20: 1985,
21: 1985,
22: 1985,
23: 1985,
24: 1985,
25: 1985,
26: 1985,
27: 1985,
28: 1985,
29: 1985,
30: 1986,
},
}
).reset_index()
result = (
df.set_index(["year", "PRuid", "QC"])
.reset_index()
.reindex(columns=df.columns)
)
tm.assert_frame_equal(result, df)
def test_multi_assign(self):
# GH 3626, an assignment of a sub-df to a df
df = DataFrame(
{
"FC": ["a", "b", "a", "b", "a", "b"],
"PF": [0, 0, 0, 0, 1, 1],
"col1": list(range(6)),
"col2": list(range(6, 12)),
}
)
df.iloc[1, 0] = np.nan
df2 = df.copy()
mask = ~df2.FC.isna()
cols = ["col1", "col2"]
dft = df2 * 2
dft.iloc[3, 3] = np.nan
expected = DataFrame(
{
"FC": ["a", np.nan, "a", "b", "a", "b"],
"PF": [0, 0, 0, 0, 1, 1],
"col1": Series([0, 1, 4, 6, 8, 10]),
"col2": [12, 7, 16, np.nan, 20, 22],
}
)
# frame on rhs
df2.loc[mask, cols] = dft.loc[mask, cols]
tm.assert_frame_equal(df2, expected)
# with an ndarray on rhs
# coerces to float64 because values has float64 dtype
# GH 14001
expected = DataFrame(
{
"FC": ["a", np.nan, "a", "b", "a", "b"],
"PF": [0, 0, 0, 0, 1, 1],
"col1": [0.0, 1.0, 4.0, 6.0, 8.0, 10.0],
"col2": [12, 7, 16, np.nan, 20, 22],
}
)
df2 = df.copy()
df2.loc[mask, cols] = dft.loc[mask, cols].values
tm.assert_frame_equal(df2, expected)
def test_multi_assign_broadcasting_rhs(self):
# broadcasting on the rhs is required
df = DataFrame(
{
"A": [1, 2, 0, 0, 0],
"B": [0, 0, 0, 10, 11],
"C": [0, 0, 0, 10, 11],
"D": [3, 4, 5, 6, 7],
}
)
expected = df.copy()
mask = expected["A"] == 0
for col in ["A", "B"]:
expected.loc[mask, col] = df["D"]
df.loc[df["A"] == 0, ["A", "B"]] = df["D"]
tm.assert_frame_equal(df, expected)
# TODO(ArrayManager) setting single item with an iterable doesn't work yet
# in the "split" path
@td.skip_array_manager_not_yet_implemented
def test_setitem_list(self):
# GH 6043
# iloc with a list
df = DataFrame(index=[0, 1], columns=[0])
df.iloc[1, 0] = [1, 2, 3]
df.iloc[1, 0] = [1, 2]
result = DataFrame(index=[0, 1], columns=[0])
result.iloc[1, 0] = [1, 2]
tm.assert_frame_equal(result, df)
def test_string_slice(self):
# GH 14424
# string indexing against datetimelike with object
# dtype should properly raises KeyError
df = DataFrame([1], Index([pd.Timestamp("2011-01-01")], dtype=object))
assert df.index._is_all_dates
with pytest.raises(KeyError, match="'2011'"):
df["2011"]
with pytest.raises(KeyError, match="'2011'"):
df.loc["2011", 0]
def test_string_slice_empty(self):
# GH 14424
df = DataFrame()
assert not df.index._is_all_dates
with pytest.raises(KeyError, match="'2011'"):
df["2011"]
with pytest.raises(KeyError, match="^0$"):
df.loc["2011", 0]
def test_astype_assignment(self):
# GH4312 (iloc)
df_orig = DataFrame(
[["1", "2", "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
)
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2].astype(np.int64)
expected = DataFrame(
[[1, 2, "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
)
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2]._convert(datetime=True, numeric=True)
expected = DataFrame(
[[1, 2, "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
)
tm.assert_frame_equal(df, expected)
# GH5702 (loc)
df = df_orig.copy()
df.loc[:, "A"] = df.loc[:, "A"].astype(np.int64)
expected = DataFrame(
[[1, "2", "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
)
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[:, ["B", "C"]] = df.loc[:, ["B", "C"]].astype(np.int64)
expected = DataFrame(
[["1", 2, 3, ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
)
tm.assert_frame_equal(df, expected)
def test_astype_assignment_full_replacements(self):
# full replacements / no nans
df = DataFrame({"A": [1.0, 2.0, 3.0, 4.0]})
df.iloc[:, 0] = df["A"].astype(np.int64)
expected = DataFrame({"A": [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
df = DataFrame({"A": [1.0, 2.0, 3.0, 4.0]})
df.loc[:, "A"] = df["A"].astype(np.int64)
expected = DataFrame({"A": [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("indexer", [tm.getitem, tm.loc])
def test_index_type_coercion(self, indexer):
# GH 11836
# if we have an index type and set it with something that looks
# to numpy like the same, but is actually, not
# (e.g. setting with a float or string '0')
# then we need to coerce to object
# integer indexes
for s in [Series(range(5)), Series(range(5), index=range(1, 6))]:
assert s.index.is_integer()
s2 = s.copy()
indexer(s2)[0.1] = 0
assert s2.index.is_floating()
assert indexer(s2)[0.1] == 0
s2 = s.copy()
indexer(s2)[0.0] = 0
exp = s.index
if 0 not in s:
exp = Index(s.index.tolist() + [0])
tm.assert_index_equal(s2.index, exp)
s2 = s.copy()
indexer(s2)["0"] = 0
assert s2.index.is_object()
for s in [Series(range(5), index=np.arange(5.0))]:
assert s.index.is_floating()
s2 = s.copy()
indexer(s2)[0.1] = 0
assert s2.index.is_floating()
assert indexer(s2)[0.1] == 0
s2 = s.copy()
indexer(s2)[0.0] = 0
| tm.assert_index_equal(s2.index, s.index) | pandas._testing.assert_index_equal |
from elasticsearch_dsl.query import Query
from fastapi import APIRouter
from elasticsearch import Elasticsearch
from elasticsearch_dsl import Search, Q, Index
from typing import List, Dict, Any
from app.types import Node, Keyphrase
import json
import os
import pandas as pd
import spacy
import pytextrank
nlp = spacy.load("en_core_web_sm")
nlp.add_pipe("textrank")
from app.controllers.graph.converter import get_graph, get_overview_graph
from app.services.graph.node import get_anchor_property_values
from app.utils.timer import use_timing
router = APIRouter()
es = Elasticsearch("csx_elastic:9200", retry_on_timeout=True)
def convert_query_to_df(query, search):
results = search.query(query).execute()
elastic_list = []
for entry in results["hits"]["hits"]:
entry_dict = entry["_source"].to_dict()
entry_dict["entry"] = entry["_id"]
elastic_list.append(entry_dict)
return | pd.DataFrame(elastic_list) | pandas.DataFrame |
"""
Calculate MQA scores only for the resolved region from local score.
MQA methods:
- DeepAccNet
- P3CMQA
- ProQ3D
- VoroCNN
"""
import argparse
import os
import subprocess
import tarfile
from pathlib import Path
from typing import Any, List, Union
import numpy as np
import pandas as pd
from prody import parsePDB, writePDB
from tqdm import tqdm
data_dir = Path('../../data')
interim_path = data_dir / 'interim'
score_dir = data_dir / 'out/dataset/score/mqa'
def open_tar(tar_file: Union[str, Path]) -> tarfile.TarFile:
return tarfile.open(tar_file, 'r:gz')
def get_resolved_pdb(target: str, resolved_indices: List[int]) -> Path:
target_pdb_dir = data_dir / 'out/dataset/alphafold_output' / target
pdb_resolved_dir = data_dir / 'out/dataset/pdb/pdb_resolved'
pdb_resolved_target_dir = pdb_resolved_dir / target
pdb_resolved_target_dir.mkdir(parents=True, exist_ok=True)
for pdb in target_pdb_dir.glob('*.pdb'):
pdb_name = pdb.stem
output_pdb_path = pdb_resolved_target_dir / f'{pdb_name}.pdb'
if output_pdb_path.exists():
continue
mol = parsePDB(pdb)
resindices = mol.getResnums() - 1
resolved_atom_indices = np.where(np.isin(resindices, resolved_indices))[0]
mol_resolved = mol[resolved_atom_indices]
writePDB(str(output_pdb_path), mol_resolved)
return pdb_resolved_target_dir
class CalcResolvedConfidence:
missing_dict = np.load(interim_path / 'missing_residues.npy', allow_pickle=True).item()
def __init__(self, method: str, target_csv: Union[str, Path]):
self.method = method
self.target_df = pd.read_csv(target_csv, index_col=0)
def __call__(self, *args: Any, **kwds: Any) -> Any:
results = []
with tqdm(self.target_df.iterrows(), total=len(self.target_df)) as pbar:
for _, row in pbar:
target = row['id']
pbar.set_description(f'Target = {target}')
length = row['length']
result = self.for_target(target, length)
results.append(result)
if sum([1 if result is None else 0 for result in results]) > 0:
print(f'{self.method} calculation not yet finished')
exit()
return pd.concat(results)
def for_target(self, target: str, length: int) -> Union[pd.DataFrame, None]:
resolved_indices = self.get_resolved_indices(target, length)
if self.method == 'DeepAccNet' or self.method == 'DeepAccNet-Bert':
result = self.DeepAccNet(target, length)
elif self.method == 'P3CMQA' or self.method == 'Sato-3DCNN':
result = self.P3CMQA(target, resolved_indices)
elif self.method == 'ProQ3D':
result = self.ProQ3D(target, resolved_indices)
elif self.method == 'VoroCNN':
result = self.VoroCNN(target, resolved_indices)
elif self.method == 'DOPE':
result = self.DOPE(target, resolved_indices)
elif self.method == 'SBROD':
result = self.SBROD(target, resolved_indices)
else:
raise ValueError(f'Unknown method: {self.method}')
return result
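# Resolved residue indices = all indices 0..length-1 minus the missing residues recorded for this target.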
@classmethod
def get_resolved_indices(cls, target: str, length: int) -> List[int]:
return np.setdiff1d(np.arange(length), cls.missing_dict[target])
def DeepAccNet(self, target: str, length: int) -> Union[pd.DataFrame, None]:
deepaccnet_path = score_dir / 'DeepAccNet'
result_path = deepaccnet_path / f'{target}_resolved.csv'
# if calculation already finished
if result_path.exists():
result_df = pd.read_csv(result_path, index_col=0)
return result_df
# if calculation not yet finished
os.chdir('DeepAccNet')
cmd = ['qsub', '-g', 'tga-ishidalab', './get_score_resolved.sh', target, str(length)]
subprocess.run(cmd)
os.chdir('..')
return None
def P3CMQA(self, target: str, resolved_indices: List[int]) -> pd.DataFrame:
p3cmqa_path = score_dir / self.method
tar_path = p3cmqa_path / f'{target}.tar.gz'
tar = open_tar(tar_path)
results = []
for tarinfo in tar:
if tarinfo.name.endswith('.csv'):
if Path(tarinfo.name).stem == target:
continue
f = tar.extractfile(tarinfo.name)
local_df = pd.read_csv(f, index_col=0)
resolved_score = np.mean(local_df['Score'][resolved_indices])
results.append([Path(tarinfo.name).stem, resolved_score])
result_df = pd.DataFrame(results, columns=['Model', f'{self.method}_resolved'])
result_df['Target'] = target
return result_df
def ProQ3D(self, target: str, resolved_indices: List[int]) -> pd.DataFrame:
proq3d_path = score_dir / self.method
tar_path = proq3d_path / f'{target}.tar.gz'
tar = open_tar(tar_path)
results = []
for tarinfo in tar:
if tarinfo.name.endswith('.local'):
f = tar.extractfile(tarinfo.name)
local_df = pd.read_csv(f, sep=' ')
resolved_score_dict = local_df.iloc[resolved_indices].mean().to_dict()
resolved_score_dict['Model'] = Path(tarinfo.name).stem.split('.')[0]
results.append(resolved_score_dict)
result_df = | pd.DataFrame(results) | pandas.DataFrame |
"""This module is meant to contain the OpenSea class"""
from messari.dataloader import DataLoader
from messari.utils import validate_input
from string import Template
from typing import Union, List
import pandas as pd
# Reference: https://docs.opensea.io/reference/api-overview
# TODO, api key as header
ASSET_URL = Template('https://api.opensea.io/api/v1/asset/$contract/$id/')
CONTRACT_URL = Template('https://api.opensea.io/api/v1/asset_contract/$contract')
COLLECTION_URL = Template('https://api.opensea.io/api/v1/collection/$collection')
STATS_URL = Template('https://api.opensea.io/api/v1/collection/$collection/stats')
EVENTS_URL = 'https://api.opensea.io/api/v1/events'
HEADERS = {'Accept': 'application/json'}
class OpenSea(DataLoader):
"""This class is a wrapper around the OpenSea API
"""
def __init__(self, api_key=None):
api_dict = None
if api_key:
api_dict = {'X-API-KEY': api_key}
DataLoader.__init__(self, api_dict=api_dict, taxonomy_dict=None)
def get_asset(self, contracts_in: Union[str, List], assets_in: Union[str, List], account: str=None) -> pd.DataFrame:
"""Used to fetch more in-depth information about an individual asset
Parameters
----------
contracts_in: str, List
    single contract address or list of contract addresses
assets_in: str, List
    single token id or list of token ids
account: str, optional
    owner account address used to filter results (sent as account_address)
Returns
-------
DataFrame
DataFrame with asset info
"""
params = {}
if account:
params['account_address'] = account
# check api-key to update headers
if self.api_dict:
headers = HEADERS | self.api_dict
else:
headers = HEADERS
contracts = validate_input(contracts_in)
assets = validate_input(assets_in)
df_list=[]
for contract in contracts:
series_list=[]
for asset in assets:
endpoint_url = ASSET_URL.substitute(contract=contract, id=asset)
response = self.get_response(endpoint_url, params=params, headers=headers)
tmp_series= | pd.Series(response) | pandas.Series |
from flask import Flask, render_template, jsonify, request
from flask_pymongo import PyMongo
from flask_cors import CORS, cross_origin
import json
import collections
import numpy as np
import re
from numpy import array
from statistics import mode
import pandas as pd
import warnings
import copy
from joblib import Memory
from itertools import chain
import ast
import timeit
from sklearn.neighbors import KNeighborsClassifier # 1 neighbors
from sklearn.svm import SVC # 1 svm
from sklearn.naive_bayes import GaussianNB # 1 naive bayes
from sklearn.neural_network import MLPClassifier # 1 neural network
from sklearn.linear_model import LogisticRegression # 1 linear model
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis # 2 discriminant analysis
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, AdaBoostClassifier, GradientBoostingClassifier # 4 ensemble models
from joblib import Parallel, delayed
import multiprocessing
from sklearn.pipeline import make_pipeline
from sklearn import model_selection
from sklearn.manifold import MDS
from sklearn.manifold import TSNE
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import log_loss
from sklearn.metrics import fbeta_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from imblearn.metrics import geometric_mean_score
import umap
from sklearn.metrics import classification_report
from sklearn.preprocessing import scale
import eli5
from eli5.sklearn import PermutationImportance
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.feature_selection import RFE
from sklearn.decomposition import PCA
from mlxtend.classifier import StackingCVClassifier
from mlxtend.feature_selection import ColumnSelector
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import ShuffleSplit
from scipy.spatial import procrustes
# This block of code is for the connection between the server, the database, and the client (plus routing).
# Access MongoDB
app = Flask(__name__)
app.config["MONGO_URI"] = "mongodb://localhost:27017/mydb"
mongo = PyMongo(app)
cors = CORS(app, resources={r"/data/*": {"origins": "*"}})
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/Reset', methods=["GET", "POST"])
def Reset():
global DataRawLength
global DataResultsRaw
global previousState
previousState = []
global filterActionFinal
filterActionFinal = ''
global keySpecInternal
keySpecInternal = 1
global dataSpacePointsIDs
dataSpacePointsIDs = []
global previousStateActive
previousStateActive = []
global StanceTest
StanceTest = False
global status
status = True
global factors
factors = [1,0,0,1,0,0,1,0,0,1,0,0,0,0,0,1,0,0,0,1,1,1]
global KNNModelsCount
global SVCModelsCount
global GausNBModelsCount
global MLPModelsCount
global LRModelsCount
global LDAModelsCount
global QDAModelsCount
global RFModelsCount
global ExtraTModelsCount
global AdaBModelsCount
global GradBModelsCount
global keyData
keyData = 0
KNNModelsCount = 0
SVCModelsCount = 576
GausNBModelsCount = 736
MLPModelsCount = 1236
LRModelsCount = 1356
LDAModelsCount = 1996
QDAModelsCount = 2196
RFModelsCount = 2446
ExtraTModelsCount = 2606
AdaBModelsCount = 2766
GradBModelsCount = 2926
global XData
XData = []
global yData
yData = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global detailsParams
detailsParams = []
global algorithmList
algorithmList = []
global ClassifierIDsList
ClassifierIDsList = ''
# Initializing models
global resultsList
resultsList = []
global RetrieveModelsList
RetrieveModelsList = []
global allParametersPerformancePerModel
allParametersPerformancePerModel = []
global all_classifiers
all_classifiers = []
global crossValidation
crossValidation = 5
# models
global KNNModels
KNNModels = []
global RFModels
RFModels = []
global scoring
scoring = {'accuracy': 'accuracy', 'precision_micro': 'precision_micro', 'precision_macro': 'precision_macro', 'precision_weighted': 'precision_weighted', 'recall_micro': 'recall_micro', 'recall_macro': 'recall_macro', 'recall_weighted': 'recall_weighted', 'roc_auc_ovo_weighted': 'roc_auc_ovo_weighted'}
global loopFeatures
loopFeatures = 2
global results
results = []
global resultsMetrics
resultsMetrics = []
global parametersSelData
parametersSelData = []
global target_names
target_names = []
global target_namesLoc
target_namesLoc = []
return 'The reset was done!'
# Retrieve data from client and select the correct data set
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequest', methods=["GET", "POST"])
def RetrieveFileName():
global DataRawLength
global DataResultsRaw
global DataResultsRawTest
global DataRawLengthTest
fileName = request.get_data().decode('utf8').replace("'", '"')
global keySpecInternal
keySpecInternal = 1
global filterActionFinal
filterActionFinal = ''
global dataSpacePointsIDs
dataSpacePointsIDs = []
global RANDOM_SEED
RANDOM_SEED = 42
global keyData
keyData = 0
global XData
XData = []
global previousState
previousState = []
global previousStateActive
previousStateActive = []
global status
status = True
global yData
yData = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global filterDataFinal
filterDataFinal = 'mean'
global ClassifierIDsList
ClassifierIDsList = ''
global algorithmList
algorithmList = []
global detailsParams
detailsParams = []
# Initializing models
global RetrieveModelsList
RetrieveModelsList = []
global resultsList
resultsList = []
global allParametersPerformancePerModel
allParametersPerformancePerModel = []
global all_classifiers
all_classifiers = []
global scoring
scoring = {'accuracy': 'accuracy', 'precision_micro': 'precision_micro', 'precision_macro': 'precision_macro', 'precision_weighted': 'precision_weighted', 'recall_micro': 'recall_micro', 'recall_macro': 'recall_macro', 'recall_weighted': 'recall_weighted', 'roc_auc_ovo_weighted': 'roc_auc_ovo_weighted'}
global loopFeatures
loopFeatures = 2
# models
global KNNModels
global SVCModels
global GausNBModels
global MLPModels
global LRModels
global LDAModels
global QDAModels
global RFModels
global ExtraTModels
global AdaBModels
global GradBModels
KNNModels = []
SVCModels = []
GausNBModels = []
MLPModels = []
LRModels = []
LDAModels = []
QDAModels = []
RFModels = []
ExtraTModels = []
AdaBModels = []
GradBModels = []
global results
results = []
global resultsMetrics
resultsMetrics = []
global parametersSelData
parametersSelData = []
global StanceTest
StanceTest = False
global target_names
target_names = []
global target_namesLoc
target_namesLoc = []
DataRawLength = -1
DataRawLengthTest = -1
data = json.loads(fileName)
if data['fileName'] == 'HeartC':
CollectionDB = mongo.db.HeartC.find()
elif data['fileName'] == 'StanceC':
StanceTest = True
CollectionDB = mongo.db.StanceC.find()
CollectionDBTest = mongo.db.StanceCTest.find()
elif data['fileName'] == 'DiabetesC':
CollectionDB = mongo.db.diabetesC.find()
elif data['fileName'] == 'BreastC':
CollectionDB = mongo.db.breastC.find()
elif data['fileName'] == 'WineC':
CollectionDB = mongo.db.WineC.find()
elif data['fileName'] == 'ContraceptiveC':
CollectionDB = mongo.db.ContraceptiveC.find()
elif data['fileName'] == 'VehicleC':
CollectionDB = mongo.db.VehicleC.find()
elif data['fileName'] == 'BiodegC':
StanceTest = True
CollectionDB = mongo.db.biodegC.find()
CollectionDBTest = mongo.db.biodegCTest.find()
else:
CollectionDB = mongo.db.IrisC.find()
DataResultsRaw = []
for index, item in enumerate(CollectionDB):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRaw.append(item)
DataRawLength = len(DataResultsRaw)
DataResultsRawTest = []
if (StanceTest):
for index, item in enumerate(CollectionDBTest):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRawTest.append(item)
DataRawLengthTest = len(DataResultsRawTest)
DataSetSelection()
return 'Everything is okay'
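# Convert a flat [key1, value1, key2, value2, ...] list into a dictionary.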
def Convert(lst):
it = iter(lst)
res_dct = dict(zip(it, it))
return res_dct
# Retrieve data set from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/SendtoSeverDataSet', methods=["GET", "POST"])
def SendToServerData():
uploadedData = request.get_data().decode('utf8').replace("'", '"')
uploadedDataParsed = json.loads(uploadedData)
DataResultsRaw = uploadedDataParsed['uploadedData']
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary[target]
global AllTargets
global target_names
global target_namesLoc
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
target_names.append(value)
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
target_names.append(value)
AllTargetsFloatValues.append(Class)
previous = value
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
return 'Processed uploaded data set'
# Sent data to client
@app.route('/data/ClientRequest', methods=["GET", "POST"])
def CollectionData():
json.dumps(DataResultsRaw)
response = {
'Collection': DataResultsRaw
}
return jsonify(response)
def DataSetSelection():
global XDataTest, yDataTest
XDataTest = pd.DataFrame()
global StanceTest
global AllTargets
global target_names
target_namesLoc = []
if (StanceTest):
DataResultsTest = copy.deepcopy(DataResultsRawTest)
for dictionary in DataResultsRawTest:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRawTest.sort(key=lambda x: x[target], reverse=True)
DataResultsTest.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResultsTest:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargetsTest = [o[target] for o in DataResultsRawTest]
AllTargetsFloatValuesTest = []
previous = None
Class = 0
for i, value in enumerate(AllTargetsTest):
if (i == 0):
previous = value
target_namesLoc.append(value)
if (value == previous):
AllTargetsFloatValuesTest.append(Class)
else:
Class = Class + 1
target_namesLoc.append(value)
AllTargetsFloatValuesTest.append(Class)
previous = value
ArrayDataResultsTest = pd.DataFrame.from_dict(DataResultsTest)
XDataTest, yDataTest = ArrayDataResultsTest, AllTargetsFloatValuesTest
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
target_names.append(value)
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
target_names.append(value)
AllTargetsFloatValues.append(Class)
previous = value
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
warnings.simplefilter('ignore')
return 'Everything is okay'
def callPreResults():
global XData
global yData
global target_names
global impDataInst
DataSpaceResMDS = FunMDS(XData)
DataSpaceResTSNE = FunTsne(XData)
DataSpaceResTSNE = DataSpaceResTSNE.tolist()
DataSpaceUMAP = FunUMAP(XData)
XDataJSONEntireSetRes = XData.to_json(orient='records')
global preResults
preResults = []
preResults.append(json.dumps(target_names)) # Position: 0
preResults.append(json.dumps(DataSpaceResMDS)) # Position: 1
preResults.append(json.dumps(XDataJSONEntireSetRes)) # Position: 2
preResults.append(json.dumps(yData)) # Position: 3
preResults.append(json.dumps(AllTargets)) # Position: 4
preResults.append(json.dumps(DataSpaceResTSNE)) # Position: 5
preResults.append(json.dumps(DataSpaceUMAP)) # Position: 6
preResults.append(json.dumps(impDataInst)) # Position: 7
# Sending each model's results to frontend
@app.route('/data/requestDataSpaceResults', methods=["GET", "POST"])
def SendDataSpaceResults():
global preResults
callPreResults()
response = {
'preDataResults': preResults,
}
return jsonify(response)
# Main function
if __name__ == '__main__':
app.run()
# Debugging and mirroring client
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def catch_all(path):
if app.debug:
return requests.get('http://localhost:8080/{}'.format(path)).text
return render_template("index.html")
# This block of code is for server computations
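# Return the positional (integer) indices of query_cols within df.columns.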
def column_index(df, query_cols):
cols = df.columns.values
sidx = np.argsort(cols)
return sidx[np.searchsorted(cols,query_cols,sorter=sidx)].tolist()
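# Per-class feature importance: for each class, the mean of the scaled feature
# values of its samples, weighted by the model's global feature importances.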
def class_feature_importance(X, Y, feature_importances):
N, M = X.shape
X = scale(X)
out = {}
for c in set(Y):
out[c] = dict(
zip(range(N), np.mean(X[Y==c, :], axis=0)*feature_importances)
)
return out
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/EnsembleMode', methods=["GET", "POST"])
def EnsembleMethod():
global crossValidation
global RANDOM_SEED
global XData
RANDOM_SEED = 42
RetrievedStatus = request.get_data().decode('utf8').replace("'", '"')
RetrievedStatus = json.loads(RetrievedStatus)
modeMethod = RetrievedStatus['defaultModeMain']
if (modeMethod == 'blend'):
crossValidation = ShuffleSplit(n_splits=1, test_size=.20, random_state=RANDOM_SEED)
else:
crossValidation = 5
return 'Okay'
# Initialize every model for each algorithm
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequestSelParameters', methods=["GET", "POST"])
def RetrieveModel():
# get the models from the frontend
RetrievedModel = request.get_data().decode('utf8').replace("'", '"')
RetrievedModel = json.loads(RetrievedModel)
global algorithms
algorithms = RetrievedModel['Algorithms']
toggle = RetrievedModel['Toggle']
global crossValidation
global XData
global yData
global SVCModelsCount
global GausNBModelsCount
global MLPModelsCount
global LRModelsCount
global LDAModelsCount
global QDAModelsCount
global RFModelsCount
global ExtraTModelsCount
global AdaBModelsCount
global GradBModelsCount
# loop through the algorithms
global allParametersPerformancePerModel
start = timeit.default_timer()
print('CVorTT', crossValidation)
for eachAlgor in algorithms:
if (eachAlgor) == 'KNN':
clf = KNeighborsClassifier()
params = {'n_neighbors': list(range(1, 25)), 'metric': ['chebyshev', 'manhattan', 'euclidean', 'minkowski'], 'algorithm': ['brute', 'kd_tree', 'ball_tree'], 'weights': ['uniform', 'distance']}
AlgorithmsIDsEnd = 0
elif (eachAlgor) == 'SVC':
clf = SVC(probability=True,random_state=RANDOM_SEED)
params = {'C': list(np.arange(0.1,4.43,0.11)), 'kernel': ['rbf','linear', 'poly', 'sigmoid']}
AlgorithmsIDsEnd = SVCModelsCount
elif (eachAlgor) == 'GauNB':
clf = GaussianNB()
params = {'var_smoothing': list(np.arange(0.00000000001,0.0000001,0.0000000002))}
AlgorithmsIDsEnd = GausNBModelsCount
elif (eachAlgor) == 'MLP':
clf = MLPClassifier(random_state=RANDOM_SEED)
params = {'alpha': list(np.arange(0.00001,0.001,0.0002)), 'tol': list(np.arange(0.00001,0.001,0.0004)), 'max_iter': list(np.arange(100,200,100)), 'activation': ['relu', 'identity', 'logistic', 'tanh'], 'solver' : ['adam', 'sgd']}
AlgorithmsIDsEnd = MLPModelsCount
elif (eachAlgor) == 'LR':
clf = LogisticRegression(random_state=RANDOM_SEED)
params = {'C': list(np.arange(0.5,2,0.075)), 'max_iter': list(np.arange(50,250,50)), 'solver': ['lbfgs', 'newton-cg', 'sag', 'saga'], 'penalty': ['l2', 'none']}
AlgorithmsIDsEnd = LRModelsCount
elif (eachAlgor) == 'LDA':
clf = LinearDiscriminantAnalysis()
params = {'shrinkage': list(np.arange(0,1,0.01)), 'solver': ['lsqr', 'eigen']}
AlgorithmsIDsEnd = LDAModelsCount
elif (eachAlgor) == 'QDA':
clf = QuadraticDiscriminantAnalysis()
params = {'reg_param': list(np.arange(0,1,0.02)), 'tol': list(np.arange(0.00001,0.001,0.0002))}
AlgorithmsIDsEnd = QDAModelsCount
elif (eachAlgor) == 'RF':
clf = RandomForestClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(60, 140)), 'criterion': ['gini', 'entropy']}
AlgorithmsIDsEnd = RFModelsCount
elif (eachAlgor) == 'ExtraT':
clf = ExtraTreesClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(60, 140)), 'criterion': ['gini', 'entropy']}
AlgorithmsIDsEnd = ExtraTModelsCount
elif (eachAlgor) == 'AdaB':
clf = AdaBoostClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(40, 80)), 'learning_rate': list(np.arange(0.1,2.3,1.1)), 'algorithm': ['SAMME.R', 'SAMME']}
AlgorithmsIDsEnd = AdaBModelsCount
else:
clf = GradientBoostingClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(85, 115)), 'learning_rate': list(np.arange(0.01,0.23,0.11)), 'criterion': ['friedman_mse', 'mse', 'mae']}
AlgorithmsIDsEnd = GradBModelsCount
allParametersPerformancePerModel = GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd, toggle, crossValidation)
# New visualization - model space
# header = "model_id,algorithm_id,mean_test_accuracy,mean_test_precision_micro,mean_test_precision_macro,mean_test_precision_weighted,mean_test_recall_micro,mean_test_recall_macro,mean_test_recall_weighted,mean_test_roc_auc_ovo_weighted,geometric_mean_score_micro,geometric_mean_score_macro,geometric_mean_score_weighted,matthews_corrcoef,f5_micro,f5_macro,f5_weighted,f1_micro,f1_macro,f1_weighted,f2_micro,f2_macro,f2_weighted,log_loss\n"
# dataReceived = []
# counter = 0
# for indx, el in enumerate(allParametersPerformancePerModel):
# dictFR = json.loads(el)
# frame = pd.DataFrame.from_dict(dictFR)
# for ind, elInside in frame.iterrows():
# counter = counter + 1
# dataReceived.append(str(counter))
# dataReceived.append(',')
# dataReceived.append(str(indx+1))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_accuracy']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_roc_auc_ovo_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['matthews_corrcoef']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['log_loss']))
# dataReceived.append("\n")
# dataReceivedItems = ''.join(dataReceived)
# csvString = header + dataReceivedItems
# fw = open ("modelSpace.csv","w+",encoding="utf-8")
# fw.write(csvString)
# fw.close()
# call the function that sends the results to the frontend
stop = timeit.default_timer()
print('Time GridSearch: ', stop - start)
SendEachClassifiersPerformanceToVisualize()
return 'Everything Okay'
location = './cachedir'
memory = Memory(location, verbose=0)
# calculate performance and other results for all algorithms and models
@memory.cache
def GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd, toggle, crossVal):
print('loop')
# this is the grid we use to train the models
grid = GridSearchCV(
estimator=clf, param_grid=params,
cv=crossVal, refit='accuracy', scoring=scoring,
verbose=0, n_jobs=-1)
# fit and extract the probabilities
grid.fit(XData, yData)
# process the results
cv_results = []
cv_results.append(grid.cv_results_)
df_cv_results = pd.DataFrame.from_dict(cv_results)
# number of models stored
number_of_models = len(df_cv_results.iloc[0][0])
# initialize results per row
df_cv_results_per_row = []
# loop through number of models
modelsIDs = []
for i in range(number_of_models):
modelsIDs.append(AlgorithmsIDsEnd+i)
# initialize results per item
df_cv_results_per_item = []
for column in df_cv_results.iloc[0]:
df_cv_results_per_item.append(column[i])
df_cv_results_per_row.append(df_cv_results_per_item)
# store the results into a pandas dataframe
df_cv_results_classifiers = pd.DataFrame(data = df_cv_results_per_row, columns= df_cv_results.columns)
# copy and filter in order to get only the metrics
metrics = df_cv_results_classifiers.copy()
metrics = metrics.filter(['mean_test_accuracy','mean_test_precision_micro','mean_test_precision_macro','mean_test_precision_weighted','mean_test_recall_micro','mean_test_recall_macro','mean_test_recall_weighted','mean_test_roc_auc_ovo_weighted'])
# concat parameters and performance
parametersPerformancePerModel = pd.DataFrame(df_cv_results_classifiers['params'])
parametersPerformancePerModel = parametersPerformancePerModel.to_json()
parametersLocal = json.loads(parametersPerformancePerModel)['params'].copy()
Models = []
for index, items in enumerate(parametersLocal):
Models.append(str(index))
parametersLocalNew = [ parametersLocal[your_key] for your_key in Models ]
permList = []
PerFeatureAccuracy = []
PerFeatureAccuracyAll = []
PerClassMetric = []
perModelProb = []
perModelPrediction = []
resultsMicro = []
resultsMacro = []
resultsWeighted = []
resultsCorrCoef = []
resultsMicroBeta5 = []
resultsMacroBeta5 = []
resultsWeightedBeta5 = []
resultsMicroBeta1 = []
resultsMacroBeta1 = []
resultsWeightedBeta1 = []
resultsMicroBeta2 = []
resultsMacroBeta2 = []
resultsWeightedBeta2 = []
resultsLogLoss = []
resultsLogLossFinal = []
loop = 8
# influence calculation for all the instances
inputs = range(len(XData))
num_cores = multiprocessing.cpu_count()
#impDataInst = Parallel(n_jobs=num_cores)(delayed(processInput)(i,XData,yData,crossValidation,clf) for i in inputs)
for eachModelParameters in parametersLocalNew:
clf.set_params(**eachModelParameters)
if (toggle == 1):
perm = PermutationImportance(clf, cv = None, refit = True, n_iter = 25).fit(XData, yData)
permList.append(perm.feature_importances_)
n_feats = XData.shape[1]
PerFeatureAccuracy = []
for i in range(n_feats):
scores = model_selection.cross_val_score(clf, XData.values[:, i].reshape(-1, 1), yData, cv=5)
PerFeatureAccuracy.append(scores.mean())
PerFeatureAccuracyAll.append(PerFeatureAccuracy)
else:
permList.append(0)
PerFeatureAccuracyAll.append(0)
clf.fit(XData, yData)
yPredict = clf.predict(XData)
yPredict = np.nan_to_num(yPredict)
perModelPrediction.append(yPredict)
# retrieve target names (class names)
PerClassMetric.append(classification_report(yData, yPredict, target_names=target_names, digits=2, output_dict=True))
yPredictProb = clf.predict_proba(XData)
yPredictProb = np.nan_to_num(yPredictProb)
perModelProb.append(yPredictProb.tolist())
resultsMicro.append(geometric_mean_score(yData, yPredict, average='micro'))
resultsMacro.append(geometric_mean_score(yData, yPredict, average='macro'))
resultsWeighted.append(geometric_mean_score(yData, yPredict, average='weighted'))
resultsCorrCoef.append(matthews_corrcoef(yData, yPredict))
resultsMicroBeta5.append(fbeta_score(yData, yPredict, average='micro', beta=0.5))
resultsMacroBeta5.append(fbeta_score(yData, yPredict, average='macro', beta=0.5))
resultsWeightedBeta5.append(fbeta_score(yData, yPredict, average='weighted', beta=0.5))
resultsMicroBeta1.append(fbeta_score(yData, yPredict, average='micro', beta=1))
resultsMacroBeta1.append(fbeta_score(yData, yPredict, average='macro', beta=1))
resultsWeightedBeta1.append(fbeta_score(yData, yPredict, average='weighted', beta=1))
resultsMicroBeta2.append(fbeta_score(yData, yPredict, average='micro', beta=2))
resultsMacroBeta2.append(fbeta_score(yData, yPredict, average='macro', beta=2))
resultsWeightedBeta2.append(fbeta_score(yData, yPredict, average='weighted', beta=2))
resultsLogLoss.append(log_loss(yData, yPredictProb, normalize=True))
maxLog = max(resultsLogLoss)
minLog = min(resultsLogLoss)
for each in resultsLogLoss:
resultsLogLossFinal.append((each-minLog)/(maxLog-minLog))
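    # The loop above min-max normalizes the log-loss values into [0, 1].
    # Note (assumption, not original behaviour): if every model yields the same
    # log loss, maxLog == minLog and the division fails; a guard such as
    #   denom = (maxLog - minLog) or 1.0
    #   resultsLogLossFinal = [(each - minLog) / denom for each in resultsLogLoss]
    # would avoid the ZeroDivisionError.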
metrics.insert(loop,'geometric_mean_score_micro',resultsMicro)
metrics.insert(loop+1,'geometric_mean_score_macro',resultsMacro)
metrics.insert(loop+2,'geometric_mean_score_weighted',resultsWeighted)
metrics.insert(loop+3,'matthews_corrcoef',resultsCorrCoef)
metrics.insert(loop+4,'f5_micro',resultsMicroBeta5)
metrics.insert(loop+5,'f5_macro',resultsMacroBeta5)
metrics.insert(loop+6,'f5_weighted',resultsWeightedBeta5)
metrics.insert(loop+7,'f1_micro',resultsMicroBeta1)
metrics.insert(loop+8,'f1_macro',resultsMacroBeta1)
metrics.insert(loop+9,'f1_weighted',resultsWeightedBeta1)
metrics.insert(loop+10,'f2_micro',resultsMicroBeta2)
metrics.insert(loop+11,'f2_macro',resultsMacroBeta2)
metrics.insert(loop+12,'f2_weighted',resultsWeightedBeta2)
metrics.insert(loop+13,'log_loss',resultsLogLossFinal)
perModelPredPandas = pd.DataFrame(perModelPrediction)
perModelPredPandas = perModelPredPandas.to_json()
perModelProbPandas = pd.DataFrame(perModelProb)
perModelProbPandas = perModelProbPandas.to_json()
PerClassMetricPandas = pd.DataFrame(PerClassMetric)
del PerClassMetricPandas['accuracy']
del PerClassMetricPandas['macro avg']
del PerClassMetricPandas['weighted avg']
PerClassMetricPandas = PerClassMetricPandas.to_json()
perm_imp_eli5PD = pd.DataFrame(permList)
perm_imp_eli5PD = perm_imp_eli5PD.to_json()
PerFeatureAccuracyPandas = pd.DataFrame(PerFeatureAccuracyAll)
PerFeatureAccuracyPandas = PerFeatureAccuracyPandas.to_json()
bestfeatures = SelectKBest(score_func=chi2, k='all')
fit = bestfeatures.fit(XData,yData)
dfscores = pd.DataFrame(fit.scores_)
dfcolumns = pd.DataFrame(XData.columns)
featureScores = pd.concat([dfcolumns,dfscores],axis=1)
featureScores.columns = ['Specs','Score'] #naming the dataframe columns
featureScores = featureScores.to_json()
# gather the results and send them back
results.append(modelsIDs) # Position: 0 and so on
results.append(parametersPerformancePerModel) # Position: 1 and so on
results.append(PerClassMetricPandas) # Position: 2 and so on
results.append(PerFeatureAccuracyPandas) # Position: 3 and so on
results.append(perm_imp_eli5PD) # Position: 4 and so on
results.append(featureScores) # Position: 5 and so on
metrics = metrics.to_json()
results.append(metrics) # Position: 6 and so on
results.append(perModelProbPandas) # Position: 7 and so on
    results.append(json.dumps(perModelPredPandas)) # Position: 8 and so on (note: perModelPredPandas is already a JSON string, so this adds a second layer of encoding)
return results
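# Illustrative call (hedged sketch; the classifier, parameter grid, offset, and
# crossValidation value below are assumptions for demonstration, not values taken
# from this file):
# from sklearn.neighbors import KNeighborsClassifier
# knn_grid = {'n_neighbors': list(range(1, 25)), 'weights': ['uniform', 'distance']}
# resultsKNN = GridSearchForModels(XData, yData, KNeighborsClassifier(), knn_grid,
#                                  'KNN', 0, toggle=1, crossVal=5)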
# Sending each model's results to frontend
@app.route('/data/PerformanceForEachModel', methods=["GET", "POST"])
def SendEachClassifiersPerformanceToVisualize():
response = {
'PerformancePerModel': allParametersPerformancePerModel,
}
return jsonify(response)
def Remove(duplicate):
final_list = []
for num in duplicate:
if num not in final_list:
if (isinstance(num, float)):
if np.isnan(num):
pass
else:
final_list.append(float(num))
else:
final_list.append(num)
return final_list
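# Example (illustrative only): Remove keeps first occurrences and silently drops
# NaN floats, e.g. Remove([1.0, float('nan'), 1.0, 'a']) returns [1.0, 'a'].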
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/SendBrushedParam', methods=["GET", "POST"])
def RetrieveModelsParam():
RetrieveModelsPar = request.get_data().decode('utf8').replace("'", '"')
RetrieveModelsPar = json.loads(RetrieveModelsPar)
counterKNN = 0
counterSVC = 0
counterGausNB = 0
counterMLP = 0
counterLR = 0
counterLDA = 0
counterQDA = 0
counterRF = 0
counterExtraT = 0
counterAdaB = 0
counterGradB = 0
global KNNModels
global SVCModels
global GausNBModels
global MLPModels
global LRModels
global LDAModels
global QDAModels
global RFModels
global ExtraTModels
global AdaBModels
global GradBModels
global algorithmsList
algorithmsList = RetrieveModelsPar['algorithms']
for index, items in enumerate(algorithmsList):
if (items == 'KNN'):
counterKNN += 1
KNNModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'SVC'):
counterSVC += 1
SVCModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'GauNB'):
counterGausNB += 1
GausNBModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'MLP'):
counterMLP += 1
MLPModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'LR'):
counterLR += 1
LRModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'LDA'):
counterLDA += 1
LDAModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'QDA'):
counterQDA += 1
QDAModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'RF'):
counterRF += 1
RFModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'ExtraT'):
counterExtraT += 1
ExtraTModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'AdaB'):
counterAdaB += 1
AdaBModels.append(int(RetrieveModelsPar['models'][index]))
else:
counterGradB += 1
GradBModels.append(int(RetrieveModelsPar['models'][index]))
return 'Everything Okay'
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/factors', methods=["GET", "POST"])
def RetrieveFactors():
global factors
global allParametersPerformancePerModel
Factors = request.get_data().decode('utf8').replace("'", '"')
FactorsInt = json.loads(Factors)
factors = FactorsInt['Factors']
# this is if we want to change the factors before running the search
#if (len(allParametersPerformancePerModel) == 0):
# pass
#else:
global sumPerClassifierSel
global ModelSpaceMDSNew
global ModelSpaceTSNENew
global metricsPerModel
sumPerClassifierSel = []
sumPerClassifierSel = preProcsumPerMetric(factors)
ModelSpaceMDSNew = []
ModelSpaceTSNENew = []
loopThroughMetrics = PreprocessingMetrics()
loopThroughMetrics = loopThroughMetrics.fillna(0)
metricsPerModel = preProcMetricsAllAndSel()
flagLocal = 0
countRemovals = 0
for l,el in enumerate(factors):
if el == 0:
loopThroughMetrics.drop(loopThroughMetrics.columns[[l-countRemovals]], axis=1, inplace=True)
countRemovals = countRemovals + 1
flagLocal = 1
if flagLocal == 1:
ModelSpaceMDSNew = FunMDS(loopThroughMetrics)
ModelSpaceTSNENew = FunTsne(loopThroughMetrics)
ModelSpaceTSNENew = ModelSpaceTSNENew.tolist()
return 'Everything Okay'
@app.route('/data/UpdateOverv', methods=["GET", "POST"])
def UpdateOverview():
ResultsUpdateOverview = []
ResultsUpdateOverview.append(sumPerClassifierSel)
ResultsUpdateOverview.append(ModelSpaceMDSNew)
ResultsUpdateOverview.append(ModelSpaceTSNENew)
ResultsUpdateOverview.append(metricsPerModel)
response = {
'Results': ResultsUpdateOverview
}
return jsonify(response)
def PreprocessingMetrics():
dicKNN = json.loads(allParametersPerformancePerModel[6])
dicSVC = json.loads(allParametersPerformancePerModel[15])
dicGausNB = json.loads(allParametersPerformancePerModel[24])
dicMLP = json.loads(allParametersPerformancePerModel[33])
dicLR = json.loads(allParametersPerformancePerModel[42])
dicLDA = json.loads(allParametersPerformancePerModel[51])
dicQDA = json.loads(allParametersPerformancePerModel[60])
dicRF = json.loads(allParametersPerformancePerModel[69])
dicExtraT = json.loads(allParametersPerformancePerModel[78])
dicAdaB = json.loads(allParametersPerformancePerModel[87])
dicGradB = json.loads(allParametersPerformancePerModel[96])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatMetrics = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
return df_concatMetrics
def PreprocessingPred():
dicKNN = json.loads(allParametersPerformancePerModel[7])
dicSVC = json.loads(allParametersPerformancePerModel[16])
dicGausNB = json.loads(allParametersPerformancePerModel[25])
dicMLP = json.loads(allParametersPerformancePerModel[34])
dicLR = json.loads(allParametersPerformancePerModel[43])
dicLDA = json.loads(allParametersPerformancePerModel[52])
dicQDA = json.loads(allParametersPerformancePerModel[61])
dicRF = json.loads(allParametersPerformancePerModel[70])
dicExtraT = json.loads(allParametersPerformancePerModel[79])
dicAdaB = json.loads(allParametersPerformancePerModel[88])
dicGradB = json.loads(allParametersPerformancePerModel[97])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatProbs = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
predictions = []
for column, content in df_concatProbs.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictions.append(el)
return predictions
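# Note (my reading of the code above): each column of df_concatProbs holds one
# instance's class-probability list per selected model; zip(*content) groups the
# per-class values across models and the mean acts as a soft-voting ensemble,
# e.g. [[0.2, 0.8], [0.4, 0.6]] averages to [0.3, 0.7].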
def PreprocessingPredUpdate(Models):
Models = json.loads(Models)
ModelsList= []
for loop in Models['ClassifiersList']:
ModelsList.append(loop)
dicKNN = json.loads(allParametersPerformancePerModel[7])
dicSVC = json.loads(allParametersPerformancePerModel[16])
dicGausNB = json.loads(allParametersPerformancePerModel[25])
dicMLP = json.loads(allParametersPerformancePerModel[34])
dicLR = json.loads(allParametersPerformancePerModel[43])
dicLDA = json.loads(allParametersPerformancePerModel[52])
dicQDA = json.loads(allParametersPerformancePerModel[61])
dicRF = json.loads(allParametersPerformancePerModel[70])
dicExtraT = json.loads(allParametersPerformancePerModel[79])
dicAdaB = json.loads(allParametersPerformancePerModel[88])
dicGradB = json.loads(allParametersPerformancePerModel[97])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatProbs = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
listProbs = df_concatProbs.index.values.tolist()
deletedElements = 0
for index, element in enumerate(listProbs):
if element in ModelsList:
index = index - deletedElements
df_concatProbs = df_concatProbs.drop(df_concatProbs.index[index])
deletedElements = deletedElements + 1
df_concatProbsCleared = df_concatProbs
listIDsRemoved = df_concatProbsCleared.index.values.tolist()
predictionsAll = PreprocessingPred()
PredictionSpaceAll = FunMDS(predictionsAll)
PredictionSpaceAllComb = [list(a) for a in zip(PredictionSpaceAll[0], PredictionSpaceAll[1])]
predictionsSel = []
for column, content in df_concatProbsCleared.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictionsSel.append(el)
PredictionSpaceSel = FunMDS(predictionsSel)
PredictionSpaceSelComb = [list(a) for a in zip(PredictionSpaceSel[0], PredictionSpaceSel[1])]
mtx2PredFinal = []
    _, mtx2Pred, disparityPred = procrustes(PredictionSpaceAllComb, PredictionSpaceSelComb)
a1, b1 = zip(*mtx2Pred)
mtx2PredFinal.append(a1)
mtx2PredFinal.append(b1)
return [mtx2PredFinal,listIDsRemoved]
def PreprocessingParam():
dicKNN = json.loads(allParametersPerformancePerModel[1])
dicSVC = json.loads(allParametersPerformancePerModel[10])
dicGausNB = json.loads(allParametersPerformancePerModel[19])
dicMLP = json.loads(allParametersPerformancePerModel[28])
dicLR = json.loads(allParametersPerformancePerModel[37])
dicLDA = json.loads(allParametersPerformancePerModel[46])
dicQDA = json.loads(allParametersPerformancePerModel[55])
dicRF = json.loads(allParametersPerformancePerModel[64])
dicExtraT = json.loads(allParametersPerformancePerModel[73])
dicAdaB = json.loads(allParametersPerformancePerModel[82])
dicGradB = json.loads(allParametersPerformancePerModel[91])
dicKNN = dicKNN['params']
dicSVC = dicSVC['params']
dicGausNB = dicGausNB['params']
dicMLP = dicMLP['params']
dicLR = dicLR['params']
dicLDA = dicLDA['params']
dicQDA = dicQDA['params']
dicRF = dicRF['params']
dicExtraT = dicExtraT['params']
dicAdaB = dicAdaB['params']
dicGradB = dicGradB['params']
dicKNN = {int(k):v for k,v in dicKNN.items()}
dicSVC = {int(k):v for k,v in dicSVC.items()}
dicGausNB = {int(k):v for k,v in dicGausNB.items()}
dicMLP = {int(k):v for k,v in dicMLP.items()}
dicLR = {int(k):v for k,v in dicLR.items()}
dicLDA = {int(k):v for k,v in dicLDA.items()}
dicQDA = {int(k):v for k,v in dicQDA.items()}
dicRF = {int(k):v for k,v in dicRF.items()}
dicExtraT = {int(k):v for k,v in dicExtraT.items()}
dicAdaB = {int(k):v for k,v in dicAdaB.items()}
dicGradB = {int(k):v for k,v in dicGradB.items()}
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = | pd.DataFrame.from_dict(dicRF) | pandas.DataFrame.from_dict |
import networkx as nx
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import numpy as np
import pandas as pd
from ADvis.ADnum import ADnum
from mpl_toolkits.mplot3d import Axes3D
def gen_graph(y):
""" Function to create a directed graph from an ADnum.
INPUTS
======
y : ADnum
OUTPUTS
=======
A networkx digraph
"""
G = nx.DiGraph()
d = y.graph
    if len(d) == 0:
G.add_node(y)
for key in d:
G.add_node(key)
neighbors = d[key]
for neighbor in neighbors:
G.add_edge(key, neighbor[0], label = neighbor[1])
return G
def reverse_graph(y):
""" Function to create a dictionary containing edges of y reversed.
INPUTS
======
y : ADnum
OUTPUTS
=======
A dictionary
"""
d = y.graph
parents = {}
for key in d:
neighbors = d[key]
for neighbor in neighbors:
if neighbor[0] not in parents:
parents[neighbor[0]] = []
parents[neighbor[0]].append((key, neighbor[1]))
return parents
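# Illustrative shape of the returned mapping (assumed example): for y = x1 + x2,
# reverse_graph(y) resembles {y: [(x1, '+'), (x2, '+')]}, i.e. each node maps to
# the operand nodes (with operation labels) that produced it, which is y.graph
# with the edge direction reversed.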
def get_labels(y):
""" Function to generate labels for plotting networkx graph.
INPUTS
======
y : ADnum
OUTPUTS
=======
A dictionary of ADnum objects mapped to string labels
"""
parents = reverse_graph(y)
total = len(y.graph) - sum([entry.constant for entry in y.graph.keys()])
new_names = {}
nodes = [y]
while len(nodes)>0:
node = nodes.pop(0)
if node not in new_names:
if node.constant:
new_names[node] = str(np.round(node.val, decimals=1))
else:
new_names[node] = 'X' + str(total)
total = total - 1
if node in parents:
neighbors = parents[node]
for neighbor in neighbors:
nodes.append(neighbor[0])
return new_names
def get_labels_rev(y):
""" Function to generate labels for plotting networkx graph.
INPUTS
======
y : ADnum
OUTPUTS
=======
A dictionary of ADnum objects mapped to string labels
"""
parents = reverse_graph(y)
#total = len(y.graph) - sum([entry.constant for entry in y.graph.keys()])
total = 0
new_names = {}
nodes = [y]
while len(nodes)>0:
node = nodes.pop()
if node not in new_names:
if node.constant:
new_names[node] = str(np.round(node.val, decimals=1))
else:
new_names[node] = 'X' + str(total)
total = total + 1
if node in parents:
neighbors = parents[node]
for neighbor in neighbors:
nodes.append(neighbor[0])
return new_names
def get_colors(G, y):
""" Function to assign colors to nodes in the graph.
INPUTS
======
G : networkx digraph
y : ADnum
OUTPUTS
=======
A list of colors for the graph
"""
colors = []
parents = reverse_graph(y)
for node in G:
if node.constant:
colors.append('blue')
else:
if node == y:
colors.append('green')
else:
if node in parents:
colors.append('red')
else:
colors.append('magenta')
return colors
def get_sizes(G, y, labs):
""" Function to assign sizes to nodes in the graph.
INPUTS
======
G : networkx digraph
y : ADnum
labs : dictionary of graph labels
OUTPUTS
=======
A list of sizes for the graph
"""
sizes = []
for node in G:
label = labs[node]
sizes.append(len(label)*200)
return sizes
def draw_graph(y):
""" Function to draw the graph.
INPUTS
======
y : ADnum
OUTPUTS
=======
A plot of the graph
"""
fig = plt.figure()
G = gen_graph(y)
edge_labs = nx.get_edge_attributes(G, 'label')
pos = nx.spring_layout(G)
labs = get_labels(y)
nx.draw_networkx(G, pos, labels = labs, node_color = get_colors(G, y), node_size = get_sizes(G, y, labs), font_color= 'white')
nx.draw_networkx_edge_labels(G, pos, edge_labels = edge_labs)
limits = plt.axis('off')
mag_patch = mpatches.Patch(color = 'magenta', label = 'input')
red_patch = mpatches.Patch(color = 'red', label = 'intermediate')
blue_patch = mpatches.Patch(color = 'blue', label = 'constant')
green_patch = mpatches.Patch(color = 'green', label = 'output')
plt.legend(handles = [mag_patch, red_patch, blue_patch, green_patch])
return fig
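# Usage sketch (hedged; the variable names and output path are assumptions):
# given an ADnum expression such as y = x1 * x2 + x1, draw_graph(y) returns a
# matplotlib Figure that can be saved or shown, e.g.
#   fig = draw_graph(y)
#   fig.savefig('computational_graph.png')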
def draw_graph2(y, G, edge_labs, pos, labs):
""" Function to draw the graph.
INPUTS
======
y : ADnum
OUTPUTS
=======
A plot of the graph
"""
fig = plt.figure()
#G = gen_graph(y)
#edge_labs = nx.get_edge_attributes(G, 'label')
#pos = nx.spring_layout(G)
#labs = get_labels(y)
nx.draw_networkx(G, pos, labels = labs, node_color = get_colors(G, y), node_size = get_sizes(G, y, labs), font_color= 'white')
nx.draw_networkx_edge_labels(G, pos, edge_labels = edge_labs)
limits = plt.axis('off')
mag_patch = mpatches.Patch(color = 'magenta', label = 'input')
red_patch = mpatches.Patch(color = 'red', label = 'intermediate')
blue_patch = mpatches.Patch(color = 'blue', label = 'constant')
green_patch = mpatches.Patch(color = 'green', label = 'output')
plt.legend(handles = [mag_patch, red_patch, blue_patch, green_patch])
plt.show()
#return fig
def draw_graph_rev(y):
""" Function to draw the graph.
INPUTS
======
y : ADnum
OUTPUTS
=======
A plot of the graph
"""
fig = plt.figure()
G = gen_graph(y)
G = G.reverse()
edge_labs = nx.get_edge_attributes(G, 'label')
pos = nx.spring_layout(G)
labs = get_labels(y)
#labs = get_labels_rev(y)
nx.draw_networkx(G, pos, labels = labs, node_color = get_colors(G, y), node_size = get_sizes(G, y, labs), font_color= 'white')
nx.draw_networkx_edge_labels(G, pos, edge_labels = edge_labs)
limits = plt.axis('off')
mag_patch = mpatches.Patch(color = 'magenta', label = 'input')
red_patch = mpatches.Patch(color = 'red', label = 'intermediate')
blue_patch = mpatches.Patch(color = 'blue', label = 'constant')
green_patch = mpatches.Patch(color = 'green', label = 'output')
plt.legend(handles = [mag_patch, red_patch, blue_patch, green_patch])
return fig
def draw_graph_rev2(y, G, edge_labs, pos, labs):
""" Function to draw the graph.
INPUTS
======
y : ADnum
OUTPUTS
=======
A plot of the graph
"""
fig = plt.figure()
#G = gen_graph(y)
G = G.reverse()
#edge_labs = nx.get_edge_attributes(G, 'label')
#pos = nx.spring_layout(G)
#labs = get_labels(y)
#labs = get_labels_rev(y)
nx.draw_networkx(G, pos, labels = labs, node_color = get_colors(G, y), node_size = get_sizes(G, y, labs), font_color= 'white')
nx.draw_networkx_edge_labels(G, pos, edge_labels = edge_labs)
limits = plt.axis('off')
mag_patch = mpatches.Patch(color = 'magenta', label = 'input')
red_patch = mpatches.Patch(color = 'red', label = 'intermediate')
blue_patch = mpatches.Patch(color = 'blue', label = 'constant')
green_patch = mpatches.Patch(color = 'green', label = 'output')
plt.legend(handles = [mag_patch, red_patch, blue_patch, green_patch])
plt.show()
#return fig
def get_graph_setup(y):
G = gen_graph(y)
#G = G.reverse()
edge_labs = nx.get_edge_attributes(G, 'label')
pos = nx.spring_layout(G, k=.15, iterations=20)
labs = get_labels(y) #changed from get labels rev
return G, edge_labs, pos, labs
def axis_reverse_edge(y, G, edge_labs, pos, labs, ax, edgelist, idx):
edge = edgelist[idx]
nx.draw_networkx(G, pos, ax = ax, labels = labs, node_color = get_colors(G, y), node_size = get_sizes(G, y, labs), font_color= 'white')
nx.draw_networkx_edges(G, pos=pos, ax=ax, edgelist = edge, width = 4, edge_color = 'y', style = 'dashed')
nx.draw_networkx_edge_labels(G, pos=pos, ax=ax, edge_labels = edge_labs)
limits = plt.axis('off')
def draw_graph_rev_dynamic(y, edgelist, G, edge_labs, pos, labs, val):
edgelist.reverse()
fig = plt.figure()
#G, edge_labs, pos, labs = get_graph_setup(y)
G = G.reverse()
ax = fig.add_subplot(111)
plt.title('Press enter to start. \n Then use the left and right arrow keys to step through the calculation.')
plt.axis("off")
global curr_pos
curr_pos = 0
#axis_reverse_edge(y, G, edge_labs, pos, labs, ax, edgelist, curr_pos)
#plt.show()
def key_event(e):
global curr_pos
ax.cla()
if e.key == 'enter':
curr_pos = curr_pos
elif e.key == 'right':
curr_pos = curr_pos +1
if curr_pos >= len(edgelist):
curr_pos = len(edgelist)-1
elif e.key == 'left':
curr_pos = curr_pos -1
if curr_pos<0:
curr_pos = 0
else:
return
#curr_pos = curr_pos%len(edgelist)
axis_reverse_edge(y, G, edge_labs, pos, labs, ax, edgelist, curr_pos)
if curr_pos == len(edgelist)-1:
plt.title('Step ' + str(curr_pos+1) +': Calculation Complete')
else:
plt.title('Step ' + str(curr_pos+1))
plt.show()
if len(edgelist)>0:
fig.canvas.mpl_connect('key_press_event', key_event)
elif val == 1:
plt.close()
draw_graph_rev2(y, G, edge_labs, pos, labs)
else:
#draw_graph_rev2(y, G, edge_labs, pos, labs)
plt.title('No dependence on input variable.')
plt.show()
def draw_graph_rev_dynamic_old(y, edgelist):
""" Function to draw the graph.
INPUTS
======
y : ADnum
OUTPUTS
=======
A plot of the graph
"""
edgelist.reverse()
fig = plt.figure()
G = gen_graph(y)
G = G.reverse()
edge_labs = nx.get_edge_attributes(G, 'label')
pos = nx.spring_layout(G)
labs = get_labels_rev(y)
nx.draw_networkx(G, pos, labels = labs, node_color = get_colors(G, y), node_size = get_sizes(G, y, labs), font_color= 'white')
nx.draw_networkx_edge_labels(G, pos, edge_labels = edge_labs)
limits = plt.axis('off')
mag_patch = mpatches.Patch(color = 'magenta', label = 'input')
red_patch = mpatches.Patch(color = 'red', label = 'intermediate')
blue_patch = mpatches.Patch(color = 'blue', label = 'constant')
green_patch = mpatches.Patch(color = 'green', label = 'output')
plt.legend(handles = [mag_patch, red_patch, blue_patch, green_patch])
figset = []
for edge in edgelist:
fignew = plt.figure()
nx.draw_networkx(G, pos, labels = labs, node_color = get_colors(G, y), node_size = get_sizes(G, y, labs), font_color= 'white')
nx.draw_networkx_edges(G, pos=pos, edgelist = edge, width = 4, edge_color = 'y', style = 'dashed')
nx.draw_networkx_edge_labels(G, pos=pos, edge_labels = edge_labs)
figset.append(fignew)
return fig, figset
def gen_table(y):
""" Function to generate tables for the ADnum.
INPUTS
======
y : ADnum
OUTPUTS
=======
A pandas data frame of the computational traces
"""
parents = reverse_graph(y)
labs = get_labels(y)
visited = []
data = {}
data['Trace'] = []
data['Operation']=[]
data['Value']= []
data['Derivative']=[]
nodes = [y]
while len(nodes)>0:
node = nodes.pop()
if node not in visited:
if node.constant:
visited.append(node)
else:
visited.append(node)
data['Trace'].append(labs[node])
data['Value'].append(node.val)
data['Derivative'].append(node.der)
if node in parents:
if len(parents[node]) == 1:
link = parents[node][0][1]+'('+labs[parents[node][0][0]]+')'
else:
link = parents[node][0][1]+'(' +labs[parents[node][0][0]]+ ' , ' + labs[parents[node][1][0]] + ')'
neighbors = parents[node]
for neighbor in neighbors:
nodes.append(neighbor[0])
else:
link = 'input'
data['Operation'].append(link)
result = pd.DataFrame.from_dict(data)
result['Number'] = [int(name[1:]) for name in result['Trace']]
result2 = result.sort_values('Number')
resultorder = result2[['Trace', 'Operation', 'Value', 'Derivative']]
return resultorder
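# Usage sketch (illustrative): the forward-trace table can be printed directly,
# e.g. print(gen_table(y).to_string(index=False)) lists each trace variable with
# its operation, value, and derivative in trace order.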
def gen_table_rev(y):
""" Function to generate tables for the ADnum.
INPUTS
======
y : ADnum
OUTPUTS
=======
A pandas data frame of the computational traces
"""
parents = reverse_graph(y)
labs = get_labels(y)
#labs = get_labels_rev(y)
visited = []
data = {}
data['Trace'] = []
data['Operation']=[]
#data['Value']= []
data['Derivative']=[]
data['Weight'] = []
nodes = [y]
while len(nodes)>0:
node = nodes.pop()
if node not in visited:
if node.constant:
visited.append(node)
else:
visited.append(node)
data['Trace'].append(labs[node])
#data['Value'].append(node.val)
data['Derivative'].append(node.der)
data['Weight'].append(node.rder)
if node in parents:
if len(parents[node]) == 1:
link = parents[node][0][1]+'('+labs[parents[node][0][0]]+')'
else:
link = parents[node][0][1]+'(' +labs[parents[node][0][0]]+ ' , ' + labs[parents[node][1][0]] + ')'
neighbors = parents[node]
for neighbor in neighbors:
nodes.append(neighbor[0])
else:
link = 'input'
data['Operation'].append(link)
result = | pd.DataFrame.from_dict(data) | pandas.DataFrame.from_dict |
""" test the scalar Timedelta """
import numpy as np
from datetime import timedelta
import pandas as pd
import pandas.util.testing as tm
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type as ct
from pandas import (Timedelta, TimedeltaIndex, timedelta_range, Series,
to_timedelta, compat, isnull)
from pandas._libs.tslib import iNaT, NaTType
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
pass
def test_construction(self):
expected = np.timedelta64(10, 'D').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta(10, unit='d').value, expected)
self.assertEqual(Timedelta(10.0, unit='d').value, expected)
self.assertEqual(Timedelta('10 days').value, expected)
self.assertEqual(Timedelta(days=10).value, expected)
self.assertEqual(Timedelta(days=10.0).value, expected)
expected += np.timedelta64(10, 's').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta('10 days 00:00:10').value, expected)
self.assertEqual(Timedelta(days=10, seconds=10).value, expected)
self.assertEqual(
Timedelta(days=10, milliseconds=10 * 1000).value, expected)
self.assertEqual(
Timedelta(days=10, microseconds=10 * 1000 * 1000).value, expected)
# test construction with np dtypes
# GH 8757
timedelta_kwargs = {'days': 'D',
'seconds': 's',
'microseconds': 'us',
'milliseconds': 'ms',
'minutes': 'm',
'hours': 'h',
'weeks': 'W'}
npdtypes = [np.int64, np.int32, np.int16, np.float64, np.float32,
np.float16]
for npdtype in npdtypes:
for pykwarg, npkwarg in timedelta_kwargs.items():
expected = np.timedelta64(1,
npkwarg).astype('m8[ns]').view('i8')
self.assertEqual(
Timedelta(**{pykwarg: npdtype(1)}).value, expected)
# rounding cases
self.assertEqual(Timedelta(82739999850000).value, 82739999850000)
self.assertTrue('0 days 22:58:59.999850' in str(Timedelta(
82739999850000)))
self.assertEqual(Timedelta(123072001000000).value, 123072001000000)
self.assertTrue('1 days 10:11:12.001' in str(Timedelta(
123072001000000)))
# string conversion with/without leading zero
# GH 9570
self.assertEqual(Timedelta('0:00:00'), timedelta(hours=0))
self.assertEqual(Timedelta('00:00:00'), timedelta(hours=0))
self.assertEqual(Timedelta('-1:00:00'), -timedelta(hours=1))
self.assertEqual(Timedelta('-01:00:00'), -timedelta(hours=1))
# more strings & abbrevs
# GH 8190
self.assertEqual(Timedelta('1 h'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hour'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hr'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hours'), timedelta(hours=1))
self.assertEqual(Timedelta('-1 hours'), -timedelta(hours=1))
self.assertEqual(Timedelta('1 m'), timedelta(minutes=1))
self.assertEqual(Timedelta('1.5 m'), timedelta(seconds=90))
self.assertEqual(Timedelta('1 minute'), timedelta(minutes=1))
self.assertEqual(Timedelta('1 minutes'), timedelta(minutes=1))
self.assertEqual(Timedelta('1 s'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 second'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 seconds'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 ms'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 milli'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 millisecond'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 us'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1 micros'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1 microsecond'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1.5 microsecond'),
Timedelta('00:00:00.000001500'))
self.assertEqual(Timedelta('1 ns'), Timedelta('00:00:00.000000001'))
self.assertEqual(Timedelta('1 nano'), Timedelta('00:00:00.000000001'))
self.assertEqual(Timedelta('1 nanosecond'),
Timedelta('00:00:00.000000001'))
# combos
self.assertEqual(Timedelta('10 days 1 hour'),
timedelta(days=10, hours=1))
self.assertEqual(Timedelta('10 days 1 h'), timedelta(days=10, hours=1))
self.assertEqual(Timedelta('10 days 1 h 1m 1s'), timedelta(
days=10, hours=1, minutes=1, seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s'), -
timedelta(days=10, hours=1, minutes=1, seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s'), -
timedelta(days=10, hours=1, minutes=1, seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s 3us'), -
timedelta(days=10, hours=1, minutes=1,
seconds=1, microseconds=3))
self.assertEqual(Timedelta('-10 days 1 h 1.5m 1s 3us'), -
timedelta(days=10, hours=1, minutes=1,
seconds=31, microseconds=3))
# currently invalid as it has a - on the hhmmdd part (only allowed on
# the days)
self.assertRaises(ValueError,
lambda: Timedelta('-10 days -1 h 1.5m 1s 3us'))
# only leading neg signs are allowed
self.assertRaises(ValueError,
lambda: Timedelta('10 days -1 h 1.5m 1s 3us'))
# no units specified
self.assertRaises(ValueError, lambda: Timedelta('3.1415'))
# invalid construction
tm.assertRaisesRegexp(ValueError, "cannot construct a Timedelta",
lambda: Timedelta())
tm.assertRaisesRegexp(ValueError, "unit abbreviation w/o a number",
lambda: Timedelta('foo'))
tm.assertRaisesRegexp(ValueError,
"cannot construct a Timedelta from the passed "
"arguments, allowed keywords are ",
lambda: Timedelta(day=10))
# roundtripping both for string and value
for v in ['1s', '-1s', '1us', '-1us', '1 day', '-1 day',
'-23:59:59.999999', '-1 days +23:59:59.999999', '-1ns',
'1ns', '-23:59:59.999999999']:
td = Timedelta(v)
self.assertEqual(Timedelta(td.value), td)
# str does not normally display nanos
if not td.nanoseconds:
self.assertEqual(Timedelta(str(td)), td)
self.assertEqual(Timedelta(td._repr_base(format='all')), td)
# floats
expected = np.timedelta64(
10, 's').astype('m8[ns]').view('i8') + np.timedelta64(
500, 'ms').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta(10.5, unit='s').value, expected)
# nat
self.assertEqual(Timedelta('').value, iNaT)
self.assertEqual(Timedelta('nat').value, iNaT)
self.assertEqual(Timedelta('NAT').value, iNaT)
self.assertEqual(Timedelta(None).value, iNaT)
self.assertEqual(Timedelta(np.nan).value, iNaT)
self.assertTrue(isnull(Timedelta('nat')))
# offset
self.assertEqual(to_timedelta(pd.offsets.Hour(2)),
Timedelta('0 days, 02:00:00'))
self.assertEqual(Timedelta(pd.offsets.Hour(2)),
Timedelta('0 days, 02:00:00'))
self.assertEqual(Timedelta(pd.offsets.Second(2)),
Timedelta('0 days, 00:00:02'))
# unicode
# GH 11995
expected = Timedelta('1H')
result = pd.Timedelta(u'1H')
self.assertEqual(result, expected)
self.assertEqual(to_timedelta(pd.offsets.Hour(2)),
Timedelta(u'0 days, 02:00:00'))
self.assertRaises(ValueError, lambda: Timedelta(u'foo bar'))
def test_overflow_on_construction(self):
# xref https://github.com/statsmodels/statsmodels/issues/3374
value = pd.Timedelta('1day').value * 20169940
self.assertRaises(OverflowError, pd.Timedelta, value)
def test_total_seconds_scalar(self):
# GH 10939
rng = Timedelta('1 days, 10:11:12.100123456')
expt = 1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456. / 1e9
tm.assert_almost_equal(rng.total_seconds(), expt)
rng = Timedelta(np.nan)
self.assertTrue(np.isnan(rng.total_seconds()))
def test_repr(self):
self.assertEqual(repr(Timedelta(10, unit='d')),
"Timedelta('10 days 00:00:00')")
self.assertEqual(repr(Timedelta(10, unit='s')),
"Timedelta('0 days 00:00:10')")
self.assertEqual(repr(Timedelta(10, unit='ms')),
"Timedelta('0 days 00:00:00.010000')")
self.assertEqual(repr(Timedelta(-10, unit='ms')),
"Timedelta('-1 days +23:59:59.990000')")
def test_conversion(self):
for td in [Timedelta(10, unit='d'),
Timedelta('1 days, 10:11:12.012345')]:
pydt = td.to_pytimedelta()
self.assertTrue(td == Timedelta(pydt))
self.assertEqual(td, pydt)
self.assertTrue(isinstance(pydt, timedelta) and not isinstance(
pydt, Timedelta))
self.assertEqual(td, np.timedelta64(td.value, 'ns'))
td64 = td.to_timedelta64()
self.assertEqual(td64, np.timedelta64(td.value, 'ns'))
self.assertEqual(td, td64)
self.assertTrue(isinstance(td64, np.timedelta64))
# this is NOT equal and cannot be roundtriped (because of the nanos)
td = Timedelta('1 days, 10:11:12.012345678')
self.assertTrue(td != td.to_pytimedelta())
def test_freq_conversion(self):
td = Timedelta('1 days 2 hours 3 ns')
result = td / np.timedelta64(1, 'D')
self.assertEqual(result, td.value / float(86400 * 1e9))
result = td / np.timedelta64(1, 's')
self.assertEqual(result, td.value / float(1e9))
result = td / np.timedelta64(1, 'ns')
self.assertEqual(result, td.value)
def test_fields(self):
def check(value):
# that we are int/long like
self.assertTrue(isinstance(value, (int, compat.long)))
# compat to datetime.timedelta
rng = to_timedelta('1 days, 10:11:12')
self.assertEqual(rng.days, 1)
self.assertEqual(rng.seconds, 10 * 3600 + 11 * 60 + 12)
self.assertEqual(rng.microseconds, 0)
self.assertEqual(rng.nanoseconds, 0)
self.assertRaises(AttributeError, lambda: rng.hours)
self.assertRaises(AttributeError, lambda: rng.minutes)
self.assertRaises(AttributeError, lambda: rng.milliseconds)
# GH 10050
check(rng.days)
check(rng.seconds)
check(rng.microseconds)
check(rng.nanoseconds)
td = Timedelta('-1 days, 10:11:12')
self.assertEqual(abs(td), Timedelta('13:48:48'))
self.assertTrue(str(td) == "-1 days +10:11:12")
self.assertEqual(-td, Timedelta('0 days 13:48:48'))
self.assertEqual(-Timedelta('-1 days, 10:11:12').value, 49728000000000)
self.assertEqual(Timedelta('-1 days, 10:11:12').value, -49728000000000)
rng = to_timedelta('-1 days, 10:11:12.100123456')
self.assertEqual(rng.days, -1)
self.assertEqual(rng.seconds, 10 * 3600 + 11 * 60 + 12)
self.assertEqual(rng.microseconds, 100 * 1000 + 123)
self.assertEqual(rng.nanoseconds, 456)
self.assertRaises(AttributeError, lambda: rng.hours)
self.assertRaises(AttributeError, lambda: rng.minutes)
self.assertRaises(AttributeError, lambda: rng.milliseconds)
# components
tup = pd.to_timedelta(-1, 'us').components
self.assertEqual(tup.days, -1)
self.assertEqual(tup.hours, 23)
self.assertEqual(tup.minutes, 59)
self.assertEqual(tup.seconds, 59)
self.assertEqual(tup.milliseconds, 999)
self.assertEqual(tup.microseconds, 999)
self.assertEqual(tup.nanoseconds, 0)
# GH 10050
check(tup.days)
check(tup.hours)
check(tup.minutes)
check(tup.seconds)
check(tup.milliseconds)
check(tup.microseconds)
check(tup.nanoseconds)
tup = Timedelta('-1 days 1 us').components
self.assertEqual(tup.days, -2)
self.assertEqual(tup.hours, 23)
self.assertEqual(tup.minutes, 59)
self.assertEqual(tup.seconds, 59)
self.assertEqual(tup.milliseconds, 999)
self.assertEqual(tup.microseconds, 999)
self.assertEqual(tup.nanoseconds, 0)
def test_nat_converters(self):
self.assertEqual(to_timedelta(
'nat', box=False).astype('int64'), iNaT)
self.assertEqual(to_timedelta(
'nan', box=False).astype('int64'), iNaT)
def testit(unit, transform):
# array
result = to_timedelta(np.arange(5), unit=unit)
expected = TimedeltaIndex([np.timedelta64(i, transform(unit))
for i in np.arange(5).tolist()])
tm.assert_index_equal(result, expected)
# scalar
result = to_timedelta(2, unit=unit)
expected = Timedelta(np.timedelta64(2, transform(unit)).astype(
'timedelta64[ns]'))
self.assertEqual(result, expected)
# validate all units
# GH 6855
for unit in ['Y', 'M', 'W', 'D', 'y', 'w', 'd']:
testit(unit, lambda x: x.upper())
for unit in ['days', 'day', 'Day', 'Days']:
testit(unit, lambda x: 'D')
for unit in ['h', 'm', 's', 'ms', 'us', 'ns', 'H', 'S', 'MS', 'US',
'NS']:
testit(unit, lambda x: x.lower())
# offsets
# m
testit('T', lambda x: 'm')
# ms
testit('L', lambda x: 'ms')
def test_numeric_conversions(self):
self.assertEqual(ct(0), np.timedelta64(0, 'ns'))
self.assertEqual(ct(10), np.timedelta64(10, 'ns'))
self.assertEqual(ct(10, unit='ns'), np.timedelta64(
10, 'ns').astype('m8[ns]'))
self.assertEqual(ct(10, unit='us'), np.timedelta64(
10, 'us').astype('m8[ns]'))
self.assertEqual(ct(10, unit='ms'), np.timedelta64(
10, 'ms').astype('m8[ns]'))
self.assertEqual(ct(10, unit='s'), np.timedelta64(
10, 's').astype('m8[ns]'))
self.assertEqual(ct(10, unit='d'), np.timedelta64(
10, 'D').astype('m8[ns]'))
def test_timedelta_conversions(self):
self.assertEqual(ct(timedelta(seconds=1)),
np.timedelta64(1, 's').astype('m8[ns]'))
self.assertEqual(ct(timedelta(microseconds=1)),
np.timedelta64(1, 'us').astype('m8[ns]'))
self.assertEqual(ct(timedelta(days=1)),
np.timedelta64(1, 'D').astype('m8[ns]'))
def test_round(self):
t1 = Timedelta('1 days 02:34:56.789123456')
t2 = Timedelta('-1 days 02:34:56.789123456')
for (freq, s1, s2) in [('N', t1, t2),
('U', Timedelta('1 days 02:34:56.789123000'),
Timedelta('-1 days 02:34:56.789123000')),
('L', Timedelta('1 days 02:34:56.789000000'),
Timedelta('-1 days 02:34:56.789000000')),
('S', Timedelta('1 days 02:34:57'),
Timedelta('-1 days 02:34:57')),
('2S', Timedelta('1 days 02:34:56'),
Timedelta('-1 days 02:34:56')),
('5S', Timedelta('1 days 02:34:55'),
Timedelta('-1 days 02:34:55')),
('T', Timedelta('1 days 02:35:00'),
Timedelta('-1 days 02:35:00')),
('12T', Timedelta('1 days 02:36:00'),
Timedelta('-1 days 02:36:00')),
('H', Timedelta('1 days 03:00:00'),
Timedelta('-1 days 03:00:00')),
('d', Timedelta('1 days'),
Timedelta('-1 days'))]:
r1 = t1.round(freq)
self.assertEqual(r1, s1)
r2 = t2.round(freq)
self.assertEqual(r2, s2)
# invalid
for freq in ['Y', 'M', 'foobar']:
self.assertRaises(ValueError, lambda: t1.round(freq))
t1 = timedelta_range('1 days', periods=3, freq='1 min 2 s 3 us')
t2 = -1 * t1
t1a = timedelta_range('1 days', periods=3, freq='1 min 2 s')
t1c = pd.TimedeltaIndex([1, 1, 1], unit='D')
# note that negative times round DOWN! so don't give whole numbers
for (freq, s1, s2) in [('N', t1, t2),
('U', t1, t2),
('L', t1a,
TimedeltaIndex(['-1 days +00:00:00',
'-2 days +23:58:58',
'-2 days +23:57:56'],
dtype='timedelta64[ns]',
freq=None)
),
('S', t1a,
TimedeltaIndex(['-1 days +00:00:00',
'-2 days +23:58:58',
'-2 days +23:57:56'],
dtype='timedelta64[ns]',
freq=None)
),
('12T', t1c,
TimedeltaIndex(['-1 days',
'-1 days',
'-1 days'],
dtype='timedelta64[ns]',
freq=None)
),
('H', t1c,
TimedeltaIndex(['-1 days',
'-1 days',
'-1 days'],
dtype='timedelta64[ns]',
freq=None)
),
('d', t1c,
pd.TimedeltaIndex([-1, -1, -1], unit='D')
)]:
r1 = t1.round(freq)
tm.assert_index_equal(r1, s1)
r2 = t2.round(freq)
tm.assert_index_equal(r2, s2)
# invalid
for freq in ['Y', 'M', 'foobar']:
self.assertRaises(ValueError, lambda: t1.round(freq))
def test_contains(self):
# Checking for any NaT-like objects
# GH 13603
td = to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
for v in [pd.NaT, None, float('nan'), np.nan]:
self.assertFalse((v in td))
td = to_timedelta([pd.NaT])
for v in [pd.NaT, None, float('nan'), np.nan]:
self.assertTrue((v in td))
def test_identity(self):
td = Timedelta(10, unit='d')
self.assertTrue(isinstance(td, Timedelta))
self.assertTrue(isinstance(td, timedelta))
def test_short_format_converters(self):
def conv(v):
return v.astype('m8[ns]')
self.assertEqual(ct('10'), np.timedelta64(10, 'ns'))
self.assertEqual(ct('10ns'), np.timedelta64(10, 'ns'))
self.assertEqual(ct('100'), np.timedelta64(100, 'ns'))
self.assertEqual(ct('100ns'), np.timedelta64(100, 'ns'))
self.assertEqual(ct('1000'), np.timedelta64(1000, 'ns'))
self.assertEqual(ct('1000ns'), np.timedelta64(1000, 'ns'))
self.assertEqual(ct('1000NS'), np.timedelta64(1000, 'ns'))
self.assertEqual(ct('10us'), np.timedelta64(10000, 'ns'))
self.assertEqual(ct('100us'), np.timedelta64(100000, 'ns'))
self.assertEqual(ct('1000us'), np.timedelta64(1000000, 'ns'))
self.assertEqual(ct('1000Us'), np.timedelta64(1000000, 'ns'))
self.assertEqual(ct('1000uS'), np.timedelta64(1000000, 'ns'))
self.assertEqual(ct('1ms'), np.timedelta64(1000000, 'ns'))
self.assertEqual(ct('10ms'), np.timedelta64(10000000, 'ns'))
self.assertEqual(ct('100ms'), np.timedelta64(100000000, 'ns'))
self.assertEqual(ct('1000ms'), np.timedelta64(1000000000, 'ns'))
self.assertEqual(ct('-1s'), -np.timedelta64(1000000000, 'ns'))
self.assertEqual(ct('1s'), np.timedelta64(1000000000, 'ns'))
self.assertEqual(ct('10s'), np.timedelta64(10000000000, 'ns'))
self.assertEqual(ct('100s'), np.timedelta64(100000000000, 'ns'))
self.assertEqual(ct('1000s'), np.timedelta64(1000000000000, 'ns'))
self.assertEqual(ct('1d'), conv(np.timedelta64(1, 'D')))
self.assertEqual(ct('-1d'), -conv(np.timedelta64(1, 'D')))
self.assertEqual(ct('1D'), conv(np.timedelta64(1, 'D')))
self.assertEqual(ct('10D'), conv(np.timedelta64(10, 'D')))
self.assertEqual(ct('100D'), conv(np.timedelta64(100, 'D')))
self.assertEqual(ct('1000D'), conv(np.timedelta64(1000, 'D')))
self.assertEqual(ct('10000D'), conv(np.timedelta64(10000, 'D')))
# space
self.assertEqual(ct(' 10000D '), conv(np.timedelta64(10000, 'D')))
self.assertEqual(ct(' - 10000D '), -conv(np.timedelta64(10000, 'D')))
# invalid
self.assertRaises(ValueError, ct, '1foo')
self.assertRaises(ValueError, ct, 'foo')
def test_full_format_converters(self):
def conv(v):
return v.astype('m8[ns]')
d1 = np.timedelta64(1, 'D')
self.assertEqual(ct('1days'), conv(d1))
self.assertEqual(ct('1days,'), conv(d1))
self.assertEqual(ct('- 1days,'), -conv(d1))
self.assertEqual(ct('00:00:01'), conv(np.timedelta64(1, 's')))
self.assertEqual(ct('06:00:01'), conv(
np.timedelta64(6 * 3600 + 1, 's')))
self.assertEqual(ct('06:00:01.0'), conv(
np.timedelta64(6 * 3600 + 1, 's')))
self.assertEqual(ct('06:00:01.01'), conv(
np.timedelta64(1000 * (6 * 3600 + 1) + 10, 'ms')))
self.assertEqual(ct('- 1days, 00:00:01'),
conv(-d1 + np.timedelta64(1, 's')))
self.assertEqual(ct('1days, 06:00:01'), conv(
d1 + np.timedelta64(6 * 3600 + 1, 's')))
self.assertEqual(ct('1days, 06:00:01.01'), conv(
d1 + np.timedelta64(1000 * (6 * 3600 + 1) + 10, 'ms')))
# invalid
self.assertRaises(ValueError, ct, '- 1days, 00')
def test_overflow(self):
# GH 9442
s = Series(pd.date_range('20130101', periods=100000, freq='H'))
s[0] += pd.Timedelta('1s 1ms')
# mean
result = (s - s.min()).mean()
expected = pd.Timedelta((pd.DatetimeIndex((s - s.min())).asi8 / len(s)
).sum())
# the computation is converted to float so might be some loss of
# precision
self.assertTrue(np.allclose(result.value / 1000, expected.value /
1000))
# sum
self.assertRaises(ValueError, lambda: (s - s.min()).sum())
s1 = s[0:10000]
self.assertRaises(ValueError, lambda: (s1 - s1.min()).sum())
s2 = s[0:1000]
result = (s2 - s2.min()).sum()
def test_pickle(self):
v = Timedelta('1 days 10:11:12.0123456')
v_p = self.round_trip_pickle(v)
self.assertEqual(v, v_p)
def test_timedelta_hash_equality(self):
# GH 11129
v = Timedelta(1, 'D')
td = timedelta(days=1)
self.assertEqual(hash(v), hash(td))
d = {td: 2}
self.assertEqual(d[v], 2)
tds = timedelta_range('1 second', periods=20)
self.assertTrue(all(hash(td) == hash(td.to_pytimedelta()) for td in
tds))
# python timedeltas drop ns resolution
ns_td = Timedelta(1, 'ns')
self.assertNotEqual(hash(ns_td), hash(ns_td.to_pytimedelta()))
def test_implementation_limits(self):
min_td = Timedelta(Timedelta.min)
max_td = Timedelta(Timedelta.max)
# GH 12727
# timedelta limits correspond to int64 boundaries
self.assertTrue(min_td.value == np.iinfo(np.int64).min + 1)
self.assertTrue(max_td.value == np.iinfo(np.int64).max)
# Beyond lower limit, a NAT before the Overflow
self.assertIsInstance(min_td - Timedelta(1, 'ns'),
NaTType)
with tm.assertRaises(OverflowError):
min_td - Timedelta(2, 'ns')
with tm.assertRaises(OverflowError):
max_td + Timedelta(1, 'ns')
# Same tests using the internal nanosecond values
td = Timedelta(min_td.value - 1, 'ns')
self.assertIsInstance(td, NaTType)
with tm.assertRaises(OverflowError):
Timedelta(min_td.value - 2, 'ns')
with tm.assertRaises(OverflowError):
Timedelta(max_td.value + 1, 'ns')
def test_timedelta_arithmetic(self):
data = pd.Series(['nat', '32 days'], dtype='timedelta64[ns]')
deltas = [timedelta(days=1), Timedelta(1, unit='D')]
for delta in deltas:
result_method = data.add(delta)
result_operator = data + delta
expected = pd.Series(['nat', '33 days'], dtype='timedelta64[ns]')
tm.assert_series_equal(result_operator, expected)
tm.assert_series_equal(result_method, expected)
result_method = data.sub(delta)
result_operator = data - delta
expected = pd.Series(['nat', '31 days'], dtype='timedelta64[ns]')
tm.assert_series_equal(result_operator, expected)
tm.assert_series_equal(result_method, expected)
# GH 9396
result_method = data.div(delta)
result_operator = data / delta
expected = pd.Series([np.nan, 32.], dtype='float64')
tm.assert_series_equal(result_operator, expected)
tm.assert_series_equal(result_method, expected)
def test_apply_to_timedelta(self):
timedelta_NaT = pd.to_timedelta('NaT')
list_of_valid_strings = ['00:00:01', '00:00:02']
a = pd.to_timedelta(list_of_valid_strings)
b = Series(list_of_valid_strings).apply(pd.to_timedelta)
# Can't compare until apply on a Series gives the correct dtype
# assert_series_equal(a, b)
list_of_strings = ['00:00:01', np.nan, pd.NaT, timedelta_NaT]
# TODO: unused?
a = pd.to_timedelta(list_of_strings) # noqa
b = Series(list_of_strings).apply(pd.to_timedelta) # noqa
# Can't compare until apply on a Series gives the correct dtype
# assert_series_equal(a, b)
def test_components(self):
rng = timedelta_range('1 days, 10:11:12', periods=2, freq='s')
rng.components
# with nat
s = Series(rng)
s[1] = np.nan
result = s.dt.components
self.assertFalse(result.iloc[0].isnull().all())
self.assertTrue(result.iloc[1].isnull().all())
def test_isoformat(self):
td = Timedelta(days=6, minutes=50, seconds=3,
milliseconds=10, microseconds=10, nanoseconds=12)
expected = 'P6DT0H50M3.010010012S'
result = td.isoformat()
self.assertEqual(result, expected)
td = Timedelta(days=4, hours=12, minutes=30, seconds=5)
result = td.isoformat()
expected = 'P4DT12H30M5S'
self.assertEqual(result, expected)
td = Timedelta(nanoseconds=123)
result = td.isoformat()
expected = 'P0DT0H0M0.000000123S'
self.assertEqual(result, expected)
# trim nano
td = Timedelta(microseconds=10)
result = td.isoformat()
expected = 'P0DT0H0M0.00001S'
self.assertEqual(result, expected)
# trim micro
td = | Timedelta(milliseconds=1) | pandas.Timedelta |
# Import Module
import PyPDF2
from PyPDF2.utils import PdfReadError
import pdfx
from urlextract import URLExtract
import requests
import fitz
import click
import argparse
import os
from urllib.parse import urlparse, ParseResult
from fpdf import FPDF
import gspread
import pandas as pd
from gspread_dataframe import get_as_dataframe, set_with_dataframe
#import pdb;pdb.set_trace()
# Parse args
parser = argparse.ArgumentParser(description='Description of your program')
parser.add_argument('-p','--path', help='Localization of the files', default= "./CitationSaver/")
parser.add_argument('-d','--destination', help='Destination of the URLs extract', default= "./URLs/")
parser.add_argument('-a','--afterprocessed', help='Destination of the files processed', default= "./Processed/")
parser.add_argument('-w','--pathwarc', help='Destination of the WARCs for each file', default= "./WARCs/")
parser.add_argument('-j','--pathjson', help='Destination of the json file with google service key', default= "JSON")
parser.add_argument('-k','--key', help='Key Google Spreadsheet', default= "KEY")
parser.add_argument('-ws','--worksheet', help='Worksheet Google Spreadsheet', default= "WORKSHEET")
args = vars(parser.parse_args())
#Connect gspread
gc = gspread.service_account(filename=args['pathjson'])
sh = gc.open_by_key(args['key'])
worksheet = sh.worksheet(args['worksheet'])
#Transform worksheet to pandas dataframe
df = get_as_dataframe(worksheet)
#Global variable with the URLs check for each document
list_urls_check = []
# Extract URLs from text
def extract_url(text, list_urls):
extractor = URLExtract()
urls = extractor.find_urls(text)
for url in urls:
url = url.replace(",", "")
if "http" in url:
url = url[url.find('http'):]
if url not in list_urls:
list_urls.append(url)
# Check if the URLs is available
def check_url(scheme, netloc, path, url_parse, output):
url_parse = ParseResult(scheme, netloc, path, *url_parse[3:])
response = requests.head(url_parse.geturl())
if str(response.status_code).startswith("2") or str(response.status_code).startswith("3"):
output.write(url_parse.geturl()+"\n")
list_urls_check.append(url_parse.geturl())
else:
url_parse = ParseResult("https", netloc, path, *url_parse[3:])
response = requests.head(url_parse.geturl())
if str(response.status_code).startswith("2") or str(response.status_code).startswith("3"):
output.write(url_parse.geturl()+"\n")
list_urls_check.append(url_parse.geturl())
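# Behaviour sketch (my summary of the function above): check_url first issues a
# HEAD request with the parsed scheme (http by default) and, on a non-2xx/3xx
# status, retries the same netloc/path over https; every reachable URL is both
# written to the output file and collected in list_urls_check for the sheet update.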
def check_pdf(file_name, file):
    # Validate that the document parses as a PDF before any URL extraction is attempted
    try:
        PyPDF2.PdfFileReader(file_name)
        return True
    except PdfReadError:
        return False
def extract_urls_pdf(file, file_name, list_urls):
#First method: PyPDF2
# Open File file
pdfFileObject = open(file_name, 'rb')
pdfReader = PyPDF2.PdfFileReader(pdfFileObject)
# Iterate through all pages
for page_number in range(pdfReader.numPages):
pageObject = pdfReader.getPage(page_number)
# Extract text from page
pdf_text = pageObject.extractText()
extract_url(pdf_text, list_urls)
if not list_urls:
#Update GoogleSheet
update_google_sheet(file, "", "", "", "Problem using PyPDF2 process", True)
# CLose the PDF
pdfFileObject.close()
#Second method: PDFx
# Read PDF File
pdf = pdfx.PDFx(file_name)
# Get list of URL
json = pdf.get_references_as_dict()
if len(json) != 0:
for elem in json['url']:
if elem not in list_urls:
list_urls.append(elem)
else:
#Update GoogleSheet
update_google_sheet(file, "", "", "", "Problem using PDFx process", True)
#Third method: fitz
# Load PDF
with fitz.open(file_name) as doc:
text = ""
for page in doc:
text += page.getText().strip()#.replace("\n", "")
text = ' '.join(text.split())
extract_url(text, list_urls)
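# Note (my summary): the three passes above are complementary, namely PyPDF2 page
# text, PDFx reference metadata, and fitz (PyMuPDF) raw text; any URL found by at
# least one of them ends up de-duplicated in list_urls.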
def check_urls(list_urls, output_file):
urls_to_google_sheet = []
if list_urls != []:
# Process the URLs
with open(output_file, 'w') as output:
# Remove mailto links
links = [url for url in list_urls if "mailto:" not in url]
for elem in links:
#Remove trash at the end of the URLs
if elem.endswith(";") or elem.endswith(".") or elem.endswith(")") or elem.endswith("/"):
elem = elem[:-1]
url_parse = urlparse(elem, 'http')
#URL parse
scheme = url_parse.scheme
netloc = url_parse.netloc or url_parse.path
path = url_parse.path if url_parse.netloc else ''
if not netloc.startswith('www.'):
netloc = 'www.' + netloc
try:
#Check if URL
check_url(scheme, netloc, path, url_parse, output)
except:
continue
#else:
#do something
def update_google_sheet(file, path_output, list_urls, list_urls_check, note, error):
#Get the index from the file being processed in the google sheet
index = df.index[df['File Name CitationSaver System']==file].tolist()
if not error:
#Check if columns are empty for the present row
if pd.isnull(df.at[index[0], 'Results URLs File Path']) and | pd.isnull(df.at[index[0], 'Results URLs without check']) | pandas.isnull |
'''
Created on Jun 8, 2017
@author: husensofteng
'''
import matplotlib
matplotlib.use('Agg')
from matplotlib.backends.backend_pdf import PdfPages
import pybedtools
from pybedtools.bedtool import BedTool
from matplotlib.pyplot import tight_layout
import matplotlib.pyplot as plt
from pylab import gca
import pandas as pd
import math
import numpy as np
from decimal import Decimal
import os, sys
import seaborn as sns
import operator
import argparse
sns.set(style="ticks")
#plt.style.use('ggplot')
sns.set_style("white")
sns.set_context("paper")#talk
from utils import *
def get_mut_df(input='', x_col_index=5, y_col_index=8, x_col_name = 'Cancer types', y_col_name='Mutation Frequency (log10)'):
if os.path.isfile(str(input)):
names = [x_col_name, y_col_name]
if x_col_index>y_col_index:
names = [y_col_name, x_col_name]
df = pd.read_table(input, sep='\t', header=None, usecols=[x_col_index, y_col_index], names=names)
if x_col_name=="Chromatin States":
df[x_col_name] = df[x_col_name].apply(get_unique_state).apply(replace_state)
return df
def plot_boxplot(df, x_col_name = 'Cancer types', y_col_name='Mutation Frequency (log10)', title="",
groups_colors_dict=None, order=None, rotation=90, fig_width=8, fig_height=6, log=False
):
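    # Counts the rows in each (x, y) group and box-plots those group sizes
    # (optionally log10-transformed) per x category.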
plt.clf()
fig = plt.figure(figsize=(fig_width, fig_height))
ax = fig.add_subplot(111)
df['counts'] = [1 for i in range(0,len(df))]
dfg = df.groupby(by=[x_col_name,y_col_name])
counts = []
if log:
counts = dfg.size().apply(math.log10).rename('counts').tolist()
else:
counts = dfg.size().rename('counts').tolist()
categories = []
for c in dfg[x_col_name]:
categories.append(c[0][0])
sns.boxplot(y = counts, x=categories, palette=groups_colors_dict, ax=ax, order=order)
ax.set_xticklabels(ax.get_xticklabels(), rotation=rotation)
ax.set_ylabel(y_col_name)
ax.set_title(label=title, loc='left')
sns.despine()
plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
return fig
def plot_heatmap(df, x_col_name, y_col_name, fig_width=8, fig_height=6, title="", rotation=90, threshold_to_include_element=500):
plt.clf()
fig = plt.figure(figsize=(fig_width, fig_height))
ax = fig.add_subplot(111)
df_pivot_filtered = pd.DataFrame()
for c in df.columns:
if df[c].sum()>threshold_to_include_element:
df_pivot_filtered[c] = df[c]
cbar_ax = fig.add_axes([.75, 0.85, .2, .03])
sns.heatmap(df_pivot_filtered, ax=ax, square=True, cbar_ax=cbar_ax, cbar=True, cbar_kws={"orientation": "horizontal"}, cmap = "YlGnBu" )
ax.set_xticklabels(ax.get_xticklabels(), rotation=rotation)
ax.set_ylabel(y_col_name)
ax.set_title(label=title, loc='left')
sns.despine()
plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
return fig
def get_df_from_elements(input_file='', col_to_use='RegMuts', sep='#', x_col_index=5, y_col_index=8, x_col_name = 'Cancer types', y_col_name='Mutation Frequency (log10)',
col_to_check='#Samples(RegMuts)', threshold=1):
elements_input = pd.read_table(input_file, sep='\t', skiprows=6, header=0, usecols=[col_to_check,col_to_use])
elements_input = elements_input[(elements_input['#Samples(RegMuts)']>threshold)]
box_plot_list = []
heatmap_dict = {}
for i, l in elements_input[col_to_use].iteritems():
for m in l.split(','):
x_col_value = m.split(sep)[x_col_index].split('_')[-1]
if x_col_name=="Chromatin States":
x_col_value = replace_state(x_col_value.split('_')[-1])
y_col_value = m.split('#')[y_col_index].split('_')[0]
box_plot_list.append([x_col_value,y_col_value])
try:
heatmap_dict[y_col_value][x_col_value] += 1
except KeyError:
try:
heatmap_dict[y_col_value][x_col_value] = 1
except KeyError:
heatmap_dict[y_col_value] = {x_col_value: 1}
box_plot_df = pd.DataFrame(box_plot_list, columns=[x_col_name, y_col_name])
heatmap_df = | pd.DataFrame(heatmap_dict) | pandas.DataFrame |
import os
import logging.config
import pandas as pd
from omegaconf import DictConfig
import hydra
from src.entities.predict_pipeline_params import PredictingPipelineParams, \
PredictingPipelineParamsSchema
from src.models import make_prediction
from src.utils import read_data, load_pkl_file
logger = logging.getLogger("ml_project/predict_pipeline")
def predict_pipeline(evaluating_pipeline_params: PredictingPipelineParams):
logger.info("Start prediction pipeline")
data = read_data(evaluating_pipeline_params.input_data_path)
logger.info(f"Dataset shape is {data.shape}")
logger.info("Loading transformer...")
transformer = load_pkl_file(evaluating_pipeline_params.pipeline_path)
transformed_data = pd.DataFrame(transformer.transform(data))
logger.info("Loading model...")
model = load_pkl_file(evaluating_pipeline_params.model_path)
logger.info("Start prediction")
predicts = make_prediction(
model,
transformed_data,
)
df_predicts = | pd.DataFrame(predicts) | pandas.DataFrame |
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from __future__ import division
from unittest import TestCase, main
from tempfile import mkdtemp, mkstemp
from os.path import exists, join, basename
from os import remove, close, mkdir
from functools import partial
from shutil import rmtree
import pandas as pd
from qiita_core.util import qiita_test_checker
from qiita_db.util import (get_db_files_base_dir, get_mountpoint,
convert_to_id, get_count)
from qiita_db.data import RawData, PreprocessedData
from qiita_db.study import Study
from qiita_db.parameters import (PreprocessedIlluminaParams,
ProcessedSortmernaParams,
Preprocessed454Params)
from qiita_db.metadata_template import PrepTemplate
from qiita_ware.processing_pipeline import (_get_preprocess_fastq_cmd,
_get_preprocess_fasta_cmd,
_insert_preprocessed_data,
generate_demux_file,
_get_qiime_minimal_mapping,
_get_process_target_gene_cmd,
_insert_processed_data_target_gene)
@qiita_test_checker()
class ProcessingPipelineTests(TestCase):
def setUp(self):
self.db_dir = get_db_files_base_dir()
self.files_to_remove = []
self.dirs_to_remove = []
def tearDown(self):
for fp in self.files_to_remove:
if exists(fp):
remove(fp)
for dp in self.dirs_to_remove:
if exists(dp):
rmtree(dp)
def test_get_qiime_minimal_mapping_single(self):
prep_template = PrepTemplate(1)
out_dir = mkdtemp()
obs_fps = _get_qiime_minimal_mapping(prep_template, out_dir)
exp_fps = [join(out_dir, 's_G1_L001_sequences_MMF.txt')]
# Check that the returned list is as expected
self.assertEqual(obs_fps, exp_fps)
# Check that the file exists
self.assertTrue(exists(exp_fps[0]))
# Check the contents of the file
with open(exp_fps[0], "U") as f:
self.assertEqual(f.read(), EXP_PREP)
def test_get_qiime_minimal_mapping_multiple(self):
# We need to create a prep template in which we have different run
# prefix values, so we can test this case
metadata_dict = {
'SKB8.640193': {'center_name': 'ANL',
'center_project_name': 'Test Project',
'ebi_submission_accession': None,
'EMP_status': 'EMP',
'str_column': 'Value for sample 1',
'linkerprimersequence': 'GTGCCAGCMGCCGCGGTAA',
'barcodesequence': 'GTCCGCAAGTTA',
'run_prefix': "s_G1_L001_sequences",
'platform': 'ILLUMINA',
'library_construction_protocol': 'AAA',
'experiment_design_description': 'BBB'},
'SKD8.640184': {'center_name': 'ANL',
'center_project_name': 'Test Project',
'ebi_submission_accession': None,
'EMP_status': 'EMP',
'str_column': 'Value for sample 2',
'linkerprimersequence': 'GTGCCAGCMGCCGCGGTAA',
'barcodesequence': 'CGTAGAGCTCTC',
'run_prefix': "s_G1_L001_sequences",
'platform': 'ILLUMINA',
'library_construction_protocol': 'AAA',
'experiment_design_description': 'BBB'},
'SKB7.640196': {'center_name': 'ANL',
'center_project_name': 'Test Project',
'ebi_submission_accession': None,
'EMP_status': 'EMP',
'str_column': 'Value for sample 3',
'linkerprimersequence': 'GTGCCAGCMGCCGCGGTAA',
'barcodesequence': 'CCTCTGAGAGCT',
'run_prefix': "s_G1_L002_sequences",
'platform': 'ILLUMINA',
'library_construction_protocol': 'AAA',
'experiment_design_description': 'BBB'}
}
md_template = | pd.DataFrame.from_dict(metadata_dict, orient='index') | pandas.DataFrame.from_dict |
import pandas as pd
import numpy as np
def set_to_nan(array, n_nans):
    array = array.copy()  # a plain slice of a NumPy array is a view, so make an explicit copy
n = array.shape[0]
nan_indices = np.random.choice(np.arange(n), size=n_nans, replace=False)
array[nan_indices] = np.nan
return array
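# e.g. set_to_nan(np.linspace(0.0, 1.0, 100), 5) yields the series with 5 randomly
# chosen entries replaced by NaN (the input must be a float array).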
n_timesteps = 10000
x = np.linspace(0, 20, n_timesteps)
observable_1 = 10 * np.sin(2 * np.pi * x) + 20
observable_1 += 2 * np.cos(np.pi * x)
observable_1 += 2 * np.cos(1.3 * np.pi * x)
observable_1 += 1.2 * x
observable_2 = 9.2 * np.sin(2.2 * np.pi * x) + 24
observable_2 += 2.1 * np.cos(0.9 * np.pi * x)
observable_2 += 1.9 * np.cos(1.4 * np.pi * x)
observable_2 += 1.1 * x
target = 0.83 * observable_1 + 1.2 * observable_2
n_to_nan = int(n_timesteps * 0.05)
n_to_nan2 = int(n_timesteps * 0.08)
observable_1 = set_to_nan(observable_1, n_to_nan)
observable_2 = set_to_nan(observable_2, n_to_nan2)
data = np.zeros((n_timesteps, 3))
data[:, 0] = target
data[:, 1] = observable_1
data[:, 2] = observable_2
time = | pd.date_range(start="1/1/2008", end="1/1/2015", periods=n_timesteps) | pandas.date_range |
#!/usr/bin/env python3
import pandas as pd
from os import path
cutoffs = pd.read_csv("data/ores_rcfilters_cutoffs.csv")
wikis = set(cutoffs.wiki_db)
sets = []
for wiki_db in wikis:
scores_file = "data/quarry_ores_scores/{0}_scores.csv".format(wiki_db)
if path.exists(scores_file):
scores = | pd.read_csv(scores_file) | pandas.read_csv |
import os
import sys
import inspect
import argparse
import importlib.util
from os import listdir
from os.path import isfile, join
# Lambda functions cannot raise exceptions, so higher-order functions are used instead.
def _raise(e):
def raise_helper():
raise e
return raise_helper
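# Example: if `harvest.api.robinhood` fails to import below, `Robinhood` is bound to
# `_raise(ModuleNotFoundError(...))`, so a later call such as `Robinhood()` re-raises the
# original import error only when that broker/streamer is actually requested.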
from harvest.utils import debugger
from harvest.storage.base_storage import BaseStorage
from harvest.storage.csv_storage import CSVStorage
from harvest.storage.pickle_storage import PickleStorage
# For imports with extra dependencies, suppress the import error unless the user requests that resource without having the dependencies installed.
try:
from harvest.storage.database_storage import DBStorage
except ModuleNotFoundError as e:
DBStorage = _raise(e)
from harvest.api.dummy import DummyStreamer
from harvest.api.yahoo import YahooStreamer
from harvest.api.polygon import PolygonStreamer
from harvest.api.paper import PaperBroker
try:
from harvest.api.robinhood import Robinhood
except ModuleNotFoundError as e:
Robinhood = _raise(e)
try:
from harvest.api.alpaca import Alpaca
except ModuleNotFoundError as e:
Alpaca = _raise(e)
try:
from harvest.api.kraken import Kraken
except ModuleNotFoundError as e:
Kraken = _raise(e)
try:
from harvest.api.webull import Webull
except ModuleNotFoundError as e:
Webull = _raise(e)
from harvest.trader import LiveTrader
from harvest.algo import BaseAlgo
storages = {
"memory": BaseStorage,
"csv": CSVStorage,
"pickle": PickleStorage,
"db": DBStorage,
}
streamers = {
"dummy": DummyStreamer,
"yahoo": YahooStreamer,
"polygon": PolygonStreamer,
"robinhood": Robinhood,
"alpaca": Alpaca,
"kraken": Kraken,
"webull": Webull,
}
brokers = {
"paper": PaperBroker,
"robinhood": Robinhood,
"alpaca": Alpaca,
"kraken": Kraken,
"webull": Webull,
}
parser = argparse.ArgumentParser(description="Harvest CLI")
subparsers = parser.add_subparsers(dest="command")
# Parser for starting harvest
start_parser = subparsers.add_parser("start")
start_parser.add_argument(
"-o",
"--storage",
default="memory",
help="the way to store asset data",
choices=list(storages.keys()),
)
start_parser.add_argument(
"-s",
"--streamer",
default="yahoo",
help="fetches asset data",
choices=list(streamers.keys()),
)
start_parser.add_argument(
"-b",
"--broker",
default="streamer",
help="buys and sells assets on your behalf",
choices=list(brokers.keys()),
)
# Directory with algos that you want to run, default is the current working directory.
start_parser.add_argument(
"-d",
"--directory",
default=".",
help="directory where algorithms are located",
)
start_parser.add_argument(
"--debug", default=False, action=argparse.BooleanOptionalAction
)
# Parser for visualing data
visualize_parser = subparsers.add_parser("visualize")
visualize_parser.add_argument("path", help="path to harvest generated data file")
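# Hedged CLI examples (the installed command name, assumed here to be `harvest`, may differ):
#   harvest start -s yahoo -b paper -d ./algos --debug
#   harvest visualize ./data/SPY_5MIN.csv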
def main():
"""
Entrypoint which parses the command line arguments. Calls subcommands based on which subparser was used.
:args: A Namespace object containing parsed user arguments.
"""
args = parser.parse_args()
# Handles the start command
if args.command == "start":
start(args)
elif args.command == "visualize":
visualize(args)
# Show help if case not found
else:
parser.print_help(sys.stderr)
sys.exit(1)
def start(args: argparse.Namespace, test: bool = False):
"""
Starts the Harvest LiveTrader with the given storage, streamer, broker, and algos specified.
:args: A Namespace object containing parsed user arguments.
:test: True if we are testing so that we can exit this function cleanly.
"""
storage = _get_storage(args.storage)
streamer = _get_streamer(args.streamer)
broker = _get_broker(args.broker, args.streamer, streamer)
debug = args.debug
trader = LiveTrader(streamer=streamer, broker=broker, storage=storage, debug=debug)
# Get the directories.
directory = args.directory
debugger.info(f"🕵 Searching directory {directory}")
files = [fi for fi in listdir(directory) if isfile(join(directory, fi))]
debugger.info(f"🎉 Found files {files}")
# For each file in the directory...
for f in files:
names = f.split(".")
# Filter out non-python files.
if len(names) <= 1 or names[-1] != "py":
continue
name = "".join(names[:-1])
# ...open it...
with open(join(directory, f), "r") as algo_file:
firstline = algo_file.readline()
if firstline.find("HARVEST_SKIP") != -1:
debugger.info(f"ℹ Skipping {f}")
continue
# ...load in the entire file and add the algo to the trader.
algo_path = os.path.realpath(join(directory, f))
spec = importlib.util.spec_from_file_location(name, algo_path)
algo = importlib.util.module_from_spec(spec)
spec.loader.exec_module(algo)
        # Iterate through the variables; if a variable is a subclass of BaseAlgo, instantiate it and add it to the trader.
for algo_cls in inspect.getmembers(algo):
k, v = algo_cls[0], algo_cls[1]
if inspect.isclass(v) and v != BaseAlgo and issubclass(v, BaseAlgo):
debugger.info(f"🎉 Found algo {k} in {f}, adding to trader")
trader.add_algo(v())
if not test:
debugger.info(f"🎊 Starting trader")
trader.start()
def visualize(args: argparse.Namespace):
"""
Read a csv or pickle file created by Harvest with ohlc data and graph the data.
:args: A Namespace object containing parsed user arguments.
"""
import re
import pandas as pd
import mplfinance as mpf
# Open the file using the appropriate parser.
if args.path.endswith(".csv"):
df = pd.read_csv(args.path)
df["timestamp"] = | pd.to_datetime(df["timestamp"]) | pandas.to_datetime |
import pandas as pd
import numpy as np
import sys
from openpyxl.utils.dataframe import dataframe_to_rows
from openpyxl import Workbook
import argparse
import re
from sklearn import linear_model
from sklearn.metrics import mean_squared_error, r2_score
import warnings
import os
import math
import datetime
from future.utils import itervalues, iteritems
import json
import requests
from glob2 import glob
import matplotlib.pyplot as plt
from string import printable
def configure_for_printing(
df,
Result_df_mean_1mps,
Result_df_mean_05mps,
Result_df_std_1mps,
Result_df_std_05mps,
Result_df_mean_tiRef,
Result_df_std_tiRef,
stabilityClass=False,
byTIRefbin=False,
):
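    # Pulls the per-statistic ('mean'/'std') slices out of the nested result frames in df and
    # appends them to the running result DataFrames, optionally prefixing row labels with the
    # stability class; the *_tiRef frames are only extended when byTIRefbin is True.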
    rowNumber = 0  # counter used further below; initialized here so the increments cannot raise
    if isinstance(df, list) == False:
pass
else:
for val in df:
for i in val:
if isinstance(i, pd.DataFrame) == False:
pass
else:
try:
dat = i[i.columns.to_list()[0][0]]
stats = list(set([tup[0] for tup in dat.columns.to_list()]))
for s in stats:
try:
new_data = dat[s].add_prefix(str(s + "_"))
if stabilityClass:
new_index = [
str("stability_" + stabilityClass + "_" + n)
for n in new_data.index.to_list()
]
new_data.index = new_index
if s == "mean" and new_data.columns.name == "bins":
Result_df_mean_1mps = pd.concat(
[Result_df_mean_1mps, new_data]
)
elif s == "mean" and new_data.columns.name == "bins_p5":
Result_df_mean_05mps = pd.concat(
[Result_df_mean_05mps, new_data]
)
elif s == "std" and new_data.columns.name == "bins":
Result_df_std_1mps = pd.concat(
[Result_df_std_1mps, new_data]
)
elif s == "std" and new_data.columns.name == "bins_p5":
Result_df_std_05mps = pd.concat(
[Result_df_std_05mps, new_data]
)
except:
print(
str(
"No data to write in one of the dataframes of results"
)
)
rowNumber += 1
except:
pass
if byTIRefbin:
if isinstance(df, list) == False:
pass
else:
for val in df:
for i in val:
if isinstance(i, pd.DataFrame) == False:
pass
else:
try:
dat = i[i.columns.to_list()[0][0]]
stats = list(set([tup[0] for tup in dat.columns.to_list()]))
for s in stats:
try:
new_data = dat[s].add_prefix(str(s + "_"))
if stabilityClass:
new_index = [
str("stability_" + stabilityClass + "_" + n)
for n in new_data.index.to_list()
]
new_data.index = new_index
if (
s == "mean"
and new_data.columns.name == "RefTI_bins"
):
Result_df_mean_tiRef = pd.concat(
[Result_df_mean_tiRef, new_data]
)
elif (
s == "std"
and new_data.columns.name == "RefTI_bins"
):
Result_df_std_tiRef = pd.concat(
[Result_df_std_tiRef, new_data]
)
except:
print(
str(
"No data to write in one of the dataframes of results"
)
)
rowNumber += 1
except:
pass
else:
Result_df_mean_tiRef = Result_df_mean_tiRef
Result_df_std_tiRef = Result_df_std_tiRef
return (
Result_df_mean_1mps,
Result_df_mean_05mps,
Result_df_std_1mps,
Result_df_std_05mps,
Result_df_mean_tiRef,
Result_df_std_tiRef,
)
def write_resultstofile(df, ws, r_start, c_start):
# write the regression results to file.
rows = dataframe_to_rows(df)
for r_idx, row in enumerate(rows, r_start):
for c_idx, value in enumerate(row, c_start):
try:
ws.cell(row=r_idx, column=c_idx, value=value)
except ValueError:
ws.cell(row=r_idx, column=c_idx, value=value[0])
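# Hedged example: write_resultstofile(pd.DataFrame({"a": [1, 2]}), ws, 1, 1) writes the
# frame (header, index and values, as yielded by dataframe_to_rows) starting at cell A1.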
def write_all_resultstofile(
reg_results,
baseResultsLists,
count_1mps,
count_05mps,
count_1mps_train,
count_05mps_train,
count_1mps_test,
count_05mps_test,
name_1mps_tke,
name_1mps_alpha_Ane,
name_1mps_alpha_RSD,
name_05mps_tke,
name_05mps_alpha_Ane,
name_05mps_alpha_RSD,
count_05mps_tke,
count_05mps_alpha_Ane,
count_05mps_alpha_RSD,
count_1mps_tke,
count_1mps_alpha_Ane,
count_1mps_alpha_RSD,
results_filename,
siteMetadata,
filterMetadata,
Timestamps,
timestamp_train,
timestamp_test,
regimeBreakdown_tke,
regimeBreakdown_ane,
regimeBreakdown_rsd,
Ht_1_ane,
Ht_2_ane,
extrap_metadata,
reg_results_class1,
reg_results_class2,
reg_results_class3,
reg_results_class4,
reg_results_class5,
reg_results_class1_alpha,
reg_results_class2_alpha,
reg_results_class3_alpha,
reg_results_class4_alpha,
reg_results_class5_alpha,
Ht_1_rsd,
Ht_2_rsd,
ResultsLists_stability,
ResultsLists_stability_alpha_RSD,
ResultsLists_stability_alpha_Ane,
stabilityFlag,
cup_alphaFlag,
RSD_alphaFlag,
TimeTestA_baseline_df,
TimeTestB_baseline_df,
TimeTestC_baseline_df,
time_test_A_adjustment_df,
time_test_B_adjustment_df,
time_test_C_adjustment_df,
):
wb = Workbook()
ws = wb.active
Dist_stats_df = pd.DataFrame()
# all baseline regressions
# ------------------------
a = wb.create_sheet(title="Baseline Results")
rowNumber = 1
write_resultstofile(reg_results, a, rowNumber, 1)
rowNumber += len(reg_results) + 3
col = 1
if stabilityFlag:
write_resultstofile(reg_results_class1[0], a, rowNumber, col)
rowNumber2 = rowNumber + len(reg_results) + 3
write_resultstofile(reg_results_class2[0], a, rowNumber2, col)
rowNumber3 = rowNumber2 + len(reg_results) + 3
write_resultstofile(reg_results_class3[0], a, rowNumber3, col)
rowNumber4 = rowNumber3 + len(reg_results) + 3
write_resultstofile(reg_results_class4[0], a, rowNumber4, col)
rowNumber5 = rowNumber4 + len(reg_results) + 3
write_resultstofile(reg_results_class5[0], a, rowNumber5, col)
for i in range(1, len(reg_results_class1)):
col += reg_results_class1[0].shape[1] + 2
write_resultstofile(reg_results_class1[i], a, rowNumber, col)
write_resultstofile(reg_results_class2[i], a, rowNumber2, col)
write_resultstofile(reg_results_class3[i], a, rowNumber3, col)
write_resultstofile(reg_results_class4[i], a, rowNumber4, col)
write_resultstofile(reg_results_class5[i], a, rowNumber5, col)
rowNumber = rowNumber5 + len(reg_results) + 3
if cup_alphaFlag:
write_resultstofile(reg_results_class1_alpha["Ane"], a, rowNumber, 1)
rowNumber = rowNumber + len(reg_results_class1_alpha["Ane"]) + 3
write_resultstofile(reg_results_class2_alpha["Ane"], a, rowNumber, 1)
rowNumber = rowNumber + len(reg_results_class2_alpha["Ane"]) + 3
write_resultstofile(reg_results_class3_alpha["Ane"], a, rowNumber, 1)
rowNumber = rowNumber + len(reg_results_class3_alpha["Ane"]) + 3
write_resultstofile(reg_results_class4_alpha["Ane"], a, rowNumber, 1)
rowNumber = rowNumber + len(reg_results_class4_alpha["Ane"]) + 3
write_resultstofile(reg_results_class5_alpha["Ane"], a, rowNumber, 1)
rowNumber = rowNumber + len(reg_results_class5_alpha["Ane"]) + 3
if RSD_alphaFlag:
write_resultstofile(reg_results_class1_alpha["RSD"], a, rowNumber, 1)
rowNumber = rowNumber + len(reg_results_class1_alpha["RSD"]) + 3
write_resultstofile(reg_results_class2_alpha["RSD"], a, rowNumber, 1)
rowNumber = rowNumber + len(reg_results_class2_alpha["RSD"]) + 3
write_resultstofile(reg_results_class3_alpha["RSD"], a, rowNumber, 1)
rowNumber = rowNumber + len(reg_results_class3_alpha["RSD"]) + 3
write_resultstofile(reg_results_class4_alpha["RSD"], a, rowNumber, 1)
rowNumber = rowNumber + len(reg_results_class4_alpha["RSD"]) + 3
write_resultstofile(reg_results_class5_alpha["RSD"], a, rowNumber, 1)
rowNumber = rowNumber + len(reg_results_class5_alpha["RSD"]) + 3
# total bin counts and length of observations
totalcount_1mps = count_1mps.sum().sum()
totalcount_1mps_train = count_1mps_train.sum().sum()
totalcount_1mps_test = count_1mps_test.sum().sum()
totalcount_05mps = count_05mps.sum().sum()
totalcount_05mps_train = count_05mps_train.sum().sum()
totalcount_05mps_test = count_05mps_test.sum().sum()
name_1mps_tke = []
name_1mps_alpha_Ane = []
name_1mps_alpha_RSD = []
rowNumber = rowNumber + 2
a.cell(row=rowNumber, column=1, value="Total Count (number of observations)")
a.cell(row=rowNumber, column=2, value=totalcount_1mps)
a.cell(
row=rowNumber,
column=4,
value="Total Count in Training Subset (number of observations)",
)
a.cell(row=rowNumber, column=5, value=totalcount_1mps_train)
a.cell(
row=rowNumber,
column=7,
value="Total Count in Training Subset (number of observations)",
)
a.cell(row=rowNumber, column=8, value=totalcount_1mps_test)
rowNumber += 2
a.cell(row=rowNumber, column=1, value="Bin Counts")
rowNumber += 1
c_1mps = count_1mps["RSD_WS"]["count"]
c_1mps.index = ["count"]
write_resultstofile(c_1mps, a, rowNumber, 1)
rowNumber += 4
c_05mps = count_05mps["RSD_WS"]["count"]
c_05mps.index = ["count"]
write_resultstofile(c_05mps, a, rowNumber, 1)
rowNumber += 4
a.cell(row=rowNumber, column=1, value="Bin Counts (Train)")
rowNumber += 1
c_1mps_train = count_1mps_train["RSD_WS"]["count"]
c_1mps_train.index = ["count"]
write_resultstofile(c_1mps_train, a, rowNumber, 1)
rowNumber += 4
c_05mps_train = count_05mps_train["RSD_WS"]["count"]
c_05mps_train.index = ["count"]
write_resultstofile(c_05mps_train, a, rowNumber, 1)
rowNumber += 4
a.cell(row=rowNumber, column=1, value="Bin Counts (Test)")
rowNumber += 1
c_1mps_test = count_1mps_test["RSD_WS"]["count"]
c_1mps_test.index = ["count"]
write_resultstofile(c_1mps_test, a, rowNumber, 1)
rowNumber += 4
c_05mps_test = count_05mps_test["RSD_WS"]["count"]
c_05mps_test.index = ["count"]
write_resultstofile(c_05mps_test, a, rowNumber, 1)
rowNumber += 4
for c in range(0, len(count_1mps_tke)):
a.cell(row=rowNumber, column=1, value=str("Bin Counts TKE" + str(c + 1)))
rowNumber += 1
try:
c_1mps_test = count_1mps_tke[c]["RSD_WS"]["count"]
c_1mps_test.index = ["count"]
except:
c_1mps_test = pd.DataFrame()
write_resultstofile(c_1mps_test, a, rowNumber, 1)
rowNumber += 4
try:
c_05mps_test = count_05mps_tke[c]["RSD_WS"]["count"]
c_05mps_test.index = ["count"]
except:
c_05mps_test = pd.DataFrame()
write_resultstofile(c_05mps_test, a, rowNumber, 1)
rowNumber += 4
for c in range(0, len(count_1mps_alpha_Ane)):
a.cell(row=rowNumber, column=1, value=str("Bin Counts alpha Ane" + str(c + 1)))
rowNumber += 1
try:
c_1mps_test = count_1mps_alpha_Ane[c]["RSD_WS"]["count"]
c_1mps_test.index = ["count"]
except:
c_1mps_test = pd.DataFrame()
write_resultstofile(c_1mps_test, a, rowNumber, 1)
rowNumber += 4
try:
c_05mps_test = count_05mps_alpha_Ane[c]["RSD_WS"]["count"]
c_05mps_test.index = ["count"]
except:
c_05mps_test = pd.DataFrame()
write_resultstofile(c_05mps_test, a, rowNumber, 1)
rowNumber += 4
for c in range(0, len(count_1mps_alpha_RSD)):
a.cell(row=rowNumber, column=1, value=str("Bin Counts alpha RSD" + str(c + 1)))
rowNumber += 1
try:
c_1mps_test = count_1mps_alpha_RSD[c]["RSD_WS"]["count"]
c_1mps_test.index = ["count"]
except:
c_1mps_test = pd.DataFrame()
write_resultstofile(c_1mps_test, a, rowNumber, 1)
rowNumber += 4
try:
c_05mps_test = count_05mps_alpha_RSD[c]["RSD_WS"]["count"]
c_05mps_test.index = ["count"]
except:
c_05mps_test = pd.DataFrame()
write_resultstofile(c_05mps_test, a, rowNumber, 1)
rowNumber += 4
totalcount_1mps_alpha_Ane = []
totalcount_1mps_alpha_RSD = []
totalcountTime_days = (totalcount_1mps * 10) / 60 / 24
a.cell(row=rowNumber, column=1, value="Total Time (days)")
a.cell(row=rowNumber, column=2, value=totalcountTime_days)
totalcountTime_days_train = (totalcount_1mps_train * 10) / 60 / 24
a.cell(row=rowNumber, column=4, value="Total Time Train (days)")
a.cell(row=rowNumber, column=5, value=totalcountTime_days_train)
totalcountTime_days_test = (totalcount_1mps_test * 10) / 60 / 24
a.cell(row=rowNumber, column=7, value="Total Time Test (days)")
a.cell(row=rowNumber, column=8, value=totalcountTime_days_test)
rowNumber += 2
a.cell(row=rowNumber, column=1, value="Start Timestamp")
a.cell(row=rowNumber, column=2, value=str(Timestamps[0]))
a.cell(row=rowNumber, column=4, value="Start Timestamp (Train)")
a.cell(row=rowNumber, column=5, value=str(timestamp_train))
a.cell(row=rowNumber, column=7, value="Start Timestamp (Test)")
try:
timestamp_minus1 = str(timestamp_test[-1])
except:
timestamp_minus1 = np.nan
a.cell(row=rowNumber, column=8, value=timestamp_minus1)
a.cell(row=rowNumber + 1, column=1, value="End Timestamp")
a.cell(row=rowNumber + 1, column=2, value=str(Timestamps[-1]))
a.cell(row=rowNumber + 1, column=4, value="End Timestamp (Train)")
try:
timestamp_train_ts = str(timestamp_test[-1])
except:
timestamp_train_ts = np.nan
a.cell(row=rowNumber + 1, column=5, value=timestamp_train_ts)
a.cell(row=rowNumber + 1, column=7, value="End Timestamp (Test)")
a.cell(row=rowNumber + 1, column=8, value=timestamp_minus1)
rowNumber += 3
if stabilityFlag:
a.cell(row=rowNumber, column=1, value="Stability TKE")
rowNumber += 1
write_resultstofile(regimeBreakdown_tke, a, rowNumber, 1)
rowNumber += 9
if cup_alphaFlag:
a.cell(row=rowNumber, column=1, value="Stability alpha: tower")
rowNumber += 1
a.cell(row=rowNumber, column=1, value="Heights for alpha calculation: tower")
a.cell(row=rowNumber, column=2, value=Ht_1_ane)
a.cell(row=rowNumber, column=3, value=Ht_2_ane)
rowNumber += 2
write_resultstofile(regimeBreakdown_ane, a, rowNumber, 1)
rowNumber += 8
if RSD_alphaFlag:
a.cell(row=rowNumber, column=1, value="Stability alpha: RSD")
rowNumber += 1
a.cell(row=rowNumber, column=1, value="Heights for alpha calculation: RSD")
a.cell(row=rowNumber, column=2, value=Ht_1_rsd)
a.cell(row=rowNumber, column=3, value=Ht_2_rsd)
rowNumber += 2
write_resultstofile(regimeBreakdown_rsd, a, rowNumber, 1)
rowNumber += 7
# Metadata
# --------
b = wb.create_sheet(title="Metadata")
rowNumber = 1
b.cell(row=rowNumber, column=1, value="Software version: ")
b.cell(row=rowNumber, column=2, value="1.1.0")
b.cell(row=rowNumber, column=3, value=datetime.datetime.now())
rowNumber += 3
for r in dataframe_to_rows(siteMetadata, index=False):
b.append(r)
rowNumber = rowNumber + 1
rowNumber += 2
b.cell(row=rowNumber, column=1, value="Filter Metadata")
rowNumber += 1
write_resultstofile(filterMetadata, b, rowNumber, 1)
rowNumber += 2
b.cell(row=rowNumber, column=1, value="Extrapolation Metadata")
rowNumber += 1
write_resultstofile(extrap_metadata, b, rowNumber, 1)
rowNumber += 9
b.cell(row=rowNumber, column=1, value="Adjustments Metadata")
rowNumber += 1
for c in baseResultsLists["adjustmentTagList_"]:
b.cell(row=rowNumber, column=1, value="Adjustment applied:")
b.cell(row=rowNumber, column=2, value=c)
rowNumber += 1
# Time Sensitivity Tests
# ----------------------
# TimeTestA, TimeTestB, TimeTestC
Ta = wb.create_sheet(title="Sensitivity2TestLengthA")
rowNumber = 1
Ta.cell(row=rowNumber, column=1, value="baseline")
rowNumber += 1
write_resultstofile(TimeTestA_baseline_df, Ta, rowNumber, 1)
rowNumber += (len(TimeTestA_baseline_df)) + 4
for key in time_test_A_adjustment_df:
Ta.cell(row=rowNumber, column=1, value=key)
rowNumber += 1
write_resultstofile(time_test_A_adjustment_df[key], Ta, rowNumber, 1)
rowNumber += len(time_test_A_adjustment_df[key]) + 3
Tb = wb.create_sheet(title="Sensitivity2TestLengthB")
rowNumber = 1
Tb.cell(row=rowNumber, column=1, value="baseline")
rowNumber += 1
write_resultstofile(TimeTestB_baseline_df, Tb, rowNumber, 1)
rowNumber += len(TimeTestB_baseline_df)
for key in time_test_B_adjustment_df:
Tb.cell(row=rowNumber, column=1, value=key)
rowNumber += 1
write_resultstofile(time_test_B_adjustment_df[key], Tb, rowNumber, 1)
rowNumber += len(time_test_B_adjustment_df[key]) + 3
Tc = wb.create_sheet(title="Sensitivity2TestLengthC")
rowNumber = 1
Tc.cell(row=rowNumber, column=1, value="baseline")
rowNumber += 1
write_resultstofile(TimeTestC_baseline_df, Tc, rowNumber, 1)
rowNumber += len(TimeTestC_baseline_df)
for key in time_test_C_adjustment_df:
Tc.cell(row=rowNumber, column=1, value=key)
rowNumber += 1
write_resultstofile(time_test_C_adjustment_df[key], Tc, rowNumber, 1)
rowNumber += len(time_test_C_adjustment_df[key]) + 3
# record results for each adjustment method
# -----------------------------------------
for adjustment in baseResultsLists[
"adjustmentTagList_"
]: # create tab for each adjustment method
sheetName = adjustment
for i in baseResultsLists["adjustmentTagList_"]:
if i == adjustment:
idx = baseResultsLists["adjustmentTagList_"].index(i)
TI_MBE_j_ = baseResultsLists["TI_MBEList_"][idx]
TI_Diff_j_ = baseResultsLists["TI_DiffList_"][idx]
TI_Diff_r_ = baseResultsLists["TI_DiffRefBinsList_"][idx]
TI_RMSE_j_ = baseResultsLists["TI_RMSEList_"][idx]
RepTI_MBE_j_ = baseResultsLists["RepTI_MBEList_"][idx]
RepTI_Diff_j_ = baseResultsLists["RepTI_DiffList_"][idx]
RepTI_Diff_r_ = baseResultsLists["RepTI_DiffRefBinsList_"][idx]
RepTI_RMSE_j_ = baseResultsLists["RepTI_RMSEList_"][idx]
rep_TI_results_1mps = baseResultsLists["rep_TI_results_1mps_List_"][idx]
rep_TI_results_05mps = baseResultsLists["rep_TI_results_05mps_List_"][idx]
TIbybin = baseResultsLists["TIBinList_"][idx]
TIbyRefbin = baseResultsLists["TIRefBinList_"][idx]
total_stats = baseResultsLists["total_StatsList_"][idx]
belownominal_stats = baseResultsLists["belownominal_statsList_"][idx]
abovenominal_stats = baseResultsLists["abovenominal_statsList_"][idx]
lm_adj = baseResultsLists["lm_adjList_"][idx]
Dist_stats_df = pd.concat(
[Dist_stats_df, baseResultsLists["Distribution_statsList_"][idx]], axis=1
)
adjustmentTag = baseResultsLists["adjustmentTagList_"][idx]
if stabilityFlag:
TI_MBE_j_stability = ResultsLists_stability["TI_MBEList_stability_"][idx]
TI_Diff_j_stability = ResultsLists_stability["TI_DiffList_stability_"][idx]
TI_Diff_r_stability = ResultsLists_stability[
"TI_DiffRefBinsList_stability_"
][idx]
TI_RMSE_j_stability = ResultsLists_stability["TI_RMSEList_stability_"][idx]
RepTI_MBE_j_stability = ResultsLists_stability["RepTI_MBEList_stability_"][
idx
]
RepTI_Diff_j_stability = ResultsLists_stability[
"RepTI_DiffList_stability_"
][idx]
RepTI_Diff_r_stability = ResultsLists_stability[
"RepTI_DiffRefBinsList_stability_"
][idx]
RepTI_RMSE_j_stability = ResultsLists_stability[
"RepTI_RMSEList_stability_"
][idx]
rep_TI_results_1mps_stability = ResultsLists_stability[
"rep_TI_results_1mps_List_stability_"
][idx]
rep_TI_results_05mps_stability = ResultsLists_stability[
"rep_TI_results_05mps_List_stability_"
][idx]
TIbybin_stability = ResultsLists_stability["TIBinList_stability_"][idx]
TIbyRefbin_stability = ResultsLists_stability["TIRefBinList_stability_"][
idx
]
total_stats_stability = ResultsLists_stability[
"total_StatsList_stability_"
][idx]
belownominal_stats_stability = ResultsLists_stability[
"belownominal_statsList_stability_"
][idx]
abovenominal_stats_stability = ResultsLists_stability[
"abovenominal_statsList_stability_"
][idx]
lm_adj_stability = ResultsLists_stability["lm_adjList_stability_"][idx]
adjustmentTag_stability = ResultsLists_stability[
"adjustmentTagList_stability_"
][idx]
for i in ResultsLists_stability["Distribution_statsList_stability_"][idx]:
if isinstance(i, pd.DataFrame):
Dist_stats_df = pd.concat([Dist_stats_df, i], axis=1)
if cup_alphaFlag:
TI_MBE_j_stability_alpha_Ane = ResultsLists_stability_alpha_Ane[
"TI_MBEList_stability_alpha_Ane"
][idx]
TI_Diff_j_stability_alpha_Ane = ResultsLists_stability_alpha_Ane[
"TI_DiffList_stability_alpha_Ane"
][idx]
TI_Diff_r_stability_alpha_Ane = ResultsLists_stability_alpha_Ane[
"TI_DiffRefBinsList_stability_alpha_Ane"
][idx]
TI_RMSE_j_stability_alpha_Ane = ResultsLists_stability_alpha_Ane[
"TI_RMSEList_stability_alpha_Ane"
][idx]
RepTI_MBE_j_stability_alpha_Ane = ResultsLists_stability_alpha_Ane[
"RepTI_MBEList_stability_alpha_Ane"
][idx]
RepTI_Diff_j_stability_alpha_Ane = ResultsLists_stability_alpha_Ane[
"RepTI_DiffList_stability_alpha_Ane"
][idx]
RepTI_Diff_r_stability_alpha_Ane = ResultsLists_stability_alpha_Ane[
"RepTI_DiffRefBinsList_stability_alpha_Ane"
][idx]
RepTI_RMSE_j_stability_alpha_Ane = ResultsLists_stability_alpha_Ane[
"RepTI_RMSEList_stability_alpha_Ane"
][idx]
rep_TI_results_1mps_stability_alpha_Ane = ResultsLists_stability_alpha_Ane[
"rep_TI_results_1mps_List_stability_alpha_Ane"
][idx]
rep_TI_results_05mps_stability_alpha_Ane = ResultsLists_stability_alpha_Ane[
"rep_TI_results_05mps_List_stability_alpha_Ane"
][idx]
TIbybin_stability_alpha_Ane = ResultsLists_stability_alpha_Ane[
"TIBinList_stability_alpha_Ane"
][idx]
TIbyRefbin_stability_alpha_Ane = ResultsLists_stability_alpha_Ane[
"TIRefBinList_stability_alpha_Ane"
][idx]
total_stats_stability_alpha_Ane = ResultsLists_stability_alpha_Ane[
"total_StatsList_stability_alpha_Ane"
][idx]
belownominal_stats_stability_alpha_Ane = ResultsLists_stability_alpha_Ane[
"belownominal_statsList_stability_alpha_Ane"
][idx]
abovenominal_stats_stability_alpha_Ane = ResultsLists_stability_alpha_Ane[
"abovenominal_statsList_stability_alpha_Ane"
][idx]
lm_adj_stability_alpha_Ane = ResultsLists_stability_alpha_Ane[
"lm_adjList_stability_alpha_Ane"
][idx]
adjustmentTag_stability_alpha_Ane = ResultsLists_stability_alpha_Ane[
"adjustmentTagList_stability_alpha_Ane"
][idx]
for i in ResultsLists_stability_alpha_Ane[
"Distribution_statsList_stability_alpha_Ane"
][idx]:
if isinstance(i, pd.DataFrame):
Dist_stats_df = pd.concat([Dist_stats_df, i], axis=1)
if RSD_alphaFlag:
TI_MBE_j_stability_alpha_RSD = ResultsLists_stability_alpha_RSD[
"TI_MBEList_stability_alpha_RSD"
][idx]
TI_Diff_j_stability_alpha_RSD = ResultsLists_stability_alpha_RSD[
"TI_DiffList_stability_alpha_RSD"
][idx]
TI_Diff_r_stability_alpha_RSD = ResultsLists_stability_alpha_RSD[
"TI_DiffRefBinsList_stability_alpha_RSD"
][idx]
TI_RMSE_j_stability_alpha_RSD = ResultsLists_stability_alpha_RSD[
"TI_RMSEList_stability_alpha_RSD"
][idx]
RepTI_MBE_j_stability_alpha_RSD = ResultsLists_stability_alpha_RSD[
"RepTI_MBEList_stability_alpha_RSD"
][idx]
RepTI_Diff_j_stability_alpha_RSD = ResultsLists_stability_alpha_RSD[
"RepTI_DiffList_stability_alpha_RSD"
][idx]
RepTI_Diff_r_stability_alpha_RSD = ResultsLists_stability_alpha_RSD[
"RepTI_DiffRefBinsList_stability_alpha_RSD"
][idx]
RepTI_RMSE_j_stability_alpha_RSD = ResultsLists_stability_alpha_RSD[
"RepTI_RMSEList_stability_alpha_RSD"
][idx]
rep_TI_results_1mps_stability_alpha_RSD = ResultsLists_stability_alpha_RSD[
"rep_TI_results_1mps_List_stability_alpha_RSD"
][idx]
rep_TI_results_05mps_stability_alpha_RSD = ResultsLists_stability_alpha_RSD[
"rep_TI_results_05mps_List_stability_alpha_RSD"
][idx]
TIbybin_stability_alpha_RSD = ResultsLists_stability_alpha_RSD[
"TIBinList_stability_alpha_RSD"
][idx]
TIbyRefbin_stability_alpha_RSD = ResultsLists_stability_alpha_RSD[
"TIRefBinList_stability_alpha_RSD"
][idx]
total_stats_stability_alpha_RSD = ResultsLists_stability_alpha_RSD[
"total_StatsList_stability_alpha_RSD"
][idx]
belownominal_stats_stability_alpha_RSD = ResultsLists_stability_alpha_RSD[
"belownominal_statsList_stability_alpha_RSD"
][idx]
abovenominal_stats_stability_alpha_RSD = ResultsLists_stability_alpha_RSD[
"abovenominal_statsList_stability_alpha_RSD"
][idx]
lm_adj_stability_alpha_RSD = ResultsLists_stability_alpha_RSD[
"lm_adjList_stability_alpha_RSD"
][idx]
adjustmentTag_stability_alpha_RSD = ResultsLists_stability_alpha_RSD[
"adjustmentTagList_stability_alpha_RSD"
][idx]
for i in ResultsLists_stability_alpha_RSD[
"Distribution_statsList_stability_alpha_RSD"
][idx]:
if isinstance(i, pd.DataFrame):
Dist_stats_df = pd.concat([Dist_stats_df, i], axis=1)
ws = wb.create_sheet(title=sheetName)
rowNumber = 1
ws.cell(row=rowNumber, column=1, value="Adjusted RSD Regression Results")
ws.cell(row=rowNumber, column=2, value="m")
ws.cell(row=rowNumber, column=3, value="c")
ws.cell(row=rowNumber, column=4, value="r-squared")
ws.cell(row=rowNumber, column=5, value="mean difference")
ws.cell(row=rowNumber, column=6, value="mse")
ws.cell(row=rowNumber, column=7, value="rmse")
className = 1
if stabilityFlag:
for i in lm_adj_stability:
start = className * 8 + 1
adj_name = str(
"Adjusted RSD Regression Results, stability subset (TKE)"
+ "_"
+ "class_"
+ str(className)
)
ws.cell(row=rowNumber, column=start, value=adj_name)
ws.cell(row=rowNumber, column=start + 1, value="m")
ws.cell(row=rowNumber, column=start + 2, value="c")
ws.cell(row=rowNumber, column=start + 3, value="r-squared")
ws.cell(row=rowNumber, column=start + 4, value="mean difference")
ws.cell(row=rowNumber, column=start + 5, value="mse")
ws.cell(row=rowNumber, column=start + 6, value="rmse")
className += 1
className = 1
if cup_alphaFlag:
rowNumber = 13
for i in lm_adj_stability_alpha_Ane:
start = className * 8 + 1
adj_name = str(
"Adjusted RSD Regression Results, stability subset (cup alpha)"
+ "_"
+ "class_"
+ str(className)
)
ws.cell(row=rowNumber, column=start, value=adj_name)
ws.cell(row=rowNumber, column=start + 1, value="m")
ws.cell(row=rowNumber, column=start + 2, value="c")
ws.cell(row=rowNumber, column=start + 3, value="r-squared")
ws.cell(row=rowNumber, column=start + 4, value="mean difference")
ws.cell(row=rowNumber, column=start + 5, value="mse")
ws.cell(row=rowNumber, column=start + 6, value="rmse")
className += 1
className = 1
if RSD_alphaFlag:
rowNumber = 25
for i in lm_adj_stability_alpha_Ane:
start = className * 8 + 1
adj_name = str(
"Adjusted RSD Regression Results, stability subset (RSD alpha)"
+ "_"
+ "class_"
+ str(className)
)
ws.cell(row=rowNumber, column=start, value=adj_name)
ws.cell(row=rowNumber, column=start + 1, value="m")
ws.cell(row=rowNumber, column=start + 2, value="c")
ws.cell(row=rowNumber, column=start + 3, value="r-squared")
ws.cell(row=rowNumber, column=start + 4, value="mean difference")
ws.cell(row=rowNumber, column=start + 5, value="mse")
ws.cell(row=rowNumber, column=start + 6, value="rmse")
className += 1
# adjustment regression results
rowNumber = 2
for item in lm_adj.index.to_list():
ws.cell(row=rowNumber, column=1, value=item)
ws.cell(row=rowNumber, column=2, value=lm_adj["m"][item])
ws.cell(row=rowNumber, column=3, value=lm_adj["c"][item])
ws.cell(row=rowNumber, column=4, value=lm_adj["rsquared"][item])
ws.cell(row=rowNumber, column=5, value=lm_adj["difference"][item])
ws.cell(row=rowNumber, column=6, value=lm_adj["mse"][item])
ws.cell(row=rowNumber, column=7, value=lm_adj["rmse"][item])
rowNumber = rowNumber + 1
if stabilityFlag:
rowNumber = 2
className = 1
for i in range(0, len(lm_adj_stability)):
start = className * 8 + 1
try:
for item in lm_adj_stability[i].index.to_list():
ws.cell(row=rowNumber, column=start, value=item)
ws.cell(
row=rowNumber,
column=start + 1,
value=lm_adj_stability[i]["m"][item],
)
ws.cell(
row=rowNumber,
column=start + 2,
value=lm_adj_stability[i]["c"][item],
)
ws.cell(
row=rowNumber,
column=start + 3,
value=lm_adj_stability[i]["rsquared"][item],
)
ws.cell(
row=rowNumber,
column=start + 4,
value=lm_adj_stability[i]["difference"][item],
)
ws.cell(
row=rowNumber,
column=start + 5,
value=lm_adj_stability[i]["mse"][item],
)
ws.cell(
row=rowNumber,
column=start + 6,
value=lm_adj_stability[i]["rmse"][item],
)
rowNumber = rowNumber + 1
except:
pass
className = className + 1
rowNumber = 2
if cup_alphaFlag:
rowNumber = 14
className = 1
for i in range(0, len(lm_adj_stability_alpha_Ane)):
start = className * 8 + 1
try:
for item in lm_adj_stability_alpha_Ane[i].index.to_list():
ws.cell(row=rowNumber, column=start, value=item)
ws.cell(
row=rowNumber,
column=start + 1,
value=lm_adj_stability_alpha_Ane[i]["m"][item],
)
ws.cell(
row=rowNumber,
column=start + 2,
value=lm_adj_stability_alpha_Ane[i]["c"][item],
)
ws.cell(
row=rowNumber,
column=start + 3,
value=lm_adj_stability_alpha_Ane[i]["rsquared"][item],
)
ws.cell(
row=rowNumber,
column=start + 4,
value=lm_adj_stability_alpha_Ane[i]["difference"][item],
)
ws.cell(
row=rowNumber,
column=start + 5,
value=lm_adj_stability_alpha_Ane[i]["mse"][item],
)
ws.cell(
row=rowNumber,
column=start + 6,
value=lm_adj_stability_alpha_Ane[i]["rmse"][item],
)
rowNumber = rowNumber + 1
except:
pass
className = className + 1
rowNumber = 14
if RSD_alphaFlag:
rowNumber = 26
className = 1
for i in range(0, len(lm_adj_stability_alpha_RSD)):
start = className * 8 + 1
try:
for item in lm_adj_stability_alpha_RSD[i].index.to_list():
ws.cell(row=rowNumber, column=start, value=item)
ws.cell(
row=rowNumber,
column=start + 1,
value=lm_adj_stability_alpha_RSD[i]["m"][item],
)
ws.cell(
row=rowNumber,
column=start + 2,
value=lm_adj_stability_alpha_RSD[i]["c"][item],
)
ws.cell(
row=rowNumber,
column=start + 3,
value=lm_adj_stability_alpha_RSD[i]["rsquared"][item],
)
ws.cell(
row=rowNumber,
column=start + 4,
value=lm_adj_stability_alpha_RSD[i]["difference"][item],
)
ws.cell(
row=rowNumber,
column=start + 5,
value=lm_adj_stability_alpha_RSD[i]["mse"][item],
)
ws.cell(
row=rowNumber,
column=start + 6,
value=lm_adj_stability_alpha_RSD[i]["rmse"][item],
)
rowNumber = rowNumber + 1
except:
pass
className = className + 1
rowNumber = 26
rowNumber = 37
Result_df_mean_1mps = pd.DataFrame()
Result_df_mean_05mps = pd.DataFrame()
Result_df_std_1mps = pd.DataFrame()
Result_df_std_05mps = | pd.DataFrame() | pandas.DataFrame |
import os
import time
import pandas as pd
import numpy as np
import functools
from functools import reduce
def time_pass(func):
@functools.wraps(func)
def wrapper(*args, **kw):
time_begin = time.time()
result = func(*args, **kw)
time_stop = time.time()
time_passed = time_stop - time_begin
minutes, seconds = divmod(time_passed, 60)
hours, minutes = divmod(minutes, 60)
print('%s: %s:%s:%s' % (func.__name__, int(hours), int(minutes), int(seconds)))
return result
return wrapper
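# time_pass prints e.g. "complete_data: 0:2:13" (hours:minutes:seconds) once the wrapped call returns.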
@time_pass
def complete_data(the_dat_edge, the_dat_app, the_input_path):
"""
    Read the remaining split data files and append them to the data that was read in earlier.
"""
def read_big_table(path):
reader = pd.read_table(path, header=None, chunksize=10000)
data = pd.concat(reader, axis=0, ignore_index=True)
return data
    def read_edge(filename):  # helper to batch-read the split dat_edge files
tmp = read_big_table(os.path.join(the_input_path, "open_data/dat_edge/%s" % filename))
tmp.columns = ['from_id', 'to_id', 'info']
return tmp
dat_edge_names = ['dat_edge_%s' % str(x) for x in list(range(2, 12))]
dat_edge_left = reduce(lambda x, y: x.append(y),
(read_edge(filename) for filename in dat_edge_names))
    def read_app(filename):  # helper to batch-read the split dat_app files
tmp = read_big_table(os.path.join(the_input_path, "open_data/dat_app/%s" % filename))
tmp.columns = ['id', 'apps']
return tmp
dat_app_names = ['dat_app_%s' % str(x) for x in list(range(2, 8))]
dat_app_left = reduce(lambda x, y: x.append(y),
(read_app(filename) for filename in dat_app_names))
    dat_edge_1 = the_dat_edge.append(dat_edge_left)  # combine the first chunk with the remaining chunks
    dat_app_1 = the_dat_app.append(dat_app_left)  # combine the first chunk with the remaining chunks
return dat_edge_1, dat_app_1
@time_pass
def dummy_symbol(the_dat_symbol):
"""
    1. Pick out all possible first-level categories in dat_symbol into all_first,
    2. then, for each id, check whether each value in all_first appears among the first-level categories of its 'symbol' column, giving a 0-1 vector.
    3. Handle the first- plus second-level combinations in the same way; handling the second level on its own does not seem necessary.
"""
def get_first(string):
f_s = string.split(',')
first = set(list(map(lambda x: x.split('_')[0], f_s)))
return first
def get_second(string):
f_s = string.split(',')
second = set(list(map(lambda x: x.split('_')[1], f_s)))
return second
def get_both(string):
f_s = string.split(',')
return set(f_s)
def is_in_first(string):
f_s = string.split(',')
first = set(list(map(lambda x: x.split('_')[0], f_s)))
is_in = list(map(lambda x: x in first, all_first))
return is_in
def is_in_second(string):
f_s = string.split(',')
second = set(list(map(lambda x: x.split('_')[1], f_s)))
is_in = list(map(lambda x: x in second, all_second))
return is_in
def is_in_both(string):
f_s = set(string.split(','))
is_in = list(map(lambda x: x in f_s, all_both))
return is_in
tmp = the_dat_symbol['symbol'].unique()
    # collect all first-level categories and all first+second-level combinations
all_first = reduce(lambda x, y: x.union(y),
map(get_first, tmp))
all_second = reduce(lambda x, y: x.union(y),
map(get_second, tmp))
all_both = reduce(lambda x, y: x.union(y),
map(get_both, tmp))
    # build the 0-1 vector for each id and store it as a DataFrame
in_first_0 = pd.DataFrame(list(map(is_in_first, the_dat_symbol['symbol'])),
columns=all_first)
in_second_0 = pd.DataFrame(list(map(is_in_second, the_dat_symbol['symbol'])),
columns=all_second)
in_both_0 = pd.DataFrame(list(map(is_in_both, the_dat_symbol['symbol'])),
columns=all_both)
in_first_1 = pd.concat([the_dat_symbol[['id']], in_first_0], axis=1) + 0
in_second_1 = pd.concat([the_dat_symbol[['id']], in_second_0], axis=1) + 0
in_both_1 = pd.concat([the_dat_symbol[['id']], in_both_0], axis=1) + 0
return in_first_1, in_second_1, in_both_1
@time_pass
def deal_dat_edge(data_all):
"""
    1. Clean up dat_edge; running dat_edge.head(15) shows that rows like row 10 (several records packed into a single 'info' field) have to be separated from the rest,
    2. splitting the data into dat_edge_single and dat_edge_multi.
    3. Then reshape dat_edge_multi into the same format as dat_edge_single, called dat_edge_multi_new,
    4. and concatenate the two into dat_edge_new.
    5. Finally, dat_edge_split_2 splits 'info' into three parts: ['date', 'times', 'weight'].
"""
length = list(map(len, map(lambda x: x.split(','), data_all['info'])))
dat_edge_single = data_all[np.array(length) == 1]
dat_edge_multi = data_all[np.array(length) > 1]
def dat_edge_split(i):
i_info = dat_edge_multi.iloc[i]
string = i_info['info']
s = string.split(',')
result = pd.DataFrame({'info': s,
'from_id': [i_info['from_id']] * len(s),
'to_id': [i_info['to_id']] * len(s),
'id': [i_info['id']] * len(s)})
return result[['id', 'from_id', 'to_id', 'info']]
all_df = map(dat_edge_split, range(len(dat_edge_multi)))
    dat_edge_multi_new = pd.concat(all_df, axis=0, ignore_index=True)  # fairly slow
dat_edge_new = pd.concat([dat_edge_single, dat_edge_multi_new], axis=0, ignore_index=True)
# dat_edge_new = dat_edge_single.append(dat_edge_multi_new, ignore_index=True)
@time_pass
def dat_edge_split_2(data):
def split(string):
date, left = string.split(':')
times, weight = left.split('_')
return date, times, weight
info_df = pd.DataFrame(list(map(split, data['info'])),
columns=['date', 'times', 'weight'])
data_new_2 = pd.concat([data[['id', 'from_id', 'to_id']], info_df], axis=1)
return data_new_2
dat_edge_new_2 = dat_edge_split_2(dat_edge_new)
return dat_edge_new_2
@time_pass
def deal_edge(the_sample_train, the_dat_edge):
"""
    Extract each user's "outflow" features: vector length, sum of times, median/min/max of times,
    sum of weight, and median/min/max of weight, i.e. nine features describing the "outflow" side.
"""
col_names = (['length', 'unique_count', 'times_sum', 'weight_sum']
+ ['dup_ratio_left', 'dup_ratio_1', 'dup_ratio_2', 'dup_ratio_3', 'dup_ratio_4', 'dup_ratio_5']
+ ['times_left', 'times_1', 'times_2', 'times_3', 'times_4', 'times_5',
'times_6', 'times_7', 'times_8', 'times_9', 'times_10']
+ ['times_min', 'times_25', 'times_median', 'times_75', 'times_max']
+ ['weight_min', 'weight_25', 'weight_median', 'weight_75', 'weight_max']
+ ['times_up_out_ratio', 'times_low_out_ratio']
+ ['weight_up_out_ratio', 'weight_low_out_ratio']
+ ['time_sign_trend', 'time_abs', 'weight_sign_trend', 'weight_abs']
+ ['times_2017_11', 'times_2017_12', 'times_2017_13']
+ ['weight_2017_11', 'weight_2017_12', 'weight_2017_13']
+ ['date_unique_count', 'date_min', 'date_max', 'days_gap']
+ ['latest_times', 'latest_peoples', 'latest_weights', 'multi_ratio'])
sample_dat_edge_from = pd.merge(the_sample_train, the_dat_edge,
left_on='id', right_on='from_id',
how='inner')
dat_edge_from = deal_dat_edge(sample_dat_edge_from)
dat_edge_from['times'] = list(map(int, dat_edge_from['times']))
dat_edge_from['weight'] = list(map(float, dat_edge_from['weight']))
unique_id_from = np.unique(dat_edge_from['id'])
feature_9_1 = list(map(lambda x: cal_9_feature(x, dat_edge_from, 'to_id'), unique_id_from))
df_feature_9_1 = pd.DataFrame(feature_9_1, columns=['out_%s' % x for x in col_names])
df_feature_9_1['id'] = unique_id_from
    # Extract each user's "inflow" features; analogously to the above, nine "inflow" features can be extracted.
sample_dat_edge_to = pd.merge(the_sample_train, the_dat_edge,
left_on='id', right_on='to_id',
how='inner')
dat_edge_to = deal_dat_edge(sample_dat_edge_to)
dat_edge_to['times'] = list(map(int, dat_edge_to['times']))
dat_edge_to['weight'] = list(map(float, dat_edge_to['weight']))
unique_id_to = np.unique(dat_edge_to['id'])
feature_9_2 = list(map(lambda x: cal_9_feature(x, dat_edge_to, 'from_id'), unique_id_to))
df_feature_9_2 = pd.DataFrame(feature_9_2, columns=['in_%s' % x for x in col_names])
df_feature_9_2['id'] = unique_id_to
unique_id_both = list(set(unique_id_from).union(set(unique_id_to)))
feature_9_3 = list(map(lambda x: cal_both(x, dat_edge_from, dat_edge_to), unique_id_both))
df_feature_9_3 = pd.DataFrame(feature_9_3, columns=['both_%s' % x for x in col_names])
df_feature_9_3['id'] = unique_id_both
    # Next, merge df_feature_9_1, df_feature_9_2 and df_feature_9_3 side by side (outer join on 'id'),
    # then left-join the result onto sample_train.
the_df_feature_18 = reduce(lambda x, y: | pd.merge(x, y, on='id', how='outer') | pandas.merge |