| prompt (string, 19 to 1.03M chars) | completion (string, 4 to 2.12k chars) | api (string, 8 to 90 chars) |
| --- | --- | --- |
# -*- coding: utf-8 -*-
# pylint: disable=E1101
from collections import OrderedDict
from functools import partial
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pytest
from storefact import get_store_from_url
from plateau.core.dataset import DatasetMetadata
from plateau.core.uuid import gen_uuid
from plateau.io.eager import read_table
from plateau.io_components.metapartition import MetaPartition
from plateau.serialization import DataFrameSerializer
class NoPickle:
def __getstate__(self):
raise RuntimeError("do NOT pickle this object!")
def mark_nopickle(obj):
setattr(obj, "_nopickle", NoPickle())
def no_pickle_store(url):
store = get_store_from_url(url)
mark_nopickle(store)
return store
def no_pickle_factory(url):
return partial(no_pickle_store, url)
@pytest.fixture(params=["URL", "KeyValue", "Callable"])
def store_input_types(request, tmpdir):
url = f"hfs://{tmpdir}"
if request.param == "URL":
return url
elif request.param == "KeyValue":
return get_store_from_url(url)
elif request.param == "Callable":
return no_pickle_factory(url)
else:
raise RuntimeError(f"Encountered unknown store type {type(request.param)}")
def test_store_input_types(store_input_types, bound_store_dataframes):
from plateau.serialization.testing import get_dataframe_not_nested
dataset_uuid = "dataset_uuid"
df = get_dataframe_not_nested(10)
assert bound_store_dataframes(
[df],
dataset_uuid=dataset_uuid,
store=store_input_types,
partition_on=[df.columns[0]],
secondary_indices=[df.columns[1]],
)
def test_file_structure_dataset_v4(store_factory, bound_store_dataframes):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df_list = [df.copy(deep=True), df.copy(deep=True)]
dataset = bound_store_dataframes(
df_list, store=store_factory, dataset_uuid="dataset_uuid", metadata_version=4
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
store = store_factory()
assert len(store.keys()) == 4
assert "dataset_uuid/table/_common_metadata" in store
assert "dataset_uuid.by-dataset-metadata.json" in store
def test_file_structure_dataset_v4_partition_on(store_factory, bound_store_dataframes):
store = store_factory()
assert set(store.keys()) == set()
df = pd.DataFrame(
{"P": [1, 2, 3, 1, 2, 3], "L": [1, 1, 1, 2, 2, 2], "TARGET": np.arange(10, 16)}
)
df_list = [df.copy(deep=True), df.copy(deep=True)]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
partition_on=["P", "L"],
metadata_version=4,
)
assert isinstance(dataset, DatasetMetadata)
assert dataset.partition_keys == ["P", "L"]
assert len(dataset.partitions) == 12
store = store_factory()
actual_keys = set(store.keys())
assert len(actual_keys) == 14 # one per partition + json + schema
def test_store_dataframes_as_dataset(
store_factory, metadata_version, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df_list = [df.copy(deep=True), df.copy(deep=True)]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
secondary_indices=["P"],
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
assert "P" in dataset.indices
store = store_factory()
stored_dataset = DatasetMetadata.load_from_store("dataset_uuid", store)
assert dataset.uuid == stored_dataset.uuid
assert dataset.metadata == stored_dataset.metadata
assert dataset.partitions == stored_dataset.partitions
index_dct = stored_dataset.indices["P"].load(store).index_dct
assert sorted(index_dct.keys()) == list(range(0, 10))
counter = 0
for k in store.keys():
if "parquet" in k and "indices" not in k:
counter += 1
df_stored = DataFrameSerializer.restore_dataframe(key=k, store=store)
pdt.assert_frame_equal(df, df_stored)
assert counter == 2
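# Illustrative read-back sketch (not part of the original test), using the eager
# read_table imported above. It assumes read_table accepts the dataset uuid and a
# store (factory) and returns the whole table as a single DataFrame:
#   df_roundtrip = read_table("dataset_uuid", store_factory)
#   assert len(df_roundtrip) == 2 * len(df)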
def test_store_dataframes_as_dataset_empty_dataframe(
store_factory, metadata_version, df_all_types, bound_store_dataframes
):
"""
Test that writing an empty DataFrame succeeds.
In particular, this may fail due to overly strict schema validation.
"""
df_empty = df_all_types.drop(0)
assert df_empty.empty
df_list = [df_empty]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 1
store = store_factory()
stored_dataset = DatasetMetadata.load_from_store("dataset_uuid", store)
assert dataset.uuid == stored_dataset.uuid
assert dataset.metadata == stored_dataset.metadata
assert dataset.partitions == stored_dataset.partitions
df_stored = DataFrameSerializer.restore_dataframe(
key=next(iter(dataset.partitions.values())).files["table"], store=store
)
pdt.assert_frame_equal(df_empty, df_stored)
def test_store_dataframes_as_dataset_batch_mode(
store_factory, metadata_version, bound_store_dataframes
):
# TODO: Kick this out?
values_p1 = [1, 2, 3]
values_p2 = [4, 5, 6]
df = pd.DataFrame({"P": values_p1})
df2 = pd.DataFrame({"P": values_p2})
df_list = [[df, df2]]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
secondary_indices="P",
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
store = store_factory()
stored_dataset = DatasetMetadata.load_from_store(
"dataset_uuid", store
).load_all_indices(store)
assert dataset.uuid == stored_dataset.uuid
assert dataset.metadata == stored_dataset.metadata
assert dataset.partitions == stored_dataset.partitions
assert "P" in dataset.indices
def test_store_dataframes_as_dataset_auto_uuid(
store_factory, metadata_version, mock_uuid, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df_list = [df.copy(deep=True)]
dataset = bound_store_dataframes(
df_list, store=store_factory, metadata_version=metadata_version
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 1
stored_dataset = DatasetMetadata.load_from_store(
"auto_dataset_uuid", store_factory()
)
assert dataset.uuid == stored_dataset.uuid
assert dataset.metadata == stored_dataset.metadata
assert dataset.partitions == stored_dataset.partitions
def test_store_dataframes_as_dataset_mp_partition_on_none(
metadata_version, store, store_factory, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
mp = MetaPartition(label=gen_uuid(), data=df, metadata_version=metadata_version)
df_list = [None, mp]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
partition_on=["P"],
)
assert isinstance(dataset, DatasetMetadata)
assert dataset.partition_keys == ["P"]
assert len(dataset.partitions) == 10
assert dataset.metadata_version == metadata_version
stored_dataset = DatasetMetadata.load_from_store("dataset_uuid", store)
assert dataset == stored_dataset
def test_store_dataframes_partition_on(store_factory, bound_store_dataframes):
df = pd.DataFrame(
OrderedDict([("location", ["0", "1", "2"]), ("other", ["a", "a", "a"])])
)
# The first partition is empty; test this edge case
input_ = [df.head(0), df]
dataset = bound_store_dataframes(
input_,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=4,
partition_on=["other"],
secondary_indices="location",
)
assert len(dataset.partitions) == 1
assert len(dataset.indices) == 1
assert dataset.partition_keys == ["other"]
def _exception_str(exception):
"""
Extract the exception message, even if this is a re-throw of an exception
in distributed.
"""
if isinstance(exception, ValueError) and exception.args[0] == "Long error message":
return exception.args[1]
return str(exception)
@pytest.mark.parametrize(
"dfs,ok",
[
(
[
pd.DataFrame(
{
"P": pd.Series([1], dtype=np.int64),
"X": pd.Series([1], dtype=np.int64),
}
),
pd.DataFrame(
{
"P": pd.Series([2], dtype=np.int64),
"X": pd.Series([2], dtype=np.int64),
}
),
],
True,
),
(
[
pd.DataFrame(
{
"P": pd.Series([1], dtype=np.int64),
"X": pd.Series([1], dtype=np.int32),
}
),
pd.DataFrame(
{
"P": pd.Series([2], dtype=np.int64),
"X": pd.Series([2], dtype=np.int16),
}
),
],
True,
),
(
[
pd.DataFrame(
{
"P": pd.Series([1], dtype=np.int16),
"X": pd.Series([1], dtype=np.int64),
}
),
pd.DataFrame(
{
"P": pd.Series([2], dtype=np.int32),
"X": pd.Series([2], dtype=np.int64),
}
),
],
True,
),
(
[
pd.DataFrame(
{
"P": pd.Series([1], dtype=np.int64),
"X": pd.Series([1], dtype=np.int64),
}
),
pd.DataFrame(
{
"P": pd.Series([2], dtype=np.int64),
"X": pd.Series([2], dtype=np.uint64),
}
),
],
False,
),
(
[
pd.DataFrame(
{
"P": pd.Series([1], dtype=np.int64),
"X": pd.Series([1], dtype=np.int64),
}
),
pd.DataFrame(
{
"P": pd.Series([2], dtype=np.int64),
"X":
|
pd.Series([2], dtype=np.int64)
|
pandas.Series
|
import pandas as pd
import math
import numpy as np
from timeit import default_timer as timer
from tqdm import tqdm
pd.options.display.max_columns = 100
def calculateDistance(x1, y1, x2, y2):
dist = math.sqrt((x2 - x1)**2 + (y2 - y1)**2)
return dist
def cart2pol(x, y):
rho = np.sqrt(x**2 + y**2)
phi = np.arctan2(y, x)
return(rho, phi)
def pol2cart(rho, phi):
x = rho * np.cos(phi)
y = rho * np.sin(phi)
return(x, y)
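# Quick round-trip sanity check (illustrative, not part of the original script):
# converting to polar and back should recover the Cartesian coordinates.
_rho, _phi = cart2pol(3.0, 4.0)   # _rho == 5.0
assert np.allclose(pol2cart(_rho, _phi), (3.0, 4.0))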
def add_play_physics(play):
# Format columns
play['time'] = pd.to_datetime(play['time'])
# Distance
play['dis_meters'] = play['dis'] / 1.0936 # Add distance in meters
# Speed: positions are recorded every 0.1 s, so speed (m/s) = distance (m) / 0.1
play['v_mps'] = play['dis_meters'] / 0.1
# Angles to radians
play['dir_radians'] = play['dir'].apply(math.radians)
play['o_radians'] = play['o'].apply(math.radians)
average_weight_nfl_pounds = 245.86
average_weight_nfl_kg = average_weight_nfl_pounds * 0.45359237
# http://webpages.uidaho.edu/~renaes/251/HON/Student%20PPTs/Avg%20NFL%20ht%20wt.pdf
play['momentum'] = play['v_mps'] * average_weight_nfl_kg
play['momentum_x'] = pol2cart(play['momentum'], play['dir_radians'])[0]
play['momentum_y'] = pol2cart(play['momentum'], play['dir_radians'])[1]
return play
"""
This code loops through every play and:
1. For each moment in time of the play, for each player in the play:
- Finds the closest other player to them.
- Computes the resulting force of the two in relation to eachother.
- If the force is higher this indicates a higher danger probability.
"""
def calculate_position_details(moment):
return
def calculate_3_closest(play):
play_danger_df = pd.DataFrame()
for time, d in play.groupby('time'):
for role1, r1data in d.groupby(['role', 'gsisid']):
if r1data.shape[0] != 1:
print('ERROR: expected exactly one row for role {} at time {}'.format(role1, time))
# Loop through other roles to see the closest other player
min_dist = 1 # Distance threshold in yards; only pairs closer than this are recorded
for role2, r2data in d.groupby(['role', 'gsisid']):
if r2data['gsisid'].values[0] != role1[1]:
# Check to make sure r1 only has one value
if r2data.shape[0] != 1:
print('ERROR: expected exactly one row for role {} at time {}'.format(role2, time))
x1 = r1data['x'].values[0]
x2 = r2data['x'].values[0]
y1 = r1data['y'].values[0]
y2 = r2data['y'].values[0]
this_distance = calculateDistance(x1, y1, x2, y2)
if this_distance < min_dist:
# min_dist = this_distance
# closest_data = r2data
df = pd.merge(r1data,
r2data,
on='time',
suffixes=('', '_partner'))
df['distance_to_partner'] = this_distance
play_danger_df = pd.concat([play_danger_df, df])
play_danger_df = play_danger_df.reset_index()
play_danger_df['opp_momentum'] = np.sqrt(np.square(
play_danger_df['momentum_x'] - play_danger_df['momentum_x_partner']) +
np.square(play_danger_df['momentum_y'] - play_danger_df['momentum_y_partner']))
return play_danger_df
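# Illustrative usage sketch (assumes a tracking DataFrame `play_df` with the
# columns used above: time, x, y, dis, dir, o, role, gsisid):
#   play_df = add_play_physics(play_df)
#   danger_df = calculate_3_closest(play_df)
#   danger_df.sort_values('opp_momentum', ascending=False).head()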
pi =
|
pd.read_csv('../input/play_information.csv')
|
pandas.read_csv
|
#!/usr/bin/env python3
import os
import numpy as np
import pandas as pd
import loompy as lp
from sklearn.manifold import TSNE
from pyscenic.aucell import aucell
from pyscenic.genesig import Regulon, GeneSignature
from typing import List, Mapping, Sequence, Optional, Dict
from operator import attrgetter
from multiprocessing import cpu_count
from pyscenic.binarization import binarize
from itertools import chain, repeat, islice
import networkx as nx
import zlib
import base64
import json
import re
import sys
from collections import OrderedDict
class Embedding:
def __init__(
self, embedding: pd.DataFrame, embedding_name: str, is_default: bool = False
):
self.embedding = embedding
self.embedding_name = embedding_name
self._is_default = is_default
def get_embedding_name(self):
return self.embedding_name
def get_embedding(self):
return self.embedding
def is_default(self):
return self._is_default
class SCopeLoom:
def __init__(
self,
filename: str = None,
out_fname: str = None,
ex_mtx: pd.DataFrame = None,
regulons: List[Regulon] = None,
cell_annotations: Optional[Mapping[str, str]] = None,
tree_structure: Sequence[str] = None,
title: Optional[str] = None,
nomenclature: str = "Unknown",
num_workers: int = cpu_count(),
auc_mtx=None,
auc_regulon_weights_key="gene2weight",
auc_thresholds=None,
compress: bool = False,
save_additional_regulon_meta_data: bool = False, # Should be set to true only for multi-runs SCENIC
set_generic_loom: bool = False,
tag: str = None, # Used when merging track and motif-based SCENIC run
col_attrs: Mapping[str, np.ndarray] = None,
row_attrs: Mapping[str, np.ndarray] = None,
global_attrs: Dict = None,
embeddings: Mapping[str, pd.DataFrame] = None,
):
self.filename = filename
self.out_fname = out_fname
self.ex_mtx = ex_mtx
self.regulons = regulons
self.cell_annotations = cell_annotations
self.tree_structure = tree_structure if tree_structure else ()
self.title = title
self.nomenclature = nomenclature
self.num_workers = num_workers
self.auc_mtx = auc_mtx
self.auc_regulon_weights_key = auc_regulon_weights_key
self.auc_thresholds = auc_thresholds
self.compress = compress
self.save_additional_regulon_meta_data = save_additional_regulon_meta_data
self.tag = tag
self.regulon_filter = None
# Loom representation
self.col_attrs = col_attrs if col_attrs else {}
self.row_attrs = row_attrs if row_attrs else {}
self.global_attrs = global_attrs if global_attrs else {}
# Internal representation
self.embeddings = embeddings if embeddings else {}
# Utils
self.id2name = OrderedDict()
# Set Base Loom
if set_generic_loom:
self.set_generic_loom()
def set_generic_loom(self):
if self.regulons[0].name.find("_") == -1:
print(
"Regulon name does not seem to be compatible with SCOPE. It should include a space to allow selection of the TF.",
"\nPlease run: \n regulons = [r.rename(r.name.replace('(+)','_('+str(len(r))+'g)')) for r in regulons]",
"\nor:\n regulons = [r.rename(r.name.replace('(','_(')) for r in regulons]",
)
self.set_cell_annotations()
if self.auc_mtx is None:
self.auc_mtx = self.calculate_regulon_enrichment()
if self.auc_thresholds is None:
self.auc_thresholds = self.binarize_regulon_enrichment()
if len(self.embeddings.keys()) == 0:
self.create_loom_default_embedding()
self.regulon_gene_assignment = self.create_loom_regulon_gene_assignment()
self.ngenes = self.calculate_nb_genes_per_cell()
self.clusterings = self.create_loom_clusterings()
self.regulon_thresholds = self.create_loom_md_regulon_thresholds()
self.set_generic_col_attrs()
self.set_generic_row_attrs()
self.set_generic_global_attrs()
self.set_tree()
#######
# I/O #
#######
@staticmethod
def read_loom(filename: str, tag: str = None):
with lp.connect(filename, mode="r", validate=False) as loom:
# Load the content into memory
# Set the main matrix
ex_mtx = pd.DataFrame(
loom[:, :], index=loom.ra.Gene, columns=loom.ca.CellID
).T
# Set the column, row and global attribute using the underlying Dict of the AttributeManager
col_attrs = {k: v for k, v in loom.ca.items()}
row_attrs = {k: v for k, v in loom.ra.items()}
global_attrs = {k: v for k, v in loom.attrs.items()}
# Decompress and decode the MetaData global attribute
try:
global_attrs["MetaData"] = SCopeLoom.decompress_decode(
value=global_attrs["MetaData"]
)
except Exception:
# MetaData is uncompressed
global_attrs["MetaData"] = json.loads(global_attrs["MetaData"])
scope_loom = SCopeLoom(
filename=filename,
ex_mtx=ex_mtx,
col_attrs=col_attrs,
row_attrs=row_attrs,
global_attrs=global_attrs,
tag=tag,
)
if "embeddings" in scope_loom.get_meta_data():
scope_loom.convert_loom_embeddings_repr_to_internal_repr()
# If multi-runs mode
is_multi_runs_mode = scope_loom.has_scenic_multi_runs_data()
if is_multi_runs_mode:
scope_loom.set_scenic_min_genes_regulon(
min_genes_regulon=global_attrs["MetaData"]["regulonSettings"][
"min_genes_regulon"
]
)
scope_loom.set_scenic_min_regulon_gene_occurrence(
min_regulon_gene_occurrence=global_attrs["MetaData"]["regulonSettings"][
"min_regulon_gene_occurrence"
]
)
return scope_loom
#############
# Meta Data #
#############
def add_meta_data(self, _dict):
md = self.global_attrs["MetaData"]
md.update(_dict)
self.global_attrs["MetaData"] = md
def get_meta_data(self):
return self.global_attrs["MetaData"]
def set_tree(self):
assert len(self.tree_structure) <= 3, "tree_structure may contain at most 3 levels"
self.global_attrs.update(
("SCopeTreeL{}".format(idx + 1), category)
for idx, category in enumerate(
list(islice(chain(self.tree_structure, repeat("")), 3))
)
)
#############
# Features #
#############
def get_genes(self):
return self.row_attrs["Gene"]
################
# Observations #
################
def get_cell_ids(self):
return self.col_attrs["CellID"]
###############
# Annotations #
###############
def set_cell_annotations(self):
if self.cell_annotations is None:
self.cell_annotations = dict(
zip(self.ex_mtx.index.astype(str), ["-"] * self.ex_mtx.shape[0])
)
###########
# Metrics #
###########
def calculate_nb_genes_per_cell(self):
# Calculate the number of genes per cell.
binary_mtx = self.ex_mtx.copy()
binary_mtx[binary_mtx != 0] = 1.0
return binary_mtx.sum(axis=1).astype(int)
def add_metrics(self, metrics: List[str]):
md_metrics = []
for metric in metrics:
md_metrics.append({"name": metric})
self.global_attrs["MetaData"].update({"metrics": md_metrics})
##############
# Embeddings #
##############
@staticmethod
def get_embedding_id(embedding: Embedding, _list):
"""Returns the appropriate index as a string given the _list
Parameters:
None
Returns:
str: Returns -1 if the given embedding is the default, 0 if the given _list is empty and length of the given _list minus 1
"""
if embedding.is_default():
return "-1"
elif len(_list) == 0:
return "0"
else:
return str(len(_list) - 1)
def convert_loom_embeddings_repr_to_internal_repr(self):
for embedding in self.get_meta_data()["embeddings"]:
self.add_embedding(
embedding=self.get_embedding_by_id(embedding_id=embedding["id"]),
embedding_name=embedding["name"],
is_default=True if str(embedding["id"]) == "-1" else False,
)
def get_embedding_by_id(self, embedding_id):
if str(embedding_id) == "-1":
return self.col_attrs["Embedding"]
x = self.col_attrs["Embeddings_X"][str(embedding_id)]
y = self.col_attrs["Embeddings_Y"][str(embedding_id)]
return np.column_stack((x, y))
def has_embedding(self, embedding_name):
return embedding_name in self.embeddings.keys()
def add_embedding(
self, embedding: np.ndarray, embedding_name, is_default: bool = False
):
df_embedding = pd.DataFrame(
embedding, columns=["_X", "_Y"], index=self.ex_mtx.index
)
_embedding = Embedding(
embedding=df_embedding, embedding_name=embedding_name, is_default=is_default
)
if is_default:
self.default_embedding = _embedding
self.embeddings[embedding_name] = _embedding
def create_loom_default_embedding(self):
# Create an embedding based on tSNE.
# Name of columns should be "_X" and "_Y".
self.add_embedding(
embedding=TSNE().fit_transform(self.auc_mtx),
embedding_name="tSNE (default)",
is_default=True,
)
def create_loom_md_embeddings_repr(self):
"""Returns a Dictionary (Dict) for the global meta data embeddings with preformated data to be stored in Loom file format and compatible with SCope.
Parameters:
None
Returns:
dict: A Dictionary (Dict) for the global meta data embeddings with preformated data to be stored in Loom file format and compatible with SCope.
"""
md_embeddings = []
for _, embedding in self.embeddings.items():
if embedding.is_default():
md_embeddings = md_embeddings + [
{"id": "-1", "name": embedding.get_embedding_name()}
]
else:
md_embeddings = md_embeddings + [
{
"id": SCopeLoom.get_embedding_id(
embedding=embedding, _list=md_embeddings
),
"name": embedding.get_embedding_name(),
}
]
return {"embeddings": md_embeddings}
def create_loom_ca_embeddings_repr(self):
"""Returns a Dictionary (Dict) for the embeddings with preformated data to be stored in Loom file format and compatible with SCope.
Parameters:
None
Returns:
dict: A Dictionary (Dict) for the embeddings with preformated data to be stored in Loom file format and compatible with SCope.
"""
default_embedding = None
embeddings_X = pd.DataFrame(index=self.ex_mtx.index)
embeddings_Y =
|
pd.DataFrame(index=self.ex_mtx.index)
|
pandas.DataFrame
|
import geopandas as gpd
import os
import pandas as pd
import numpy as np
path = r'T:\MPO\RTP\FY20 2045 Update\Data and Resources\Data\GISData\Updated'
datapath = r'T:\MPO\RTP\FY20 2045 Update\Data and Resources\Data\ForAppendixF'
categories = ['Auto']*7 + ['Transit']*2 + ['Bike/Ped']*4
project_types = ['Added Freeway Lanes or Major Interchange Improvements',
'Arterial Capacity Improvements',
'New Arterial Link or Interchange',
'New Collectors',
'Study',
'Transit Oriented Development Implementation',
'Urban Standards',
'Frequent Transit Network',
'Stations',
'Multi-Use Paths Without Road Project',
'Multi-Use Paths With Road Project',
'On-Street Lanes or Routes With Road Project',
'On-Street Lanes or Routes Without Road Project']
varcats = ['EJ', 'Cultural Resources', 'Air Quality', 'Water Quality',
'Sensitive Habitat', 'Hazard Mitigation', 'MPO Area']
folders = ['EJ', 'Historic', 'AirQuality', 'WaterQuality',
'SensitiveHabitats', 'NaturalHazards', 'RTP']
filelist = [['equity_area.shp'],
['NationalRegisterHistoricDistrictsCLMPO.shp',
'NationalRegisterHistoricSitesCLMPO.shp'],
['AirQualityMaintenanceArea.shp'],
['DEQ303dListedStreams.shp',
'SWV_GWMA.shp',
'NavigableRivers.shp',
'wetlandsCLMPO.shp'],
['ODFW_COAs_CLMPO.shp',
'CRITHAB_CLMPO.shp'],
['Flood100yearCLMPO.shp',
'EarthquakeLayer.shp'],
['MPO_Boundary.shp']]
keywordlist = [['equity'],
['HistoricDistricts', 'HistoricSites'],
['DEQ', 'SWV', 'Rivers', 'wetlands'],
['ODFW', 'CRITHAB'],
['Flood', 'Earthquake']]
varnmlist = [['Communities of Concern'],
['Historic Districts', 'Historic Sites'],
['303d Streams', 'GWMA', 'Navigable Rivers', 'Wetlands'],
['Conservation Opportunity Areas', 'USFWS Critical Habitat'],
['FEMA Flood Hazard', 'Seismic Zones']]
n = [0, 1, 3, 4, 5]
outnames = ['Communities_of_Concern', 'Cultural_Resources', 'Water_Quality',
'Sensitive_Habitat', 'Hazard_Mitigation']
# total number of RTP projects is 247
tot_rtp_prj = 247
# get the summary table
def sum_RTP(export=False):
for varcat in varcats:
k = varcats.index(varcat)
df = sum_RTP_for_each_env_category(varcat=varcat, folder=folders[k], files=filelist[k])
if k==0:
ndf = df
else:
ndf = pd.concat([ndf, df[[varcat]]], axis=1)
#print(varcat)
tdf = pd.concat([pd.DataFrame(data={'Project Category': [''], 'Project Type': ['TOTAL']}),
pd.DataFrame(ndf[ndf.columns[2:]].apply(np.sum, axis=0)).T], axis=1)
pdf = pd.concat([pd.DataFrame(data={'Project Category': [''], 'Project Type': ['PERCENT OF ALL CONSTRAINED PROJECTS']}),
pd.DataFrame(ndf[ndf.columns[2:]].apply(lambda x: int(sum(x)/tot_rtp_prj*100+0.5), axis=0)).T], axis=1)
ndf = pd.concat([ndf, tdf, pdf])
#print(ndf)
if export:
ndf.to_csv(os.path.join(datapath, 'Tables', 'Summary.csv'), index=False)
return ndf
# summarize the number of projects when the env factor includes multiple layers
def sum_RTP_for_each_env_category(varcat = 'Cultural Resources', folder = 'Historic',
files = ['NationalRegisterHistoricDistrictsCLMPO.shp',
'NationalRegisterHistoricSitesCLMPO.shp']):
df = pd.DataFrame(data={'Project Category': categories, 'Project Type': project_types})
df[varcat] = 0
# rwdf: roadway dataframe
rwdf = sum_RTP_by_mode(mode='roadway', folder=folder, varcat=varcat, files=files)
if isinstance(rwdf, pd.DataFrame):
for ind in list(rwdf.index):
df.loc[df['Project Type'] == ind, varcat] = rwdf.loc[ind, varcat]
# bpdf: bike/ped dataframe
bpdf = sum_RTP_by_mode(mode='bikeped', folder=folder, varcat=varcat, files=files)
if isinstance(bpdf, pd.DataFrame):
for ind in list(bpdf.index):
df.loc[df['Project Type'] == ind, varcat] = bpdf.loc[ind, varcat]
transitdf = sum_RTP_by_mode(mode='transit', folder=folder, varcat=varcat, files=files)
df.loc[df['Project Type'] == 'Frequent Transit Network', varcat] = transitdf.loc['Frequent Transit Network', varcat]
df.loc[df['Project Type'] == 'Stations', varcat] = transitdf.loc['Stations', varcat]
return df
def sum_RTP_by_mode(mode = 'roadway', folder = 'Historic',
varcat = 'Cultural Resources',
files = ['NationalRegisterHistoricDistrictsCLMPO.shp',
'NationalRegisterHistoricSitesCLMPO.shp']):
if mode == 'roadway':
line_res = sum_RTP_by_shp(shapefile='Roadway_lines', folder=folder,
varcat=varcat, files=files)
pnt_res = sum_RTP_by_shp(shapefile='Roadway_points', folder=folder,
varcat=varcat, files=files)
elif mode == 'bikeped':
line_res = sum_RTP_by_shp(shapefile='BikePed', folder=folder,
varcat=varcat, files=files)
pnt_res = sum_RTP_by_shp(shapefile='BikePed_points', folder=folder,
varcat=varcat, files=files)
elif mode == 'transit':
ftn = []
sta = []
for file in files:
ftndf = RTP_counted_by_intersection(shapefile='FrequentTransitNetwork',
folder=folder, file=file,
transit=True, ftn=True)
stadf = RTP_counted_by_intersection(shapefile='stations',
folder=folder, file=file, transit=True)
ftn+=list(ftndf.index)
sta+=list(stadf.index)
df = pd.DataFrame(data={varcat: [len(unique(ftn)), len(unique(sta))]})
df.index = ['Frequent Transit Network', 'Stations']
if mode == 'transit':
res = df
else:
if isinstance(line_res, pd.DataFrame) and isinstance(pnt_res, pd.DataFrame):
for ind in list(pnt_res.index):
line_res.loc[ind, varcat] = line_res.loc[ind, varcat] + pnt_res.loc[ind, varcat]
res = line_res
else:
if isinstance(line_res, pd.DataFrame):
res = line_res
elif isinstance(pnt_res, pd.DataFrame):
res = pnt_res
else:
res = 0
return res
# function to get unique values
def unique(list1):
# initialize an empty list
unique_list = []
# traverse for all elements
for x in list1:
# check if exists in unique_list or not
if x not in unique_list:
unique_list.append(x)
return(unique_list)
def sum_RTP_by_shp(shapefile='Roadway_lines', folder = 'Historic',
varcat = 'Cultural Resources',
files = ['NationalRegisterHistoricDistrictsCLMPO.shp',
'NationalRegisterHistoricSitesCLMPO.shp']):
for file in files:
df = RTP_counted_by_intersection(shapefile=shapefile, folder=folder,
file=file, returnID=True)
if file==files[0]:
ndf = df
else:
if ndf.shape[0] == 0:
ndf = ndf.append(df)
else:
if df.shape[0] != 0:
for ind in list(df.index):
if ind in list(ndf.index):
ndf.loc[ind, 'RTP_ID'].update(df.loc[ind, 'RTP_ID'])
else:
sdf =
|
pd.DataFrame(df.loc[ind, :])
|
pandas.DataFrame
|
# -*- encoding: utf-8 -*-
"""
All functionality related to ARMA models.
"""
from __future__ import division, print_function, absolute_import,\
unicode_literals
import re
import logging
import operator
import itertools
import six
import numpy as np
import pandas as pd
from numpy import linalg
from scipy import optimize
from six.moves import xrange
from . import utils
from . import stats
from .utils import UnicodeMixin
__author__ = "<NAME>"
__copyright__ = "Blue Yonder"
__license__ = "new BSD"
_logger = logging.getLogger(__name__)
class ARMAError(Exception, UnicodeMixin):
def __unicode__(self):
return self.message
class ARMA(UnicodeMixin):
"""
A(L)y(t) = B(L)e(t) + C(L)u(t) - TREND(t)
* L: Lag/Shift operator,
* A: (axpxp) tensor to define auto-regression,
* B: (bxpxp) tensor to define moving-average,
* C: (cxpxm) tensor for external input,
* e: (txp) matrix of unobserved disturbance (white noise),
* y: (txp) matrix of observed output variables,
* u: (mxt) matrix of input variables,
* TREND: (txp) matrix like y or a p-dim vector.
If B is not set, fall back to VAR, i.e. B(L) = I.
"""
def __init__(self, A, B=None, C=None, TREND=None, rand_state=None):
self.A = np.asarray(A[0]).reshape(A[1], order='F')
if B is not None:
self.B = np.asarray(B[0]).reshape(B[1], order='F')
else:
# Set B(L) = I
shape = A[1][1:]
self.B = np.empty(shape=np.hstack(([1], shape)))
self.B[0] = np.eye(*shape)
if C is not None:
self.C = np.asarray(C[0]).reshape(C[1], order='F')
else:
self.C = np.empty((0, 0, 0))
if TREND is not None:
self.TREND = np.asarray(TREND)
else:
self.TREND = None
self._check_consistency()
self.Aconst = np.zeros(self.A.shape, dtype=np.bool)
self.Bconst = np.zeros(self.B.shape, dtype=np.bool)
self.Cconst = np.zeros(self.C.shape, dtype=np.bool)
if rand_state is None:
self.rand = np.random.RandomState()
elif isinstance(rand_state, np.random.RandomState):
self.rand = rand_state
else:
self.rand = np.random.RandomState(rand_state)
def _get_num_non_consts(self):
a = np.sum(~self.Aconst)
b = np.sum(~self.Bconst)
c = np.sum(~self.Cconst)
return a, b, c
@property
def non_consts(self):
"""
Parameters of the ARMA model that are non-constant.
:return: array
"""
a = self.A[~self.Aconst]
b = self.B[~self.Bconst]
c = self.C[~self.Cconst]
return np.hstack([a, b, c])
@non_consts.setter
def non_consts(self, values):
"""
Set the parameters of the ARMA model that are non-constant.
:param values: array
"""
parts = np.cumsum(self._get_num_non_consts())
if values.size != parts[2]:
raise ARMAError("Number of values does not equal number "
"of non-constants")
self.A[~self.Aconst] = values[:parts[0]]
self.B[~self.Bconst] = values[parts[0]:parts[1]]
self.C[~self.Cconst] = values[parts[1]:parts[2]]
def _check_consistency(self):
A, B, C, TREND = self.A, self.B, self.C, self.TREND
if A is None:
raise ARMAError("A needs to be set for an ARMA model")
n = A.shape[1]
if n != A.shape[2] or A.ndim > 3:
raise ARMAError("A needs to be of shape (a, p, p)")
if n != B.shape[1] or (n != B.shape[2] or B.ndim > 3):
raise ARMAError("B needs to be of shape (b, p, p) with A being "
"of shape (a, p, p)")
if C.size != 0 and (n != C.shape[1] or C.ndim > 3):
raise ARMAError("C needs to be of shape (c, p, m) with A being "
"of shape (a, p, p)")
if TREND is not None:
if len(TREND.shape) > 2:
raise ARMAError("TREND needs to of shape (t, p) with A being "
"of shape (a, p, p)")
elif len(TREND.shape) == 2 and n != TREND.shape[1]:
raise ARMAError("TREND needs to of shape (t, p) with A being "
"of shape (a, p, p)")
elif len(TREND.shape) == 1 and n != TREND.shape[0]:
raise ARMAError("TREND needs to of shape (t, p) with A being "
"of shape (a, p, p)")
def _get_noise(self, samples, p, lags):
w0 = self.rand.normal(size=lags * p).reshape((lags, p))
w = self.rand.normal(size=samples * p).reshape((samples, p))
return w0, w
def _prep_trend(self, dim_t, dim_p, t0=0):
trend = self.TREND
if trend is not None:
if trend.ndim == 2:
assert trend.shape[1] == dim_p
if not trend.shape[0] >= t0+dim_t:
raise ARMAError("TREND needs to be available until "
"t={}".format(t0+dim_t-1))
trend = trend[t0:t0+dim_t, :]
return trend
else:
return np.tile(trend, (dim_t, 1))
else:
return np.zeros((dim_t, dim_p))
def simulate(self, y0=None, u0=None, u=None, sampleT=100, noise=None):
"""
Simulate an ARMA model.
:param y0: lagged values of y prior to t=0 in reversed order
:param u0: lagged values of u prior to t=0 in reversed order
:param u: external input time series
:param sampleT: length of the sample to simulate
:param noise: tuple (w0, w) of a random noise time series. w0 are the
lagged values of w prior to t=0 in reversed order. By default a normal
distribution for the white noise is assumed.
:return: simulated time series as array
"""
p = self.A.shape[1]
a, b = self.A.shape[0], self.B.shape[0]
c, m = self.C.shape[0], self.C.shape[2]
y0 = utils.atleast_2d(y0) if y0 is not None else np.zeros((a, p))
u = utils.atleast_2d(u) if u is not None else np.zeros((c, m))
u0 = utils.atleast_2d(u0) if u0 is not None else np.zeros((c, m))
if noise is None:
noise = self._get_noise(sampleT, p, b)
w0, w = noise
assert y0.shape[0] >= a
assert w0.shape[0] >= b
assert u0.shape[0] >= c
# diagonalize with respect to matrix of A's leading coefficients
A0inv = linalg.inv(self.A[0, :, :])
A = np.tensordot(self.A, A0inv, axes=1)
B = np.tensordot(self.B, A0inv, axes=1)
if c != 0:
C = np.einsum('ijk,kl', self.C, A0inv)
else:
C = np.zeros((c, p, m))
# prepend start values to the series
y = self._prep_trend(sampleT, p)
y = np.vstack((y0[a::-1, ...], y))
w = np.vstack((w0[b::-1, ...], w))
u = np.vstack((u0[c::-1, ...], u))
# perform simulation by multiplying the lagged matrices to the vectors
# and summing over the different lags
for t in xrange(a, sampleT+a):
y[t, :] -= np.einsum('ikj, ij', A[1:, ...], y[t-1:t-a:-1, :])
if b != 0:
y[t, :] += np.einsum('ikj, ij', B, w[t-a+b:t-a:-1, :])
if c != 0:
y[t, :] += np.einsum('ikj, ij', C, u[t-a+b:t-a:-1, :])
return y[a:]
def forecast(self, y, horizon=0, u=None):
"""
Calculate a one-step-ahead forecast.
:param y: output time series
:param horizon: number of predictions after y[T_max]
:param u: external input time series
:return: predicted time series as array
"""
p = self.A.shape[1]
a, b = self.A.shape[0], self.B.shape[0]
c, m = self.C.shape[0], self.C.shape[2]
u = u if u is not None else np.zeros((c, m))
y = utils.atleast_2d(y)
sampleT = y.shape[0]
predictT = sampleT + horizon
# diagonalize with respect to matrix of B's leading coefficients
B0inv = linalg.inv(self.B[0, :, :])
A = np.tensordot(self.A, B0inv, axes=1)
B = np.tensordot(self.B, B0inv, axes=1)
if c != 0:
C = np.einsum('ijk,kl', self.C, B0inv)
else:
C = np.zeros((c, p, m))
# calculate directly the residual ...
res = -np.dot(self._prep_trend(sampleT, p)[:sampleT, ...], B0inv)
# and perform prediction
for t in xrange(sampleT):
la, lb, lc = min(a-1, t), min(b-1, t), min(c-1, t)
ba, bb, bc = max(0, t-la), max(0, t-lb), max(0, t-lc)
res[t, :] += np.einsum('ikj,ij', A[la::-1, ...], y[ba:t+1, :])
if b != 0:
res[t, :] -= np.einsum('ikj,ij', B[lb:0:-1, ...], res[bb:t, :])
if c != 0:
res[t, :] -= np.einsum('ikj,ij', C[lc::-1, ...], u[bc:t+1, :])
pred = np.zeros((predictT, p))
pred[:sampleT, :] = y[:sampleT, :] - np.dot(res, B[0, :, :])
if predictT > sampleT:
A0inv = linalg.inv(self.A[0, :, :])
A = np.tensordot(self.A, A0inv, axes=1)
B = np.tensordot(self.B, A0inv, axes=1)
if c != 0:
C = np.einsum('ijk,kl', self.C, A0inv)
else:
C = np.zeros((c, p, m))
pred[sampleT:, :] = np.dot(self._prep_trend(horizon, p, sampleT),
A0inv)
# perform prediction for horizon period
for t in xrange(sampleT, predictT):
for l in xrange(1, a):
if t - l < sampleT:
pred[t, :] -= np.dot(A[l, :, :], y[t - l, :])
else:
pred[t, :] -= np.dot(A[l, :, :], pred[t - l, :])
for l in xrange(b):
if t - l < sampleT:
pred[t, :] += np.dot(B[l, :, :], res[t - l, :])
for l in xrange(c):
pred[t, :] += np.dot(C[l, :, :], u[t - l, :])
return pred
def fix_constants(self, fuzz=1e-5, prec=1):
"""
Fix some coefficients as constants depending on their value.
Coefficients whose absolute difference to their value rounded to
precision ``prec`` is smaller than ``fuzz`` are considered constants.
For example (with the defaults ``fuzz=1e-5``, ``prec=1``):
* 1.1 is constant since abs(1.1 - round(1.1, prec)) < fuzz
* 0.01 is non-constant since abs(0.01 - round(0.01, prec)) > fuzz
"""
@np.vectorize
def is_const(x):
return abs(x - round(x, prec)) < fuzz
def set_const(M, Mconst):
M_mask = is_const(M)
Mconst[M_mask] = True
Mconst[~M_mask] = False
set_const(self.A, self.Aconst)
set_const(self.B, self.Bconst)
if self.C.size != 0:
set_const(self.C, self.Cconst)
def est_params(self, y):
"""
Maximum likelihood estimation of the ARMA model's coefficients.
:param y: output series
:return: optimization result (:obj:`~scipy.optimize.OptimizeResult`)
"""
y = utils.atleast_2d(y)
def cost_function(x):
self.non_consts = x
pred = self.forecast(y=y)
return stats.negloglike(pred, y)
x0 = self.non_consts
return optimize.minimize(cost_function, x0)
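# Illustrative fitting workflow (hypothetical): freeze coefficients that look like
# deliberate constants (see fix_constants below), then estimate the remaining free
# coefficients by maximum likelihood on an observed series y:
#   model.fix_constants()
#   result = model.est_params(y)   # scipy.optimize.OptimizeResult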
def _lag_matrix_to_str(self, matrix):
# creates a string from a lag array
def join_with_lag(arr):
poly = str(arr[0])
for i, val in enumerate(arr[1:], start=1):
if val != 0.:
poly += '{:+.3}L{}'.format(val, i)
return poly
res_str = ''
_, j_max, k_max = matrix.shape
mat_str = np.empty((j_max, k_max), dtype=object)
for j, k in itertools.product(xrange(j_max), xrange(k_max)):
mat_str[j, k] = join_with_lag(matrix[:, j, k])
# determine width for each column and set columns to that width
col_widths = [max(map(len, mat_str[:, k])) for k in xrange(k_max)]
for k in xrange(k_max):
fmt = np.vectorize(lambda x: '{:<{}}'.format(x, col_widths[k]))
mat_str[:, k] = fmt(mat_str[:, k])
for j in xrange(j_max):
res_str += ' '.join(mat_str[j, :]) + '\n'
return res_str
def __unicode__(self):
desc = ''
TREND = self.TREND
if TREND is not None:
desc += 'TREND=\n'
if TREND.ndim == 1:
TREND = TREND[np.newaxis, :]
arr_str = np.array_str(np.transpose(TREND)) + '\n'*2
arr_str = re.sub(r' *\[+', '', arr_str)
arr_str = re.sub(r' *\]+', '', arr_str)
desc += arr_str
for mat_name in ('A', 'B', 'C'):
matrix = getattr(self, mat_name)
if matrix.shape[0] != 0:
desc += '{}(L) =\n'.format(mat_name)
desc += self._lag_matrix_to_str(matrix) + '\n'
return desc
def plot_forecast(self, all_y, horizon=0, u=None):
"""
Calculate a one-step-ahead forecast and plot prediction and truth.
:param all_y: full output time series (truth), including the final ``horizon`` values
:param horizon: number of predictions after y[T_max]
:param u: external input time series
:return: predicted time series as array
"""
def get_lags_idx(arr):
# First entry is always 1 and not part of the used lags
return [str(i) for i, v in enumerate(arr.flatten()) if v != 0][1:]
if horizon > 0:
y = all_y[:-horizon]
df = pd.DataFrame({
'Future': all_y[horizon:],
'Known Truth': y
})
else:
y = all_y
df =
|
pd.DataFrame({'Truth': all_y})
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 24 18:17:27 2021
@author: iannjari
"""
# import dependencies
import pandas as pd
import os
pwd= os.getcwd()
# fetch data
cases= pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv')
deaths= pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv')
# Start with the cases data for the map
# Drop the Province/State column, then group all rows by country
df1=cases.drop('Province/State',axis=1)
df1=df1.groupby(['Country/Region'],as_index=False).sum()
# Drop un-wanted columns
df1=df1.drop(['Lat','Long'],axis=1)
# Store this dataframe for later use
df1.to_excel (pwd+"\\..\\data\\cases.xlsx", index = False, header=True)
# Drop all data except last day
cols=df1[df1.columns[1:-1]]
df2=df1.drop(cols,axis=1)
# Read COUNTRY CODE data
pwd=os.getcwd()
df4=
|
pd.read_excel(pwd+"\\..\\data\\map_code1.xlsx")
|
pandas.read_excel
|
# Dynamically load factor calculation metrics; the factor performance metrics can be computed in a distributed way
import pdb, importlib, time
import numpy as np
import pandas as pd
from scipy import stats
from PyFin.api import *
from utilities.factor_se import *
from data.polymerize import DBPolymerize
from data.storage_engine import PerformanceStorageEngine, BenchmarkStorageEngine
from data.fetch_factor import FetchRLFactorEngine
class CalcEngine(object):
def __init__(self, name, url, methods=[{'packet': 'performance.basic_return', 'class': 'BasicReturn'},
{'packet': 'performance.icir', 'class': 'ICIR'},
{'packet': 'performance.other', 'class': 'Other'}]):
self._name = name
self._methods = methods
self._url = url
self._methods = {}
self._factor_columns = []
self._neutralized_styles = ['SIZE', 'Bank', 'RealEstate', 'Health', 'Transportation',
'Mining', 'NonFerMetal', 'HouseApp', 'LeiService', 'MachiEquip',
'BuildDeco', 'CommeTrade', 'CONMAT', 'Auto', 'Textile', 'FoodBever',
'Electronics', 'Computer', 'LightIndus', 'Utilities', 'Telecom',
'AgriForest', 'CHEM', 'Media', 'IronSteel', 'NonBankFinan', 'ELECEQP',
'AERODEF', 'Conglomerates']
for method in methods:
name = str(method['packet'].split('.')[-1])
self._methods[name] = method
def _stock_return(self, market_data):
market_data = market_data.set_index(['trade_date', 'security_code'])
market_data['close'] = market_data['close'] * market_data['lat_factor']
market_data = market_data['close'].unstack()
market_data = market_data.sort_index()
market_data = market_data.apply(lambda x: np.log(x.shift(-1) / x))
mkt_se = market_data.stack()
mkt_se.name = 'returns'
return mkt_se.dropna().reset_index()
def _index_return(self, index_data):
index_data = index_data.set_index(['trade_date'])
index_data = index_data.sort_index()
index_data['returns'] = np.log(index_data['close'].shift(-1) / index_data['close'])
return index_data.loc[:, ['returns']].dropna().reset_index()
def performance_preprocessing(self, benchmark_data, index_data, market_data, factor_data, exposure_data):
# Reworked index_se_dict: the per-benchmark DataFrames are dropped directly
# index_se_dict = {}
self._factor_columns = [i for i in factor_data.columns if i not in ['id', 'trade_date', 'security_code']]
# security_code_sets = index_data.security_code.unique()
# for security_code in security_code_sets:
# index_se = index_data.set_index('security_code').loc[security_code].reset_index()
# index_se_dict[security_code] = self._index_return(index_se)
index_rets = self._index_return(index_data)
mkt_se = self._stock_return(market_data)
mkt_se['returns'] = mkt_se['returns'].replace([np.inf, -np.inf], np.nan)
total_data = pd.merge(factor_data, exposure_data, on=['trade_date', 'security_code'])
total_data =
|
pd.merge(total_data, benchmark_data, on=['trade_date', 'security_code'])
|
pandas.merge
|
import logging
from collections.abc import Iterable
from typing import Optional, Union
from types import GeneratorType
from pandas import DataFrame, RangeIndex, Series
from pandas.api.types import is_numeric_dtype
from eda_report.exceptions import (
EmptyDataError,
InputError,
TargetVariableError,
)
def clean_column_labels(data: DataFrame) -> DataFrame:
"""Makes sure that *columns* have *meaningful* names.
When an ``Iterable`` is used to create a ``DataFrame`` and no column names
are provided, the column labels by default are set as a
:class:`~pandas.RangeIndex` — [0, 1, 2, ...].
This function renames such columns to ['var_1', 'var_2', 'var_3', ...],
making references and comparisons much more intuitive. It also ensures
that column labels are of type ``str`` to allow sorting and the use of
string methods.
Parameters
----------
data : DataFrame
Data to inspect and perhaps edit.
Returns
-------
DataFrame
The data, with reader-friendly column names.
"""
# Prepend "var_" to entirely numeric column labels
if isinstance(data.columns, RangeIndex) or is_numeric_dtype(data.columns):
data.columns = [f"var_{i+1}" for i in data.columns]
return data
# Ensure all column labels are of type str to allow sorting, and so that
# string methods can be used.
data.columns = [str(col) for col in data.columns]
return data
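# Illustrative check (not part of the original module): a DataFrame built from a
# plain iterable gets RangeIndex column labels, which are renamed here.
assert list(clean_column_labels(DataFrame([[1, 2], [3, 4]])).columns) == ["var_1", "var_2"]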
def warn_if_target_data_has_high_cardinality(
target_data: Series, threshold: int = 10
) -> None:
"""Check whether the ``target_data`` is suitable for color-coding or has
too many unique values (> ``threshold``).
Parameters
----------
target_data : Series
The data intended to color-code graphs.
threshold : int, optional
Maximum allowable cardinality, by default 10
"""
if target_data.nunique() > threshold:
logging.warning(
f"Target variable '{target_data.name}' not used to color-code "
"graphs since it has high cardinality "
f"({target_data.nunique()}) which would clutter graphs."
)
def validate_multivariate_input(data: Iterable) -> DataFrame:
"""Ensures that *multivariate input data* is of type :class:`pandas.DataFrame`.
If it isn't, this attempts to explicitly cast it as a ``DataFrame``.
Parameters
----------
data : Iterable
The data to analyse.
Returns
-------
DataFrame
The input data as a ``DataFrame``.
Raises
------
InputError
If the ``data`` cannot be cast as a :class:`~pandas.DataFrame`.
EmptyDataError
If the ``data`` has no rows (has length zero).
"""
if isinstance(data, DataFrame):
data_frame = data
else:
try:
data_frame = DataFrame(data)
except Exception:
raise InputError(
f"Expected a pandas.Dataframe object, but got {type(data)}."
)
# Attempt to infer better dtypes for object columns.
data_frame = data_frame.infer_objects()
# The data should not be empty
if len(data_frame) == 0:
raise EmptyDataError("The supplied data has length zero.")
return clean_column_labels(data_frame)
def validate_univariate_input(
data: Iterable, *, name: Optional[str] = None
) -> Series:
"""Ensures that *univariate input data* is of type :class:`pandas.Series`.
If it isn't, this attempts to explicitly cast it as a ``Series``.
Parameters
----------
data : Iterable
The data to analyse.
name : Optional[str]
The name to assign the data, by default None.
Returns
-------
Series
The input data as a ``Series``.
Raises
------
InputError
If the ``data`` cannot be cast as a :class:`~pandas.Series`.
"""
if isinstance(data, GeneratorType):
return Series(data, name=name)
elif issubclass(type(data), Iterable) and len(list(data)) > 0:
if isinstance(data, Series):
name_ = name or data.name
return data.rename(name_)
else:
try:
data =
|
Series(data, name=name)
|
pandas.Series
|
# -*- coding: utf-8 -*-
# @Author : liaozhi
# @Time : 2021-07-01
# @Contact : <EMAIL>
"""
ID feature processing (id_map) and pre-trained embeddings
"""
# packages
import numpy as np
import pandas as pd
from functools import reduce
from gensim.models import Word2Vec
from config import *
def get_feed_embedding():
"""
Feed embedding features
:return:
"""
# 1. Load the data
feed_embed = pd.read_csv(DATA_HOME + 'feed_embeddings.csv', header=0, index_col=False)
feed_embed['feed_embedding'] = feed_embed['feed_embedding'].apply(lambda x: [eval(_) for _ in x.strip().split(' ')])
# 2. Process the data
feed_map = dict()
feed_list = feed_embed['feedid'].unique().tolist()
feed_embedding_matrix = np.random.uniform(size=(len(feed_list) + 1, 512)) # matrix[0] for NAN
for idx, feed in enumerate(feed_list, 1):
feed_map[feed] = idx
feed_embedding_matrix[idx] = np.array(feed_embed.loc[feed_embed.feedid == feed, 'feed_embedding'].tolist()[0])
# 3. Save
file_path = join(SAVE_HOME, 'feature', 'feed_embedding.pkl')
with open(file_path, 'wb') as file:
pickle.dump(feed_map, file, pickle.HIGHEST_PROTOCOL)
pickle.dump(feed_embedding_matrix, file, pickle.HIGHEST_PROTOCOL)
return feed_embedding_matrix, feed_map
def process_sequence_feature(id_list, id_map=None, maxlen=5):
"""
Process a sequence feature (category-encode, then pad or truncate an id list).
:param id_list: id sequence (a single cell of a Series)
:param id_map: dict, category encoding mapping id -> index
:param maxlen: int, target sequence length
:return:
"""
# 1. Check the data type
if not isinstance(id_list, list):
return [0] * maxlen # 0 is a mask value
# 2. Category encoding
if id_map:
idx_list = [id_map.get(id_, 0) for id_ in id_list]
else:
idx_list = id_list
# 3. Padding
if len(idx_list) >= maxlen:
idx_list = idx_list[:maxlen]
else:
idx_list = np.pad(idx_list, pad_width=(0, maxlen - len(idx_list)), constant_values=0).tolist()
return idx_list
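# Illustrative behaviour checks (the id_map below is made up for the sketch):
assert process_sequence_feature([3, 7], id_map={3: 1, 7: 2}, maxlen=4) == [1, 2, 0, 0]
assert process_sequence_feature([1, 2, 3, 4, 5, 6], id_map=None, maxlen=4) == [1, 2, 3, 4]
assert process_sequence_feature(np.nan) == [0] * 5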
def process_feed_tag():
"""
Process the feed tag (category) labels
:return:
"""
# 1. Load the data
columns = ['feedid', 'manual_tag_list', 'machine_tag_list']
feed_tag = pd.read_csv(DATA_HOME + 'feed_info.csv', header=0, index_col=False, usecols=columns)
feed_tag.rename(columns={'machine_tag_list': 'machine_tag_weight_pair_list'}, inplace=True)
# 2. Process the data
feed_tag['manual_tag_list'] = feed_tag['manual_tag_list'].str.split(';')
feed_tag['manual_tag_list'] = feed_tag['manual_tag_list'].apply(lambda x: x if isinstance(x, list) else [])
feed_tag['machine_tag_weight_pair_list'] = feed_tag['machine_tag_weight_pair_list'].str.split(';')
feed_tag['machine_tag_list'] = feed_tag['machine_tag_weight_pair_list'].apply(
lambda x: [tag_weight_pair.split(' ')[0] for tag_weight_pair in x] if isinstance(x, list) else [])
feed_tag['tag_list'] = feed_tag[['manual_tag_list', 'machine_tag_list']].apply(
lambda S: list(set(S['manual_tag_list']) | set(S['machine_tag_list'])), axis=1)
feed_tag['machine_tag_list_weight'] = feed_tag['machine_tag_weight_pair_list'].apply(
lambda x: [eval(tag_weight_pair.split(' ')[1]) for tag_weight_pair in x] if isinstance(x, list) else [])
feed_tag.drop(columns=['machine_tag_weight_pair_list'], inplace=True)
# tag map
tag_list = feed_tag['manual_tag_list'].tolist() + feed_tag['machine_tag_list'].tolist()
tag_list = reduce(lambda x, y: set(x) | set(y), tag_list)
tag_map = dict()
for idx, tag in enumerate(tag_list, 1):
tag_map[tag] = idx
# 3. Build sequence features
feed_tag['manual_tag_list'] = feed_tag['manual_tag_list'].apply(
func=process_sequence_feature,
id_map=tag_map,
maxlen=MAXLEN['manual_tag_list']
)
feed_tag['machine_tag_list'] = feed_tag['machine_tag_list'].apply(
func=process_sequence_feature,
id_map=tag_map,
maxlen=MAXLEN['machine_tag_list']
)
feed_tag['machine_tag_list_weight'] = feed_tag['machine_tag_list_weight'].apply(
func=process_sequence_feature,
id_map=None,
maxlen=MAXLEN['machine_tag_list']
)
feed_tag['tag_list'] = feed_tag['tag_list'].apply(
func=process_sequence_feature,
id_map=tag_map,
maxlen=MAXLEN['tag_list']
)
# 4. Save
file_path = join(SAVE_HOME, 'feature', 'feed_tag.pkl')
with open(file_path, 'wb') as file:
pickle.dump(feed_tag, file, pickle.HIGHEST_PROTOCOL)
return feed_tag, tag_map
def process_feed_keyword():
"""
Process the feed keyword features
:return:
"""
# 1. Load the data
columns = ['feedid', 'manual_keyword_list', 'machine_keyword_list']
feed_keyword =
|
pd.read_csv(DATA_HOME + 'feed_info.csv', header=0, index_col=False, usecols=columns)
|
pandas.read_csv
|
import pandas as pd
import argparse
from matplotlib_venn import venn2
import matplotlib.pyplot as plt
import math
def get_args():
desc = 'Given sj files, see which splice junctions are shared/unique between datasets. Also save the unsupported junctions'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-sj_1', dest='sj_1',
help = '1st splice junction file')
parser.add_argument('-sj_1_name', dest='sj_1_name',
help = '1st splice junction file sample name ie "Gencode"')
parser.add_argument('-sj_2', dest='sj_2',
help = '2nd splice junction file')
parser.add_argument('-sj_2_name', dest='sj_2_name',
help = '2nd splice junction file sample name ie "Gencode"')
parser.add_argument('-sample', dest='sample_name',
help = 'Sample name ie "PacBio GM12878"')
parser.add_argument('--log', dest='log_sizes', default=False,
action='store_true', help = 'Log the sizes of the circles')
args = parser.parse_args()
return args
def find_intersect_counts(dfa, dfb):
temp = pd.merge(dfa, dfb, how='inner',
on=['chrom', 'start', 'stop', 'strand'])
count_ab = len(temp.index)
count_a = len(dfa.index) - count_ab
count_b = len(dfb.index) - count_ab
counts = (count_a, count_b, count_ab)
# get the unsupported long-read dfa stuff
# new = dfa[(~dfa.start.isin(temp.start))&(~dfa.stop.isin(temp.stop))&(~dfa.chrom.isin(temp.chrom))&(~dfa.strand.isin(temp.strand))]
new = dfa[~dfa.isin(temp)].dropna(how = 'all')
print('number of unsupported long read sjs : '+str(len(new.index)))
return (counts,new)
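# Illustrative venn2 usage (the DataFrame names are assumptions): the returned
# counts tuple is (only-in-a, only-in-b, shared), which matches venn2's subset order.
#   counts, unsupported = find_intersect_counts(df_long_read, df_annotation)
#   venn2(subsets=counts, set_labels=(args.sj_1_name, args.sj_2_name))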
def read_sj_file(infile):
df =
|
pd.read_csv(infile, sep='\t',
names=['chrom', 'start', 'stop', 'strand'], usecols=[0,1,2,3])
|
pandas.read_csv
|
""" Test cases for GroupBy.plot """
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import DataFrame, Index, Series
import pandas._testing as tm
from pandas.tests.plotting.common import TestPlotBase
@td.skip_if_no_mpl
class TestDataFrameGroupByPlots(TestPlotBase):
def test_series_groupby_plotting_nominally_works(self):
n = 10
weight = Series(np.random.normal(166, 20, size=n))
height = Series(np.random.normal(60, 10, size=n))
with tm.RNGContext(42):
gender = np.random.choice(["male", "female"], size=n)
weight.groupby(gender).plot()
tm.close()
height.groupby(gender).hist()
tm.close()
# Regression test for GH8733
height.groupby(gender).plot(alpha=0.5)
|
tm.close()
|
pandas._testing.close
|
import time
import requests
import argparse
import re
import sys
import subprocess
from pathlib import Path
from util import make_video_url
import pandas as pd
from tqdm import tqdm
def parse_args():
parser = argparse.ArgumentParser(
description="Retrieving whether subtitles exists or not.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("lang", type=str, help="language code (ja, en, ...)")
parser.add_argument("videoidlist", type=str, help="filename of video ID list")
parser.add_argument("--outdir", type=str, default="sub", help="dirname to save results")
parser.add_argument("--checkpoint", type=str, default=None, help="filename of list checkpoint (for restart retrieving)")
return parser.parse_args(sys.argv[1:])
def retrieve_subtitle_exists(lang, fn_videoid, outdir="sub", wait_sec=0.2, fn_checkpoint=None):
fn_sub = Path(outdir) / lang / f"{Path(fn_videoid).stem}.csv"
fn_sub.parent.mkdir(parents=True, exist_ok=True)
# if file exists, load it and restart retrieving.
if fn_checkpoint is None:
subtitle_exists = pd.DataFrame({"videoid": [], "auto": [], "sub": []}, dtype=str)
else:
subtitle_exists =
|
pd.read_csv(fn_checkpoint)
|
pandas.read_csv
|
import pandas as pd
import jieba
import numpy
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import matplotlib
def drop_stopwords(content,stopwords):
content_clean=[]
all_words=[]
for line in content:
line_clean=[]
for word in line:
if word in stopwords:
continue
line_clean.append(word)
all_words.append(str(word))
content_clean.append(line_clean)
return content_clean,all_words
pd.set_option('display.width',None)
df_news=pd.read_csv('./data/Preliminary-finals.csv',names=['category','theme','URL','content'],encoding='utf-8',sep='\t')
df_news=df_news.dropna()
# print(df_news.head())
content=df_news.content.values.tolist()
# print(content[1000])
content_S=[]
for line in content:
current_segment=jieba.lcut(line)
if len(current_segment)>1 and current_segment!='\r\n':
content_S.append(current_segment)
# print(content_S[1000])
df_content=pd.DataFrame({'content_S':content_S})
stopwords=
|
pd.read_csv('./data/stopwords.txt',index_col=False,sep='\t',quoting=3,names=['stopword'],encoding='utf-8')
|
pandas.read_csv
|
import logging as log
import os.path
import math
import pandas as pd
import numpy as np
# for combinations of metric names
from itertools import combinations, chain
from PyQt5 import QtCore
class Data:
def __init__(self):
"""
Class that stores input data.
This class will handle data import using: Data.importFile(filename).
Dataframes will be stored as a dictionary with sheet names as keys
and pandas DataFrame as values
This class will keep track of the currently selected sheet and will
return that sheet when getData() method is called.
"""
self.sheetNames = ["None"]
self._currentSheet = 0
self.STATIC_NAMES = ['T', 'FC', 'CFC']
self.STATIC_COLUMNS = len(self.STATIC_NAMES) # 3 for T, FC, CFC columns
self.dataSet = {"None": None}
# self._numCovariates = 0
self.numCovariates = 0
self._n = 0
self.containsHeader = True
self.metricNames = []
self.metricNameCombinations = []
self.metricNameDictionary = {}
self._max_interval = 0
self.setupMetricNameDictionary()
@property
def currentSheet(self):
return self._currentSheet
@currentSheet.setter
def currentSheet(self, index):
if index < len(self.sheetNames) and index >= 0:
self._currentSheet = index
log.info("Current sheet index set to %d.", index)
else:
self._currentSheet = 0
log.info("Cannot set sheet to index %d since the data does not contain a sheet with that index.\
Sheet index instead set to 0.", index)
@property
def n(self):
self._n = self.dataSet[self.sheetNames[self._currentSheet]]['FC'].size
return self._n
@property
def max_interval(self):
return self._max_interval
@max_interval.setter
def max_interval(self, interval):
if interval < 5:
self._max_interval = 5
else:
self._max_interval = interval
def getData(self):
"""
Returns dataframe corresponding to the currentSheet index
"""
full_dataset = self.dataSet[self.sheetNames[self._currentSheet]]
try:
subset = full_dataset[:self._max_interval]
except TypeError:
# if None type, data hasn't been loaded
# cannot subscript None type
return full_dataset
return subset
def getDataSubset(self, fraction):
"""
Returns subset of dataframe corresponding to the currentSheet index
Args:
fraction: float between 0.0 and 1.0 indicating the fraction of
data to return
"""
intervals = math.floor(self.n * fraction)
# need at least 5 data points
if intervals < 5:
intervals = 5
full_dataset = self.dataSet[self.sheetNames[self._currentSheet]]
subset = full_dataset[:intervals]
return subset
def getFullData(self):
return self.dataSet[self.sheetNames[self._currentSheet]]
def getDataModel(self):
"""
Returns PandasModel for the current dataFrame to be displayed
on a QTableWidget
"""
return PandasModel(self.getData())
def setupMetricNameDictionary(self):
"""
For allocation table. Allows the effort allocation to be placed in correct column.
Metric name maps to number of metric (from imported data).
"""
i = 0
for name in self.metricNames:
self.metricNameDictionary[name] = i
i += 1
def processFT(self, data):
"""
Processes raw FT data to fill in any gaps
Args:
data: Raw pandas dataframe
Returns:
data: Processed pandas dataframe
"""
# failure time
if 'FT' not in data:
data["FT"] = data["IF"].cumsum()
# inter failure time
elif 'IF' not in data:
data['IF'] = data['FT'].diff()
data['IF'].iloc[0] = data['FT'].iloc[0]
if 'FN' not in data:
data['FN'] = pd.Series([i+1 for i in range(data['FT'].size)])
return data
def initialNumCovariates(self, data):
"""
Calculates the number of covariates on a given sheet
"""
numCov = len(data.columns) - self.STATIC_COLUMNS
# log.debug("%d covariates.", self._numCovariates)
return numCov
def renameHeader(self, data, numCov):
"""
Renames column headers if covariate metrics are unnamed
"""
data.rename(columns={data.columns[0]:"Time"}, inplace=True)
data.rename(columns={data.columns[1]:"Failures"}, inplace=True)
for i in range(numCov):
data.rename(columns={data.columns[i+2]:"C{0}".format(i+1)}, inplace=True) # changed from MetricX to CX
def importFile(self, fname):
"""
Imports data file
Args:
fname : Filename of csv or excel file
"""
self.filename, fileExtension = os.path.splitext(fname)
if fileExtension == ".csv":
if self.hasHeader(fname, fileExtension):
# data has header, can read in normally
data = {}
data["None"] = pd.read_csv(fname)
else:
# data does not have a header, need to specify
data = {}
data["None"] = pd.read_csv(fname, header=None)
else:
if self.hasHeader(fname, fileExtension):
# data has header, can read in normally
# *** don't think it takes into account differences in sheets
data = pd.read_excel(fname, sheet_name=None, engine="openpyxl")
else:
data =
|
pd.read_excel(fname, sheet_name=None, header=None, engine="openpyxl")
|
pandas.read_excel
|
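# Hedged sketch (separate from the Data class above) of why importFile can treat
# every input as a {sheet_name: DataFrame} mapping: pd.read_excel with
# sheet_name=None returns a dict keyed by sheet name. The file name example.xlsx
# is an assumption used only for illustration.
import pandas as pd

frame = pd.DataFrame({"T": [1, 2, 3], "FC": [0, 1, 1], "CFC": [0, 1, 2]})
with pd.ExcelWriter("example.xlsx", engine="openpyxl") as writer:
    frame.to_excel(writer, sheet_name="Sheet1", index=False)
    frame.to_excel(writer, sheet_name="Sheet2", index=False)
all_sheets = pd.read_excel("example.xlsx", sheet_name=None, engine="openpyxl")
print(list(all_sheets.keys()))  # ['Sheet1', 'Sheet2']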
from typing import Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from oolearning.evaluators.ConfusionMatrix import ConfusionMatrix
class TwoClassConfusionMatrix(ConfusionMatrix):
"""
Class representing a confusion matrix for two-class (or 2 category) classifiers.
| | Predicted Negative | Predicted Positive |
| ---------------- | ------------------ | ------------------ |
| Actual Negative | True Negative | False Positive |
| Actual Positive | False Negative | True Positive |
"""
def __init__(self,
actual_classes: np.ndarray,
predicted_classes: np.ndarray,
positive_class: object):
unique_classes = list(set(actual_classes)) # get unique values then convert to list
assert len(unique_classes) == 2
negative_class = unique_classes[0] if positive_class == unique_classes[1] else unique_classes[1]
super().__init__(actual_classes=actual_classes,
predicted_classes=predicted_classes,
class_order=[negative_class, positive_class])
self._positive_class = positive_class
self._negative_class = negative_class
category_list = [self._negative_class, self._positive_class]
self._actual_positives = self.matrix.loc[self._positive_class][category_list].sum()
self._actual_negatives = self.matrix.loc[self._negative_class][category_list].sum()
self._true_positives = self.matrix.loc[self._positive_class, self._positive_class]
self._true_negatives = self.matrix.loc[self._negative_class, self._negative_class]
self._false_positives = self.matrix.loc[self._negative_class, self._positive_class]
self._false_negatives = self.matrix.loc[self._positive_class, self._negative_class]
@property
def sensitivity(self) -> float:
"""
:return: a.k.a true positive rate
"""
return 0 if self._actual_positives == 0 else self._true_positives / self._actual_positives
@property
def specificity(self) -> float:
"""
:return: a.k.a true negative rate
"""
return 0 if self._actual_negatives == 0 else self._true_negatives / self._actual_negatives
@property
def true_positive_rate(self) -> float:
return self.sensitivity
@property
def true_negative_rate(self) -> float:
return self.specificity
@property
def false_negative_rate(self) -> float:
return 0 if self._actual_positives == 0 else self._false_negatives / self._actual_positives
@property
def false_positive_rate(self) -> float:
return 0 if self._actual_negatives == 0 else self._false_positives / self._actual_negatives
@property
def accuracy(self) -> Union[float, None]:
return None if self.total_observations == 0 else \
(self._true_negatives + self._true_positives) / self.total_observations
@property
def error_rate(self) -> Union[float, None]:
return None if self.total_observations == 0 else \
(self._false_positives + self._false_negatives) / self.total_observations
@property
def positive_predictive_value(self) -> float:
return 0 if (self._true_positives + self._false_positives) == 0 else \
self._true_positives / (self._true_positives + self._false_positives)
@property
def negative_predictive_value(self) -> float:
return 0 if (self._true_negatives + self._false_negatives) == 0 else \
self._true_negatives / (self._true_negatives + self._false_negatives)
@property
def prevalence(self) -> Union[float, None]:
return None if self.total_observations == 0 else \
(self._true_positives + self._false_negatives) / self.total_observations
@property
def kappa(self) -> Union[float, None]:
if self.total_observations == 0 or \
((self._true_negatives + self._false_negatives) / self.total_observations) == 0:
return None
# proportion of the actual agreements
# add the proportion of all instances where the predicted type and actual type agree
pr_a = (self._true_negatives + self._true_positives) / self.total_observations
# probability of both predicted and actual being negative
p_negative_prediction_and_actual = \
((self._true_negatives + self._false_positives) / self.total_observations) * \
((self._true_negatives + self._false_negatives) / self.total_observations)
# probability of both predicted and actual being positive
p_positive_prediction_and_actual = \
self.prevalence * ((self._false_positives + self._true_positives) / self.total_observations)
# probability that chance alone would lead the predicted and actual values to match, under the
# assumption that both are selected randomly (i.e. implies independence) according to the observed
# proportions (probability of independent events = P(A & B) == P(A) * P(B)
pr_e = p_negative_prediction_and_actual + p_positive_prediction_and_actual
return (pr_a - pr_e) / (1 - pr_e)
@property
def f1_score(self) -> float:
return self.fbeta_score(beta=1)
def fbeta_score(self, beta: float) -> float:
"""
:param beta: The `beta` parameter determines the weight of precision in the combined score.
`beta < 1` lends more weight to precision, while
`beta > 1` favors recall
(`beta -> 0` considers only precision, `beta -> inf` only recall).
http://scikit-learn.org/stable/modules/generated/sklearn.metrics.fbeta_score.html
:return:
"""
if self.positive_predictive_value is None or self.sensitivity is None or \
(self.positive_predictive_value + self.sensitivity) == 0:
return 0
return (1 + (beta**2)) * (self.positive_predictive_value * self.sensitivity) / \
(((beta**2) * self.positive_predictive_value) + self.sensitivity)
@property
def all_quality_metrics(self) -> dict:
"""
:return: dictionary with all the score_names and associated values
"""
return {'Kappa': self.kappa,
'F1 Score': self.f1_score,
'Two-Class Accuracy': self.accuracy,
'Error Rate': self.error_rate,
'True Positive Rate': self.sensitivity,
'True Negative Rate': self.specificity,
'False Positive Rate': self.false_positive_rate,
'False Negative Rate': self.false_negative_rate,
'Positive Predictive Value': self.positive_predictive_value,
'Negative Predictive Value': self.negative_predictive_value,
'Prevalence': self.prevalence,
'No Information Rate': max(self.prevalence, 1-self.prevalence), # i.e. largest class %
'Total Observations': self.total_observations}
def plot_all_quality_metrics(self, comparison_matrix: "TwoClassConfusionMatrix" = None):
"""
Creates a plot that shows all of the quality score_names in this class.
:param comparison_matrix: adds additional points to the plot for the score_names associated with the
`comparison_matrix`; allows the user to compare two different confusion matrices (e.g. from two
different models)
"""
# convert the metrics dictionary to a DataFrame, excluding "Total Observations", which would throw off the axis scale
# noinspection PyTypeChecker
metrics_dataframe =
|
pd.DataFrame.from_dict([self.all_quality_metrics])
|
pandas.DataFrame.from_dict
|
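# Worked numeric check (a sketch, not taken from the library's tests) of the kappa
# computation in TwoClassConfusionMatrix above, using hypothetical counts
# TN=50, FP=10, FN=5, TP=35:
tn, fp, fn, tp = 50, 10, 5, 35
total = tn + fp + fn + tp                           # 100 observations
pr_a = (tn + tp) / total                            # observed agreement = 0.85
p_neg = ((tn + fp) / total) * ((tn + fn) / total)   # 0.60 * 0.55 = 0.33
p_pos = ((tp + fn) / total) * ((fp + tp) / total)   # 0.40 * 0.45 = 0.18
pr_e = p_neg + p_pos                                # chance agreement = 0.51
kappa = (pr_a - pr_e) / (1 - pr_e)                  # ~0.694
print(round(kappa, 3))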
# Copyright 2018 QuantRocket LLC - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# To run: python3 -m unittest discover -s tests/ -p test_*.py -t . -v
import os
import unittest
from unittest.mock import patch
import glob
import pandas as pd
from moonshot import Moonshot
from moonshot.cache import TMP_DIR
class PositionsClosedDailyTestCase(unittest.TestCase):
def tearDown(self):
"""
Remove cached files.
"""
for file in glob.glob("{0}/moonshot*.pkl".format(TMP_DIR)):
os.remove(file)
def test_positions_closed_daily(self):
"""
Tests that the resulting DataFrames are correct after running a
short-only intraday strategy with POSITIONS_CLOSED_DAILY = True.
"""
class ShortAbove10Intraday(Moonshot):
"""
A basic test strategy that shorts above 10 and holds intraday.
"""
POSITIONS_CLOSED_DAILY = True
SLIPPAGE_BPS = 10
def prices_to_signals(self, prices):
morning_prices = prices.loc["Open"].xs("09:30:00", level="Time")
short_signals = morning_prices > 10
return -short_signals.astype(int)
def signals_to_target_weights(self, signals, prices):
weights = self.allocate_fixed_weights(signals, 0.25)
return weights
def target_weights_to_positions(self, weights, prices):
# enter on same day
positions = weights.copy()
return positions
def positions_to_gross_returns(self, positions, prices):
# hold from 09:30 to 15:30
closes = prices.loc["Close"]
entry_prices = closes.xs("09:30:00", level="Time")
exit_prices = closes.xs("15:30:00", level="Time")
pct_changes = (exit_prices - entry_prices) / entry_prices
gross_returns = pct_changes * positions
return gross_returns
def mock_get_prices(*args, **kwargs):
dt_idx =
|
pd.DatetimeIndex(["2018-05-01","2018-05-02","2018-05-03"])
|
pandas.DatetimeIndex
|
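# A sketch of what the mocked intraday price panel might look like (an assumption,
# not the actual moonshot fixture): a (Field, Date, Time) MultiIndex with one
# column per security, which is the shape .loc["Open"].xs("09:30:00", level="Time")
# in the strategy above expects. The security IDs are hypothetical.
import pandas as pd

dt_idx = pd.DatetimeIndex(["2018-05-01", "2018-05-02", "2018-05-03"])
idx = pd.MultiIndex.from_product(
    [["Open", "Close"], dt_idx, ["09:30:00", "15:30:00"]],
    names=["Field", "Date", "Time"])
prices = pd.DataFrame(10.5, index=idx, columns=["FI12345", "FI23456"])
print(prices.loc["Close"].xs("09:30:00", level="Time"))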
# -*- encoding: utf-8 -*-
"""
Copyright (c) 2019 - present AppSeed.us
"""
import time
from flask.globals import request
from app.home import blueprint
from flask import render_template, redirect, url_for
from flask_login import login_required, current_user
from app import login_manager
from jinja2 import TemplateNotFound
from flask import jsonify
import matplotlib.pyplot as plt
import nltk
import pandas as pd
import praw
import squarify
from flask import Flask, render_template
from nltk.corpus import stopwords
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import os
from app.settings import APP_STATIC
from data import *
from app.base.models import User, Picks
from app import db
nltk.download('stopwords')
set(stopwords.words('english'))
@blueprint.route('/index')
#@login_required
def index1():
return render_template('core/reddit-index.html')
@blueprint.route('/index1')
#@login_required
def index():
# db.drop_all()
# db.create_all()
#found=Picks.query.all()
arr=[]
for i in Picks.query.all():
print(i.__dict__)
temp = i
#temp.time = int(time.mktime(temp.time.timetuple())) * 1000
del temp._sa_instance_state
arr.append(temp.__dict__)
return render_template('index.html', time=12345, df=arr)
@blueprint.route('/reddit-index')
def my_form():
return render_template('core/reddit-index.html')
@blueprint.route('/reddit-index', methods=['POST'])
def my_form_input():
input = {
'subs': request.form['subs'] if request.form['subs'] else ['wallstreetbets'],
'post_flairs': request.form['post_flairs'] if request.form['post_flairs'] else {'Daily Discussion', 'Weekend Discussion', 'Discussion'},
'goodAuth': request.form['goodAuth'] if request.form['goodAuth'] else{'AutoModerator'},
'uniqueCmt': request.form['uniqueCmt'] if request.form['uniqueCmt'] else True,
'ignoreAuthP': request.form['ignoreAuthP'] if request.form['ignoreAuthP'] else {'example'},
'ignoreAuthC': request.form['ignoreAuthC'] if request.form['ignoreAuthC'] else {'example,'},
'upvoteRatio': request.form['upvoteRatio'] if request.form['upvoteRatio'] else 0.70,
'ups': request.form['ups'] if request.form['ups'] else 20,
'limit': request.form['limit'] if request.form['limit'] else 500,
'upvotes': request.form['upvotes'] if request.form['upvotes'] else 2,
'picks': request.form['picks'] if request.form['picks'] else 10,
'picks_ayz': request.form['picks_ayz'] if request.form['picks_ayz'] else 5,
}
print("input is", input)
return render_template('core/reddit-index.html')
@ blueprint.route('/data', methods=['POST', 'GET'])
def my_form_post():
import time
start_time = time.time()
ctime = time.ctime()
print('time is', time.ctime())
reddit = praw.Reddit(user_agent="Comment Extraction",
client_id="ZM9jcd0nyXvtlA",
client_secret="<KEY>",
username="",
password="")
'''############################################################################'''
# set the program parameters
subs = ['wallstreetbets'] # sub-reddit to search
# posts flairs to search || None flair is automatically considered
post_flairs = {'Daily Discussion', 'Weekend Discussion', 'Discussion'}
# authors whom comments are allowed more than once
goodAuth = {'AutoModerator'}
uniqueCmt = True # allow one comment per author per symbol
ignoreAuthP = {'example'} # authors to ignore for posts
ignoreAuthC = {'example'} # authors to ignore for comment
upvoteRatio = 0.70 # upvote ratio for post to be considered, 0.70 = 70%
ups = 20 # define # of upvotes, post is considered if upvotes exceed this #
limit = 5 # define the limit, comments 'replace more' limit
upvotes = 2 # define # of upvotes, comment is considered if upvotes exceed this #
picks = 10 # define # of picks here, prints as "Top ## picks are:"
picks_ayz = 5 # define # of picks for sentiment analysis
'''############################################################################'''
posts, count, c_analyzed, tickers, titles, a_comments = 0, 0, 0, {}, [], {}
cmt_auth = {}
num = 0
comm = 0
for sub in subs:
subreddit = reddit.subreddit(sub)
hot_python = subreddit.hot() # sorting posts by hot
# Extracting comments, symbols from subreddit
print("running", str(hot_python))
for submission in hot_python:
flair = submission.link_flair_text
author = submission.author.name
# custom write func
file = open(os.path.join(APP_STATIC, "output/sample.py"),
"w", encoding='utf-8')
hotlist = [i for i in hot_python]
file.write("start time was %s num is %d and hotlist is %s " %
(str(time.ctime()), num, str(hotlist)))
print('num is', num)
file.close()
num += 1
# checking: post upvote ratio # of upvotes, post flair, and author
if submission.upvote_ratio >= upvoteRatio and submission.ups > ups and (flair in post_flairs or flair is None) and author not in ignoreAuthP:
submission.comment_sort = 'new'
comments = submission.comments
titles.append(submission.title)
posts += 1
try:
submission.comments.replace_more(limit=limit)
for comment in comments:
file = open(os.path.join(
APP_STATIC, "output/sample.py"), "a", encoding='utf-8')
file.write("comnum is %d and comm is %s " %
(comm, str(comment)))
file.close()
comm += 1
#print("comnum is", comm)
# try except for deleted account?
try:
auth = comment.author.name
except:
pass
c_analyzed += 1
# checking: comment upvotes and author
if comment.score > upvotes and auth not in ignoreAuthC:
split = comment.body.split(" ")
for word in split:
word = word.replace("$", "")
# uppercase word = possible ticker; length <= 5; not in the blacklist; must be a known US symbol
if word.isupper() and len(word) <= 5 and word not in blacklist and word in us:
# unique comments, try/except for key errors
if uniqueCmt and auth not in goodAuth:
try:
if auth in cmt_auth[word]:
break
except:
pass
# counting tickers
if word in tickers:
tickers[word] += 1
a_comments[word].append(comment.body)
cmt_auth[word].append(auth)
count += 1
else:
tickers[word] = 1
cmt_auth[word] = [auth]
a_comments[word] = [comment.body]
count += 1
except Exception as e:
print(e)
# sorts the dictionary
symbols = dict(
sorted(tickers.items(), key=lambda item: item[1], reverse=True))
top_picks = list(symbols.keys())[0:picks]
time = (time.time() - start_time)
# print top picks
print("It took {t:.2f} seconds to analyze {c} comments in {p} posts in {s} subreddits.\n".format(
t=time, c=c_analyzed, p=posts, s=len(subs)))
print("Posts analyzed saved in titles")
# for i in titles: print(i) # prints the title of the posts analyzed
print(f"\n{picks} most mentioned picks: ")
times = []
top = []
for i in top_picks:
print(f"{i}: {symbols[i]}")
times.append(symbols[i])
top.append(f"{i}: {symbols[i]}")
# Applying Sentiment Analysis
scores, s = {}, {}
vader = SentimentIntensityAnalyzer()
# adding custom words from data.py
vader.lexicon.update(new_words)
picks_sentiment = list(symbols.keys())[0:picks_ayz]
for symbol in picks_sentiment:
stock_comments = a_comments[symbol]
for cmnt in stock_comments:
score = vader.polarity_scores(cmnt)
if symbol in s:
s[symbol][cmnt] = score
else:
s[symbol] = {cmnt: score}
if symbol in scores:
for key, _ in score.items():
scores[symbol][key] += score[key]
else:
scores[symbol] = score
# calculating avg.
for key in score:
scores[symbol][key] = scores[symbol][key] / symbols[symbol]
scores[symbol][key] = "{pol:.3f}".format(pol=scores[symbol][key])
picksdb = Picks(pick=scores)
timesdb = Picks(pick=[times, top, top_picks])
# print(picks)
db.session.add(picksdb)
db.session.add(timesdb)
db.session.commit()
# printing sentiment analysis
print(f"\nSentiment analysis of top {picks_ayz} picks:")
df =
|
pd.DataFrame(scores)
|
pandas.DataFrame
|
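# A minimal sketch (hypothetical numbers) of what pd.DataFrame(scores) yields for
# the nested {symbol: {metric: value}} dict built above: one column per symbol and
# one row per VADER metric.
import pandas as pd

scores = {"GME": {"neg": 0.10, "neu": 0.70, "pos": 0.20, "compound": 0.25},
          "AMC": {"neg": 0.15, "neu": 0.65, "pos": 0.20, "compound": 0.10}}
df = pd.DataFrame(scores)
print(df)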
#%%
import re
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import seaborn as sns
import jsonlines
from datetime import date
sns.set()
# %%
with jsonlines.open("companyinfo/items.json", "r") as f:
lst = [obj for obj in f]
def parse_jsonl(lst):
csos = []
people = []
for item in lst:
corp = item["corporation"]
corp_out = {}
for field in ["name", "idCode", "address", "email"]:
corp_out[field] = corp[field]
if corp.get("registrationDate"):
corp_out["registrationDate"] = corp["registrationDate"]["date"]
csos.append(corp_out)
affils = item["corporationAffiliations"]
for affil in affils:
affil["corpId"] = corp["idCode"]
people.append(affil)
return csos, people
# def extract_loc(x):
# if isinstance(x, str):
# try:
# loc = x.split(",")[1]
# except IndexError:
# return x
# loc = re.sub(r".\.|ქალაქი", "", loc).strip()
# return loc
csos, people = [pd.DataFrame(i) for i in parse_jsonl(lst)]
# %%
csos["date"] = pd.to_datetime(csos["registrationDate"], errors="coerce")
#%%
# %%
fig, ax = plt.subplots(figsize=(15, 5))
to_plot = (
csos.query("(date > 1990) and (date < 2021)")
.groupby("date")["idCode"]
.count()
.resample("M")
.sum()
.rolling(3)
.mean()
.reset_index()
)
sns.lineplot(x="date", y="idCode", data=to_plot, ax=ax)
key_dates = [
("Civil Code\nof Georgia\n(1999)", date(1999, 1, 1), 150),
("Rose Revolution\n(2003)", date(2003, 11, 3), 155),
("Saakashvili\nDemonstrations\n(2007)", date(2007, 11, 1), 185),
("Parliamentary\nElections\n(2012)", date(2012, 10, 1), 185),
("Parliamentary\nElections\n(2016)", date(2016, 10, 1), 210),
]
for t, d, h in key_dates:
ax.text(x=d, y=h, s=t, fontdict={"ha": "center"})
ax.xaxis.set_major_locator(mdates.YearLocator(base=5))
ax.set_xlim(date(1990, 1, 1), date(2021, 4, 1))
ax.set_ylim(0, 250)
ax.set_xlabel("")
ax.set_ylabel("New CSO registrations (monthly)")
plt.figtext(0.9, -0.02, "Source: Public Registry via companyinfo.ge", ha="right")
plt.savefig("registrations.png", bbox_inches="tight")
plt.show()
# %%
multi = people.drop_duplicates(subset=["personId", "corpId"])[
"personName"
].value_counts()
# %%
adj = (
csos[["idCode", "name", "email"]]
.merge(
people[["personName", "personId", "corpId"]],
left_on="idCode",
right_on="corpId",
)
.drop_duplicates()
)
adj = adj[adj.personName.isin(multi[multi > 1].index)]
nodes = (
pd.concat(
[
|
pd.DataFrame(adj[["idCode", "name", "email"]].values)
|
pandas.DataFrame
|
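# Hedged sketch of how a node table could be assembled from `adj` above for a
# person/organisation network: stack the organisation columns and the person
# columns with a type label. The column choices and the synthetic frame are
# assumptions, not the author's actual continuation.
import pandas as pd

adj = pd.DataFrame({"idCode": [1, 1], "name": ["CSO A", "CSO A"], "email": ["a@x", "a@x"],
                    "personName": ["P1", "P2"], "personId": [10, 11], "corpId": [1, 1]})
orgs = (adj[["idCode", "name"]].drop_duplicates()
        .rename(columns={"idCode": "id", "name": "label"}).assign(kind="organisation"))
persons = (adj[["personId", "personName"]].drop_duplicates()
           .rename(columns={"personId": "id", "personName": "label"}).assign(kind="person"))
nodes = pd.concat([orgs, persons], ignore_index=True)
print(nodes)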
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Time Based Regression geoexperiment methodology.
"""
import collections.abc
import functools
from matched_markets.methodology import semantics
from matched_markets.methodology import utils
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy as sp
import statsmodels.api as sm
class TBR(object):
"""Time Based Regression geoexperiment methodology.
This class models the relationship between control and treatment time series.
For details see [Kerman 2017](https://ai.google/research/pubs/pub45950).
"""
def __init__(self, use_cooldown=True):
"""Initializes a TBR analysis.
Args:
use_cooldown: bool. Whether cooldown period should be utilised.
"""
self.df_names = None
self.groups = None
self.periods = None
self.analysis_data = None
self.target = None
# Set up container for the response model, and potentially a cost model.
self.pre_period_model = None
self.use_cooldown = use_cooldown
def fit(self, data_frame, target, **kwargs):
"""Fit the TBR model to the supplied data frame.
See optional kwargs for interpretation of the data frame.
Args:
data_frame: a pandas.DataFrame. Should contain the columns and indices
corresponding to the **kwargs information below. Only one of response
or cost need be present, corresponding to the supplied `target`. Must be
indexed by date.
target: `str`. The name of the column to be analysed.
**kwargs: optional column/index names for the data and related semantics:
key_geo='geo' - geo data frame index name.
key_period='period' - experimental period column name.
key_group='group' - group assignment column name.
key_cost='cost' - cost column name.
key_response='response' - response column name.
key_date='date' - date index name.
key_incr_cost='_incr_cost' - incremental cost column name.
key_incr_response='_incr_response' - incremental response column name.
group_control=1 - value representing the control group in the data.
group_treatment=2 - value representing the treatment group in the data.
period_pre=0 - value representing the pre-test period in the data.
period_test=1 - value representing the test period in the data.
period_cool=2 - value representing the cooldown period in the data.
"""
# Set the target of the analysis.
self.target = target
# Extract any column / index name information supplied by the user.
user_df_names = utils.kwarg_subdict('key_', **kwargs)
self.df_names = semantics.DataFrameNameMapping(**user_df_names)
# Extract any semantics for control / treatment supplied by user.
user_group_semantics = utils.kwarg_subdict('group_', **kwargs)
self.groups = semantics.GroupSemantics(**user_group_semantics)
# Extract any semantics for experimental period supplied by user.
user_period_semantics = utils.kwarg_subdict('period_', **kwargs)
self.periods = semantics.PeriodSemantics(**user_period_semantics)
# Set up the analysis data.
self._construct_analysis_data(data_frame)
# Fit pre-period models for response and for cost.
self._fit_pre_period_model()
def _construct_analysis_data(self, data):
"""Stores group-wise time series by aggregating over control/treat geos."""
preserve = [self.df_names.group, self.df_names.date]
agg_style = {
self.target: 'sum',
self.df_names.period: 'max' # preserve the period info of the ts.
}
self.analysis_data = data.groupby(preserve).agg(agg_style)
def _fit_pre_period_model(self):
"""Estimates the control-treatment relationship in the pre-period."""
# Get the pre- period data in the form needed for regression.
period_index = self.analysis_data[self.df_names.period] == self.periods.pre
treat_vec = self._response_vector(period_index)
cntrl_mat = self._design_matrix(period_index)
# Fit an OLS model to the pre- period data.
self.pre_period_model = sm.OLS(treat_vec.values, cntrl_mat.values).fit()
def predict(self, cntrl_mat):
"""Counterfactual prediction for treatment group series in the test period.
Args:
cntrl_mat: a T by 2 `np.matrix`, representing a constant concatenated
to the control group time series, with T the test period length.
Returns:
A vector representing the expected treatment group time series.
"""
return self.pre_period_model.predict(cntrl_mat)
def _make_period_index(self, periods):
"""Returns an index for analysis_data rows in the desired time periods.
Args:
periods: int or non-empty iterable of int. The labels of the periods to
consider.
Returns: a pandas.Series of bools indicating whether each time point lies in
the supplied periods.
Raises:
ValueError: if an empty periods argument is passed.
"""
# Ensure we can iterate through periods.
if not isinstance(periods, collections.abc.Iterable):
period_itr = (periods,)
else:
if periods:
period_itr = periods
else:
raise ValueError('Periods must not be an empty iterable.')
# Construct a list of bool valued pandas.Series indicating for each period
# whether each time point is in that period.
subset = self.analysis_data[self.df_names.period]
indices = [subset == i for i in period_itr]
return functools.reduce(np.logical_or, indices)
def causal_effect(self, periods):
"""Returns the difference of the actual and counterfactual prediction.
Args:
periods: int or iterable of int. The labels of the periods to consider.
Returns:
A vector representing the estimated causal effect of the treatment on the
target variable.
"""
period_index = self._make_period_index(periods)
# Get the test- period data in the form needed for regression.
treat_vec = self._response_vector(period_index)
cntrl_mat = self._design_matrix(period_index)
# Calculate the causal effect of the campaign.
treat_counter = self.predict(cntrl_mat)
return treat_vec - treat_counter
def _response_vector(self, period_index):
"""Return the treatment group's time-series for the specified period."""
adata = self.analysis_data
return adata[period_index].loc[self.groups.treatment][self.target]
def _design_matrix(self, period_index):
"""Return the design matrix for `periods`."""
# Short variable names
adata = self.analysis_data
cntrl = self.groups.control
target = self.target
# Construct the design matrix.
cntrl_vec = adata[period_index].loc[cntrl][target]
cntrl_mat = cntrl_vec.to_frame()
cntrl_mat.insert(0, 'const', 1)
return cntrl_mat
def causal_cumulative_distribution(self,
time=None,
rescale=1.0,
periods=None):
"""Return the distribution of the cumulative causal effect.
Args:
time: `int`. If specified, returns only the cumulative distribution at
this time index.
rescale: `float`. Additional scaling factor for the t-distribution.
periods: optional tuple of `int` (default None). The periods over which to
infer causal effects. If not supplied, the periods considered will include
the test period and also the cooldown period if the model was constructed
with use_cooldown=True.
Returns:
A t-distribution of type `scipy.stats._distn_infrastructure.rv_frozen`.
"""
# Define periods to credit to test.
if self.use_cooldown and periods is None:
periods = (self.periods.test, self.periods.cooldown)
elif periods is None:
periods = (self.periods.test,)
# Predict the causal effects of the experiment on response.
causal_response = self.causal_effect(periods)
# Counter of length test period.
period_index = self._make_period_index(periods)
cntrl_mat = self._design_matrix(period_index)
len_test = cntrl_mat.shape[0]
one_to_t = np.arange(1, len_test + 1)
one_to_t.shape = (len_test, 1)
# Scale contribution from parameters
cntrl_cum_mat = np.array(np.array(cntrl_mat.cumsum()) / one_to_t)
# Obtain the parameter covariance matrix.
vsigma = np.array(self.pre_period_model.cov_params())
# Each point in test-period has a different contribution.
var_params = []
for t in np.arange(len_test):
# Sum of parameter variance terms from eqn 5 of Kerman 2017.
var_t = (cntrl_cum_mat[t,] @ vsigma @ cntrl_cum_mat[t,].T)
var_params.append(var_t)
var_params = np.array(var_params).reshape(len_test, 1)
# Scale the results by T\sigma^2
var_from_params = var_params * one_to_t**2
# Scale contribution from test observations.
sigmasq = self.pre_period_model.scale
var_from_observations = one_to_t * sigmasq
# Set up the t-distribution.
delta_mean = rescale * np.array(np.cumsum(causal_response)).flatten()
delta_var = var_from_params + var_from_observations
delta_scale = rescale * np.sqrt(delta_var).flatten()
delta_df = self.pre_period_model.df_resid
# Return a frozen t-distribution with the correct parameters.
if time is None:
return sp.stats.t(delta_df, loc=delta_mean, scale=delta_scale)
else:
return sp.stats.t(delta_df, loc=delta_mean[time], scale=delta_scale[time])
def summary(self, level=0.9,
threshold=0.0,
tails=1,
report='last',
rescale=1.0):
"""Summarise the posterior of the cumulative causal effect, Delta.
Args:
level: `float` in (0,1). Determines width of CIs.
threshold: `float`. Tests whether Delta is greater than threshold.
tails: `int` in {1,2}. Specifies number of tails to use in tests.
report: `str`, whether to report on 'all' or 'last' day in test period.
rescale: `float`, an additional scaling factor for Delta.
Returns:
pd.DataFrame, a summary at level, with alpha=1-level, containing:
- estimate, the median of Delta.
- precision, distance between the (1-level)/tails and 0.5 quantiles.
- lower, the value of the (1-level)/tails quantile.
- upper, if tails=2, the level/tails quantile, otherwise inf.
- scale, the scale parameter of Delta.
- level, records the level parameter used to generate the report.
- threshold, records the threshold parameter.
- probability, the probability that Delta > threshold.
Raises:
ValueError: if tails is neither 1 nor 2.
ValueError: if level is outside of the interval [0,1].
"""
# Enforce constraints on the arguments.
if tails not in (1, 2):
raise ValueError('tails should be either 1 or 2.')
if level < 0.0 or level > 1.0:
raise ValueError('level should be between 0.0 and 1.0.')
# Calculate the relevant points to evaluate.
alpha = (1-level) / tails
if tails == 1:
pupper = 1.0
elif tails == 2:
pupper = 1.0 - alpha
# Obtain the appropriate posterior distribution.
delta = self.causal_cumulative_distribution(rescale=rescale)
# Define periods to credit to test.
if self.use_cooldown:
periods = [self.periods.test, self.periods.cooldown]
else:
periods = [self.periods.test]
# Facts about the date index.
dates = self.causal_effect(periods).index
ndates = len(dates)
dates_ones = np.ones(ndates)
# Data for the report.
values = {
'dates': dates,
'estimate': delta.mean(),
'precision': np.abs(delta.ppf(alpha) - delta.ppf(0.5)).reshape(ndates),
'lower': delta.ppf(alpha).reshape(ndates),
'upper': delta.ppf(pupper).reshape(ndates),
'scale': delta.kwds['scale'].reshape(ndates),
'level': level * dates_ones,
'posterior_threshold': threshold * dates_ones,
'probability': 1.0 - delta.cdf(threshold).reshape(ndates)
}
# Ordering for the report.
ordering = ['estimate',
'precision',
'lower',
'upper',
'scale',
'level',
'probability',
'posterior_threshold'
]
# Construct the report, put it in the desired ordering.
result = pd.DataFrame(values, index=dates)
result = result[ordering]
# Decide how much of the report to report.
if report == 'all':
lines = result.shape[0]
elif report == 'last':
lines = 1
# Return the report for `lines` last days of the test period.
return result.tail(lines)
def plot(self, target, experiment_dates=None, margin=0.05):
"""Plot the control and treatment time series for the target variable.
Args:
target: str. The name of the target variable.
experiment_dates: iterable of str. Dates to mark with a vertical line.
margin: float. Determines the space at the top and bottom of the y-axis.
"""
# Labels of the group timeseries to be plotted.
groups = [self.groups.treatment, self.groups.control]
# Set the plotting limits.
column = self.analysis_data[target]
colmax = column.max()
colmin = column.min()
gap = margin * max(np.abs(colmax), np.abs(colmin))
ymax = colmax + gap
ymin = colmin - gap
# Plot the timeseries.
for i in groups:
plt.plot(self.analysis_data.loc[i][target], label='Group %s' % i)
plt.legend()
plt.ylim((ymin, ymax))
# Place vertical lines on important dates.
if experiment_dates:
date_marks =
|
pd.to_datetime(experiment_dates)
|
pandas.to_datetime
|
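# Self-contained sketch (synthetic data, not the library's own example) of the
# pre-period regression that TBR._fit_pre_period_model performs: regress the
# treatment-group series on a constant plus the control-group series, then use the
# fit to predict the counterfactual treatment series.
import numpy as np
import pandas as pd
import statsmodels.api as sm

rng = np.random.default_rng(0)
control = pd.Series(100 + rng.normal(0, 5, 50))
treatment = 10 + 1.2 * control + rng.normal(0, 2, 50)
design = sm.add_constant(control.to_frame(name="control"))
model = sm.OLS(treatment, design).fit()
counterfactual = model.predict(design)   # expected treatment series absent a campaign
print(model.params.round(2))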
# -*- coding: utf-8 -*-
import sys
import math
import json
from collections import defaultdict
import pandas as pd
# compute the average CTR for each feature
def cal_feature_avg_value():
res={}
file=open('./../new_data/train_ins_add')
res_file=open('./../new_data/feature_ctr_dic.txt','w')
# initialize the dictionary first
res_dic=defaultdict(dict)
# fill the dictionary with values
index=0
for line in file.readlines():
click=line.split('\t')[1]
val_id_list=line.split('\t')[2].split(' ')
for val_id in val_id_list:
item=val_id.split(':')
if(len(item)!=2):
print(index)
print(line)
val=val_id.split(':')[0]
id=val_id.split(':')[1].strip('\n')
if val not in res_dic[id].keys():
res_dic[id][val]=[1,0]
if click=='1':
res_dic[id][val][1] += 1
else:
res_dic[id][val][0]+=1
if click=='1':
res_dic[id][val][1] += 1
index+=1
# compute the average CTR
for id ,value_list in res_dic.items():
value_num=len(value_list.keys())
sum=0
res[id]={}
for val,value in value_list.items():
#res_file.write(id+':'+val+' '+str(float(value[1])/value[0])+'\n')
res[id][val]=str(float(value[1])/value[0])
sum+=float(value[1])/value[0]
#print (id+":"+str(float(sum)/value_num))
res[id]['avg_ctr']=str(float(sum)/value_num)
res_file.write(json.dumps(res))
#print('done')
# compute the average information gain ratio for each feature, working directly at the feature level
def cal_feature_avg_entropy():
res_dic={}
feature_file=open('feature_list.txt')
file=open('./data/train_eval_ins_merge')
res_file=open('./data/ins_feature_entroy.txt','w')
feature_id_list=[]
# initialize the dictionary first
for line in feature_file.readlines():
id=line.strip('\n').strip()
feature_id_list.append(id)
res_dic[id]=[0,0,0,0]
# fill the dictionary with values
index=0
pos=0
neg=0
for line in file.readlines():
read_list=[]
click=line.split('\t')[1]
if click=='1':
pos+=1
else:
neg+=1
val_id_list=line.split('\t')[2].split(' ')
for val_id in val_id_list:
id=val_id.split(':')[1].strip('\n')
# positive/negative sample counts that contain this id
if id not in read_list:
if click=='1' :
res_dic[id][0] += 1
else:
res_dic[id][1] += 1
read_list.append(id)
not_read_list=[tmp for tmp in feature_id_list if tmp not in read_list]
for item in not_read_list:
if click == '1':
res_dic[item][2]+=1
else:
res_dic[item][3]+=1
# overall entropy of the label distribution
pos=float(pos)
neg=float(neg)
all_entroy=-pos/(pos+neg)*math.log(pos/(pos+neg),2)- neg/(pos+neg)*math.log(neg/(pos+neg),2)
# compute the information gain ratio of each feature
for id,val_list in res_dic.items():
num=float(sum(val_list))
pos_num=float(val_list[0]+val_list[1])
neg_num = float(val_list[2] + val_list[3])
# compute the intrinsic value
# compute the information gain ratio
if pos_num==num or neg_num==num:
res_file.write(id+' '+str(all_entroy)+'\n')
else:
# note: math.log takes the value first and the base second, so math.log(x, 2) is log2(x)
id_entroy = -pos_num / (pos_num + neg_num) * math.log(pos_num / (pos_num + neg_num), 2) - neg_num / (
pos_num + neg_num) * math.log(neg_num / (pos_num + neg_num), 2)
entroy=pos_num/num*(-val_list[0]/pos_num * math.log(val_list[0]/pos_num, 2)-val_list[2]/neg_num * math.log(val_list[2]/neg_num, 2))
#print (id+' '+str((all_entroy-entroy)/id_entroy))
res_file.write(id+' '+str((all_entroy-entroy)/id_entroy)+'\n')
# compute the new 93-dimensional discrete features for each sample; keep the order consistent
def create_appear_value():
res_file=open('eval_ins_93_dispersed.txt','w')
id_list=[]
list_file=open('feature_list.txt')
for line in list_file.readlines():
id =line.strip('\n')
id_list.append(id)
tmp=[str(id) for id in id_list]
res_file.write('label '+' '.join(tmp))
file = open('eval_ins')
for line in file.readlines():
click=line.split('\t')[1]
res = {}
res_list=[]
for id in id_list:
if line.find(':'+id+' ')>=0 or line.find(':'+id+'\n')>=0:
res[id]=1
else:
res[id]=0
for id in id_list:
res_list.append(res[id])
res_list1=[str(item) for item in res_list]
res_file.write(str(click)+' '+' '.join(res_list1)+'\n')
def create_ctr_dic():
file = open('./feature_ctr.txt')
res_file=open('./feature_ctr_dic.txt','w')
res={}
for line in file.readlines():
id=line.split(':')[0]
val=line.split(':')[1].split(' ')[0]
# strip the trailing '\n' at the end of the line
ctr = line.split(' ')[1].strip('\n')
if id not in res.keys():
res[id]={}
res[id][val]=ctr
else:
if val not in res[id].keys():
res[id][val] = ctr
res_file.write(json.dumps(res))
# compute the new 93-dimensional continuous features for each sample; keep the order consistent
def create_ctr_value():
# load the dictionary
dic=open('./../new_data/feature_ctr_dic.txt')
data=dic.readline()
ctr_dic=json.loads(data)
# keep the average CTR as the fill value for missing features
id_list = []
list_file = open('./../new_data/feature_list.txt')
for line in list_file.readlines():
id = line.strip('\n')
id_list.append(id)
res_file=open('./../new_data/eval_ins_continues.txt','w')
file=open('./../new_data/eval_ins')
for line in file.readlines():
res = defaultdict(list)
res_list=[]
click=line.split('\t')[1]
val_list=line.split('\t')[2].split(' ')
for item in val_list:
val=item.split(':')[0]
id=item.split(':')[1].strip('\n')
if val in ctr_dic[id].keys():
res[id].append(ctr_dic[id][val])
for id in id_list:
if len(res[id])==0:
res_list.append(ctr_dic[id]['avg_ctr'])
else:
tmp=[float(val.encode('utf-8')) for val in res[id]]
res_list.append(str(sum(tmp)/len(tmp)))
res_file.write(str(click)+' '+' '.join(res_list)+'\n')
# remove columns that are all 1s and record their indices
def delete_all_1_feature():
id_list = []
list_file = open('feature_list.txt')
for line in list_file.readlines():
id = line.strip('\n')
id_list.append(id)
index=0
res=[]
data=
|
pd.read_csv('./data/eval_ins_93_dispersed.txt',sep=' ')
|
pandas.read_csv
|
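# Hedged sketch of the "drop columns that are all 1s" step in vectorized form (the
# original loops over columns and records their indices); the frame is synthetic.
import pandas as pd

data = pd.DataFrame({"label": [1, 0, 1], "f1": [1, 1, 1], "f2": [0, 1, 0]})
all_one_cols = [c for c in data.columns if c != "label" and (data[c] == 1).all()]
reduced = data.drop(columns=all_one_cols)
print(all_one_cols, reduced.columns.tolist())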
from dash.dependencies import Input, Output, State
import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import dash_table
import numpy as np
import plotly.express as px
from apps.app import dash_app
from apps.template import app_layout
import datetime as dt
import requests
from tqdm import tqdm
import mysql.connector
from mysql.connector import errorcode
import sqlalchemy as db
import sys
engine = db.create_engine('mysql+mysqldb://root:[email protected]:3306/Kindred')
dash_app = dash_app
dash_app.layout = app_layout()
app = dash_app.server
@dash_app.callback(
[Output(component_id='get_data_table',component_property='data'),
Output(component_id='get_data_table',component_property='columns'),
Output(component_id='get_success',component_property='children'),
Output(component_id='get_store',component_property='data')],
Input(component_id='get_data',component_property='n_clicks'),
prevent_initial_call=True
)
def get_goal_data(n_clicks):
def prem_year_mapper():
return {
'PREM_20_21': 667,
'PREM_19_20':639,
'PREM_18_19':614,
'PREM_17_18':586,
'PREM_16_17':556,
}
def prem_team_mapper2():
return {
'Manchester City':14,
'Manchester United':3,
'Leicester City': 13,
'Chelsea':8,
'Liverpool':4,
'West Ham United':21,
'Tottenham Hotspur':19,
'Everton':10,
'Arsenal':5,
'Leeds United':12,
'Aston Villa':24,
'Wolverhampton Wanderers':63,
'Southampton':18,
'Burnley':54,
'Newcastle United':16,
'Crystal Palace':35,
'Brighton & Hove Albion':749,
'Fulham':55,
'West Bromwich Albion':64,
'Sheffield United':27
}
def get_data(year,team):
base_url = f'https://www.statbunker.com/competitions/TopGoalScorers?comp_id={year}&club_id={team}'
html = requests.get(base_url).content
df_list = pd.read_html(html)
return df_list
def main():
"""return dataframe of concated team data"""
res = pd.DataFrame()
years = prem_year_mapper()
teams = prem_team_mapper2()
for m,s in years.items():
for k,v in tqdm(teams.items()):
try:
data = get_data(s,v)[0]
data['team'] = [k for x in range(len(data))]
data['year'] = [m for x in range(len(data))]
res =
|
pd.concat([res,data],ignore_index=True)
|
pandas.concat
|
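# Sketch of the scrape-and-stack pattern above using synthetic frames; collecting
# the pieces in a list and calling pd.concat once is usually preferred over growing
# a DataFrame inside the loop. The year keys mirror prem_year_mapper.
import pandas as pd

pieces = []
for year in ["PREM_20_21", "PREM_19_20"]:
    data = pd.DataFrame({"Player": ["A", "B"], "Goals": [10, 7]})   # stand-in for read_html output
    data["year"] = year
    pieces.append(data)
res = pd.concat(pieces, ignore_index=True)
print(res)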
import pandas as pd
import numpy as np
from data import Data
import pickle
class Stats():
def __init__(self, data):
'''Accepts a Data instance or a pandas DataFrame.'''
if isinstance(data, Data):
self.df = data.df
elif isinstance(data, pd.DataFrame):
self.df = data
self.totalsparsity = self.calc_sparsity()
self.featuresparsity = self.calc_featuresparsity()
self.constants = self.constantvalues()
self.corrfeatures = self.correlation()
self.mean = self.calc_mean()
self.nonzero = self.calc_nonzero()
self.zero = self.calc_zero()
self.min = self.calc_min()
self.max = self.calc_max()
self.stddv = self.calc_stddv()
self.q1 = self.calc_q1()
self.median = self.calc_median()
self.q3 = self.calc_q3()
def calc_sparsity(self):
'''Calculate the sparsity of the selected data'''
zeroes = 0
for column in self.df.columns:
zeroes += np.count_nonzero(self.df[column] == 0)
return zeroes / (self.df.shape[0] * self.df.shape[1])
def calc_featuresparsity(self):
'''Calculate sparsity per feature'''
df = self.df
result =
|
pd.DataFrame()
|
pandas.DataFrame
|
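# A minimal sketch (synthetic frame) of the sparsity quantities the Stats class
# computes: the overall fraction of zeros and the per-column fraction.
import pandas as pd

df = pd.DataFrame({"a": [0, 1, 0, 2], "b": [0, 0, 0, 3]})
total_sparsity = (df == 0).to_numpy().mean()   # 5 zeros out of 8 values = 0.625
feature_sparsity = (df == 0).mean()            # per-column fraction of zeros
print(total_sparsity, feature_sparsity.to_dict())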
#!/usr/bin/env python
# coding: utf-8
import json
from datetime import datetime
import os
import pandas as pd
import numpy as np
def filenames(path):
"""
get file names from the json folder and derive the date and timestamp from each name
"""
files = os.listdir(path)
files_lst = []
for f in files:
dt = (f[12:20])
tm = (f[21:27])
dat = (f, dt, tm)
files_lst.append(dat)
return files_lst
def json_extract(json_data, i, col1, col2):
"""
extract two columns from json
"""
parsed1 = json_data['countries'][0]['cities'][0]['places'][i][col1]
parsed2 = json_data['countries'][0]['cities'][0]['places'][i][col2]
return parsed1, parsed2
def parse_json(file):
"""
read json file from folder
"""
path = (r'c:\users\steff\documents\datascience bootcamp\bike\json\\')
with open(path + file[0]) as f:
json_data = json.load(f)
return json_data
def unpacking_bike_numbers(column):
"""
getting unique list of bikes
"""
bike_unpack = pd.DataFrame(df[column].tolist(), index=df.index)
colnames = list(bike_unpack.columns.values)
all_bikes = []
all_bikes = bike_unpack[0]
for c in colnames:
data = bike_unpack[c]
|
pd.concat([all_bikes, data])
|
pandas.concat
|
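# Hedged sketch of the bike-number unpacking with synthetic data: a list-valued
# column can be expanded via pd.DataFrame(...tolist()) as above or, often more
# simply, with Series.explode. The column names here are assumptions.
import pandas as pd

df = pd.DataFrame({"place": ["A", "B"], "bike_numbers": [[101, 102], [103]]})
all_bikes = df["bike_numbers"].explode().drop_duplicates().reset_index(drop=True)
print(all_bikes.tolist())   # [101, 102, 103]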
# -*- coding: utf-8 -*-
"""
Tools for calculating the fatigue damage equivalent PSD. Adapted and
enhanced from the CAM versions.
"""
from types import SimpleNamespace
import itertools as it
import multiprocessing as mp
import numpy as np
import scipy.signal as signal
import pandas as pd
from pyyeti import cyclecount, srs, dsp
WN_ = None
SIG_ = None
ASV_ = None
BinAmps_ = None
Count_ = None
def _to_np_array(sh_arr):
return np.frombuffer(sh_arr[0]).reshape(sh_arr[1])
def _mk_par_globals(wn, sig, asv, binamps, count):
global WN_, SIG_, ASV_, BinAmps_, Count_
WN_ = _to_np_array(wn)
SIG_ = _to_np_array(sig)
ASV_ = _to_np_array(asv)
BinAmps_ = _to_np_array(binamps)
Count_ = _to_np_array(count)
def _dofde(args):
"""Utility routine for parallel processing"""
(j, (coeffunc, Q, dT, verbose)) = args
if verbose:
print(f"Processing frequency {WN_[j] / 2 / np.pi:8.2f} Hz", end="\r")
b, a = coeffunc(Q, dT, WN_[j])
resphist = signal.lfilter(b, a, SIG_)
ASV_[1, j] = abs(resphist).max()
ASV_[2, j] = np.var(resphist, ddof=1)
# use rainflow to count cycles:
ind = cyclecount.findap(resphist)
rf = cyclecount.rainflow(resphist[ind])
amp = rf["amp"]
count = rf["count"]
ASV_[0, j] = amp.max()
BinAmps_[j] *= ASV_[0, j]
# cumulative bin count:
for jj in range(BinAmps_.shape[1]):
pv = amp >= BinAmps_[j, jj]
Count_[j, jj] = np.sum(count[pv])
def fdepsd(
sig,
sr,
freq,
Q,
resp="absacce",
hpfilter=5.0,
winends="auto",
nbins=300,
T0=60.0,
rolloff="lanczos",
ppc=12,
parallel="auto",
maxcpu=14,
verbose=False,
):
r"""
Compute a fatigue damage equivalent PSD from a signal.
Parameters
----------
sig : 1d array_like
Base acceleration signal.
sr : scalar
Sample rate.
freq : array_like
Frequency vector in Hz. This defines the single DOF (SDOF)
systems to use.
Q : scalar > 0.5
Dynamic amplification factor :math:`Q = 1/(2\zeta)` where
:math:`\zeta` is the fraction of critical damping.
resp : string; optional
The type of response to base the damage calculations on:
========= =======================================
`resp` Damage is based on
========= =======================================
'absacce' absolute acceleration [#fde1]_
'pvelo' pseudo velocity [#fde2]_
========= =======================================
hpfilter : scalar or None; optional
High pass filter frequency; if None, no filtering is done.
If filtering is done, it is a 3rd order butterworth via
:func:`scipy.signal.lfilter`.
winends : None or 'auto' or dictionary; optional
If None, :func:`pyyeti.dsp.windowends` is not called. If
'auto', :func:`pyyeti.dsp.windowends` is called to apply a
0.25 second window or a 50 point window (whichever is smaller)
to the front. Otherwise, `winends` must be a dictionary of
arguments that will be passed to :func:`pyyeti.dsp.windowends`
(not including `signal`).
nbins : integer; optional
The number of amplitude levels at which to count cycles
T0 : scalar; optional
Specifies test duration in seconds
rolloff : string or function or None; optional
Indicate which method to use to account for the SRS roll off
when the minimum `ppc` value is not met. Either 'fft' or
'lanczos' seem the best. If a string, it must be one of these
values:
=========== ==========================================
`rolloff` Notes
=========== ==========================================
'fft' Use FFT to upsample data as needed. See
:func:`scipy.signal.resample`.
'lanczos' Use Lanczos resampling to upsample as
needed. See :func:`pyyeti.dsp.resample`.
'prefilter' Apply a high freq. gain filter to account
for the SRS roll-off. See
:func:`pyyeti.srs.preroll` for more
information. This option ignores `ppc`.
'linear' Use linear interpolation to increase the
points per cycle (this method is not recommended;
it's only here as a test case).
'none' Don't do anything to enforce the minimum
`ppc`. Note error bounds listed above.
None Same as 'none'.
=========== ==========================================
If a function, the call signature is:
``sig_new, sr_new = rollfunc(sig, sr, ppc, frq)``. Here, `sig`
is 1d, len(time). The last three inputs are scalars. For
example, the 'fft' function is (trimmed of documentation)::
def fftroll(sig, sr, ppc, frq):
N = sig.shape[0]
if N > 1:
curppc = sr/frq
factor = int(np.ceil(ppc/curppc))
sig = signal.resample(sig, factor*N, axis=0)
sr *= factor
return sig, sr
ppc : scalar; optional
Specifies the minimum points per cycle for SRS calculations.
See also `rolloff`.
====== ==================================
`ppc` Maximum error at highest frequency
====== ==================================
3 81.61%
4 48.23%
5 31.58%
10 8.14% (minimum recommended `ppc`)
12 5.67%
15 3.64%
20 2.05%
25 1.31%
50 0.33%
====== ==================================
parallel : string; optional
Controls the parallelization of the calculations:
========== ============================================
`parallel` Notes
========== ============================================
'auto' Routine determines whether or not to run
parallel.
'no' Do not use parallel processing.
'yes' Use parallel processing. Beware, depending
on the particular problem, using parallel
processing can be slower than not using it.
On Windows, be sure the :func:`fdepsd` call
is contained within:
``if __name__ == "__main__":``
========== ============================================
maxcpu : integer or None; optional
Specifies maximum number of CPUs to use. If None, it is
internally set to 4/5 of available CPUs (as determined from
:func:`multiprocessing.cpu_count`).
verbose : bool; optional
If True, routine will print some status information.
Returns
-------
A SimpleNamespace with the members:
freq : 1d ndarray
Same as input `freq`.
psd : pandas DataFrame; ``len(freq) x 5``
The amplitude and damage based PSDs. The index is `freq` and
the five columns are: [G1, G2, G4, G8, G12]
=========== ===============================================
Name Description
=========== ===============================================
G1 The "G1" PSD (Mile's or similar equivalent from
SRS); uses the maximum cycle amplitude instead
of the raw SRS peak for each frequency. G1 is
not a damage-based PSD.
G2 The "G2" PSD of reference [#fde1]_; G2 >= G1 by
bounding lower amplitude counts down to 1/3 of
the maximum cycle amplitude. G2 is not a
damage-based PSD.
G4, G8, G12 The damage-based PSDs with fatigue exponents of
4, 8, and 12
=========== ===============================================
peakamp : pandas DataFrame; ``len(freq) x 5``
The peak response of SDOFs (single DOF oscillators) using each
PSD as a base input. The index and the five columns are the
same as for `psd`. The peaks are computed from the Mile's
equation (or similar if using ``resp='pvelo'``). The peak
factor used is ``sqrt(2*log(f*T0))``. Note that the first
column is, by definition, the maximum cycle amplitude for each
SDOF from the rainflow count (G1 was calculated from
this). Typically, this should be very close to the raw SRS
peaks contained in the `srs` output but a little lower since
SRS just grabs peaks without consideration of the opposite
peak.
binamps : pandas DataFrame; ``len(freq) x nbins``
A DataFrame of linearly spaced amplitude values defining the
cycle counting bins. The index is `freq` and the columns are
integers 0 to ``nbins - 1``. The values in each row (for a
specific frequency SDOF), range from 0.0 up to
``peakamp.loc[freq, "G1"] * (nbins - 1) / nbins``. In other
words, each value is the left-side amplitude boundary for that
bin. The next column for this matrix would be ``peakamp.loc[:,
"G1"]``.
count : pandas DataFrame; ``len(freq) x nbins``
Summary matrix of the rainflow cycle counts. Size corresponds
with `binamps` and the count is cumulative; that is, the count
in each entry includes cycles at the `binamps` amplitude and
above. Therefore, first column has total cycles for the SDOF.
bincount : pandas DataFrame; ``len(freq) x nbins``
Non-cumulative version of `count`. In other words, the values
are the number of cycles in the bin, left-side inclusive. The
last bin includes the count of maximum amplitude cycles.
di_sig : pandas DataFrame; ``len(freq) x 3``
Damage indicators computed from SDOF responses to the `sig`
signal. Index is `freq` and columns are ['b=4', 'b=8',
'b=12']. The value for each frequency is the sum of the cycle
count for a bin times its amplitude to the b power. That is,
for the j-th frequency, the indicator is::
amps = binamps.loc[freq[j]]
counts = bincount.loc[freq[j]]
di = (amps ** b) @ counts # dot product of two vectors
Note that this definition is slightly different than equation
14 from [#fde1]_ (would have to divide by the frequency), but
the same as equation 10 of [#fde2]_ without the constant.
di_test_part : pandas DataFrame; ``len(freq) x 3``
Test damage indicator without including the variance factor
(see note). Same size as `di_sig`. Each value depends only on
the frequency, `T0`, and the fatigue exponent ``b``. The ratio
of a signal damage indicator to the corresponding partial test
damage indicator is equal to the variance of the single DOF
response to the test raised to the ``b / 2`` power::
var_test ** (b / 2) = di_sig / di_test_part
.. note::
If the variance factor (`var_test`) were included, then
the test damage indicator would be the same as
`di_sig`. This relationship is the basis of determining
the amplitude of the test signal.
var_test : pandas DataFrame; ``len(freq) x 3``
The required SDOF test response variances (see `di_test_part`
description). Same size as `di_sig`. The amplitude of the G4,
G8, and G12 columns of `psd` are computed from Mile's equation
(or similar) and `var_test`.
sig : 1d ndarray
The version of the input `sig` that is fed into the fatigue
damage algorithm. This would be after any filtering,
windowing, and upsampling.
sr : scalar
The sample rate of the output `sig`.
srs : pandas Series; length = ``len(freq)``
The raw SRS peaks version of the first column in `peakamp`. See
`peakamp`. Index is `freq`.
var : pandas Series; length = ``len(freq)``
Vector of the SDOF response variances. Index is `freq`.
parallel : string
Either 'yes' or 'no' depending on whether parallel processing
was used or not.
ncpu : integer
Specifies the number of CPUs used.
resp : string
Same as the input `resp`.
Notes
-----
Steps (see [#fde1]_, [#fde2]_):
1. Resample signal to higher rate if highest frequency would
have less than `ppc` points-per-cycle. Method of increasing
the sample rate is controlled by the `rolloff` input.
2. For each frequency:
a. Compute the SDOF base-drive response
b. Calculate `srs` and `var` outputs
c. Use :func:`pyyeti.cyclecount.findap` to find cycle peaks
d. Use :func:`pyyeti.cyclecount.rainflow` to count cycles
and amplitudes
e. Put counts into amplitude bins
3. Calculate `g1` based on cycle amplitudes from maximum
amplitude (step 2d) and Mile's (or similar) equation.
4. Calculate `g2` to bound `g1` & lower amplitude cycles with
high counts. Ignore amplitudes < ``Amax/3``.
5. Calculate damage indicators from data with b = 4, 8, 12
where b is the fatigue exponent.
6. By equating the theoretical damage from a `T0` second random
vibration test to the damage from the input signal (step 5),
solve for the required test response variances for b = 4, 8,
12.
7. Solve for `g4`, `g8`, `g12` from the results of step 6 using
the Mile's equation (or similar); equations are shown below.
No checks are done regarding the suitability of this method for
the input data. It is recommended to read the references [#fde1]_
[#fde2]_ and do those checks (such as plotting Count or Time
vs. Amp**2 and comparing to theoretical).
The Mile's equation (or similar) is used in this methodology to
relate acceleration PSDs to peak responses. If `resp` is
'absacce', it is the Mile's equation:
.. math::
\sigma_{absacce}(f) = \sqrt{\frac{\pi}{2} \cdot f \cdot Q
\cdot PSD(f)}
If `resp` is 'pvelo', the similar equation is:
.. math::
\sigma_{pvelo}(f) = \sqrt{\frac{Q \cdot PSD(f)}{8 \pi f}}
Those two equations assume a flat acceleration PSD. Therefore, it
is recommended to compare SDOF responses from flight data (SRS) to
SDOF VRS responses from the developed specification (see
:func:`pyyeti.srs.vrs` to compute the VRS response in the
absolute-acceleration case). This is to check for conservatism.
Instead of using 3 for peak factor (for 3-rms or 3-sigma), use
:math:`\sqrt{2 \ln(f \cdot T_0)}` for the peak factor (derived
below). Also, enveloping multiple specifications from multiple Q's
is worth considering.
Note that this analysis can be time consuming; the time is
proportional to the number of frequencies multiplied by the number
of time steps in the signal.
The derivation of the peak factor is as follows. For the special
case of narrow band noise where the instantaneous amplitudes
follow the Gaussian distribution, the resulting probability
density function for the peak amplitudes follow the Rayleigh
distribution [#fde3]_. The single DOF response to Gaussian input
is reasonably estimated as Gaussian narrow band. Let this response
have the standard deviation :math:`\sigma`. From the Rayleigh
distribution, the probability of a peak being greater than
:math:`A` is:
.. math::
Prob[peak > A] = e ^ {\frac{-A^2}{2 \sigma^2}}
To estimate the maximum peak for the response of a single DOF
system with frequency :math:`f`, find the amplitude that would be
expected to occur once within the allotted time
(:math:`T_0`). That is, set the product of the probability of a
cycle amplitude being greater than :math:`A` and the number of
cycles equal to 1.0, and then solve for :math:`A`.
The number of cycles of :math:`f` Hz is :math:`N = f \cdot T_0`.
Therefore:
.. math::
\begin{aligned}
Prob[peak > A] \cdot N &= 1.0 \\
e ^ {\frac{-A^2}{2 \sigma^2}} f \cdot T_0 &= 1.0 \\
\frac{-A^2}{2 \sigma^2} &= \ln(1.0) - \ln(f \cdot T_0) \\
\frac{A^2}{2 \sigma^2} &= \ln(f \cdot T_0) \\
A &= \sqrt{2 \ln(f \cdot T_0)} \sigma
\end{aligned}
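For example, with :math:`f = 30` Hz and :math:`T_0 = 60` s there are
:math:`N = 1800` cycles, giving a peak factor of
:math:`\sqrt{2 \ln(1800)} \approx 3.87`, somewhat above the customary
3-sigma factor.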
.. note::
In addition to the example shown below, this routine is
demonstrated in the pyYeti :ref:`tutorial`:
:doc:`/tutorials/fatigue`. There is also a link to the source
Jupyter notebook at the top of the tutorial.
References
----------
.. [#fde1] "Analysis of Nonstationary Vibroacoustic Flight Data
Using a Damage-Potential Basis"; <NAME>, <NAME>,
<NAME>; Journal of Spacecraft and Rockets, Vol 40, No. 5,
September-October 2003.
.. [#fde2] "Implementing the Fatigue Damage Spectrum and Fatigue
Damage Equivalent Vibration Testing"; <NAME>; 79th
Shock and Vibration Symposium, October 26 – 30, 2008.
.. [#fde3] Bendat, <NAME>., "Probability Functions for Random
Responses: Prediction of Peaks, Fatigue Damage, and
Catastrophic Failures", NASA Contractor Report 33 (NASA
CR-33), 1964.
See also
--------
:func:`scipy.signal.welch`, :func:`pyyeti.psd.psdmod`,
:func:`pyyeti.cyclecount.rainflow`, :func:`pyyeti.srs.srs`.
Examples
--------
Generate 60 second random signal to a pre-defined spec level,
compute the PSD several different ways and compare. Since it's 60
seconds, the damage-based PSDs should be fairly close to the input
spec level. The damage-based PSDs will be calculated with several
Qs and enveloped.
In this example, G2 envelopes G1, G4, G8, G12. This is not always
the case. For example, try TF=120; the damage-based curves go up
in order to fit equal damage in 60s.
One Count vs. Amp**2 plot is done for illustration.
.. plot::
:context: close-figs
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from pyyeti import psd, fdepsd
>>> import scipy.signal as signal
>>>
>>> TF = 60 # make a 60 second signal
>>> spec = np.array([[20, 1], [50, 1]])
>>> sig, sr, t = psd.psd2time(
... spec, ppc=10, fstart=20, fstop=50, df=1 / TF,
... winends=dict(portion=10), gettime=True)
>>>
>>> fig = plt.figure('Example', figsize=[9, 6])
>>> fig.clf()
>>> _ = plt.subplot(211)
>>> _ = plt.plot(t, sig)
>>> _ = plt.title(r'Input Signal - Specification Level = '
... '1.0 $g^{2}$/Hz')
>>> _ = plt.xlabel('Time (sec)')
>>> _ = plt.ylabel('Acceleration (g)')
>>> ax = plt.subplot(212)
>>> f, p = signal.welch(sig, sr, nperseg=sr)
>>> f2, p2 = psd.psdmod(sig, sr, nperseg=sr, timeslice=4,
... tsoverlap=0.5)
Calculate G1, G2, and the damage potential PSDs:
>>> psd_ = 0
>>> freq = np.arange(20., 50.1)
>>> for q in (10, 25, 50):
... fde = fdepsd.fdepsd(sig, sr, freq, q)
... psd_ = np.fmax(psd_, fde.psd)
>>> #
>>> _ = plt.plot(*spec.T, 'k--', lw=2.5, label='Spec')
>>> _ = plt.plot(f, p, label='Welch PSD')
>>> _ = plt.plot(f2, p2, label='PSDmod')
>>>
>>> # For plot, rename columns in DataFrame to include "Env":
>>> psd_ = (psd_
... .rename(columns={i: i + ' Env'
... for i in psd_.columns}))
>>> _ = psd_.plot.line(ax=ax)
>>> _ = plt.xlim(20, 50)
>>> _ = plt.title('PSD Comparison')
>>> _ = plt.xlabel('Freq (Hz)')
>>> _ = plt.ylabel(r'PSD ($g^{2}$/Hz)')
>>> _ = plt.legend(loc='upper left',
... bbox_to_anchor=(1.02, 1.),
... borderaxespad=0.)
>>> plt.tight_layout()
>>> fig.subplots_adjust(right=0.78)
.. plot::
:context: close-figs
Compare to theoretical bin counts @ 30 Hz:
>>> _ = plt.figure('Example 2')
>>> plt.clf()
>>> Frq = freq[np.searchsorted(freq, 30)]
>>> _ = plt.semilogy(fde.binamps.loc[Frq]**2,
... fde.count.loc[Frq],
... label='Data')
>>> # use flight time here (TF), not test time (T0)
>>> Amax2 = 2 * fde.var.loc[Frq] * np.log(Frq * TF)
>>> _ = plt.plot([0, Amax2], [Frq * TF, 1], label='Theory')
>>> y1 = fde.count.loc[Frq, 0]
>>> peakamp = fde.peakamp.loc[Frq]
>>> for j, lbl in enumerate(fde.peakamp.columns):
... _ = plt.plot([0, peakamp[j]**2], [y1, 1], label=lbl)
>>> _ = plt.title('Bin Count Check for Q=50, Freq=30 Hz')
>>> _ = plt.xlabel(r'$Amp^2$')
>>> _ = plt.ylabel('Count')
>>> _ = plt.legend(loc='best')
"""
sig, freq = np.atleast_1d(sig, freq)
if sig.ndim > 1 or freq.ndim > 1:
raise ValueError("`sig` and `freq` must both be 1d arrays")
if resp not in ("absacce", "pvelo"):
raise ValueError("`resp` must be 'absacce' or 'pvelo'")
(coeffunc, methfunc, rollfunc, ptr) = srs._process_inputs(
resp, "abs", rolloff, "primary"
)
if hpfilter is not None:
if verbose:
print(f"High pass filtering @ {hpfilter} Hz")
b, a = signal.butter(3, hpfilter / (sr / 2), "high")
# to try to get rid of filter transient at the beginning:
# - put a 0.25 second buffer on the front (length from
# looking at impulse response)
# - filter
# - chop off buffer
n = int(0.25 * sr)
sig2 = np.empty(n + sig.size)
sig2[:n] = sig[0]
sig2[n:] = sig
sig = signal.lfilter(b, a, sig2)[n:]
if winends == "auto":
sig = dsp.windowends(sig, min(int(0.25 * sr), 50))
elif winends is not None:
sig = dsp.windowends(sig, **winends)
mxfrq = freq.max()
curppc = sr / mxfrq
if rolloff == "prefilter":
sig, sr = rollfunc(sig, sr, ppc, mxfrq)
rollfunc = None
if curppc < ppc and rollfunc:
if verbose:
            print(
                f"Using {rolloff} method to increase sample rate (have "
                f"only {curppc} pts/cycle @ {mxfrq} Hz)"
            )
sig, sr = rollfunc(sig, sr, ppc, mxfrq)
ppc = sr / mxfrq
if verbose:
print(f"After interpolation, have {ppc} pts/cycle @ {mxfrq} Hz\n")
LF = freq.size
dT = 1 / sr
pi = np.pi
Wn = 2 * pi * freq
parallel, ncpu = srs._process_parallel(
parallel, LF, sig.size, maxcpu, getresp=False
)
# allocate RAM:
if parallel == "yes":
# global shared vars will be: WN, SIG, ASV, BinAmps, Count
WN = (srs.copyToSharedArray(Wn), Wn.shape)
SIG = (srs.copyToSharedArray(sig), sig.shape)
ASV = (srs.createSharedArray((3, LF)), (3, LF))
BinAmps = (srs.createSharedArray((LF, nbins)), (LF, nbins))
a = _to_np_array(BinAmps)
a += np.arange(nbins, dtype=float) / nbins
Count = (srs.createSharedArray((LF, nbins)), (LF, nbins))
args = (coeffunc, Q, dT, verbose)
gvars = (WN, SIG, ASV, BinAmps, Count)
func = _dofde
with mp.Pool(
processes=ncpu, initializer=_mk_par_globals, initargs=gvars
) as pool:
for _ in pool.imap_unordered(func, zip(range(LF), it.repeat(args, LF))):
pass
ASV = _to_np_array(ASV)
Amax = ASV[0]
SRSmax = ASV[1]
Var = ASV[2]
Count = _to_np_array(Count)
BinAmps = a
else:
Amax = np.zeros(LF)
SRSmax = np.zeros(LF)
Var = np.zeros(LF)
BinAmps = np.zeros((LF, nbins))
BinAmps += np.arange(nbins, dtype=float) / nbins
Count = np.zeros((LF, nbins))
# loop over frequencies, calculating responses & counting
# cycles
for j, wn in enumerate(Wn):
if verbose:
print(f"Processing frequency {wn / 2 / pi:8.2f} Hz", end="\r")
b, a = coeffunc(Q, dT, wn)
resphist = signal.lfilter(b, a, sig)
SRSmax[j] = abs(resphist).max()
Var[j] = np.var(resphist, ddof=1)
# use rainflow to count cycles:
ind = cyclecount.findap(resphist)
rf = cyclecount.rainflow(resphist[ind])
amp = rf["amp"]
count = rf["count"]
Amax[j] = amp.max()
BinAmps[j] *= Amax[j]
# cumulative bin count:
for jj in range(nbins):
pv = amp >= BinAmps[j, jj]
Count[j, jj] = np.sum(count[pv])
if verbose:
print()
print("Computing outputs G1, G2, etc.")
# calculate non-cumulative counts per bin:
BinCount = np.hstack((Count[:, :-1] - Count[:, 1:], Count[:, -1:]))
# for calculating G2:
G2max = Amax ** 2
for j in range(LF):
pv = BinAmps[j] >= Amax[j] / 3 # ignore small amp cycles
if np.any(pv):
x = BinAmps[j, pv] ** 2
x2 = G2max[j]
y = np.log(Count[j, pv])
y1 = np.log(Count[j, 0])
g1y = np.interp(x, [0, x2], [y1, 0])
tantheta = (y - g1y) / x
k = np.argmax(tantheta)
if tantheta[k] > 0:
# g2 line is higher than g1 line, so find BinAmps**2
# where log(count) = 0; ie, solve for x-intercept in
# y = m x + b; (x, y) pts are: (0, y1), (x[k], y[k]):
G2max[j] = x[k] * y1 / (y1 - y[k])
# calculate flight-damage indicators for b = 4, 8 and 12:
b4 = 4
b8 = 8
b12 = 12
Df4 = np.zeros(LF)
Df8 = np.zeros(LF)
Df12 = np.zeros(LF)
for j in range(LF):
Df4[j] = (BinAmps[j] ** b4).dot(BinCount[j])
Df8[j] = (BinAmps[j] ** b8).dot(BinCount[j])
Df12[j] = (BinAmps[j] ** b12).dot(BinCount[j])
N0 = freq * T0
lnN0 = np.log(N0)
if resp == "absacce":
G1 = Amax ** 2 / (Q * pi * freq * lnN0)
G2 = G2max / (Q * pi * freq * lnN0)
# calculate test-damage indicators for b = 4, 8 and 12:
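        # (The closed-form factors below match the Rayleigh peak expectation:
        # N0 * 2**(b/2) * (b/2)! = 8*N0, 384*N0, 46080*N0 is the untruncated
        # expectation of (A/sigma)**b summed over N0 cycles, and the
        # parenthesized terms truncate the distribution at Abar = 2*ln(N0).)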
Abar = 2 * lnN0
Abar2 = Abar ** 2
Dt4 = N0 * 8 - (Abar2 + 4 * Abar + 8)
sig2_4 = np.sqrt(Df4 / Dt4)
G4 = sig2_4 / ((Q * pi / 2) * freq)
Abar3 = Abar2 * Abar
Abar4 = Abar2 * Abar2
Dt8 = N0 * 384 - (Abar4 + 8 * Abar3 + 48 * Abar2 + 192 * Abar + 384)
sig2_8 = (Df8 / Dt8) ** (1 / 4)
G8 = sig2_8 / ((Q * pi / 2) * freq)
Abar5 = Abar4 * Abar
Abar6 = Abar4 * Abar2
Dt12 = N0 * 46080 - (
Abar6
+ 12 * Abar5
+ 120 * Abar4
+ 960 * Abar3
+ 5760 * Abar2
+ 23040 * Abar
+ 46080
)
sig2_12 = (Df12 / Dt12) ** (1 / 6)
G12 = sig2_12 / ((Q * pi / 2) * freq)
Gmax = np.sqrt(np.vstack((G4, G8, G12)) * (Q * pi * freq * lnN0))
else:
G1 = (Amax ** 2 * 4 * pi * freq) / (Q * lnN0)
G2 = (G2max * 4 * pi * freq) / (Q * lnN0)
Dt4 = 2 * N0
sig2_4 = np.sqrt(Df4 / Dt4)
G4 = sig2_4 * ((4 * pi / Q) * freq)
Dt8 = 24 * N0
sig2_8 = (Df8 / Dt8) ** (1 / 4)
G8 = sig2_8 * ((4 * pi / Q) * freq)
Dt12 = 720 * N0
sig2_12 = (Df12 / Dt12) ** (1 / 6)
G12 = sig2_12 * ((4 * pi / Q) * freq)
Gmax = np.sqrt(np.vstack((G4, G8, G12)) * (Q * lnN0) / (4 * pi * freq))
# for output, scale the damage indicators:
Dt4 *= 4 # 2 ** (b/2)
Dt8 *= 16
Dt12 *= 64
# assemble outputs:
columns = ["G1", "G2", "G4", "G8", "G12"]
lcls = locals()
dct = {k: lcls[k] for k in columns}
Gpsd = pd.DataFrame(dct, columns=columns, index=freq)
Gpsd.index.name = "Frequency"
index = Gpsd.index
G2max = np.sqrt(G2max)
Gmax = pd.DataFrame(np.vstack((Amax, G2max, Gmax)).T, columns=columns, index=index)
BinAmps = pd.DataFrame(BinAmps, index=index)
Count = pd.DataFrame(Count, index=index)
BinCount = pd.DataFrame(BinCount, index=index)
Var =
|
pd.Series(Var, index=index)
|
pandas.Series
|
"""Display classification results of trained model.
Results:
- Training report: Table containing results on training data
- Test report: Table containing results on test data
- Training CM: Confusion matrix on training data
- Test CM: Confusion matrix on test data
"""
# %% Imports
# Standard system imports
from pathlib import Path
# Related third party imports
from bokeh.layouts import column, row
from bokeh.models import ColumnDataSource, Div
import joblib
import pandas as pd
from sklearn.metrics import classification_report
# Local application/library specific imports
from bokeh_server.results.plots.confusion_matrix import create_confusion_matrix
from bokeh_server.results.plots.report_table import create_report_table
# %% Define classification results
def classification_results():
"""Return layout containing classification results."""
# -------------------------------------------------------------------------
# Setup
# -------------------------------------------------------------------------
# Load data and model from volume
model_filename = Path('src/bokeh_server/data/model')
data_filename = Path('src/bokeh_server/data/train_data')
model = joblib.load(model_filename)
data = joblib.load(data_filename)
X_train = data['X_train']
X_test = data['X_test']
y_train = data['y_train']
y_test = data['y_test']
training_settings = data['training_settings']
dataset = training_settings['dataset']
# Classification report on training data
y_pred_train = model.predict(X_train)
train_report = classification_report(y_train, y_pred_train,
output_dict=True, zero_division=0)
train_df = pd.DataFrame(train_report).transpose()
train_df.reset_index(inplace=True) # Add index as a column to dataframe
train_df = train_df.round(2) # Round values to two decimal places
train_source = ColumnDataSource(train_df)
# Classification report on test data
y_pred_test = model.predict(X_test)
test_report = classification_report(y_test, y_pred_test, output_dict=True,
zero_division=0)
test_df =
|
pd.DataFrame(test_report)
|
pandas.DataFrame
|
#import libraries
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
#load data
df = pd.read_csv('data.csv')
df['created_at']=
|
pd.to_datetime(df['created_at'])
|
pandas.to_datetime
|
"""Estimate models with the method of simulated moments (MSM).
The method of simulated moments, developed by [1]_, [2]_, and [3]_, is an estimation
technique in which the distance between the moments of the actual data and the moments
implied by the model parameters is minimized.
References:
.. [1] <NAME>. (1989). A method of simulated moments for estimation of discrete
response models without numerical integration. Econometrica: Journal of the
Econometric Society, 995-1026.
.. [2] <NAME>., & <NAME>. (1991). Simulation estimation of time-series models.
Journal of Econometrics, 47(2-3), 197-205.
.. [3] <NAME>., & <NAME>. (1993). Simulated Moments Estimation of Markov Models
of Asset Prices. Econometrica, 61(4), 929-952.
"""
import functools
import numpy as np
import pandas as pd
def get_msm_func(
simulate,
calc_moments,
empirical_moments,
replace_nans,
weighting_matrix=None,
additional_outputs=None,
):
"""Get the msm function.
Args:
simulate (callable): Function which accepts parameters and returns simulated
data.
calc_moments (callable or dict): Function(s) used to calculate simulated
moments. If it is a dictionary, it must have the same keys as
empirical_moments
empirical_moments (pandas.DataFrame or pandas.Series or dict): One pandas
object or a dictionary of pandas objects with empirical moments.
        replace_nans (callable or list): Function(s) specifying how to handle NaNs in
simulated_moments. Must match structure of empirical_moments. Exception: If
only one replacement function is specified, it will be used on all sets of
simulated moments.
weighting_matrix (numpy.ndarray): Square matrix of dimension (NxN) with N
denoting the number of empirical_moments. Used to weight squared moment
errors.
additional_outputs (dict or None): Dictionary of functions. Each function is
evaluated on the output of the simulate function and the result is
saved in the output dictionary of the msm function.
Returns:
msm_func (callable): MSM function where all arguments except the parameter
vector are set.
"""
if weighting_matrix is None:
weighting_matrix = get_diag_weighting_matrix(empirical_moments)
if not _is_diagonal(weighting_matrix):
raise ValueError("weighting_matrix must be diagonal.")
empirical_moments = _harmonize_input(empirical_moments)
calc_moments = _harmonize_input(calc_moments)
# If only one replacement function is given for multiple sets of moments, duplicate
# replacement function for all sets of simulated moments.
if callable(replace_nans):
replace_nans = {k: replace_nans for k in empirical_moments}
replace_nans = _harmonize_input(replace_nans)
if 1 < len(replace_nans) < len(empirical_moments):
raise ValueError(
"Replacement functions can only be matched 1:1 or 1:n with sets of "
"empirical moments."
)
elif len(replace_nans) > len(empirical_moments):
raise ValueError(
"There are more replacement functions than sets of empirical moments."
)
else:
pass
if len(calc_moments) != len(empirical_moments):
raise ValueError(
"Number of functions to calculate simulated moments must be equal to "
"the number of sets of empirical moments."
)
if additional_outputs is not None:
if not _is_dict_of_callables(additional_outputs):
raise ValueError("additional_outputs must be a dict of callables.")
else:
additional_outputs = {}
    invalid_keys = {
        "value",
        "root_contributions",
        "empirical_moments",
        "simulated_moments",
    }
invalid_present = invalid_keys.intersection(additional_outputs)
if invalid_present:
raise ValueError("Invalid keys in additional_outputs: {invalid}")
msm_func = functools.partial(
_msm,
simulate=simulate,
calc_moments=calc_moments,
empirical_moments=empirical_moments,
replace_nans=replace_nans,
weighting_matrix=weighting_matrix,
additional_outputs=additional_outputs,
)
return msm_func
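# A minimal usage sketch (hypothetical `simulate` and `calc_moments`, not part of
# this module): the criterion returned by `get_msm_func` only needs the params.
#
#     def simulate(params):
#         rng = np.random.default_rng(0)
#         return pd.Series(rng.normal(params["mu"], params["sigma"], 1000))
#
#     def calc_moments(sim):
#         return pd.Series({"mean": sim.mean(), "std": sim.std()})
#
#     empirical_moments = pd.Series({"mean": 1.0, "std": 2.0})
#     msm_func = get_msm_func(
#         simulate=simulate,
#         calc_moments=calc_moments,
#         empirical_moments=empirical_moments,
#         replace_nans=lambda x: x.fillna(0),
#     )
#     crit = msm_func({"mu": 1.1, "sigma": 1.9})  # crit["value"] is the weighted SSE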
def _msm(
params,
simulate,
calc_moments,
empirical_moments,
replace_nans,
weighting_matrix,
additional_outputs,
):
"""The MSM criterion function.
This function will be prepared by :func:`get_msm_func` and have all its arguments
except `params` attached to it.
"""
sim_out = simulate(params)
simulated_moments = {name: func(sim_out) for name, func in calc_moments.items()}
simulated_moments = {
name: sim_mom.reindex_like(empirical_moments[name])
for name, sim_mom in simulated_moments.items()
}
simulated_moments = {
name: replace_nans[name](sim_mom) for name, sim_mom in simulated_moments.items()
}
flat_empirical_moments = _flatten_index(empirical_moments)
flat_simulated_moments = _flatten_index(simulated_moments)
moment_errors = flat_simulated_moments - flat_empirical_moments
root_contribs = np.sqrt(np.diagonal(weighting_matrix)) * moment_errors
value = np.sum(root_contribs ** 2)
out = {
"value": value,
"root_contributions": root_contribs,
"empirical_moments": empirical_moments,
"simulated_moments": simulated_moments,
}
for name, func in additional_outputs.items():
out[name] = func(sim_out)
return out
def get_diag_weighting_matrix(empirical_moments, weights=None):
"""Create a diagonal weighting matrix from weights.
Args:
empirical_moments (pandas.DataFrame or pandas.Series or dict or list): Contains
the empirical moments calculated for the observed data. Moments should be
saved to pandas.DataFrame or pandas.Series that can either be passed to the
function directly or as items of a list or dictionary.
weights (pandas.DataFrame or pandas.Series or dict or list): Contains weights
(usually variances) of empirical moments. Must match structure of
empirical_moments i.e. if empirical_moments is a list of
            :class:`pandas.DataFrame`, weights must be a list of pandas.DataFrames as well
where each DataFrame entry contains the weight for the corresponding moment
in empirical_moments.
Returns:
(numpy.ndarray): Array contains a diagonal weighting matrix.
"""
empirical_moments = _harmonize_input(empirical_moments)
# Use identity matrix if no weights are specified.
if weights is None:
flat_weights = _flatten_index(empirical_moments)
flat_weights[:] = 1
# Harmonize input weights.
else:
weights = _harmonize_input(weights)
# Reindex weights to ensure they are assigned to the correct moments in
# the msm function and convert scalars to pandas objects
cleaned = {}
for name, weight in weights.items():
if np.isscalar(weight):
nonscalar = empirical_moments[name].copy(deep=True)
nonscalar[:] = weight
cleaned[name] = nonscalar
else:
cleaned[name] = weight.reindex_like(empirical_moments[name])
flat_weights = _flatten_index(cleaned)
return np.diag(flat_weights)
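# A minimal sketch of the diagonal weighting (hypothetical moments): weights are
# reindexed against the empirical moments and placed on the diagonal.
#
#     moments = pd.Series({"mean": 1.0, "std": 2.0})
#     weights = pd.Series({"mean": 0.5, "std": 0.25})
#     W = get_diag_weighting_matrix(moments, weights)  # 2x2 diagonal of the weights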
def get_flat_moments(empirical_moments):
"""Compute the empirical moments flat indexes.
Args:
empirical_moments (pandas.DataFrame or pandas.Series or dict or list):
Containing pandas.DataFrame or pandas.Series. Contains the empirical moments
calculated for the observed data. Moments should be saved to
pandas.DataFrame or pandas.Series that can either be passed to the function
directly or as items of a list or dictionary.
Returns:
flat_empirical_moments (pandas.DataFrame): Vector of empirical_moments with flat
index.
"""
empirical_moments = _harmonize_input(empirical_moments)
flat_empirical_moments = _flatten_index(empirical_moments)
return flat_empirical_moments
def _harmonize_input(data):
"""Harmonize different types of inputs by turning all inputs into dicts."""
if isinstance(data, (pd.DataFrame, pd.Series)) or callable(data):
data = {0: data}
elif isinstance(data, dict):
pass
else:
raise ValueError(
"Moments must be pandas objects or dictionaries of pandas objects."
)
return data
def _flatten_index(data):
"""Flatten the index as a combination of the former index and the columns."""
data_flat = []
for name, series_or_df in data.items():
series_or_df = series_or_df.copy(deep=True)
series_or_df.index = series_or_df.index.map(str)
# Unstack DataFrames and Series to add columns/Series name to index.
if isinstance(series_or_df, pd.DataFrame):
df = series_or_df.rename(columns=lambda x: f"{name}_{x}")
# Series without a name are named using a counter to avoid duplicate indexes.
elif isinstance(series_or_df, pd.Series):
df = series_or_df.to_frame(name=f"{name}")
else:
raise NotImplementedError
# Columns to the index.
df = df.unstack()
df.index = df.index.to_flat_index().str.join("_")
data_flat.append(df)
return
|
pd.concat(data_flat)
|
pandas.concat
|
# Import package
import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
from collections import Counter
import networkx as nx
import sys
import statistics
import datetime
from scipy.signal import find_peaks
from scipy.ndimage import gaussian_filter1d
class DoaProcessor(object):
"""
    This class provides functions to process DoA (Direction of Arrival) data files.
    A DoA data file contains the directions of sound detected by the microphone array.
"""
def __init__(self,datafile,n):
"""
        :param datafile: Name of the DoA data file.
        :type datafile: str.
        :param n: Number of speakers.
        :type n: int.
"""
# Setting name of the log file
self.file_name = datafile
# Number of Speaker
self.n = n
# Dictionary to store direction for each user
if n==2:
self.Directions = {1:[],2:[]}
elif n==3:
self.Directions = {1:[],2:[],3:[]}
elif n==4:
self.Directions = {1:[],2:[],3:[],4:[]}
else:
print('PyDoA support groups with size 2,3,4. Please specify a valid group size.')
# Open the audio log file with three columns group, timestamp, degree
self.file = pd.read_csv(self.file_name,names=["group","timestamp","degree"])
print("PyDoA Library")
print('[',datetime.datetime.now(),']','Initialized')
print('[',datetime.datetime.now(),']','File loaded successfully')
def getGroups(self):
"""
This function extracts group information (e.g. number of groups, labels of groups)
:returns: list -- List of group labels
"""
return self.file.group.unique()
def getGroupFrame(self,group):
"""
This function extracts DoA data for a specific group.
:param group: Group label.
:type group: str
:returns: Pandas DataFrame -- Dataframe with columns timestamp, directions
"""
# Using pandas loc function to filter data
temp_df = self.file.loc[self.file["group"]==group,:]
# return the dataframe
return temp_df
def plotDegreeDistribution(self,group='group-1'):
"""
        This function plots the frequency distribution of degrees for the specified group.
        It simply counts the degree frequencies and plots a bar graph.
:param group: Label of group.
:type group: str
"""
selfdf = self.file.copy()
# Extract data for specified group
temp_df = selfdf.loc[selfdf['group']==group,:]
# Count the frequency of each degree in the file
degree_frequency = Counter(temp_df['degree'])
# Plot the bar graph for degree frequency if plot = True
plt.bar(degree_frequency.keys(),degree_frequency.values(),width=10)
plt.xlabel('Direction of Arrival')
plt.ylabel('Frequency')
        plt.title('Frequency distribution of DoA (Direction of Arrival) for '+group)
plt.show()
def setDegreeForSpeaker(self,degrees):
"""
        This function sets the degree for each speaker. For instance, if speakers are sitting at particular degrees (e.g. speaker-1 at 45 degrees, speaker-2 at 135, etc.), those degrees can be used to differentiate among speakers.
:param degrees: List of degree having n items.
:type degrees: List
"""
if self.n == len(degrees):
for index in range(self.n):
self.Directions[index+1] = degrees[index]
else:
            print('Mismatch between number of speakers and number of specified degrees')
def getPeakDegree(self,group='group-1',bins=36,sigma=2.0):
"""
        This function finds the peaks of the degree distribution.
        It uses a Gaussian kernel to smooth the degree distribution and then applies a peak-finding algorithm to detect peaks.
:param group: Group label.
:type group: str
:param bins: Bin size
:type bins: int
:param sigma: Sigma for Gaussian kernel
:type sigma: double
:returns: List -- list of peak degrees
"""
grp = self.getGroupFrame(group)
series = grp['degree']
count, division = np.histogram(series, bins=bins)
count = gaussian_filter1d(count,sigma)
peaks, props = find_peaks(count)
plt.figure()
plt.plot(division[:-1], count)
plt.xlabel('Direction of Arrival')
plt.ylabel('Frequency')
plt.show()
return division[peaks]
def getHighestNdegrees(self,sep=60,group='group-1'):
"""
        This function searches through the directions for the specified group and extracts the n directions with the highest frequencies.
        It simply counts the degree frequencies and returns n degrees that are separated by a minimum angular distance.
        :param sep: Distance between speakers in degrees. The default value is 360/n.
:type sep: int
:param group: Group label.
:type group: str
:returns: List -- list containing n degrees with highest frequencies
"""
try:
            # Override `sep` with a value based on the group size (360/n minus a 30 degree margin)
sep = 360/self.n - 30
selfdf = self.file.copy()
# Extract data for specified group
temp_df = selfdf.loc[selfdf['group']==group,:]
# Count the frequency of each degree in the file
degree_frequency = Counter(temp_df['degree'])
#print(degree_frequency)
# Sort the degrees on the basis of their counted frequency
sorted_deg_freq = sorted(degree_frequency.items(),key=lambda x:x[1])
            # Take the eight degrees with the highest frequencies
highest_degrees = sorted_deg_freq[-8:]
#print('Highest 10 degrees',highest_degrees)
# Sort the order of highest degrees and return
highest_degrees = sorted(highest_degrees,key=lambda x:x[0])
#print('Highest 10 degrees',highest_degrees)
high_four_degrees = []
# Get four highest degrees
for item in highest_degrees:
                # If the list is empty
if len(high_four_degrees)==0:
high_four_degrees.append(item[0])
else:
# Check whether degrees are not close to already added degree
if abs(item[0]-high_four_degrees[-1])%360 > sep:
# if not then add it to the list
high_four_degrees.append(item[0])
else:
# If degree is close to already added degree then add the one with higher frequency
if item[1]>degree_frequency[high_four_degrees[-1]]:
high_four_degrees.remove(high_four_degrees[-1])
high_four_degrees.append(item[0])
else:
pass
# Return the four most occuring degrees
return high_four_degrees[-4:]
except Exception as e:
print('Exception:',sys.exc_info())
def assignUserLabel(self,group='group-1'):
"""
This function assigns the user identifiers on the basis of direction of arrival of sound.
        This function assumes that participants are sitting clockwise around the ReSpeaker. The first participant in clockwise fashion is considered user-1, and so on.
:param group: Group label.
:type group: str
:returns: DataFrame -- Pandas Dataframe with column users for each detected direction
"""
# Get four highly occuring direction of arrival
#highDegrees = self.getHighestFourDegrees(plot=False,group=group)
#highDegrees = [45,135,225,315]
highDegrees = self.Directions.values()
# Considering degrees in ascending order corresponds to user1 to user4
users = np.array([item for item in highDegrees])
# This function takes the degree and check to which highly occruing degree it is more close to.
def assign_label(degree):
            # Compute the absolute difference between the highly occurring degrees and the argument
user_diff = np.absolute(users-degree)
# Identifying the minimum difference
min_diff = np.min(user_diff)
# Getting the indices of minimum element
indices = np.where(user_diff==min_diff)
            # Getting the first index (np.where() returns a tuple of arrays, so we need to select the first element)
            # Also, np.where() returns indices (which start from 0, whereas user identifiers start from 1), so we add 1 to the index to get the user identifier
ind = indices[0]+1
            # Return the user identifier corresponding to the degree (parameter)
return ind[0]
# get dataframe for specified group
temp_df = self.getGroupFrame(group)
# Add one column to the pandas dataframe with name 'users' which contains corresponding user identifier
temp_df.loc[:,'users'] = temp_df['degree'].map(assign_label)
return temp_df
def getSpeakingTime(self,plot,time='sec',granularity=200,group='group-1'):
"""
This function computes the speaking time for each user.
:param plot: Flag for plotting speaking time.
:type plot: Bool
        :param time: Time resolution for computing speaking time.
:type time: str
Possible values 'sec','min','hour'
:param granularity: Duration of each detected direction
:type granularity: int
:param group: Group Label.
:type group: str
:returns: List -- list containing total speaking time for each user.
"""
# get dataframe for the specified group
spk_df = self.assignUserLabel(group)
# Count the frequency for each user
speech_count = spk_df.groupby('users').count()
# Create a dictionary for storing speaking time for each user and initialize it with zero
user_speak_time = dict()
for i in range(self.n):
user_speak_time[i+1]=0
# Iterate for each user
for i in range(self.n):
# If time unit is sec then multiply the frequency with 200/1000. As each entry represent user speaking behavior on scale of 200 ms.
# To convert it into second, we need to multiply the frequency count for specific user with 200/1000
if time=='sec':
user_speak_time[i+1] = speech_count.loc[i+1,'degree']*float(200/1000)
# Same as above but for time unit minute
elif time=='min':
user_speak_time[i+1] = speech_count.loc[i+1,'degree']*float(200/(60*1000))
# For time unit hour
elif time=='hour':
user_speak_time[i+1] = speech_count.loc[i+1,'degree']*float(200/(60*60*1000))
if plot:
plt.figure()
plt.bar(user_speak_time.keys(),user_speak_time.values())
plt.ylabel('Time(%s)' % time)
plt.xlabel('Users')
xlabels = []
for i in range(self.n):
xlabels.append('user-%d'%(i+1))
plt.xticks(np.arange(self.n)+1,xlabels)
plt.title('Speaking time for each user')
plt.show()
return user_speak_time
def generateEdgeFile(self,group='group-1',threshold=3,edge_filename='edge.txt'):
"""
        This function generates a file containing edges in the form (i,j), where i and j represent user-i and user-j, and the sequence of edges represents their speaking order.
        If user a speaks after user b, it will be recorded as the edge (b,a).
:param group: Group Label
:type group: str
:param threshold: This parameter specify the threshold to consider a valid speaking activity. For instance, if direction is detected for every 200 ms then a threshold=1 implies that if a user has five consecutive entries then it will be considered as speaking activity.
:type threshold: int
:param edge_filename: Name of the newly generated edge file.
:type edge_filename: str
:returns: List -- list containing item in the form (i,j) which represent edge between user-i and user-j.
"""
# dataframe for specified group
edge_file = self.assignUserLabel(group=group)
        # Getting the sequence of speaking turns
sequence = edge_file['users'].to_numpy()
        # Create an empty data frame with columns users and conti_frequency. Here, conti_frequency represents the number of consecutive occurrences of a particular user.
        # For instance, if a user speaks there will be many entries for that user, because one entry is recorded every 200 ms.
        # With the default threshold, at least 4 consecutive entries are treated as a speaking activity.
df = pd.DataFrame(columns=['users','conti_frequency'])
# This function will count the number of continuous occurence
def count_conti_occurence(index):
# Set count to 0
count=0
# Starts from the given index
j = index
# Loop to iterate over the users sequence
while j<len(sequence):
# Increase the count if the element at given index (parameter) is same as the iterated element
if sequence[j] == sequence[index]:
count +=1
# If mismatch found, break the loop
else:
break
# Increases j
j +=1
# Return number of count for sequence[index] and index of first next occurence of different element.
return count,(j-index)
# Set i to 0 for the Loop
i = 0
# Iterate for entire sequence of users
while i < len(sequence):
# Call count_conti_occurence() function
count,diff = count_conti_occurence(i)
# Add continuous frequency of current user (sequence[i]) to the dataframe
df = df.append({'users':sequence[i],'conti_frequency':count},ignore_index=True)
# Move to next different element
i = i + diff
        # We consider it a speaking activity if there are more than `threshold` consecutive entries for one particular user
process_df = df.where(df.conti_frequency>threshold)
# Deleting other users with less than 4 consecutive entries
process_df.dropna(axis=0,how='any',inplace=True)
# Resultant sequence to generate edge file
processed_sequence = process_df['users'].to_numpy()
# Open a file to write the edges
file = open(edge_filename,'w')
# Create an empty list
edge_list = list()
# Create two variable node1 and node2 and set them to zero.
node1=node2=0
# Iterate over resultant users sequences
for i in range(len(processed_sequence)):
# For the first element
if node1==0:
# set node1 to the first element
node1=processed_sequence[i]
# For rest of the elements
else:
# Set the current element to node2
node2=processed_sequence[i]
if node1 != node2:
# Append the edge node1, node2 to the edge list
edge_list.append((node1,node2))
# Print the edge
#print("{},{}".format(node1,node2))
# Write the edge in the file
file.write("{},{}\n".format(node1,node2))
# Set the node1 as node2
node1=node2
# Close the file
file.close()
return edge_list
def drawNetwork(self,group='group-1'):
"""
This function draws an interaction network from the edge file generated from speaker's speaking order.
This network is drawn as weighted graph where the thickness of edge represents the frequency of interaction.
:param group: Group label.
:type group: str
"""
# Generate the edge edge_list
edge_list = self.generateEdgeFile(group)
# Get speaking time for each user
sp_beh = self.getSpeakingTime(plot=False,group=group)
# Compute average speaking time
sp_avg = sum(sp_beh.values())/float(len(sp_beh.values()))
# Create an empty graph using networkx library
G = nx.Graph()
# Iterate over edge list
for edge in edge_list:
# Check if the current edge already exist or not
if G.has_edge(edge[0],edge[1]):
# Get the weight of that edge
w = G[edge[0]][edge[1]]['weight']
# Remove it from the graph
G.remove_edge(edge[0],edge[1])
# Add it again with updated weight
G.add_edge(edge[0],edge[1],weight=w+.15)
else:
# If edge doesn't exist in the graph then add it with weight .5
G.add_edge(edge[0],edge[1],weight=.5)
# Layout for showing the network
pos = nx.spring_layout(G)
# Get the edges from the graph
edges = G.edges()
# Get the weight for every edge
weights = [G[u][v]['weight'] for u,v in edges]
# Generate the colormap for the each node on the basis of their speaking time
color_map = []
sizes=[]
sp_total = sum(sp_beh.values())
sp_std = statistics.stdev(sp_beh.values())
# iterate for each node in the graph
for node in G:
size = float(sp_beh[node]*10)/sp_total
sizes.append( 400 * (size+1))
dev = float(sp_beh[node]-sp_total)/sp_std
# Assign red color if speaking time is below average
if sp_beh[node] <= sp_avg:
color_map.append('red')
# Assign green for above average
else:
color_map.append('lawngreen')
#labels = {1:'User-1',2:'Pankaj',3:'Reet',4:'Tobias'}
# Draw the network
        nx.draw(G, pos, node_size=sizes, node_color=color_map, edgelist=edges, width=weights, with_labels=True)
# Show the network
plt.show()
def generateWindowWiseSpeakingTime(self,window_size="30S",time='sec',group='group-1'):
"""
        This function generates a speaking-time metric for the total duration by dividing it into windows of the specified size and computing the speaking time for each window.
:param window_size: Size of time window.
:type window_size: str
Possible values
        :param time: Time resolution of the computed speaking time.
:type time: str
Possible values sec, min, hour
:param group: Group label.
:type group: str
:returns: DataFrame -- Dataframe with columns start_time, end_time, and speaking time for each user in that window.
"""
# get group's dataframe
df1=self.assignUserLabel(group)
# Setting timestamp as datetime
df1['timestamp'] = pd.to_datetime(df1['timestamp'])
# Setting the index
df1 = df1.set_index(
|
pd.DatetimeIndex(df1['timestamp'])
|
pandas.DatetimeIndex
|
# -*- coding:utf-8 -*-
"""
Stock information class
Created on 2019/01/02
@author: TabQ
@group : gugu
@contact: <EMAIL>
"""
from __future__ import division
import pandas as pd
from pandas.compat import StringIO
import json
import lxml.html
from lxml import etree
import random
import re
import time
from gugu.utility import Utility
from gugu.base import Base, cf
import sys
ua_list = [
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.101',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.122',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',
'Mozilla/5.0 (Windows NT 5.1; U; en; rv:1.8.1) Gecko/20061208 Firefox/2.0.0 Opera 9.50',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:69.0) Gecko/20100101 Firefox/69.0',
]
headers = {
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1',
'User-Agent': random.choice(ua_list),
'Cache-Control': 'max-age=0',
}
class StockInfo(Base):
def stockProfiles(self):
"""
        Fetch fundamentals-based summary data for listed companies.
        Return
        --------
        DataFrame or List: [{'symbol':, 'net_profit_cagr':, ...}, ...]
            symbol: ticker symbol
            net_profit_cagr: net profit CAGR
            ps: price-to-sales ratio
            percent: price change (%)
            pb_ttm: trailing price-to-book ratio
            float_shares: floating shares
            current: current price
            amplitude: amplitude
            pcf: price-to-cash-flow ratio
            current_year_percent: year-to-date change (%)
            float_market_capital: floating market capitalization
            market_capital: total market capitalization
            dividend_yield: dividend yield
            roe_ttm: trailing return on equity
            total_percent: total change (%)
            income_cagr: income CAGR
            amount: turnover amount
            chg: price change (points)
            issue_date_ts: listing date as a unix timestamp
            main_net_inflows: main net inflows
            volume: trading volume
            volume_ratio: volume ratio
            pb: price-to-book ratio
            followers: number of followers on Xueqiu
            turnover_rate: turnover rate
            name: name
            pe_ttm: trailing price-to-earnings ratio
            total_shares: total shares
"""
self._data = pd.DataFrame()
self._writeHead()
self._data = self.__handleStockProfiles()
self._data['issue_date_ts'] = self._data['issue_date_ts'].map(lambda x: int(x/1000))
return self._result()
def __handleStockProfiles(self):
try:
request = self._session.get(cf.XQ_HOME, headers=headers)
cookies = request.cookies
except Exception as e:
print(str(e))
page = 1
while True:
self._writeConsole()
try:
timestamp = int(time.time()*1000)
request = self._session.get(cf.XQ_STOCK_PROFILES_URL % (page, timestamp), headers=headers, cookies=cookies)
dataDict = json.loads(request.text)
if not dataDict.get('data').get('list'):
break
dataList = []
for row in dataDict.get('data').get('list'):
dataList.append(row)
self._data = self._data.append(pd.DataFrame(dataList, columns=cf.XQ_STOCK_PROFILES_COLS), ignore_index=True)
page += 1
time.sleep(1)
except Exception as e:
print(str(e))
return self._data
def report(self, year, quarter, retry=3, pause=0.001):
"""
        Fetch earnings report data.
        Parameters
        --------
        year:int  year, e.g. 2014
        quarter:int  quarter, one of 1, 2, 3, 4; only these four values are accepted
        Note: the data is scraped from the website page by page, so the speed depends on your current network speed.
        retry : int, default 3
            number of times to re-run the request if network or similar problems occur
        pause : int, default 0.001
            seconds to pause between repeated requests, to avoid problems caused by request intervals that are too short
        Return
        --------
        DataFrame or List: [{'code':, 'name':, ...}, ...]
            code, stock code
            name, name
            eps, earnings per share
            eps_yoy, earnings per share year-over-year (%)
            bvps, book value per share
            roe, return on equity (%)
            epcf, cash flow per share (CNY)
            net_profits, net profit (10,000 CNY)
            profits_yoy, net profit year-over-year (%)
            distrib, profit distribution plan
            report_date, release date
"""
self._data = pd.DataFrame()
if Utility.checkQuarter(year, quarter) is True:
self._writeHead()
# http://vip.stock.finance.sina.com.cn/q/go.php/vFinanceAnalyze/kind/mainindex/index.phtml?s_i=&s_a=&s_c=&reportdate=2018&quarter=3&p=1&num=60
self._data = self.__parsePage(cf.REPORT_URL, year, quarter, 1, cf.REPORT_COLS,
|
pd.DataFrame()
|
pandas.DataFrame
|
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2021/7/8 22:08
Desc: Jin10 Data Center - Economic Indicators - United States
https://datacenter.jin10.com/economic
"""
import json
import time
import pandas as pd
import demjson
import requests
from akshare.economic.cons import (
JS_USA_NON_FARM_URL,
JS_USA_UNEMPLOYMENT_RATE_URL,
JS_USA_EIA_CRUDE_URL,
JS_USA_INITIAL_JOBLESS_URL,
JS_USA_CORE_PCE_PRICE_URL,
JS_USA_CPI_MONTHLY_URL,
JS_USA_LMCI_URL,
JS_USA_ADP_NONFARM_URL,
JS_USA_GDP_MONTHLY_URL,
)
# Eastmoney - United States - pending home sales MoM
def macro_usa_phs():
    """
    US pending home sales month-over-month rate
    http://data.eastmoney.com/cjsj/foreign_0_5.html
    :return: pending home sales month-over-month rate
:rtype: pandas.DataFrame
"""
url = "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
'type': 'GJZB',
'sty': 'HKZB',
'js': '({data:[(x)],pages:(pc)})',
'p': '1',
'ps': '2000',
'mkt': '0',
'stat': '5',
'pageNo': '1',
'pageNum': '1',
'_': '1625474966006'
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(',') for item in data_json['data']])
temp_df.columns = [
'时间',
'前值',
'现值',
'发布日期',
]
temp_df['前值'] = pd.to_numeric(temp_df['前值'])
temp_df['现值'] = pd.to_numeric(temp_df['现值'])
return temp_df
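# Usage sketch: the report functions in this module take no arguments and return
# the full history as a pandas object, e.g.
#     pending_home_sales_df = macro_usa_phs()
#     gdp_series = macro_usa_gdp_monthly()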
# Jin10 Data Center - Economic Indicators - United States - Economic Conditions - US GDP
def macro_usa_gdp_monthly():
    """
    US Gross Domestic Product (GDP) report, data available from 20080228 onward
https://datacenter.jin10.com/reportType/dc_usa_gdp
:return: pandas.Series
2008-02-28 0.6
2008-03-27 0.6
2008-04-30 0.9
2008-06-26 1
2008-07-31 1.9
...
2019-06-27 3.1
2019-07-26 2.1
2019-08-29 2
2019-09-26 2
2019-10-30 0
"""
t = time.time()
res = requests.get(
JS_USA_GDP_MONTHLY_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国国内生产总值(GDP)"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "53",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "gdp"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - United States - Price Levels - US CPI MoM report
def macro_usa_cpi_monthly():
    """
    US CPI month-over-month report, data available from 19700101 onward
    https://datacenter.jin10.com/reportType/dc_usa_cpi
    https://cdn.jin10.com/dc/reports/dc_usa_cpi_all.js?v=1578741110
    :return: US CPI MoM report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_CPI_MONTHLY_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国居民消费价格指数(CPI)(月环比)"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "9",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "cpi_monthly"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - United States - Price Levels - US core CPI MoM report
def macro_usa_core_cpi_monthly():
    """
    US core CPI month-over-month report, data available from 19700101 onward
    https://datacenter.jin10.com/reportType/dc_usa_core_cpi
    https://cdn.jin10.com/dc/reports/dc_usa_core_cpi_all.js?v=1578740570
    :return: US core CPI MoM report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_core_cpi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国核心CPI月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "6",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_core_cpi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - United States - Price Levels - US personal spending MoM report
def macro_usa_personal_spending():
    """
    US personal spending month-over-month report, data available from 19700101 onward
    https://datacenter.jin10.com/reportType/dc_usa_personal_spending
    https://cdn.jin10.com/dc/reports/dc_usa_personal_spending_all.js?v=1578741327
    :return: US personal spending MoM report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_personal_spending_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国个人支出月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "35",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_personal_spending"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - United States - Price Levels - US retail sales MoM report
def macro_usa_retail_sales():
    """
    US retail sales month-over-month report, data available from 19920301 onward
    https://datacenter.jin10.com/reportType/dc_usa_retail_sales
    https://cdn.jin10.com/dc/reports/dc_usa_retail_sales_all.js?v=1578741528
    :return: US retail sales MoM report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_retail_sales_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国零售销售月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "39",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_retail_sales"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - United States - Price Levels - US import price index report
def macro_usa_import_price():
    """
    US import price index report, data available from 19890201 onward
    https://datacenter.jin10.com/reportType/dc_usa_import_price
    https://cdn.jin10.com/dc/reports/dc_usa_import_price_all.js?v=1578741716
    :return: US import price index report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_import_price_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国进口物价指数"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "18",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_import_price"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - United States - Price Levels - US export price index report
def macro_usa_export_price():
    """
    US export price index report, data available from 19890201 onward
    https://datacenter.jin10.com/reportType/dc_usa_export_price
    https://cdn.jin10.com/dc/reports/dc_usa_export_price_all.js?v=1578741832
    :return: US export price index report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_export_price_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国出口价格指数"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "79",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_export_price"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - United States - Labor Market - LMCI
def macro_usa_lmci():
    """
    Fed Labor Market Conditions Index (LMCI) report, data available from 20141006 onward
    https://datacenter.jin10.com/reportType/dc_usa_lmci
    https://cdn.jin10.com/dc/reports/dc_usa_lmci_all.js?v=1578742043
    :return: Fed Labor Market Conditions Index report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_LMCI_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美联储劳动力市场状况指数"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "93",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "lmci"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - United States - Labor Market - Unemployment - US unemployment rate report
def macro_usa_unemployment_rate():
    """
    US unemployment rate report, data available from 19700101 onward
    https://datacenter.jin10.com/reportType/dc_usa_unemployment_rate
    https://cdn.jin10.com/dc/reports/dc_usa_unemployment_rate_all.js?v=1578821511
    :return: US unemployment rate report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_UNEMPLOYMENT_RATE_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国失业率"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "47",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
    temp_df = pd.concat([temp_df, temp_se])
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "unemployment_rate"
return temp_df
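# Every report function below repeats the same second step: query the Jin10
# "reports/list_v2" API and append the latest points to the historical series taken from
# the CDN snapshot. A possible refactor is sketched here; the helper name
# `_fetch_jin10_list_v2` and its signature are illustrative and not part of the original
# module. It reuses the module-level `time`, `requests` and `pandas as pd` imports.
def _fetch_jin10_list_v2(attr_id: str) -> pd.Series:
    """Return the latest values of a Jin10 report as a date-indexed Series (sketch)."""
    t = time.time()
    url = "https://datacenter-api.jin10.com/reports/list_v2"
    params = {
        "max_date": "",
        "category": "ec",
        "attr_id": attr_id,
        "_": str(int(round(t * 1000))),
    }
    headers = {
        "accept": "*/*",
        "origin": "https://datacenter.jin10.com",
        "referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
        "x-app-id": "rU6QIu7JHe2gOUeR",
        "x-csrf-token": "",
        "x-version": "1.0.0",
    }
    r = requests.get(url, params=params, headers=headers)
    # first column is the report date, second column is the headline value
    temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
    temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
    return temp_se.iloc[:, 1]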
# Jin10 Data Center - Economic Indicators - USA - Labor Market - Unemployment - US Challenger Job Cuts Report
def macro_usa_job_cuts():
    """
    US Challenger job cuts report, data available from 19940201 to the present
    https://datacenter.jin10.com/reportType/dc_usa_job_cuts
    https://cdn.jin10.com/dc/reports/dc_usa_job_cuts_all.js?v=1578742262
    :return: US Challenger job cuts report - current value (10k persons)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_job_cuts_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国挑战者企业裁员人数报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万人)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "78",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
    temp_df = pd.concat([temp_df, temp_se])
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "usa_job_cuts"
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Labor Market - Employment - US Non-farm Payrolls Report
def macro_usa_non_farm():
    """
    US non-farm payrolls report, data available from 19700102 to the present
    https://datacenter.jin10.com/reportType/dc_nonfarm_payrolls
    https://cdn.jin10.com/dc/reports/dc_nonfarm_payrolls_all.js?v=1578742490
    :return: US non-farm payrolls report - current value (10k persons)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
JS_USA_NON_FARM_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国非农就业人数"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万人)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "33",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
    temp_df = pd.concat([temp_df, temp_se])
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "non_farm"
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Labor Market - Employment - US ADP Employment Report
def macro_usa_adp_employment():
    """
    US ADP employment report, data available from 20010601 to the present
    https://datacenter.jin10.com/reportType/dc_adp_nonfarm_employment
    https://cdn.jin10.com/dc/reports/dc_adp_nonfarm_employment_all.js?v=1578742564
    :return: US ADP employment report - current value (10k persons)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
JS_USA_ADP_NONFARM_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国ADP就业人数(万人)"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万人)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "1",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
    temp_df = pd.concat([temp_df, temp_se])
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "adp"
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Labor Market - Consumer Income & Spending - US Core PCE Price Index YoY Report
def macro_usa_core_pce_price():
    """
    US core PCE price index YoY report, data available from 19700101 to the present
    https://datacenter.jin10.com/reportType/dc_usa_core_pce_price
    https://cdn.jin10.com/dc/reports/dc_usa_core_pce_price_all.js?v=1578742641
    :return: US core PCE price index YoY report - current value (%)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
JS_USA_CORE_PCE_PRICE_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国核心PCE物价指数年率"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "80",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
    temp_df = pd.concat([temp_df, temp_se])
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "core_pce_price"
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Labor Market - Consumer Income & Spending - US Real Personal Consumption Expenditures QoQ (Preliminary) Report
def macro_usa_real_consumer_spending():
    """
    US real personal consumption expenditures QoQ (preliminary) report, data available from 20131107 to the present
    https://datacenter.jin10.com/reportType/dc_usa_real_consumer_spending
    https://cdn.jin10.com/dc/reports/dc_usa_real_consumer_spending_all.js?v=1578742802
    :return: US real personal consumption expenditures QoQ (preliminary) report - current value (%)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_real_consumer_spending_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国实际个人消费支出季率初值报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "81",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
    temp_df = pd.concat([temp_df, temp_se])
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "usa_real_consumer_spending"
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Trade - US Trade Balance Report
def macro_usa_trade_balance():
    """
    US trade balance report, data available from 19700101 to the present
    https://datacenter.jin10.com/reportType/dc_usa_trade_balance
    https://cdn.jin10.com/dc/reports/dc_usa_trade_balance_all.js?v=1578742911
    :return: US trade balance report - current value (USD 100 mn)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_trade_balance_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国贸易帐报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(亿美元)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "42",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
    temp_df = pd.concat([temp_df, temp_se])
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "usa_trade_balance"
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Trade - US Current Account Report
def macro_usa_current_account():
    """
    US current account report, data available from 20080317 to the present
    https://datacenter.jin10.com/reportType/dc_usa_current_account
    https://cdn.jin10.com/dc/reports/dc_usa_current_account_all.js?v=1578743012
    :return: US current account report - current value (USD 100 mn)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_current_account_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国经常账报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(亿美元)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "12",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
    temp_df = pd.concat([temp_df, temp_se])
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "usa_current_account"
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry - Manufacturing - Baker Hughes Rig Count Report
def macro_usa_rig_count():
    """
    Baker Hughes rig count report, data available from 20080317 to the present
    https://datacenter.jin10.com/reportType/dc_rig_count_summary
    https://cdn.jin10.com/dc/reports/dc_rig_count_summary_all.js?v=1578743203
    :return: Baker Hughes rig count report - current week (rig counts and weekly changes)
    :rtype: pandas.DataFrame
    """
t = time.time()
params = {
"_": t
}
res = requests.get("https://cdn.jin10.com/data_center/reports/baker.json", params=params)
temp_df = pd.DataFrame(res.json().get("values")).T
big_df = pd.DataFrame()
big_df["钻井总数_钻井数"] = temp_df["钻井总数"].apply(lambda x: x[0])
big_df["钻井总数_变化"] = temp_df["钻井总数"].apply(lambda x: x[1])
big_df["美国石油钻井_钻井数"] = temp_df["美国石油钻井"].apply(lambda x: x[0])
big_df["美国石油钻井_变化"] = temp_df["美国石油钻井"].apply(lambda x: x[1])
big_df["混合钻井_钻井数"] = temp_df["混合钻井"].apply(lambda x: x[0])
big_df["混合钻井_变化"] = temp_df["混合钻井"].apply(lambda x: x[1])
big_df["美国天然气钻井_钻井数"] = temp_df["美国天然气钻井"].apply(lambda x: x[0])
big_df["美国天然气钻井_变化"] = temp_df["美国天然气钻井"].apply(lambda x: x[1])
big_df = big_df.astype("float")
return big_df
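# Usage sketch (illustrative, requires network access): unlike the other report functions,
# macro_usa_rig_count() returns a DataFrame with rig counts and week-over-week changes:
#
#     rigs = macro_usa_rig_count()
#     print(rigs[["钻井总数_钻井数", "钻井总数_变化"]])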
# Jin10 Data Center - Economic Indicators - USA - Industry - Manufacturing - US Personal Spending MoM Report
# Jin10 Data Center - Economic Indicators - USA - Industry - Manufacturing - US Producer Price Index (PPI) Report
def macro_usa_ppi():
    """
    US producer price index (PPI) report, data available from 20080226 to the present
    https://datacenter.jin10.com/reportType/dc_usa_ppi
    https://cdn.jin10.com/dc/reports/dc_usa_ppi_all.js?v=1578743628
    :return: US producer price index (PPI) report - current value (%)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_ppi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国生产者物价指数(PPI)报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "37",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
    temp_df = pd.concat([temp_df, temp_se])
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_ppi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry - Manufacturing - US Core Producer Price Index (PPI) Report
def macro_usa_core_ppi():
    """
    US core producer price index (PPI) report, data available from 20080318 to the present
    https://datacenter.jin10.com/reportType/dc_usa_core_ppi
    https://cdn.jin10.com/dc/reports/dc_usa_core_ppi_all.js?v=1578743709
    :return: US core producer price index (PPI) report - current value (%)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_core_ppi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国核心生产者物价指数(PPI)报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "7",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
    temp_df = pd.concat([temp_df, temp_se])
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_core_ppi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry - Manufacturing - US API Crude Oil Stock Report
def macro_usa_api_crude_stock():
    """
    US API crude oil stock report, data available from 20120328 to the present
    https://datacenter.jin10.com/reportType/dc_usa_api_crude_stock
    https://cdn.jin10.com/dc/reports/dc_usa_api_crude_stock_all.js?v=1578743859
    :return: US API crude oil stock report - current value (10k barrels)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_api_crude_stock_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国API原油库存报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万桶)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "69",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
    temp_df = pd.concat([temp_df, temp_se])
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_api_crude_stock"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry - Manufacturing - US Markit Manufacturing PMI (Flash) Report
def macro_usa_pmi():
    """
    US Markit manufacturing PMI (flash) report, data available from 20120601 to the present
    https://datacenter.jin10.com/reportType/dc_usa_pmi
    https://cdn.jin10.com/dc/reports/dc_usa_pmi_all.js?v=1578743969
    :return: US Markit manufacturing PMI (flash) report - current value
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_pmi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国Markit制造业PMI报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "74",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
    temp_df = pd.concat([temp_df, temp_se])
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_pmi"
temp_df = temp_df.astype("float")
return temp_df
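# Illustrative comparison (not part of the original module; requires network access): the
# Markit and ISM manufacturing PMI series share a DatetimeIndex, so they can be aligned
# into a single frame:
#
#     pmi_compare = pd.concat([macro_usa_pmi(), macro_usa_ism_pmi()], axis=1)
#     print(pmi_compare.dropna().tail())  # columns "usa_pmi" and "usa_ism_pmi"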
# Jin10 Data Center - Economic Indicators - USA - Industry - Manufacturing - US ISM Manufacturing PMI Report
def macro_usa_ism_pmi():
    """
    US ISM manufacturing PMI report, data available from 19700101 to the present
    https://datacenter.jin10.com/reportType/dc_usa_ism_pmi
    https://cdn.jin10.com/dc/reports/dc_usa_ism_pmi_all.js?v=1578744071
    :return: US ISM manufacturing PMI report - current value
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_ism_pmi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国ISM制造业PMI报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "28",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
    temp_df = pd.concat([temp_df, temp_se])
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_ism_pmi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry - Industrial - US Industrial Production MoM Report
def macro_usa_industrial_production():
    """
    US industrial production MoM report, data available from 19700101 to the present
    https://datacenter.jin10.com/reportType/dc_usa_industrial_production
    https://cdn.jin10.com/dc/reports/dc_usa_industrial_production_all.js?v=1578744188
    :return: US industrial production MoM report - current value (%)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_industrial_production_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国工业产出月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "20",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
    temp_df = pd.concat([temp_df, temp_se])
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_industrial_production"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry - Industrial - US Durable Goods Orders MoM Report
def macro_usa_durable_goods_orders():
    """
    US durable goods orders MoM report, data available from 20080227 to the present
    https://datacenter.jin10.com/reportType/dc_usa_durable_goods_orders
    https://cdn.jin10.com/dc/reports/dc_usa_durable_goods_orders_all.js?v=1578744295
    :return: US durable goods orders MoM report - current value (%)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_durable_goods_orders_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国耐用品订单月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "13",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
    temp_df = pd.concat([temp_df, temp_se])
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_durable_goods_orders"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry - Industrial - US Factory Orders MoM Report
def macro_usa_factory_orders():
    """
    US factory orders MoM report, data available from 19920401 to the present
    https://datacenter.jin10.com/reportType/dc_usa_factory_orders
    https://cdn.jin10.com/dc/reports/dc_usa_factory_orders_all.js?v=1578744385
    :return: US factory orders MoM report - current value (%)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_factory_orders_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国工厂订单月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "16",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
    temp_df = pd.concat([temp_df, temp_se])
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_factory_orders"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry - Services - US Markit Services PMI (Flash) Report
def macro_usa_services_pmi():
    """
    US Markit services PMI (flash) report, data available from 20120701 to the present
    https://datacenter.jin10.com/reportType/dc_usa_services_pmi
    https://cdn.jin10.com/dc/reports/dc_usa_services_pmi_all.js?v=1578744503
    :return: US Markit services PMI (flash) report - current value
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_services_pmi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国Markit服务业PMI初值报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "89",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
    temp_df = pd.concat([temp_df, temp_se])
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_services_pmi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry - Services - US Business Inventories MoM Report
def macro_usa_business_inventories():
    """
    US business inventories MoM report, data available from 19920301 to the present
    https://datacenter.jin10.com/reportType/dc_usa_business_inventories
    https://cdn.jin10.com/dc/reports/dc_usa_business_inventories_all.js?v=1578744618
    :return: US business inventories MoM report - current value (%)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_business_inventories_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国商业库存月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "4",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
    temp_df = pd.concat([temp_df, temp_se])
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_business_inventories"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry - Services - US ISM Non-Manufacturing PMI Report
def macro_usa_ism_non_pmi():
    """
    US ISM non-manufacturing PMI report, data available from 19970801 to the present
    https://datacenter.jin10.com/reportType/dc_usa_ism_non_pmi
    https://cdn.jin10.com/dc/reports/dc_usa_ism_non_pmi_all.js?v=1578744693
    :return: US ISM non-manufacturing PMI report - current value
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_ism_non_pmi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国ISM非制造业PMI报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "29",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
    temp_df = pd.concat([temp_df, temp_se])
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_ism_non_pmi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry - Real Estate - US NAHB Housing Market Index Report
def macro_usa_nahb_house_market_index():
    """
    US NAHB housing market index report, data available from 19850201 to the present
    https://datacenter.jin10.com/reportType/dc_usa_nahb_house_market_index
    https://cdn.jin10.com/dc/reports/dc_usa_nahb_house_market_index_all.js?v=1578744817
    :return: US NAHB housing market index report - current value
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_nahb_house_market_index_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国NAHB房产市场指数报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "31",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
    temp_df = pd.concat([temp_df, temp_se])
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_nahb_house_market_index"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry - Real Estate - US Housing Starts (Annualized) Report
def macro_usa_house_starts():
    """
    US housing starts (annualized total) report, data available from 19700101 to the present
    https://datacenter.jin10.com/reportType/dc_usa_house_starts
    https://cdn.jin10.com/dc/reports/dc_usa_house_starts_all.js?v=1578747388
    :return: US housing starts (annualized total) report - current value (10k units)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_house_starts_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国新屋开工总数年化报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万户)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "17",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
    temp_df = pd.concat([temp_df, temp_se])
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_house_starts"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry - Real Estate - US New Home Sales (Annualized) Report
def macro_usa_new_home_sales():
    """
    US new home sales (annualized total) report, data available from 19700101 to the present
    https://datacenter.jin10.com/reportType/dc_usa_new_home_sales
    https://cdn.jin10.com/dc/reports/dc_usa_new_home_sales_all.js?v=1578747501
    :return: US new home sales (annualized total) report - current value (10k units)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_new_home_sales_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国新屋销售总数年化报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万户)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "32",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
    temp_df = pd.concat([temp_df, temp_se])
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_new_home_sales"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry - Real Estate - US Building Permits Report
def macro_usa_building_permits():
    """
    US building permits (total) report, data available from 20080220 to the present
    https://datacenter.jin10.com/reportType/dc_usa_building_permits
    https://cdn.jin10.com/dc/reports/dc_usa_building_permits_all.js?v=1578747599
    :return: US building permits (total) report - current value (10k units)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_building_permits_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国营建许可总数报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万户)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "3",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
    temp_df = pd.concat([temp_df, temp_se])
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_building_permits"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry - Real Estate - US Existing Home Sales (Annualized) Report
def macro_usa_exist_home_sales():
    """
    US existing home sales (annualized total) report, data available from 19700101 to the present
    https://datacenter.jin10.com/reportType/dc_usa_exist_home_sales
    https://cdn.jin10.com/dc/reports/dc_usa_exist_home_sales_all.js?v=1578747703
    :return: US existing home sales (annualized total) report - current value (10k units)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_exist_home_sales_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国成屋销售总数年化报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万户)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "15",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
    temp_df = pd.concat([temp_df, temp_se])
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_exist_home_sales"
temp_df = temp_df.astype("float")
return temp_df
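# Illustrative sketch (requires network access): the real-estate report functions above can
# be combined into one frame for a quick housing-market overview:
#
#     housing = pd.concat(
#         [
#             macro_usa_house_starts(),
#             macro_usa_new_home_sales(),
#             macro_usa_building_permits(),
#             macro_usa_exist_home_sales(),
#         ],
#         axis=1,
#     )
#     print(housing.tail())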
# Jin10 Data Center - Economic Indicators - USA - Industry - Real Estate - US FHFA House Price Index MoM Report
def macro_usa_house_price_index():
    """
    US FHFA house price index MoM report, data available from 19910301 to the present
    https://datacenter.jin10.com/reportType/dc_usa_house_price_index
    https://cdn.jin10.com/dc/reports/dc_usa_house_price_index_all.js?v=1578747781
    :return: US FHFA house price index MoM report - current value (%)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_house_price_index_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国FHFA房价指数月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "51",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
    temp_df = pd.concat([temp_df, temp_se])
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_house_price_index"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry - Real Estate - US S&P/Case-Shiller 20-City House Price Index YoY Report
def macro_usa_spcs20():
    """
    US S&P/Case-Shiller 20-city house price index YoY report, data available from 20010201 to the present
    https://datacenter.jin10.com/reportType/dc_usa_spcs20
    https://cdn.jin10.com/dc/reports/dc_usa_spcs20_all.js?v=1578747873
    :return: US S&P/Case-Shiller 20-city house price index YoY report - current value (%)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_spcs20_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国S&P/CS20座大城市房价指数年率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "52",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
    temp_df = pd.concat([temp_df, temp_se])
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_spcs20"
temp_df = temp_df.astype(float)
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry - Real Estate - US Pending Home Sales Index MoM Report
def macro_usa_pending_home_sales():
    """
    US pending home sales index MoM report, data available from 20010301 to the present
    https://datacenter.jin10.com/reportType/dc_usa_pending_home_sales
    https://cdn.jin10.com/dc/reports/dc_usa_pending_home_sales_all.js?v=1578747959
    :return: US pending home sales index MoM report - current value (%)
    :rtype: pandas.Series
    """
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_pending_home_sales_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国成屋签约销售指数月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "34",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_pending_home_sales"
temp_df = temp_df.astype(float)
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Leading Indicators - US Conference Board Consumer Confidence Index Report
def macro_usa_cb_consumer_confidence():
"""
US Conference Board consumer confidence index report, data available from 1970-01-01 to the present
https://cdn.jin10.com/dc/reports/dc_usa_cb_consumer_confidence_all.js?v=1578576859
:return: US Conference Board consumer confidence index report - latest value
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_cb_consumer_confidence_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}")
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国谘商会消费者信心指数报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "5",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "cb_consumer_confidence"
temp_df = temp_df.astype(float)
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Leading Indicators - US NFIB Small Business Confidence Index Report
def macro_usa_nfib_small_business():
"""
US NFIB small business confidence index report, data available from 1975-02-01 to the present
https://cdn.jin10.com/dc/reports/dc_usa_nfib_small_business_all.js?v=1578576631
:return: US NFIB small business confidence index report - latest value
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_nfib_small_business_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}")
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国NFIB小型企业信心指数报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "63",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "nfib_small_business"
temp_df = temp_df.astype(float)
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Leading Indicators - Preliminary University of Michigan Consumer Sentiment Index Report
def macro_usa_michigan_consumer_sentiment():
"""
Preliminary University of Michigan consumer sentiment index report, data available from 1970-03-01 to the present
https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment
:return: preliminary University of Michigan consumer sentiment index report - latest value
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_michigan_consumer_sentiment_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}")
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国密歇根大学消费者信心指数初值报告"] for item in json_data["list"]]
value_df =
|
pd.DataFrame(value_list)
|
pandas.DataFrame
|
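# A minimal usage sketch for the jin10 report helpers above (assumes network
# access to the jin10 endpoints; the output shape follows the :return: docstrings):
if __name__ == "__main__":
    spcs20_series = macro_usa_spcs20()
    print(spcs20_series.tail())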
import pandas as pd
import os
os.chdir(r"C:\Users\Akash\Desktop\be_proj_py")
import spacy
from functools import reduce
import random
import argparse
import sys
import json
import pickle
nlp = spacy.load("en_core_web_sm")
data_file = pd.read_csv("chatbot_training_data.csv")
ner_data_1 = list(data_file['NER DATA'].dropna())
branch = ['comps' , "IT" , "EXTC" , "Mech", "computer", "cs", "etrx" , "mechanical"]
updated_branch = []
for data in ner_data_1:
updated_branch.append(data)
for name in branch:
new_sentence = data.replace("electronics" , name)
updated_branch.append(new_sentence)
random.shuffle(updated_branch)
updated_ner = pd.DataFrame(updated_branch)
#print(len(updated_ner))
if os.path.isdir("Data") :
updated_ner.to_csv('Data/NER raw data.csv' , index = False, header=['Updated_NER'])
else:
os.mkdir("Data")
updated_ner.to_csv('Data/NER raw data.csv' , index = False, header=['Updated_NER'])
count = 0
Sentence = []
Name = []
PoS = []
Tag = []
for data in updated_branch:
count = count +1
sen = "sentence " + str(count)
list_name = []
sen_no = []
list_pos = []
list_tag = []
sen_no.append(sen)
doc = nlp(data)
for token in doc:
list_pos.append(token.pos_)
token = str(token)
if token == "comps" or token =="IT" or token == "EXTC" or token == "Mech" or token == "computer" or token == "cs" or token == "etrx" or token =="mechanical":
list_tag.append("BRANCH")
else: list_tag.append("O")
sen_no.append("None")
list_name.append(token)
list_name.append(".")
list_pos.append(".")
list_tag.append("O")
Tag.append(list_tag)
Name.append(list_name)
PoS.append(list_pos)
Sentence.append(sen_no)
PoS = reduce(lambda z, y :z + y, PoS)
Name = reduce(lambda z, y :z + y, Name)
Sentence = reduce(lambda z, y :z + y, Sentence)
Tag = reduce(lambda z, y :z + y, Tag)
sen_no =
|
pd.DataFrame(Sentence)
|
pandas.DataFrame
|
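# A minimal sketch (hypothetical helper, not part of the original script) showing
# how the flattened Sentence/Name/PoS/Tag lists above could be assembled into a
# single CoNLL-style table; the column names are illustrative assumptions:
def build_ner_table(sentences, words, pos_tags, ner_tags):
    import pandas as pd  # local import keeps the sketch self-contained
    return pd.DataFrame(
        {"Sentence #": sentences, "Word": words, "POS": pos_tags, "Tag": ner_tags}
    )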
import Simulations.offline as off
import pickle
import matplotlib.pyplot as plt
import pandas as pd
if __name__ == '__main__':
treatments_input = ['normal', 'uniform'] # ['normal', 'uniform']
treatments_predictions = ['normal', 'uniform', '0', '1']
treatment = 'cr' # choose from ['cr', 'at'] ('cr' = competitive ratio, 'at' = additive term)
for ti in treatments_input:
for tp in treatments_predictions:
with open(f'Instances/FtP-quality-phi-ratio-additive-terms_input-{ti}_preds-{tp}.pkl', 'rb') as inp:
phi_errors_ratios = pickle.load(inp)
phi_errors_ratios_df =
|
pd.DataFrame(phi_errors_ratios)
|
pandas.DataFrame
|
#Stock Simulator
#All industries and stock details (Including prices, market caps, trading volumes etc.) retrieved via web-scraping the Yahoo Finance website
import requests
from bs4 import BeautifulSoup
import csv
import pandas as pd
import yfinance as yf
from datetime import datetime, date, time, timedelta
import webbrowser
import matplotlib.pyplot as plt
import os.path
#To retrieve and process data on user 'accounts' from a csv file
#If the file exists, it will read the file to get the usernames and corresponding passwords
#If the file doesn't exist in the system, it will create one
file_exists = os.path.isfile('user_data.csv')
with open ('user_data.csv', 'a') as csvfile:
headers = ['Date of Account Creation', 'Time of Account Creation', 'Username', 'Password']
writer = csv.DictWriter(csvfile, delimiter = ',', lineterminator = '\n', fieldnames = headers)
if not file_exists:
writer.writeheader()
else:
pass
#Welcoming message - Asks you to sign in or sign up to use the simulator
#Added to give it an 'app-like' environment
while True:
try:
account = input("""Hello! Welcome to the stock simulator!
[L] - Log In
[S] - Sign Up: """)
if account == "S":
while True:
try:
filename = "user_data.csv"
username = input("""We're glad to see your interest in signing up!
Please enter your desired username (Minimum 4 characters & Case-Sensitive)
Alternatively, press B to go back: """)
if username.upper() == "B":
break
elif len(username) <4:
print()
print("Please enter a minimum of 4 characters!")
continue
else:
usernames_df =
|
pd.read_csv("user_data.csv")
|
pandas.read_csv
|
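# A minimal sketch (assumed continuation, not from the original script) of how the
# loaded user_data.csv could be used to reject an already-taken username:
def username_taken(users_df, username):
    # 'Username' matches the header written by csv.DictWriter above
    return users_df["Username"].astype(str).eq(username).any()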
from cmdty_storage import CmdtyStorage, three_factor_seasonal_value
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
NUM_STORAGE_FACILITIES = 4
DATA_LAG = 0
def perform_valuation():
german_gas_storage_sets = list()
gas_storage_facilities = list()
# Load the 4 datasets about German storage facilities
for i in range(0, NUM_STORAGE_FACILITIES):
german_gas_storage_sets.append(pd.read_excel('data/Combined-2021-07-22-2021-06-22.xlsx', i))
# Create storage object for each facility
gas_storage_facilities.append(CmdtyStorage(freq='H',
storage_start = german_gas_storage_sets[i]['GAS DAY STARTED ON'][DATA_LAG],
storage_end = '2022-07-21',
injection_cost = 0.01,
withdrawal_cost = 0.02,
min_inventory = 0,
max_inventory = convert_twh_mmbtu(german_gas_storage_sets[i]['WORKING GAS VOLUME(TWh)'][DATA_LAG]), # Latest working volume
max_injection_rate = convert_gwh_mmbtu(german_gas_storage_sets[i]['INJECTION CAPACITY(GWh/d)'][DATA_LAG]) / 24,
max_withdrawal_rate = convert_gwh_mmbtu(german_gas_storage_sets[i]['WITHDRAWAL CAPACITY(GWh/d)'][DATA_LAG]) / 24,
)
)
begin_date = '2021-07-21'
# Creating the Inputs
monthly_index = pd.period_range(start=begin_date, periods=25, freq='M')
monthly_fwd_prices = [16.61, 15.68, 15.42, 15.31, 15.27, 15.13, 15.96, 17.22, 17.32, 17.66,
17.59, 16.81, 15.36, 14.49, 14.28, 14.25, 14.32, 14.33, 15.30, 16.58,
16.64, 16.79, 16.64, 15.90, 14.63]
# Resamples the forward curve and uses piecewise linear interpolation to fill missing values
fwd_curve =
|
pd.Series(data=monthly_fwd_prices, index=monthly_index)
|
pandas.Series
|
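# A minimal sketch of the resampling step described in the comment above: expand
# the monthly forward curve to daily points and fill the gaps with piecewise
# linear interpolation (daily granularity and the helper name are assumptions):
def resample_fwd_curve_daily(monthly_curve):
    curve = monthly_curve.copy()
    curve.index = curve.index.to_timestamp()  # PeriodIndex -> DatetimeIndex (month start)
    return curve.resample("D").interpolate(method="linear")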
# -*- coding: utf-8 -*-
__author__ = '<NAME>'
from adengine import engine
import redis
import cPickle as pickle
import datetime
import pandas as pd
class quikengine(engine):
# interface for trading via the Quik terminal
def __init__(self):
self.r=redis.Redis()
self.indexes=dict()
self.mins=dict()
def trades(self,date,pcode,first=0,last=-1):
# returns the list of trades from the Redis store as a pandas.DataFrame
lname='trades:'+date.strftime("%d.%m.%Y")+':'+pcode
trades=self.r.lrange(lname,first,last)
trades=[pickle.loads(trade) for trade in trades]
ind=[datetime.datetime.combine(date,datetime.datetime.strptime(trade[2],"%H:%M:%S").time()) for trade in trades]
trades=[(int(trade[1]),trade[4],trade[5],True if trade[7]==u'Купля' else False) for trade in trades]
trades_df=
|
pd.DataFrame(trades,index=ind,columns=('id','price','volume','buy'))
|
pandas.DataFrame
|
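# A minimal sketch (hypothetical, not part of the original class) of aggregating
# the trades DataFrame returned by trades() into one-minute OHLCV bars:
def trades_to_minute_bars(trades_df):
    bars = trades_df["price"].resample("1min").ohlc()  # open/high/low/close per minute
    bars["volume"] = trades_df["volume"].resample("1min").sum()
    return bars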
import builtins
from io import StringIO
from itertools import product
from string import ascii_lowercase
import numpy as np
import pytest
from pandas.errors import UnsupportedFunctionCall
import pandas as pd
from pandas import (
DataFrame, Index, MultiIndex, Series, Timestamp, date_range, isna)
import pandas.core.nanops as nanops
from pandas.util import testing as tm
@pytest.mark.parametrize("agg_func", ['any', 'all'])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("vals", [
['foo', 'bar', 'baz'], ['foo', '', ''], ['', '', ''],
[1, 2, 3], [1, 0, 0], [0, 0, 0],
[1., 2., 3.], [1., 0., 0.], [0., 0., 0.],
[True, True, True], [True, False, False], [False, False, False],
[np.nan, np.nan, np.nan]
])
def test_groupby_bool_aggs(agg_func, skipna, vals):
df = DataFrame({'key': ['a'] * 3 + ['b'] * 3, 'val': vals * 2})
# Figure out expectation using Python builtin
exp = getattr(builtins, agg_func)(vals)
# edge case for missing data with skipna and 'any'
if skipna and all(isna(vals)) and agg_func == 'any':
exp = False
exp_df = DataFrame([exp] * 2, columns=['val'], index=Index(
['a', 'b'], name='key'))
result = getattr(df.groupby('key'), agg_func)(skipna=skipna)
tm.assert_frame_equal(result, exp_df)
def test_max_min_non_numeric():
# #2700
aa = DataFrame({'nn': [11, 11, 22, 22],
'ii': [1, 2, 3, 4],
'ss': 4 * ['mama']})
result = aa.groupby('nn').max()
assert 'ss' in result
result = aa.groupby('nn').max(numeric_only=False)
assert 'ss' in result
result = aa.groupby('nn').min()
assert 'ss' in result
result = aa.groupby('nn').min(numeric_only=False)
assert 'ss' in result
def test_intercept_builtin_sum():
s = Series([1., 2., np.nan, 3.])
grouped = s.groupby([0, 1, 2, 2])
result = grouped.agg(builtins.sum)
result2 = grouped.apply(builtins.sum)
expected = grouped.sum()
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
# @pytest.mark.parametrize("f", [max, min, sum])
# def test_builtins_apply(f):
@pytest.mark.parametrize("f", [max, min, sum])
@pytest.mark.parametrize('keys', [
"jim", # Single key
["jim", "joe"] # Multi-key
])
def test_builtins_apply(keys, f):
# see gh-8155
df = pd.DataFrame(np.random.randint(1, 50, (1000, 2)),
columns=["jim", "joe"])
df["jolie"] = np.random.randn(1000)
fname = f.__name__
result = df.groupby(keys).apply(f)
ngroups = len(df.drop_duplicates(subset=keys))
assert_msg = ("invalid frame shape: {} "
"(expected ({}, 3))".format(result.shape, ngroups))
assert result.shape == (ngroups, 3), assert_msg
tm.assert_frame_equal(result, # numpy's equivalent function
df.groupby(keys).apply(getattr(np, fname)))
if f != sum:
expected = df.groupby(keys).agg(fname).reset_index()
expected.set_index(keys, inplace=True, drop=False)
tm.assert_frame_equal(result, expected, check_dtype=False)
tm.assert_series_equal(getattr(result, fname)(),
getattr(df, fname)())
def test_arg_passthru():
# make sure that we are passing thru kwargs
# to our agg functions
# GH3668
# GH5724
df = pd.DataFrame(
{'group': [1, 1, 2],
'int': [1, 2, 3],
'float': [4., 5., 6.],
'string': list('abc'),
'category_string': pd.Series(list('abc')).astype('category'),
'category_int': [7, 8, 9],
'datetime': pd.date_range('20130101', periods=3),
'datetimetz': pd.date_range('20130101',
periods=3,
tz='US/Eastern'),
'timedelta': pd.timedelta_range('1 s', periods=3, freq='s')},
columns=['group', 'int', 'float', 'string',
'category_string', 'category_int',
'datetime', 'datetimetz',
'timedelta'])
expected_columns_numeric = Index(['int', 'float', 'category_int'])
# mean / median
expected = pd.DataFrame(
{'category_int': [7.5, 9],
'float': [4.5, 6.],
'timedelta': [pd.Timedelta('1.5s'),
pd.Timedelta('3s')],
'int': [1.5, 3],
'datetime': [pd.Timestamp('2013-01-01 12:00:00'),
pd.Timestamp('2013-01-03 00:00:00')],
'datetimetz': [
pd.Timestamp('2013-01-01 12:00:00', tz='US/Eastern'),
pd.Timestamp('2013-01-03 00:00:00', tz='US/Eastern')]},
index=Index([1, 2], name='group'),
columns=['int', 'float', 'category_int',
'datetime', 'datetimetz', 'timedelta'])
for attr in ['mean', 'median']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_frame_equal(result.reindex_like(expected), expected)
# TODO: min, max *should* handle
# categorical (ordered) dtype
expected_columns = Index(['int', 'float', 'string',
'category_int',
'datetime', 'datetimetz',
'timedelta'])
for attr in ['min', 'max']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'string',
'category_string', 'category_int',
'datetime', 'datetimetz',
'timedelta'])
for attr in ['first', 'last']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'string',
'category_int', 'timedelta'])
for attr in ['sum']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'category_int'])
for attr in ['prod', 'cumprod']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
# like min, max, but don't include strings
expected_columns = Index(['int', 'float',
'category_int',
'datetime', 'datetimetz',
'timedelta'])
for attr in ['cummin', 'cummax']:
f = getattr(df.groupby('group'), attr)
result = f()
# GH 15561: numeric_only=False set by default like min/max
tm.assert_index_equal(result.columns, expected_columns)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'category_int',
'timedelta'])
for attr in ['cumsum']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
def test_non_cython_api():
# GH5610
# non-cython calls should not include the grouper
df = DataFrame(
[[1, 2, 'foo'],
[1, np.nan, 'bar'],
[3, np.nan, 'baz']],
columns=['A', 'B', 'C'])
g = df.groupby('A')
gni = df.groupby('A', as_index=False)
# mad
expected = DataFrame([[0], [np.nan]], columns=['B'], index=[1, 3])
expected.index.name = 'A'
result = g.mad()
tm.assert_frame_equal(result, expected)
expected = DataFrame([[0., 0.], [0, np.nan]], columns=['A', 'B'],
index=[0, 1])
result = gni.mad()
tm.assert_frame_equal(result, expected)
# describe
expected_index = pd.Index([1, 3], name='A')
expected_col = pd.MultiIndex(levels=[['B'],
['count', 'mean', 'std', 'min',
'25%', '50%', '75%', 'max']],
codes=[[0] * 8, list(range(8))])
expected = pd.DataFrame([[1.0, 2.0, np.nan, 2.0, 2.0, 2.0, 2.0, 2.0],
[0.0, np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]],
index=expected_index,
columns=expected_col)
result = g.describe()
tm.assert_frame_equal(result, expected)
expected = pd.concat([df[df.A == 1].describe().unstack().to_frame().T,
df[df.A == 3].describe().unstack().to_frame().T])
expected.index = pd.Index([0, 1])
result = gni.describe()
tm.assert_frame_equal(result, expected)
# any
expected = DataFrame([[True, True], [False, True]], columns=['B', 'C'],
index=[1, 3])
expected.index.name = 'A'
result = g.any()
tm.assert_frame_equal(result, expected)
# idxmax
expected = DataFrame([[0.0], [np.nan]], columns=['B'], index=[1, 3])
expected.index.name = 'A'
result = g.idxmax()
tm.assert_frame_equal(result, expected)
def test_cython_api2():
# this takes the fast apply path
# cumsum (GH5614)
df = DataFrame(
[[1, 2, np.nan], [1, np.nan, 9], [3, 4, 9]
], columns=['A', 'B', 'C'])
expected = DataFrame(
[[2, np.nan], [np.nan, 9], [4, 9]], columns=['B', 'C'])
result = df.groupby('A').cumsum()
tm.assert_frame_equal(result, expected)
# GH 5755 - cumsum is a transformer and should ignore as_index
result = df.groupby('A', as_index=False).cumsum()
tm.assert_frame_equal(result, expected)
# GH 13994
result = df.groupby('A').cumsum(axis=1)
expected = df.cumsum(axis=1)
tm.assert_frame_equal(result, expected)
result = df.groupby('A').cumprod(axis=1)
expected = df.cumprod(axis=1)
tm.assert_frame_equal(result, expected)
def test_cython_median():
df = DataFrame(np.random.randn(1000))
df.values[::2] = np.nan
labels = np.random.randint(0, 50, size=1000).astype(float)
labels[::17] = np.nan
result = df.groupby(labels).median()
exp = df.groupby(labels).agg(nanops.nanmedian)
tm.assert_frame_equal(result, exp)
df = DataFrame(np.random.randn(1000, 5))
rs = df.groupby(labels).agg(np.median)
xp = df.groupby(labels).median()
tm.assert_frame_equal(rs, xp)
def test_median_empty_bins(observed):
df = pd.DataFrame(np.random.randint(0, 44, 500))
grps = range(0, 55, 5)
bins = pd.cut(df[0], grps)
result = df.groupby(bins, observed=observed).median()
expected = df.groupby(bins, observed=observed).agg(lambda x: x.median())
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("dtype", [
'int8', 'int16', 'int32', 'int64', 'float32', 'float64'])
@pytest.mark.parametrize("method,data", [
('first', {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]}),
('last', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]}),
('min', {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]}),
('max', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]}),
('nth', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}],
'args': [1]}),
('count', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 2}],
'out_type': 'int64'})
])
def test_groupby_non_arithmetic_agg_types(dtype, method, data):
# GH9311, GH6620
df = pd.DataFrame(
[{'a': 1, 'b': 1},
{'a': 1, 'b': 2},
{'a': 2, 'b': 3},
{'a': 2, 'b': 4}])
df['b'] = df.b.astype(dtype)
if 'args' not in data:
data['args'] = []
if 'out_type' in data:
out_type = data['out_type']
else:
out_type = dtype
exp = data['df']
df_out = pd.DataFrame(exp)
df_out['b'] = df_out.b.astype(out_type)
df_out.set_index('a', inplace=True)
grpd = df.groupby('a')
t = getattr(grpd, method)(*data['args'])
tm.assert_frame_equal(t, df_out)
@pytest.mark.parametrize("i", [
(Timestamp("2011-01-15 12:50:28.502376"),
Timestamp("2011-01-20 12:50:28.593448")),
(24650000000000001, 24650000000000002)
])
def test_groupby_non_arithmetic_agg_int_like_precision(i):
# see gh-6620, gh-9311
df = pd.DataFrame([{"a": 1, "b": i[0]}, {"a": 1, "b": i[1]}])
grp_exp = {"first": {"expected": i[0]},
"last": {"expected": i[1]},
"min": {"expected": i[0]},
"max": {"expected": i[1]},
"nth": {"expected": i[1],
"args": [1]},
"count": {"expected": 2}}
for method, data in grp_exp.items():
if "args" not in data:
data["args"] = []
grouped = df.groupby("a")
res = getattr(grouped, method)(*data["args"])
assert res.iloc[0].b == data["expected"]
@pytest.mark.parametrize("func, values", [
("idxmin", {'c_int': [0, 2], 'c_float': [1, 3], 'c_date': [1, 2]}),
("idxmax", {'c_int': [1, 3], 'c_float': [0, 2], 'c_date': [0, 3]})
])
def test_idxmin_idxmax_returns_int_types(func, values):
# GH 25444
df = pd.DataFrame({'name': ['A', 'A', 'B', 'B'],
'c_int': [1, 2, 3, 4],
'c_float': [4.02, 3.03, 2.04, 1.05],
'c_date': ['2019', '2018', '2016', '2017']})
df['c_date'] = pd.to_datetime(df['c_date'])
result = getattr(df.groupby('name'), func)()
expected = pd.DataFrame(values, index=Index(['A', 'B'], name="name"))
tm.assert_frame_equal(result, expected)
def test_fill_consistency():
# GH9221
# pass thru keyword arguments to the generated wrapper
# are set if the passed kw is None (only)
df = DataFrame(index=pd.MultiIndex.from_product(
[['value1', 'value2'], date_range('2014-01-01', '2014-01-06')]),
columns=Index(
['1', '2'], name='id'))
df['1'] = [np.nan, 1, np.nan, np.nan, 11, np.nan, np.nan, 2, np.nan,
np.nan, 22, np.nan]
df['2'] = [np.nan, 3, np.nan, np.nan, 33, np.nan, np.nan, 4, np.nan,
np.nan, 44, np.nan]
expected = df.groupby(level=0, axis=0).fillna(method='ffill')
result = df.T.groupby(level=0, axis=1).fillna(method='ffill').T
tm.assert_frame_equal(result, expected)
def test_groupby_cumprod():
# GH 4095
df = pd.DataFrame({'key': ['b'] * 10, 'value': 2})
actual = df.groupby('key')['value'].cumprod()
expected = df.groupby('key')['value'].apply(lambda x: x.cumprod())
expected.name = 'value'
tm.assert_series_equal(actual, expected)
df =
|
pd.DataFrame({'key': ['b'] * 100, 'value': 2})
|
pandas.DataFrame
|
from flask import Flask,request
import pandas as pd
import numpy as np
import json
import pickle
import os
app= Flask(__name__)
#Load Model and Scaler Files
model_path=os.path.join(os.path.pardir,os.path.pardir,'models')
model_filepath=os.path.join(model_path,'lr_model.pkl')
scaler_filepath=os.path.join(model_path,'lr_scaler.pkl')
scaler=pickle.load(open(scaler_filepath, 'rb'))
model=pickle.load(open(model_filepath, 'rb'))
# columns
columns=[u'Age', u'Fare', u'FamilySize',
u'IsMother', u'IsMale',
u'Deck_A', u'Deck_B', u'Deck_C', u'Deck_D', u'Deck_E', u'Deck_F',
u'Deck_G', u'Deck_Z', u'Pclass_1', u'Pclass_2', u'Pclass_3',
u'Title_Lady', u'Title_Master', u'Title_Miss', u'Title_Mr',
u'Title_Mrs', u'Title_Officer', u'Title_Sir', u'Fare_Bin_very_low',
u'Fare_Bin_low', u'Fare_Bin_high', u'Fare_Bin_very_high', u'Embarked_C',
u'Embarked_Q', u'Embarked_S', u'AgeState_Adult', u'AgeState_Child']
@app.route('/api',methods=['POST'])
def make_prediction():
# read the JSON object and convert it to a JSON string
data=json.dumps(request.get_json(force=True))
# create pandas dataframe using json string
df=pd.read_json(data)
#extract passengerIds
passenger_ids=df['PassengerId'].ravel()
#actual survived values
actuals=df['Survived'].ravel()
#extract the required columns and convert them to a matrix
X=df[columns].as_matrix().astype('float')
# transform the input
X_scaled=scaler.transform(X)
#make predictions
predictions=model.predict(X_scaled)
# create response dataframe
df_response=
|
pd.DataFrame({'PassengerId':passenger_ids,'Predicted':predictions,'Actual':actuals})
|
pandas.DataFrame
|
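# A minimal sketch of calling the /api endpoint above from a client. The host,
# port, and the assumption that the view returns JSON are illustrative; note also
# that DataFrame.as_matrix() used above was removed in pandas 1.0, so newer pandas
# would need df[columns].to_numpy().astype('float') instead.
def call_survival_api(passengers_df):
    import requests
    resp = requests.post("http://127.0.0.1:5000/api", data=passengers_df.to_json())
    return resp.json()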
# -*- coding: utf-8 -*-
"""
docstring goes here.
:copyright: Copyright 2014 by the Elephant team, see AUTHORS.txt.
:license: Modified BSD, see LICENSE.txt for details.
"""
from __future__ import division, print_function
import unittest
from itertools import chain
from neo.test.generate_datasets import fake_neo
import numpy as np
from numpy.testing.utils import assert_array_equal
import quantities as pq
try:
import pandas as pd
from pandas.util.testing import assert_frame_equal, assert_index_equal
except ImportError:
HAVE_PANDAS = False
else:
import elephant.pandas_bridge as ep
HAVE_PANDAS = True
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class MultiindexFromDictTestCase(unittest.TestCase):
def test__multiindex_from_dict(self):
inds = {'test1': 6.5,
'test2': 5,
'test3': 'test'}
targ = pd.MultiIndex(levels=[[6.5], [5], ['test']],
labels=[[0], [0], [0]],
names=['test1', 'test2', 'test3'])
res0 = ep._multiindex_from_dict(inds)
self.assertEqual(targ.levels, res0.levels)
self.assertEqual(targ.names, res0.names)
self.assertEqual(targ.labels, res0.labels)
def _convert_levels(levels):
"""Convert a list of levels to the format pandas returns for a MultiIndex.
Parameters
----------
levels : list
The list of levels to convert.
Returns
-------
list
The levels in `list` converted to the values pandas will give.
"""
levels = list(levels)
for i, level in enumerate(levels):
if hasattr(level, 'lower'):
try:
level = unicode(level)
except NameError:
pass
elif hasattr(level, 'date'):
levels[i] = pd.DatetimeIndex(data=[level])
continue
elif level is None:
levels[i] = pd.Index([])
continue
levels[i] = pd.Index([level])
return levels
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class ConvertValueSafeTestCase(unittest.TestCase):
def test__convert_value_safe__float(self):
targ = 5.5
value = targ
res = ep._convert_value_safe(value)
self.assertIs(res, targ)
def test__convert_value_safe__str(self):
targ = 'test'
value = targ
res = ep._convert_value_safe(value)
self.assertIs(res, targ)
def test__convert_value_safe__bytes(self):
targ = 'test'
value = b'test'
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
def test__convert_value_safe__numpy_int_scalar(self):
targ = 5
value = np.array(5)
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__numpy_float_scalar(self):
targ = 5.
value = np.array(5.)
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__numpy_unicode_scalar(self):
targ = u'test'
value = np.array('test', dtype='U')
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__numpy_str_scalar(self):
targ = u'test'
value = np.array('test', dtype='S')
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__quantity_scalar(self):
targ = (10., 'ms')
value = 10. * pq.ms
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res[0], 'dtype'))
self.assertFalse(hasattr(res[0], 'units'))
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class SpiketrainToDataframeTestCase(unittest.TestCase):
def test__spiketrain_to_dataframe__parents_empty(self):
obj = fake_neo('SpikeTrain', seed=0)
res0 = ep.spiketrain_to_dataframe(obj)
res1 = ep.spiketrain_to_dataframe(obj, child_first=True)
res2 = ep.spiketrain_to_dataframe(obj, child_first=False)
res3 = ep.spiketrain_to_dataframe(obj, parents=True)
res4 = ep.spiketrain_to_dataframe(obj, parents=True,
child_first=True)
res5 = ep.spiketrain_to_dataframe(obj, parents=True,
child_first=False)
res6 = ep.spiketrain_to_dataframe(obj, parents=False)
res7 = ep.spiketrain_to_dataframe(obj, parents=False, child_first=True)
res8 = ep.spiketrain_to_dataframe(obj, parents=False,
child_first=False)
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(1, len(res4.columns))
self.assertEqual(1, len(res5.columns))
self.assertEqual(1, len(res6.columns))
self.assertEqual(1, len(res7.columns))
self.assertEqual(1, len(res8.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
self.assertEqual(len(obj), len(res2.index))
self.assertEqual(len(obj), len(res3.index))
self.assertEqual(len(obj), len(res4.index))
self.assertEqual(len(obj), len(res5.index))
self.assertEqual(len(obj), len(res6.index))
self.assertEqual(len(obj), len(res7.index))
self.assertEqual(len(obj), len(res8.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targvalues, res4.values)
assert_array_equal(targvalues, res5.values)
assert_array_equal(targvalues, res6.values)
assert_array_equal(targvalues, res7.values)
assert_array_equal(targvalues, res8.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
assert_array_equal(targindex, res3.index)
assert_array_equal(targindex, res4.index)
assert_array_equal(targindex, res5.index)
assert_array_equal(targindex, res6.index)
assert_array_equal(targindex, res7.index)
assert_array_equal(targindex, res8.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(['spike_number'], res2.index.names)
self.assertEqual(['spike_number'], res3.index.names)
self.assertEqual(['spike_number'], res4.index.names)
self.assertEqual(['spike_number'], res5.index.names)
self.assertEqual(['spike_number'], res6.index.names)
self.assertEqual(['spike_number'], res7.index.names)
self.assertEqual(['spike_number'], res8.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
self.assertEqual(keys, res4.columns.names)
self.assertEqual(keys, res5.columns.names)
self.assertEqual(keys, res6.columns.names)
self.assertEqual(keys, res7.columns.names)
self.assertEqual(keys, res8.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res4.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res5.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res6.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res7.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res8.columns.levels):
assert_index_equal(value, level)
def test__spiketrain_to_dataframe__noparents(self):
blk = fake_neo('Block', seed=0)
obj = blk.list_children_by_class('SpikeTrain')[0]
res0 = ep.spiketrain_to_dataframe(obj, parents=False)
res1 = ep.spiketrain_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.spiketrain_to_dataframe(obj, parents=False,
child_first=False)
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=False,
child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
self.assertEqual(len(obj), len(res2.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(['spike_number'], res2.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
def test__spiketrain_to_dataframe__parents_childfirst(self):
blk = fake_neo('Block', seed=0)
obj = blk.list_children_by_class('SpikeTrain')[0]
res0 = ep.spiketrain_to_dataframe(obj)
res1 = ep.spiketrain_to_dataframe(obj, child_first=True)
res2 = ep.spiketrain_to_dataframe(obj, parents=True)
res3 = ep.spiketrain_to_dataframe(obj, parents=True, child_first=True)
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
self.assertEqual(len(obj), len(res2.index))
self.assertEqual(len(obj), len(res3.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
assert_array_equal(targindex, res3.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(['spike_number'], res2.index.names)
self.assertEqual(['spike_number'], res3.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
def test__spiketrain_to_dataframe__parents_parentfirst(self):
blk = fake_neo('Block', seed=0)
obj = blk.list_children_by_class('SpikeTrain')[0]
res0 = ep.spiketrain_to_dataframe(obj, child_first=False)
res1 = ep.spiketrain_to_dataframe(obj, parents=True, child_first=False)
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=False)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class EventToDataframeTestCase(unittest.TestCase):
def test__event_to_dataframe__parents_empty(self):
obj = fake_neo('Event', seed=42)
res0 = ep.event_to_dataframe(obj)
res1 = ep.event_to_dataframe(obj, child_first=True)
res2 = ep.event_to_dataframe(obj, child_first=False)
res3 = ep.event_to_dataframe(obj, parents=True)
res4 = ep.event_to_dataframe(obj, parents=True, child_first=True)
res5 = ep.event_to_dataframe(obj, parents=True, child_first=False)
res6 = ep.event_to_dataframe(obj, parents=False)
res7 = ep.event_to_dataframe(obj, parents=False, child_first=True)
res8 = ep.event_to_dataframe(obj, parents=False, child_first=False)
targvalues = obj.labels[:len(obj.times)][np.newaxis].T.astype('U')
targindex = obj.times[:len(obj.labels)].rescale('s').magnitude
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(1, len(res4.columns))
self.assertEqual(1, len(res5.columns))
self.assertEqual(1, len(res6.columns))
self.assertEqual(1, len(res7.columns))
self.assertEqual(1, len(res8.columns))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res2.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res3.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res4.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res5.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res6.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res7.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res8.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targvalues, res4.values)
assert_array_equal(targvalues, res5.values)
assert_array_equal(targvalues, res6.values)
assert_array_equal(targvalues, res7.values)
assert_array_equal(targvalues, res8.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
assert_array_equal(targindex, res3.index)
assert_array_equal(targindex, res4.index)
assert_array_equal(targindex, res5.index)
assert_array_equal(targindex, res6.index)
assert_array_equal(targindex, res7.index)
assert_array_equal(targindex, res8.index)
self.assertEqual(['times'], res0.index.names)
self.assertEqual(['times'], res1.index.names)
self.assertEqual(['times'], res2.index.names)
self.assertEqual(['times'], res3.index.names)
self.assertEqual(['times'], res4.index.names)
self.assertEqual(['times'], res5.index.names)
self.assertEqual(['times'], res6.index.names)
self.assertEqual(['times'], res7.index.names)
self.assertEqual(['times'], res8.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
self.assertEqual(keys, res4.columns.names)
self.assertEqual(keys, res5.columns.names)
self.assertEqual(keys, res6.columns.names)
self.assertEqual(keys, res7.columns.names)
self.assertEqual(keys, res8.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res4.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res5.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res6.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res7.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res8.columns.levels):
assert_index_equal(value, level)
def test__event_to_dataframe__noparents(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Event')[0]
res0 = ep.event_to_dataframe(obj, parents=False)
res1 = ep.event_to_dataframe(obj, parents=False, child_first=False)
res2 = ep.event_to_dataframe(obj, parents=False, child_first=True)
targvalues = obj.labels[:len(obj.times)][np.newaxis].T.astype('U')
targindex = obj.times[:len(obj.labels)].rescale('s').magnitude
attrs = ep._extract_neo_attrs_safe(obj, parents=False,
child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res2.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
self.assertEqual(['times'], res0.index.names)
self.assertEqual(['times'], res1.index.names)
self.assertEqual(['times'], res2.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
def test__event_to_dataframe__parents_childfirst(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Event')[0]
res0 = ep.event_to_dataframe(obj)
res1 = ep.event_to_dataframe(obj, child_first=True)
res2 = ep.event_to_dataframe(obj, parents=True)
res3 = ep.event_to_dataframe(obj, parents=True, child_first=True)
targvalues = obj.labels[:len(obj.times)][np.newaxis].T.astype('U')
targindex = obj.times[:len(obj.labels)].rescale('s').magnitude
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res2.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res3.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
assert_array_equal(targindex, res3.index)
self.assertEqual(['times'], res0.index.names)
self.assertEqual(['times'], res1.index.names)
self.assertEqual(['times'], res2.index.names)
self.assertEqual(['times'], res3.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
def test__event_to_dataframe__parents_parentfirst(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Event')[0]
res0 = ep.event_to_dataframe(obj, child_first=False)
res1 = ep.event_to_dataframe(obj, parents=True, child_first=False)
targvalues = obj.labels[:len(obj.times)][np.newaxis].T.astype('U')
targindex = obj.times[:len(obj.labels)].rescale('s').magnitude
attrs = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=False)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res1.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
self.assertEqual(['times'], res0.index.names)
self.assertEqual(['times'], res1.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class EpochToDataframeTestCase(unittest.TestCase):
def test__epoch_to_dataframe__parents_empty(self):
obj = fake_neo('Epoch', seed=42)
res0 = ep.epoch_to_dataframe(obj)
res1 = ep.epoch_to_dataframe(obj, child_first=True)
res2 = ep.epoch_to_dataframe(obj, child_first=False)
res3 = ep.epoch_to_dataframe(obj, parents=True)
res4 = ep.epoch_to_dataframe(obj, parents=True, child_first=True)
res5 = ep.epoch_to_dataframe(obj, parents=True, child_first=False)
res6 = ep.epoch_to_dataframe(obj, parents=False)
res7 = ep.epoch_to_dataframe(obj, parents=False, child_first=True)
res8 = ep.epoch_to_dataframe(obj, parents=False, child_first=False)
minlen = min([len(obj.times), len(obj.durations), len(obj.labels)])
targvalues = obj.labels[:minlen][np.newaxis].T.astype('U')
targindex = np.vstack([obj.durations[:minlen].rescale('s').magnitude,
obj.times[:minlen].rescale('s').magnitude])
targvalues = targvalues[targindex.argsort()[0], :]
targindex.sort()
attrs = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(1, len(res4.columns))
self.assertEqual(1, len(res5.columns))
self.assertEqual(1, len(res6.columns))
self.assertEqual(1, len(res7.columns))
self.assertEqual(1, len(res8.columns))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res2.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res3.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res4.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res5.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res6.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res7.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res8.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targvalues, res4.values)
assert_array_equal(targvalues, res5.values)
assert_array_equal(targvalues, res6.values)
assert_array_equal(targvalues, res7.values)
assert_array_equal(targvalues, res8.values)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
self.assertEqual(keys, res4.columns.names)
self.assertEqual(keys, res5.columns.names)
self.assertEqual(keys, res6.columns.names)
self.assertEqual(keys, res7.columns.names)
self.assertEqual(keys, res8.columns.names)
self.assertEqual([u'durations', u'times'], res0.index.names)
self.assertEqual([u'durations', u'times'], res1.index.names)
self.assertEqual([u'durations', u'times'], res2.index.names)
self.assertEqual([u'durations', u'times'], res3.index.names)
self.assertEqual([u'durations', u'times'], res4.index.names)
self.assertEqual([u'durations', u'times'], res5.index.names)
self.assertEqual([u'durations', u'times'], res6.index.names)
self.assertEqual([u'durations', u'times'], res7.index.names)
self.assertEqual([u'durations', u'times'], res8.index.names)
self.assertEqual(2, len(res0.index.levels))
self.assertEqual(2, len(res1.index.levels))
self.assertEqual(2, len(res2.index.levels))
self.assertEqual(2, len(res3.index.levels))
self.assertEqual(2, len(res4.index.levels))
self.assertEqual(2, len(res5.index.levels))
self.assertEqual(2, len(res6.index.levels))
self.assertEqual(2, len(res7.index.levels))
self.assertEqual(2, len(res8.index.levels))
assert_array_equal(targindex, res0.index.levels)
assert_array_equal(targindex, res1.index.levels)
assert_array_equal(targindex, res2.index.levels)
assert_array_equal(targindex, res3.index.levels)
assert_array_equal(targindex, res4.index.levels)
assert_array_equal(targindex, res5.index.levels)
assert_array_equal(targindex, res6.index.levels)
assert_array_equal(targindex, res7.index.levels)
assert_array_equal(targindex, res8.index.levels)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res4.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res5.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res6.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res7.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res8.columns.levels):
assert_index_equal(value, level)
def test__epoch_to_dataframe__noparents(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Epoch')[0]
res0 = ep.epoch_to_dataframe(obj, parents=False)
res1 = ep.epoch_to_dataframe(obj, parents=False, child_first=True)
res2 = ep.epoch_to_dataframe(obj, parents=False, child_first=False)
minlen = min([len(obj.times), len(obj.durations), len(obj.labels)])
targvalues = obj.labels[:minlen][np.newaxis].T.astype('U')
targindex = np.vstack([obj.durations[:minlen].rescale('s').magnitude,
obj.times[:minlen].rescale('s').magnitude])
targvalues = targvalues[targindex.argsort()[0], :]
targindex.sort()
attrs = ep._extract_neo_attrs_safe(obj, parents=False,
child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res2.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual([u'durations', u'times'], res0.index.names)
self.assertEqual([u'durations', u'times'], res1.index.names)
self.assertEqual([u'durations', u'times'], res2.index.names)
self.assertEqual(2, len(res0.index.levels))
self.assertEqual(2, len(res1.index.levels))
self.assertEqual(2, len(res2.index.levels))
assert_array_equal(targindex, res0.index.levels)
assert_array_equal(targindex, res1.index.levels)
assert_array_equal(targindex, res2.index.levels)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
def test__epoch_to_dataframe__parents_childfirst(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Epoch')[0]
res0 = ep.epoch_to_dataframe(obj)
res1 = ep.epoch_to_dataframe(obj, child_first=True)
res2 = ep.epoch_to_dataframe(obj, parents=True)
res3 = ep.epoch_to_dataframe(obj, parents=True, child_first=True)
minlen = min([len(obj.times), len(obj.durations), len(obj.labels)])
targvalues = obj.labels[:minlen][np.newaxis].T.astype('U')
targindex = np.vstack([obj.durations[:minlen].rescale('s').magnitude,
obj.times[:minlen].rescale('s').magnitude])
targvalues = targvalues[targindex.argsort()[0], :]
targindex.sort()
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res2.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res3.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
self.assertEqual([u'durations', u'times'], res0.index.names)
self.assertEqual([u'durations', u'times'], res1.index.names)
self.assertEqual([u'durations', u'times'], res2.index.names)
self.assertEqual([u'durations', u'times'], res3.index.names)
self.assertEqual(2, len(res0.index.levels))
self.assertEqual(2, len(res1.index.levels))
self.assertEqual(2, len(res2.index.levels))
self.assertEqual(2, len(res3.index.levels))
assert_array_equal(targindex, res0.index.levels)
assert_array_equal(targindex, res1.index.levels)
assert_array_equal(targindex, res2.index.levels)
assert_array_equal(targindex, res3.index.levels)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
def test__epoch_to_dataframe__parents_parentfirst(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Epoch')[0]
res0 = ep.epoch_to_dataframe(obj, child_first=False)
res1 = ep.epoch_to_dataframe(obj, parents=True, child_first=False)
minlen = min([len(obj.times), len(obj.durations), len(obj.labels)])
targvalues = obj.labels[:minlen][np.newaxis].T.astype('U')
targindex = np.vstack([obj.durations[:minlen].rescale('s').magnitude,
obj.times[:minlen].rescale('s').magnitude])
targvalues = targvalues[targindex.argsort()[0], :]
targindex.sort()
attrs = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=False)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res1.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual([u'durations', u'times'], res0.index.names)
self.assertEqual([u'durations', u'times'], res1.index.names)
self.assertEqual(2, len(res0.index.levels))
self.assertEqual(2, len(res1.index.levels))
assert_array_equal(targindex, res0.index.levels)
assert_array_equal(targindex, res1.index.levels)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class MultiSpiketrainsToDataframeTestCase(unittest.TestCase):
def setUp(self):
if hasattr(self, 'assertItemsEqual'):
self.assertCountEqual = self.assertItemsEqual
def test__multi_spiketrains_to_dataframe__single(self):
obj = fake_neo('SpikeTrain', seed=0, n=5)
res0 = ep.multi_spiketrains_to_dataframe(obj)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=False)
res2 = ep.multi_spiketrains_to_dataframe(obj, parents=True)
res3 = ep.multi_spiketrains_to_dataframe(obj, child_first=True)
res4 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=True)
res5 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=True)
res6 = ep.multi_spiketrains_to_dataframe(obj, child_first=False)
res7 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=False)
res8 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=False)
targ = ep.spiketrain_to_dataframe(obj)
keys = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = 1
targlen = len(obj)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targwidth, len(res4.columns))
self.assertEqual(targwidth, len(res5.columns))
self.assertEqual(targwidth, len(res6.columns))
self.assertEqual(targwidth, len(res7.columns))
self.assertEqual(targwidth, len(res8.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertEqual(targlen, len(res4.index))
self.assertEqual(targlen, len(res5.index))
self.assertEqual(targlen, len(res6.index))
self.assertEqual(targlen, len(res7.index))
self.assertEqual(targlen, len(res8.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
self.assertCountEqual(keys, res4.columns.names)
self.assertCountEqual(keys, res5.columns.names)
self.assertCountEqual(keys, res6.columns.names)
self.assertCountEqual(keys, res7.columns.names)
self.assertCountEqual(keys, res8.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_array_equal(targ.values, res4.values)
assert_array_equal(targ.values, res5.values)
assert_array_equal(targ.values, res6.values)
assert_array_equal(targ.values, res7.values)
assert_array_equal(targ.values, res8.values)
        assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
assert_frame_equal(targ, res4)
assert_frame_equal(targ, res5)
assert_frame_equal(targ, res6)
assert_frame_equal(targ, res7)
assert_frame_equal(targ, res8)
def test__multi_spiketrains_to_dataframe__unit_default(self):
obj = fake_neo('Unit', seed=0, n=5)
res0 = ep.multi_spiketrains_to_dataframe(obj)
objs = obj.spiketrains
targ = [ep.spiketrain_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_spiketrains_to_dataframe__segment_default(self):
obj = fake_neo('Segment', seed=0, n=5)
res0 = ep.multi_spiketrains_to_dataframe(obj)
objs = obj.spiketrains
targ = [ep.spiketrain_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_spiketrains_to_dataframe__block_noparents(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_spiketrains_to_dataframe(obj, parents=False)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=False)
objs = obj.list_children_by_class('SpikeTrain')
targ = [ep.spiketrain_to_dataframe(iobj,
parents=False, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=False,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
def test__multi_spiketrains_to_dataframe__block_parents_childfirst(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_spiketrains_to_dataframe(obj)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=True)
res2 = ep.multi_spiketrains_to_dataframe(obj, child_first=True)
res3 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=True)
objs = obj.list_children_by_class('SpikeTrain')
targ = [ep.spiketrain_to_dataframe(iobj,
parents=True, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
def test__multi_spiketrains_to_dataframe__block_parents_parentfirst(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_spiketrains_to_dataframe(obj, child_first=False)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=False)
objs = obj.list_children_by_class('SpikeTrain')
targ = [ep.spiketrain_to_dataframe(iobj,
parents=True, child_first=False)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=False).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
def test__multi_spiketrains_to_dataframe__list_noparents(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_spiketrains_to_dataframe(obj, parents=False)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=False)
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj,
parents=False, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=False,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
def test__multi_spiketrains_to_dataframe__list_parents_childfirst(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_spiketrains_to_dataframe(obj)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=True)
res2 = ep.multi_spiketrains_to_dataframe(obj, child_first=True)
res3 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=True)
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj,
parents=True, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
def test__multi_spiketrains_to_dataframe__list_parents_parentfirst(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_spiketrains_to_dataframe(obj, child_first=False)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=False)
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj,
parents=True, child_first=False)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=False).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
def test__multi_spiketrains_to_dataframe__tuple_default(self):
obj = tuple(fake_neo('Block', seed=i, n=3) for i in range(3))
res0 = ep.multi_spiketrains_to_dataframe(obj)
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_spiketrains_to_dataframe__iter_default(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_spiketrains_to_dataframe(iter(obj))
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_spiketrains_to_dataframe__dict_default(self):
obj = dict((i, fake_neo('Block', seed=i, n=3)) for i in range(3))
res0 = ep.multi_spiketrains_to_dataframe(obj)
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in
obj.values())
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class MultiEventsToDataframeTestCase(unittest.TestCase):
def setUp(self):
if hasattr(self, 'assertItemsEqual'):
self.assertCountEqual = self.assertItemsEqual
def test__multi_events_to_dataframe__single(self):
obj = fake_neo('Event', seed=0, n=5)
res0 = ep.multi_events_to_dataframe(obj)
res1 = ep.multi_events_to_dataframe(obj, parents=False)
res2 = ep.multi_events_to_dataframe(obj, parents=True)
res3 = ep.multi_events_to_dataframe(obj, child_first=True)
res4 = ep.multi_events_to_dataframe(obj, parents=False,
child_first=True)
res5 = ep.multi_events_to_dataframe(obj, parents=True,
child_first=True)
res6 = ep.multi_events_to_dataframe(obj, child_first=False)
res7 = ep.multi_events_to_dataframe(obj, parents=False,
child_first=False)
res8 = ep.multi_events_to_dataframe(obj, parents=True,
child_first=False)
targ = ep.event_to_dataframe(obj)
keys = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = 1
targlen = min(len(obj.times), len(obj.labels))
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targwidth, len(res4.columns))
self.assertEqual(targwidth, len(res5.columns))
self.assertEqual(targwidth, len(res6.columns))
self.assertEqual(targwidth, len(res7.columns))
self.assertEqual(targwidth, len(res8.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertEqual(targlen, len(res4.index))
self.assertEqual(targlen, len(res5.index))
self.assertEqual(targlen, len(res6.index))
self.assertEqual(targlen, len(res7.index))
self.assertEqual(targlen, len(res8.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
self.assertCountEqual(keys, res4.columns.names)
self.assertCountEqual(keys, res5.columns.names)
self.assertCountEqual(keys, res6.columns.names)
self.assertCountEqual(keys, res7.columns.names)
self.assertCountEqual(keys, res8.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_array_equal(targ.values, res4.values)
assert_array_equal(targ.values, res5.values)
assert_array_equal(targ.values, res6.values)
assert_array_equal(targ.values, res7.values)
assert_array_equal(targ.values, res8.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
assert_frame_equal(targ, res4)
assert_frame_equal(targ, res5)
assert_frame_equal(targ, res6)
assert_frame_equal(targ, res7)
assert_frame_equal(targ, res8)
def test__multi_events_to_dataframe__segment_default(self):
obj = fake_neo('Segment', seed=0, n=5)
res0 = ep.multi_events_to_dataframe(obj)
objs = obj.events
targ = [ep.event_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_events_to_dataframe__block_noparents(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_events_to_dataframe(obj, parents=False)
res1 = ep.multi_events_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.multi_events_to_dataframe(obj, parents=False,
child_first=False)
objs = obj.list_children_by_class('Event')
targ = [ep.event_to_dataframe(iobj, parents=False, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=False,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
# -*- coding: utf-8 -*-
from collections import OrderedDict
from datetime import date, datetime, timedelta
import numpy as np
import pytest
from pandas.compat import product, range
import pandas as pd
from pandas import (
Categorical, DataFrame, Grouper, Index, MultiIndex, Series, concat,
date_range)
from pandas.api.types import CategoricalDtype as CDT
from pandas.core.reshape.pivot import crosstab, pivot_table
import pandas.util.testing as tm
@pytest.fixture(params=[True, False])
def dropna(request):
return request.param
class TestPivotTable(object):
def setup_method(self, method):
self.data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
def test_pivot_table(self):
index = ['A', 'B']
columns = 'C'
table = pivot_table(self.data, values='D',
index=index, columns=columns)
table2 = self.data.pivot_table(
values='D', index=index, columns=columns)
tm.assert_frame_equal(table, table2)
# this works
pivot_table(self.data, values='D', index=index)
if len(index) > 1:
assert table.index.names == tuple(index)
else:
assert table.index.name == index[0]
if len(columns) > 1:
assert table.columns.names == columns
else:
assert table.columns.name == columns[0]
expected = self.data.groupby(
index + [columns])['D'].agg(np.mean).unstack()
tm.assert_frame_equal(table, expected)
def test_pivot_table_nocols(self):
df = DataFrame({'rows': ['a', 'b', 'c'],
'cols': ['x', 'y', 'z'],
'values': [1, 2, 3]})
rs = df.pivot_table(columns='cols', aggfunc=np.sum)
xp = df.pivot_table(index='cols', aggfunc=np.sum).T
tm.assert_frame_equal(rs, xp)
rs = df.pivot_table(columns='cols', aggfunc={'values': 'mean'})
xp = df.pivot_table(index='cols', aggfunc={'values': 'mean'}).T
tm.assert_frame_equal(rs, xp)
def test_pivot_table_dropna(self):
df = DataFrame({'amount': {0: 60000, 1: 100000, 2: 50000, 3: 30000},
'customer': {0: 'A', 1: 'A', 2: 'B', 3: 'C'},
'month': {0: 201307, 1: 201309, 2: 201308, 3: 201310},
'product': {0: 'a', 1: 'b', 2: 'c', 3: 'd'},
'quantity': {0: 2000000, 1: 500000,
2: 1000000, 3: 1000000}})
pv_col = df.pivot_table('quantity', 'month', [
'customer', 'product'], dropna=False)
pv_ind = df.pivot_table(
'quantity', ['customer', 'product'], 'month', dropna=False)
m = MultiIndex.from_tuples([('A', 'a'), ('A', 'b'), ('A', 'c'),
('A', 'd'), ('B', 'a'), ('B', 'b'),
('B', 'c'), ('B', 'd'), ('C', 'a'),
('C', 'b'), ('C', 'c'), ('C', 'd')],
names=['customer', 'product'])
tm.assert_index_equal(pv_col.columns, m)
tm.assert_index_equal(pv_ind.index, m)
def test_pivot_table_categorical(self):
cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
result = pd.pivot_table(df, values='values', index=['A', 'B'],
dropna=True)
exp_index = pd.MultiIndex.from_arrays(
[cat1, cat2],
names=['A', 'B'])
expected = DataFrame(
{'values': [1, 2, 3, 4]},
index=exp_index)
tm.assert_frame_equal(result, expected)
def test_pivot_table_dropna_categoricals(self, dropna):
# GH 15193
categories = ['a', 'b', 'c', 'd']
df = DataFrame({'A': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c'],
'B': [1, 2, 3, 1, 2, 3, 1, 2, 3],
'C': range(0, 9)})
df['A'] = df['A'].astype(CDT(categories, ordered=False))
result = df.pivot_table(index='B', columns='A', values='C',
dropna=dropna)
expected_columns = Series(['a', 'b', 'c'], name='A')
expected_columns = expected_columns.astype(
CDT(categories, ordered=False))
expected_index = Series([1, 2, 3], name='B')
expected = DataFrame([[0, 3, 6],
[1, 4, 7],
[2, 5, 8]],
index=expected_index,
columns=expected_columns,)
if not dropna:
# add back the non observed to compare
expected = expected.reindex(
columns=Categorical(categories)).astype('float')
tm.assert_frame_equal(result, expected)
def test_pivot_with_non_observable_dropna(self, dropna):
# gh-21133
df = pd.DataFrame(
{'A': pd.Categorical([np.nan, 'low', 'high', 'low', 'high'],
categories=['low', 'high'],
ordered=True),
'B': range(5)})
result = df.pivot_table(index='A', values='B', dropna=dropna)
expected = pd.DataFrame(
{'B': [2, 3]},
index=pd.Index(
pd.Categorical.from_codes([0, 1],
categories=['low', 'high'],
ordered=True),
name='A'))
tm.assert_frame_equal(result, expected)
# gh-21378
df = pd.DataFrame(
{'A': pd.Categorical(['left', 'low', 'high', 'low', 'high'],
categories=['low', 'high', 'left'],
ordered=True),
'B': range(5)})
result = df.pivot_table(index='A', values='B', dropna=dropna)
expected = pd.DataFrame(
{'B': [2, 3, 0]},
index=pd.Index(
pd.Categorical.from_codes([0, 1, 2],
categories=['low', 'high', 'left'],
ordered=True),
name='A'))
tm.assert_frame_equal(result, expected)
def test_pass_array(self):
result = self.data.pivot_table(
'D', index=self.data.A, columns=self.data.C)
expected = self.data.pivot_table('D', index='A', columns='C')
tm.assert_frame_equal(result, expected)
def test_pass_function(self):
result = self.data.pivot_table('D', index=lambda x: x // 5,
columns=self.data.C)
expected = self.data.pivot_table('D', index=self.data.index // 5,
columns='C')
tm.assert_frame_equal(result, expected)
def test_pivot_table_multiple(self):
index = ['A', 'B']
columns = 'C'
table = pivot_table(self.data, index=index, columns=columns)
expected = self.data.groupby(index + [columns]).agg(np.mean).unstack()
tm.assert_frame_equal(table, expected)
def test_pivot_dtypes(self):
# can convert dtypes
f = DataFrame({'a': ['cat', 'bat', 'cat', 'bat'], 'v': [
1, 2, 3, 4], 'i': ['a', 'b', 'a', 'b']})
assert f.dtypes['v'] == 'int64'
z = pivot_table(f, values='v', index=['a'], columns=[
'i'], fill_value=0, aggfunc=np.sum)
result = z.get_dtype_counts()
expected = Series(dict(int64=2))
tm.assert_series_equal(result, expected)
# cannot convert dtypes
f = DataFrame({'a': ['cat', 'bat', 'cat', 'bat'], 'v': [
1.5, 2.5, 3.5, 4.5], 'i': ['a', 'b', 'a', 'b']})
assert f.dtypes['v'] == 'float64'
z = pivot_table(f, values='v', index=['a'], columns=[
'i'], fill_value=0, aggfunc=np.mean)
result = z.get_dtype_counts()
expected = Series(dict(float64=2))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('columns,values',
[('bool1', ['float1', 'float2']),
('bool1', ['float1', 'float2', 'bool1']),
('bool2', ['float1', 'float2', 'bool1'])])
def test_pivot_preserve_dtypes(self, columns, values):
# GH 7142 regression test
v = np.arange(5, dtype=np.float64)
df = DataFrame({'float1': v, 'float2': v + 2.0,
'bool1': v <= 2, 'bool2': v <= 3})
df_res = df.reset_index().pivot_table(
index='index', columns=columns, values=values)
result = dict(df_res.dtypes)
expected = {col: np.dtype('O') if col[0].startswith('b')
else np.dtype('float64') for col in df_res}
assert result == expected
def test_pivot_no_values(self):
# GH 14380
idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-01-02',
'2011-01-01', '2011-01-02'])
df = pd.DataFrame({'A': [1, 2, 3, 4, 5]},
index=idx)
res = df.pivot_table(index=df.index.month, columns=df.index.day)
exp_columns = pd.MultiIndex.from_tuples([('A', 1), ('A', 2)])
exp = pd.DataFrame([[2.5, 4.0], [2.0, np.nan]],
index=[1, 2], columns=exp_columns)
tm.assert_frame_equal(res, exp)
df = pd.DataFrame({'A': [1, 2, 3, 4, 5],
'dt': pd.date_range('2011-01-01', freq='D',
periods=5)},
index=idx)
res = df.pivot_table(index=df.index.month,
columns=pd.Grouper(key='dt', freq='M'))
exp_columns = pd.MultiIndex.from_tuples([('A',
pd.Timestamp('2011-01-31'))])
exp_columns.names = [None, 'dt']
exp = pd.DataFrame([3.25, 2.0],
index=[1, 2], columns=exp_columns)
tm.assert_frame_equal(res, exp)
res = df.pivot_table(index=pd.Grouper(freq='A'),
columns=pd.Grouper(key='dt', freq='M'))
exp = pd.DataFrame([3],
index=pd.DatetimeIndex(['2011-12-31']),
columns=exp_columns)
tm.assert_frame_equal(res, exp)
def test_pivot_multi_values(self):
result = pivot_table(self.data, values=['D', 'E'],
index='A', columns=['B', 'C'], fill_value=0)
expected = pivot_table(self.data.drop(['F'], axis=1),
index='A', columns=['B', 'C'], fill_value=0)
tm.assert_frame_equal(result, expected)
def test_pivot_multi_functions(self):
f = lambda func: pivot_table(self.data, values=['D', 'E'],
index=['A', 'B'], columns='C',
aggfunc=func)
result = f([np.mean, np.std])
means = f(np.mean)
stds = f(np.std)
expected = concat([means, stds], keys=['mean', 'std'], axis=1)
tm.assert_frame_equal(result, expected)
# margins not supported??
f = lambda func: pivot_table(self.data, values=['D', 'E'],
index=['A', 'B'], columns='C',
aggfunc=func, margins=True)
result = f([np.mean, np.std])
means = f(np.mean)
stds = f(np.std)
expected = concat([means, stds], keys=['mean', 'std'], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_index_with_nan(self, method):
# GH 3588
nan = np.nan
df = DataFrame({'a': ['R1', 'R2', nan, 'R4'],
'b': ['C1', 'C2', 'C3', 'C4'],
'c': [10, 15, 17, 20]})
if method:
result = df.pivot('a', 'b', 'c')
else:
result = pd.pivot(df, 'a', 'b', 'c')
expected = DataFrame([[nan, nan, 17, nan], [10, nan, nan, nan],
[nan, 15, nan, nan], [nan, nan, nan, 20]],
index=Index([nan, 'R1', 'R2', 'R4'], name='a'),
columns=Index(['C1', 'C2', 'C3', 'C4'], name='b'))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(df.pivot('b', 'a', 'c'), expected.T)
# GH9491
df = DataFrame({'a': pd.date_range('2014-02-01', periods=6, freq='D'),
'c': 100 + np.arange(6)})
df['b'] = df['a'] - pd.Timestamp('2014-02-02')
df.loc[1, 'a'] = df.loc[3, 'a'] = nan
df.loc[1, 'b'] = df.loc[4, 'b'] = nan
if method:
pv = df.pivot('a', 'b', 'c')
else:
pv = pd.pivot(df, 'a', 'b', 'c')
assert pv.notna().values.sum() == len(df)
for _, row in df.iterrows():
assert pv.loc[row['a'], row['b']] == row['c']
if method:
result = df.pivot('b', 'a', 'c')
else:
result = pd.pivot(df, 'b', 'a', 'c')
tm.assert_frame_equal(result, pv.T)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_tz(self, method):
# GH 5878
df = DataFrame({'dt1': [datetime(2013, 1, 1, 9, 0),
datetime(2013, 1, 2, 9, 0),
datetime(2013, 1, 1, 9, 0),
datetime(2013, 1, 2, 9, 0)],
'dt2': [datetime(2014, 1, 1, 9, 0),
datetime(2014, 1, 1, 9, 0),
datetime(2014, 1, 2, 9, 0),
datetime(2014, 1, 2, 9, 0)],
'data1': np.arange(4, dtype='int64'),
'data2': np.arange(4, dtype='int64')})
df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d, tz='US/Pacific'))
df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d, tz='Asia/Tokyo'))
exp_col1 = Index(['data1', 'data1', 'data2', 'data2'])
exp_col2 = pd.DatetimeIndex(['2014/01/01 09:00',
'2014/01/02 09:00'] * 2,
name='dt2', tz='Asia/Tokyo')
exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 2, 0, 2], [1, 3, 1, 3]],
index=pd.DatetimeIndex(['2013/01/01 09:00',
'2013/01/02 09:00'],
name='dt1',
tz='US/Pacific'),
columns=exp_col)
if method:
pv = df.pivot(index='dt1', columns='dt2')
else:
pv = pd.pivot(df, index='dt1', columns='dt2')
tm.assert_frame_equal(pv, expected)
"""
test date_range, bdate_range construction from the convenience range functions
"""
from datetime import datetime, time, timedelta
import numpy as np
import pytest
import pytz
from pytz import timezone
from pandas._libs.tslibs import timezones
from pandas._libs.tslibs.offsets import BDay, CDay, DateOffset, MonthEnd, prefix_mapping
from pandas.errors import OutOfBoundsDatetime
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DatetimeIndex, Timestamp, bdate_range, date_range, offsets
import pandas._testing as tm
from pandas.core.arrays.datetimes import generate_range
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestTimestampEquivDateRange:
# Older tests in TestTimeSeries constructed their `stamp` objects
# using `date_range` instead of the `Timestamp` constructor.
# TestTimestampEquivDateRange checks that these are equivalent in the
# pertinent cases.
def test_date_range_timestamp_equiv(self):
rng = date_range("20090415", "20090519", tz="US/Eastern")
stamp = rng[0]
ts = Timestamp("20090415", tz="US/Eastern", freq="D")
assert ts == stamp
def test_date_range_timestamp_equiv_dateutil(self):
rng = date_range("20090415", "20090519", tz="dateutil/US/Eastern")
stamp = rng[0]
ts = Timestamp("20090415", tz="dateutil/US/Eastern", freq="D")
assert ts == stamp
def test_date_range_timestamp_equiv_explicit_pytz(self):
rng = date_range("20090415", "20090519", tz=pytz.timezone("US/Eastern"))
stamp = rng[0]
ts = Timestamp("20090415", tz=pytz.timezone("US/Eastern"), freq="D")
assert ts == stamp
@td.skip_if_windows_python_3
def test_date_range_timestamp_equiv_explicit_dateutil(self):
from pandas._libs.tslibs.timezones import dateutil_gettz as gettz
rng = date_range("20090415", "20090519", tz=gettz("US/Eastern"))
stamp = rng[0]
ts = Timestamp("20090415", tz=gettz("US/Eastern"), freq="D")
assert ts == stamp
def test_date_range_timestamp_equiv_from_datetime_instance(self):
datetime_instance = datetime(2014, 3, 4)
# build a timestamp with a frequency, since then it supports
# addition/subtraction of integers
timestamp_instance = date_range(datetime_instance, periods=1, freq="D")[0]
ts = Timestamp(datetime_instance, freq="D")
assert ts == timestamp_instance
def test_date_range_timestamp_equiv_preserve_frequency(self):
timestamp_instance = date_range("2014-03-05", periods=1, freq="D")[0]
ts = Timestamp("2014-03-05", freq="D")
assert timestamp_instance == ts
class TestDateRanges:
def test_date_range_nat(self):
# GH#11587
msg = "Neither `start` nor `end` can be NaT"
with pytest.raises(ValueError, match=msg):
date_range(start="2016-01-01", end=pd.NaT, freq="D")
with pytest.raises(ValueError, match=msg):
date_range(start=pd.NaT, end="2016-01-01", freq="D")
def test_date_range_multiplication_overflow(self):
# GH#24255
# check that overflows in calculating `addend = periods * stride`
# are caught
with tm.assert_produces_warning(None):
# we should _not_ be seeing a overflow RuntimeWarning
dti = date_range(start="1677-09-22", periods=213503, freq="D")
assert dti[0] == Timestamp("1677-09-22")
assert len(dti) == 213503
msg = "Cannot generate range with"
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range("1969-05-04", periods=200000000, freq="30000D")
def test_date_range_unsigned_overflow_handling(self):
# GH#24255
# case where `addend = periods * stride` overflows int64 bounds
# but not uint64 bounds
dti = date_range(start="1677-09-22", end="2262-04-11", freq="D")
dti2 = date_range(start=dti[0], periods=len(dti), freq="D")
assert dti2.equals(dti)
dti3 = date_range(end=dti[-1], periods=len(dti), freq="D")
assert dti3.equals(dti)
def test_date_range_int64_overflow_non_recoverable(self):
# GH#24255
# case with start later than 1970-01-01, overflow int64 but not uint64
msg = "Cannot generate range with"
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range(start="1970-02-01", periods=106752 * 24, freq="H")
# case with end before 1970-01-01, overflow int64 but not uint64
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range(end="1969-11-14", periods=106752 * 24, freq="H")
def test_date_range_int64_overflow_stride_endpoint_different_signs(self):
# cases where stride * periods overflow int64 and stride/endpoint
# have different signs
start = Timestamp("2262-02-23")
end = Timestamp("1969-11-14")
expected = date_range(start=start, end=end, freq="-1H")
assert expected[0] == start
assert expected[-1] == end
dti = date_range(end=end, periods=len(expected), freq="-1H")
tm.assert_index_equal(dti, expected)
start2 = Timestamp("1970-02-01")
end2 = Timestamp("1677-10-22")
expected2 = date_range(start=start2, end=end2, freq="-1H")
assert expected2[0] == start2
assert expected2[-1] == end2
dti2 = date_range(start=start2, periods=len(expected2), freq="-1H")
tm.assert_index_equal(dti2, expected2)
def test_date_range_out_of_bounds(self):
# GH#14187
msg = "Cannot generate range"
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range("2016-01-01", periods=100000, freq="D")
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range(end="1763-10-12", periods=100000, freq="D")
def test_date_range_gen_error(self):
rng = date_range("1/1/2000 00:00", "1/1/2000 00:18", freq="5min")
assert len(rng) == 4
@pytest.mark.parametrize("freq", ["AS", "YS"])
def test_begin_year_alias(self, freq):
# see gh-9313
rng = date_range("1/1/2013", "7/1/2017", freq=freq)
exp = DatetimeIndex(
["2013-01-01", "2014-01-01", "2015-01-01", "2016-01-01", "2017-01-01"],
freq=freq,
)
tm.assert_index_equal(rng, exp)
@pytest.mark.parametrize("freq", ["A", "Y"])
def test_end_year_alias(self, freq):
# see gh-9313
rng = date_range("1/1/2013", "7/1/2017", freq=freq)
exp = DatetimeIndex(
["2013-12-31", "2014-12-31", "2015-12-31", "2016-12-31"], freq=freq
)
tm.assert_index_equal(rng, exp)
@pytest.mark.parametrize("freq", ["BA", "BY"])
def test_business_end_year_alias(self, freq):
# see gh-9313
rng = date_range("1/1/2013", "7/1/2017", freq=freq)
exp = DatetimeIndex(
["2013-12-31", "2014-12-31", "2015-12-31", "2016-12-30"], freq=freq
)
tm.assert_index_equal(rng, exp)
def test_date_range_negative_freq(self):
# GH 11018
rng = date_range("2011-12-31", freq="-2A", periods=3)
exp = DatetimeIndex(["2011-12-31", "2009-12-31", "2007-12-31"], freq="-2A")
tm.assert_index_equal(rng, exp)
assert rng.freq == "-2A"
rng = date_range("2011-01-31", freq="-2M", periods=3)
exp = DatetimeIndex(["2011-01-31", "2010-11-30", "2010-09-30"], freq="-2M")
tm.assert_index_equal(rng, exp)
assert rng.freq == "-2M"
def test_date_range_bms_bug(self):
# #1645
rng = date_range("1/1/2000", periods=10, freq="BMS")
ex_first = Timestamp("2000-01-03")
assert rng[0] == ex_first
def test_date_range_normalize(self):
snap = datetime.today()
n = 50
rng = date_range(snap, periods=n, normalize=False, freq="2D")
offset = timedelta(2)
values = DatetimeIndex([snap + i * offset for i in range(n)], freq=offset)
tm.assert_index_equal(rng, values)
rng = date_range("1/1/2000 08:15", periods=n, normalize=False, freq="B")
the_time = time(8, 15)
for val in rng:
assert val.time() == the_time
def test_date_range_fy5252(self):
dr = date_range(
start="2013-01-01",
periods=2,
freq=offsets.FY5253(startingMonth=1, weekday=3, variation="nearest"),
)
assert dr[0] == Timestamp("2013-01-31")
assert dr[1] == Timestamp("2014-01-30")
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
msg = (
"Of the four parameters: start, end, periods, and "
"freq, exactly three must be specified"
)
with pytest.raises(ValueError, match=msg):
date_range(start, end, periods=10, freq="s")
def test_date_range_convenience_periods(self):
# GH 20808
result = date_range("2018-04-24", "2018-04-27", periods=3)
expected = DatetimeIndex(
["2018-04-24 00:00:00", "2018-04-25 12:00:00", "2018-04-27 00:00:00"],
freq=None,
)
tm.assert_index_equal(result, expected)
# Test if spacing remains linear if tz changes to dst in range
result = date_range(
"2018-04-01 01:00:00",
"2018-04-01 04:00:00",
tz="Australia/Sydney",
periods=3,
)
expected = DatetimeIndex(
[
Timestamp("2018-04-01 01:00:00+1100", tz="Australia/Sydney"),
Timestamp("2018-04-01 02:00:00+1000", tz="Australia/Sydney"),
Timestamp("2018-04-01 04:00:00+1000", tz="Australia/Sydney"),
]
)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"start,end,result_tz",
[
["20180101", "20180103", "US/Eastern"],
[datetime(2018, 1, 1), datetime(2018, 1, 3), "US/Eastern"],
[Timestamp("20180101"), Timestamp("20180103"), "US/Eastern"],
[
Timestamp("20180101", tz="US/Eastern"),
Timestamp("20180103", tz="US/Eastern"),
"US/Eastern",
],
[
Timestamp("20180101", tz="US/Eastern"),
Timestamp("20180103", tz="US/Eastern"),
None,
],
],
)
def test_date_range_linspacing_tz(self, start, end, result_tz):
# GH 20983
result = date_range(start, end, periods=3, tz=result_tz)
expected = date_range("20180101", periods=3, freq="D", tz="US/Eastern")
tm.assert_index_equal(result, expected)
def test_date_range_businesshour(self):
idx = DatetimeIndex(
[
"2014-07-04 09:00",
"2014-07-04 10:00",
"2014-07-04 11:00",
"2014-07-04 12:00",
"2014-07-04 13:00",
"2014-07-04 14:00",
"2014-07-04 15:00",
"2014-07-04 16:00",
],
freq="BH",
)
rng = date_range("2014-07-04 09:00", "2014-07-04 16:00", freq="BH")
tm.assert_index_equal(idx, rng)
idx = DatetimeIndex(["2014-07-04 16:00", "2014-07-07 09:00"], freq="BH")
rng = date_range("2014-07-04 16:00", "2014-07-07 09:00", freq="BH")
tm.assert_index_equal(idx, rng)
idx = DatetimeIndex(
[
"2014-07-04 09:00",
"2014-07-04 10:00",
"2014-07-04 11:00",
"2014-07-04 12:00",
"2014-07-04 13:00",
"2014-07-04 14:00",
"2014-07-04 15:00",
"2014-07-04 16:00",
"2014-07-07 09:00",
"2014-07-07 10:00",
"2014-07-07 11:00",
"2014-07-07 12:00",
"2014-07-07 13:00",
"2014-07-07 14:00",
"2014-07-07 15:00",
"2014-07-07 16:00",
"2014-07-08 09:00",
"2014-07-08 10:00",
"2014-07-08 11:00",
"2014-07-08 12:00",
"2014-07-08 13:00",
"2014-07-08 14:00",
"2014-07-08 15:00",
"2014-07-08 16:00",
],
freq="BH",
)
rng = date_range("2014-07-04 09:00", "2014-07-08 16:00", freq="BH")
tm.assert_index_equal(idx, rng)
def test_range_misspecified(self):
# GH #1095
msg = (
"Of the four parameters: start, end, periods, and "
"freq, exactly three must be specified"
)
with pytest.raises(ValueError, match=msg):
date_range(start="1/1/2000")
with pytest.raises(ValueError, match=msg):
date_range(end="1/1/2000")
with pytest.raises(ValueError, match=msg):
date_range(periods=10)
with pytest.raises(ValueError, match=msg):
date_range(start="1/1/2000", freq="H")
with pytest.raises(ValueError, match=msg):
date_range(end="1/1/2000", freq="H")
with pytest.raises(ValueError, match=msg):
date_range(periods=10, freq="H")
with pytest.raises(ValueError, match=msg):
date_range()
def test_compat_replace(self):
# https://github.com/statsmodels/statsmodels/issues/3349
# replace should take ints/longs for compat
result = date_range(
Timestamp("1960-04-01 00:00:00", freq="QS-JAN"), periods=76, freq="QS-JAN"
)
assert len(result) == 76
def test_catch_infinite_loop(self):
offset = offsets.DateOffset(minute=5)
# blow up, don't loop forever
msg = "Offset <DateOffset: minute=5> did not increment date"
with pytest.raises(ValueError, match=msg):
date_range(datetime(2011, 11, 11), datetime(2011, 11, 12), freq=offset)
@pytest.mark.parametrize("periods", (1, 2))
def test_wom_len(self, periods):
# https://github.com/pandas-dev/pandas/issues/20517
res = date_range(start="20110101", periods=periods, freq="WOM-1MON")
assert len(res) == periods
def test_construct_over_dst(self):
# GH 20854
pre_dst = Timestamp("2010-11-07 01:00:00").tz_localize(
"US/Pacific", ambiguous=True
)
pst_dst = Timestamp("2010-11-07 01:00:00").tz_localize(
"US/Pacific", ambiguous=False
)
expect_data = [
Timestamp("2010-11-07 00:00:00", tz="US/Pacific"),
pre_dst,
pst_dst,
]
expected = DatetimeIndex(expect_data, freq="H")
result = date_range(start="2010-11-7", periods=3, freq="H", tz="US/Pacific")
tm.assert_index_equal(result, expected)
def test_construct_with_different_start_end_string_format(self):
# GH 12064
result = date_range(
"2013-01-01 00:00:00+09:00", "2013/01/01 02:00:00+09:00", freq="H"
)
expected = DatetimeIndex(
[
Timestamp("2013-01-01 00:00:00+09:00"),
Timestamp("2013-01-01 01:00:00+09:00"),
Timestamp("2013-01-01 02:00:00+09:00"),
],
freq="H",
)
tm.assert_index_equal(result, expected)
def test_error_with_zero_monthends(self):
msg = r"Offset <0 \* MonthEnds> did not increment date"
with pytest.raises(ValueError, match=msg):
date_range("1/1/2000", "1/1/2001", freq=MonthEnd(0))
def test_range_bug(self):
# GH #770
offset = DateOffset(months=3)
result = date_range("2011-1-1", "2012-1-31", freq=offset)
start = datetime(2011, 1, 1)
expected = DatetimeIndex([start + i * offset for i in range(5)], freq=offset)
tm.assert_index_equal(result, expected)
import sklearn
import sklearn.metrics  # roc_auc_score is called below via the full sklearn.metrics path
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import Lasso
import numpy as np
import pandas as pd
import mosek
import cvxpy as cp
class ControlBurnClassifier:
# Class attributes to store results.
forest = []
weights = []
subforest = []
#Class attributes for parameters to determine convergence.
threshold = 10**-3
tail = 5
# Private Helper Methods
def __log_odds_predict(self,X,log_odds_init,tree_list):
""" Private helper method to return the prediction of a bag-boosted forest
by summing the average log odds over boosting iterations. Returns the final
array of log odds prediction.
"""
res = []
for i in tree_list:
depth = i.max_depth
pred = i.predict(X)
res.append([depth,pred])
res = pd.DataFrame(res,columns = ['depth','pred'])
res = res.groupby('depth')['pred'].apply(np.mean).reset_index()
res = np.sum(res['pred'].to_numpy()) + log_odds_init
return res
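    # Illustrative note (values assumed for exposition, not taken from the code above):
    # if the depth-1 trees predict residuals 0.20 and 0.10 for some sample and the single
    # depth-2 tree predicts 0.05, __log_odds_predict returns
    # log_odds_init + mean([0.20, 0.10]) + 0.05 = log_odds_init + 0.20,
    # i.e. per-depth averages are summed on top of the initial log odds.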
def __converge_test(self,sequence, threshold,tail_length):
""" Private helper method to determine if a sequence converges. Sequence
converges if the tail of the sequence falls within threshold of each
other. Returns True if the sequence converged, False otherwise.
"""
diff = np.diff(sequence)
if len(diff) < (tail_length+1):
return False
else:
return (max(np.abs(diff[-tail_length:])) < threshold)
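    # Illustrative note (assumed numbers): with threshold=1e-3 and tail_length=5, the
    # training-error sequence [0.700, 0.720, 0.7205, 0.7207, 0.7208, 0.7209, 0.7209]
    # is reported as converged because its last five successive differences are all
    # below 1e-3; sequences with fewer than tail_length + 1 differences always return False.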
def __check_OOB_convergence(self,OOB_error_list):
""" Private helper method to check if the improvement in out-of-bag error
for a bag-boosted ensemble converges. Returns True if the last element in the
sequence of errors deltas is <=0, False otherwise.
"""
if OOB_error_list[-1] <= 0:
return True
elif (len(OOB_error_list) < max(self.tail-2,1)+1):
return False
elif all([x < self.threshold for x in OOB_error_list[-max(self.tail-2,1):]]):
return True
else:
return False
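    # Illustrative note: an improvement sequence such as [0.04, 0.01, -0.002] is reported
    # as converged immediately because the last delta is <= 0; alternatively, a run of
    # small positive deltas that all fall below self.threshold also counts as converged.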
#Forest Growing Methods
def bag_forest(self,X,y):
""" Forest growing algorithm that uses the class attribute max_depth as
a hyperparameter.
Adds trees of increasing depth to a bagged ensemble until max_depth is
reached. The number of trees to add at each depth level is determined by
checking if the training error converges.
"""
self.X = X
self.y = y
threshold = self.threshold
tail = self.tail
train = X.copy()
train = train.reset_index().drop('index',axis = 1)
train['y'] = list(y)
features = X.columns
tree_list = []
max_depth = self.max_depth
for depth in range (1,max_depth+1):
early_stop_pred = []
early_stop_train_err = []
converged = False
while converged == False:
train1 = train.sample(n = len(train), replace = True)
y1 = train1['y']
X1 = train1[features]
clf = DecisionTreeClassifier(max_depth = depth)
clf.fit(X1,y1)
tree_list.append(clf)
pred = clf.predict_proba(X[features])[:,1]
early_stop_pred.append(pred)
early_stop_train_err.append(sklearn.metrics.roc_auc_score(y,(np.mean(early_stop_pred,axis = 0))))
converged = self.__converge_test(early_stop_train_err,threshold,tail)
self.forest = tree_list
return
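    # Illustrative usage sketch (hypothetical variable names X_train, y_train):
    #   cb = ControlBurnClassifier(build_forest_method='bag', max_depth=3)
    #   cb.bag_forest(X_train, y_train)
    # afterwards cb.forest holds bagged DecisionTreeClassifiers of depths 1..max_depth,
    # with the number of trees per depth set by the training-error convergence test.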
def bagboost_forest(self,X,y):
""" Bag-boosting forest growing algorithm, no hyperparameters needed. The number of
trees to grow at each boosting iteration is determined by the convergence of
the training error. Out-of-bag error is used to determine how many boosting iterations to
conduct.
"""
threshold = self.threshold
tail = self.tail
self.X = X
self.y = y
y = pd.Series(y)
X = X.reset_index().drop('index',axis = 1)
y.index = X.index
#initialization
log_odds = np.log(sum(y)/(len(y)- sum(y)))
prob = np.exp(log_odds)/(1+np.exp(log_odds))
residual = y - prob
train = X.copy()
train['y'] = list(residual)
features = X.columns
pred_train = np.zeros(len(residual))
tree_list = []
OOB_error_list = []
OOB_converged = False
depth = 1
while OOB_converged == False:
early_stop_pred = []
early_stop_train_err = []
converged = False
OOB_matrix = []
tree_list1 = []
if len(tree_list) > 0:
current_pred = self.__log_odds_predict(X,log_odds,tree_list)
X['current_pred'] = current_pred
current_pred = X['current_pred']
X.drop('current_pred',axis = 1,inplace = True)
else:
X['current_pred'] = log_odds
current_pred = X['current_pred']
X.drop('current_pred',axis = 1,inplace = True)
while converged == False:
train1 = train.sample(n = len(train), replace = True)
OOB = train[~train.index.isin(train1.drop_duplicates().index.values)].index.values
OOB_row = np.repeat(False,len(X))
OOB_row[OOB] = True
OOB_matrix.append(OOB_row)
y1 = train1['y']
X1 = train1[features]
tree = DecisionTreeRegressor(max_depth = depth)
tree.fit(X1,y1)
tree_list.append(tree)
tree_list1.append(tree)
pred = tree.predict(X[features])
early_stop_pred.append(pred)
temp_pred = current_pred + (np.mean(early_stop_pred,axis = 0))
temp_prob = np.exp(temp_pred)/(1+np.exp(temp_pred))
early_stop_train_err.append(sklearn.metrics.roc_auc_score(y,temp_prob))
converged = self.__converge_test(early_stop_train_err,threshold,tail)
pred_train = pred_train + np.mean(early_stop_pred,axis = 0)
if converged == False:
pred_train = pred_train - np.mean(early_stop_pred,axis = 0)
indicators = pd.DataFrame(OOB_matrix).transpose()
OOB_pred_list = []
y2 = y.copy()
y2 = y2[indicators.sum(axis = 1) > 0]
current_pred = current_pred[indicators.sum(axis = 1) > 0]
pred_matrix = np.array([tree_temp.predict(X) for tree_temp in tree_list1])
ind_matrix = np.array(~indicators.values).transpose()
masked = np.ma.masked_array(pred_matrix,ind_matrix)
OOB_pred_list = masked.mean(axis = 0).data[indicators.sum(axis = 1) > 0]
next_pred = np.array(current_pred) + np.array(OOB_pred_list)
current_prob = np.exp(current_pred)/(1+np.exp(current_pred))
next_prob = np.exp(next_pred)/(1+np.exp(next_pred))
current_err = 1 - sklearn.metrics.roc_auc_score(y2,current_prob)
next_err = 1 - sklearn.metrics.roc_auc_score(y2,next_prob)
OOB_error_list.append(current_err-next_err)
all_pred = self.__log_odds_predict(X,log_odds,tree_list)
all_prob = np.exp(all_pred)/(1+np.exp(all_pred))
train['y'] = y-all_prob
OOB_converged = self.__check_OOB_convergence(OOB_error_list)
depth = depth + 1
self.forest = tree_list
return
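    # Illustrative note: each outer iteration above boosts on the current residuals with
    # bagged depth-d regression trees (inner loop), records the out-of-bag improvement in
    # OOB_error_list, increments the depth, and stops once __check_OOB_convergence reports
    # that the out-of-bag improvement has converged.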
def double_bagboost_forest(self,X,y):
""" double bag-boosting forest growing algorithm, no hyperparameters needed. The number of
trees to grow at each boosting iteration is determined by the convergence of
the training error. Out-of-bag error is used to determine how many boosting iterations to
conduct.
"""
threshold = self.threshold
tail = self.tail
self.X = X
self.y = y
y = pd.Series(y)
X = X.reset_index().drop('index',axis = 1)
y.index = X.index
#initialization
log_odds = np.log(sum(y)/(len(y)- sum(y)))
prob = np.exp(log_odds)/(1+np.exp(log_odds))
residual = y - prob
train = X.copy()
train['y'] = list(residual)
features = X.columns
pred_train = np.zeros(len(residual))
tree_list = []
OOB_error_list = []
OOB_converged = False
depth = 1
current_err = None
depth_check = False
depth_err = 99999
depth_converged = False
while depth_converged == False:
early_stop_pred = []
early_stop_train_err = []
converged = False
OOB_matrix = []
tree_list1 = []
if len(tree_list) > 0:
current_pred = self.__log_odds_predict(X,log_odds,tree_list)
X['current_pred'] = current_pred
current_pred = X['current_pred']
X.drop('current_pred',axis = 1,inplace = True)
else:
X['current_pred'] = log_odds
current_pred = X['current_pred']
X.drop('current_pred',axis = 1,inplace = True)
index = 0
while converged == False:
train1 = train.sample(n = len(train), replace = True)
OOB = train[~train.index.isin(train1.drop_duplicates().index.values)].index.values
OOB_row = np.repeat(False,len(X))
OOB_row[OOB] = True
OOB_matrix.append(OOB_row)
y1 = train1['y']
X1 = train1[features]
tree = DecisionTreeRegressor(max_depth = depth)
tree.fit(X1,y1)
tree_list.append(tree)
index = index + 1
tree_list1.append(tree)
pred = tree.predict(X[features])
early_stop_pred.append(pred)
temp_pred = current_pred + (np.mean(early_stop_pred,axis = 0))
temp_prob = np.exp(temp_pred)/(1+np.exp(temp_pred))
early_stop_train_err.append(sklearn.metrics.roc_auc_score(y,temp_prob))
converged = self.__converge_test(early_stop_train_err,threshold,tail)
pred_train = pred_train + np.mean(early_stop_pred,axis = 0)
if converged == False:
pred_train = pred_train - np.mean(early_stop_pred,axis = 0)
indicators = pd.DataFrame(OOB_matrix).transpose()
OOB_pred_list = []
y2 = y.copy()
y2 = y2[indicators.sum(axis = 1) > 0]
current_pred = current_pred[indicators.sum(axis = 1) > 0]
pred_matrix = np.array([tree_temp.predict(X) for tree_temp in tree_list1])
ind_matrix = np.array(~indicators.values).transpose()
masked = np.ma.masked_array(pred_matrix,ind_matrix)
OOB_pred_list = masked.mean(axis = 0).data[indicators.sum(axis = 1) > 0]
next_pred = np.array(current_pred) + np.array(OOB_pred_list)
next_prob = np.exp(next_pred)/(1+np.exp(next_pred))
if current_err is None:
current_prob = np.exp(current_pred)/(1+np.exp(current_pred))
current_err = 1 - sklearn.metrics.roc_auc_score(y2,current_prob)
next_err = 1 - sklearn.metrics.roc_auc_score(y2,next_prob)
OOB_error_list.append(current_err-next_err)
OOB_converged = self.__check_OOB_convergence(OOB_error_list)
if depth_check == True:
depth_check = False
if next_err > depth_err:
depth_converged = True
if OOB_converged == True:
tree_list = tree_list[:-index]
index = 0
depth = depth + 1
depth_err = current_err
depth_check = True
current_err = next_err
all_pred = self.__log_odds_predict(X,log_odds,tree_list)
all_prob = np.exp(all_pred)/(1+np.exp(all_pred))
train['y'] = y-all_prob
self.forest = tree_list
return
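# Illustrative sketch of the boosting arithmetic above, assuming a 30% positive rate;
# the numbers are hypothetical. The updates operate on the log-odds scale:
#   log_odds = np.log(0.3 / 0.7)                           # ~ -0.847
#   prob     = np.exp(log_odds) / (1 + np.exp(log_odds))   # back to 0.3
# Each round then fits regression trees to the residual y - prob, so the ensemble
# prediction is log_odds plus the summed (per-depth averaged) tree outputs.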
#optional arguments
max_depth = 10
build_forest_method = bagboost_forest
polish_method = RandomForestClassifier
alpha = 0.1
solver = 'ECOS_BB'
optimization_form= 'penalized'
#initializer
def __init__(self,alpha = 0.1,max_depth = 10, optimization_form= 'penalized',solver = 'ECOS_BB',build_forest_method = 'bagboost',
polish_method = RandomForestClassifier):
"""
Initializes a ControlBurnClassifier object. Arguments: {alpha: regularization parameter, max_depth: optional
parameter for incremental depth bagging, optimization_form: either 'penalized' or 'constrained', solver: cvxpy solver
used to solve the optimization problem, build_forest_method: either 'bagboost', 'bag', or 'doublebagboost',
polish_method: final model to fit on the selected features}.
"""
if optimization_form not in ['penalized','constrained']:
raise ValueError("optimization_form must be either 'penalized' or 'constrained'")
if build_forest_method not in ['bagboost','bag','doublebagboost']:
raise ValueError("build_forest_method must be either 'bag', 'bagboost', or 'doublebagboost'")
if max_depth <= 0:
raise ValueError("max_depth must be greater than 0")
if alpha <= 0:
raise ValueError("alpha must be greater than 0")
self.alpha = alpha
self.max_depth = max_depth
self.optimization_form = optimization_form
self.solver = solver
self.polish_method = polish_method
if build_forest_method == 'bagboost':
self.build_forest_method = self.bagboost_forest
elif build_forest_method == 'bag':
self.build_forest_method = self.bag_forest
elif build_forest_method == 'doublebagboost':
self.build_forest_method = self.double_bagboost_forest
#Optimization Method
def solve_lasso(self):
""" Solves LASSO optimization problem using class attribute alpha as the
regularization parameter. Stores the selected features, weights, and
subforest.
"""
if len(self.forest) == 0:
raise Exception("Build forest first.")
alpha = self.alpha
X = self.X
y = self.y
y = pd.Series(y)
y.index = X.index
tree_list = self.forest
pred = []
ind = []
if type(tree_list[0]) == sklearn.tree._classes.DecisionTreeClassifier:
for tree in tree_list:
pred.append(tree.predict_proba(X)[:,1])
ind.append([int(x > 0) for x in tree.feature_importances_])
else:
for tree in tree_list:
pred.append(tree.predict(X))
ind.append([int(x > 0) for x in tree.feature_importances_])
pred = np.transpose(pred)
ind = np.transpose(ind)
w = cp.Variable(len(tree_list),nonneg=True)
constraints = []
if self.optimization_form == 'penalized':
loss = -cp.sum( cp.multiply(y, pred@ w ) - cp.logistic(pred @ w) )
objective = (1/len(y))*loss + alpha*cp.norm(cp.matmul(ind,w),1)
if self.optimization_form == 'constrained':
objective = -cp.sum(cp.multiply(y, pred@ w) - cp.logistic(pred @ w))
constraints = [cp.norm(cp.matmul(ind,w),1)<= alpha]
prob = cp.Problem(cp.Minimize(objective),constraints)
if self.solver == 'MOSEK':
prob.solve(solver = cp.MOSEK,mosek_params = {mosek.dparam.optimizer_max_time: 10000.0} )
else:
prob.solve(solver = self.solver)
weights = np.asarray(w.value)
weights[np.abs(weights) < self.threshold] = 0
self.weights = weights
self.subforest = list(np.array(tree_list)[[w_ind != 0 for w_ind in list(weights)]])
imp = []
for i in range(0,len(weights)):
imp.append(weights[i]*tree_list[i].feature_importances_)
imp1 = np.sum(imp, axis = 0)
self.feature_importances_ = imp1
self.features_selected_ = list(np.array(X.columns)[[i != 0 for i in imp1]])
return
#sklearn-api wrapper functions
def fit(self,X,y):
""" Wrapper function, builds a forest and solves LASSO optimization Problem
to select a subforest. Trains final model on selected features.
"""
self.build_forest_method(X,y)
self.solve_lasso()
if len(self.features_selected_) == 0:
self.trained_polish = y
else:
self.trained_polish = self.polish_method().fit(X[self.features_selected_],y)
def predict(self,X):
""" Returns binary predictions of final model trained on selected features.
"""
if len(self.features_selected_) == 0:
return np.repeat(round(np.mean(self.trained_polish)),len(X))
else:
return self.trained_polish.predict(X[self.features_selected_])
def predict_proba(self,X):
""" Returns class probability predictions of final model trained on selected features.
"""
if len(self.features_selected_) == 0:
return np.repeat(np.mean(self.trained_polish),len(X))
else:
return self.trained_polish.predict_proba(X[self.features_selected_])
def fit_transform(self,X,y):
""" Returns dataframe of selected features.
"""
self.build_forest_method(X,y)
self.solve_lasso()
if len(self.features_selected_) == 0:
return pd.DataFrame()
else:
return X[self.features_selected_]
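# Minimal usage sketch, assuming a pandas DataFrame X and a binary label vector y;
# the variable names and alpha value are hypothetical:
#   cb = ControlBurnClassifier(alpha=0.05, build_forest_method='bagboost')
#   cb.fit(X, y)                       # grows the forest, solves the LASSO problem
#   kept = cb.features_selected_       # features with nonzero aggregate importance
#   proba = cb.predict_proba(X)        # polished model trained on the kept features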
class ControlBurnRegressor:
# Class attributes to store results.
forest = []
weights = []
subforest = []
#Class attributes for parameters to determine convergence.
threshold = 10**-3
tail = 5
# Private Helper Methods
def __loss_gradient(self,y, y_hat):
return -(y-y_hat)
def __converge_test(self,sequence, threshold,tail_length):
""" Private helper method to determine if a sequence converges. Sequence
converges if the tail of the sequence falls within threshold of each
other. Returns True if the sequence converged, False otherwise.
"""
diff = np.diff(sequence)
if len(diff) < (tail_length+1):
return False
else:
return (max(np.abs(diff[-tail_length:])) < threshold)
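# Illustrative example, assuming threshold = 1e-3 and tail = 5: the training-error
# sequence [0.80, 0.90, 0.94, 0.9409, 0.9405, 0.9408, 0.9406, 0.9407] converges,
# because the absolute values of its last five successive differences are all
# below 1e-3.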
def __check_OOB_convergence(self,OOB_error_list):
""" Private helper method to check if the improvement in out-of-bag error
for a bag-boosted ensemble converges. Returns True if the last element in the
sequence of error deltas is <= 0, or if the most recent deltas all fall below the
threshold; False otherwise.
"""
if OOB_error_list[-1] <= 0:
return True
elif (len(OOB_error_list) < max(self.tail-2,1)+1):
return False
elif all([x < self.threshold for x in OOB_error_list[-max(self.tail-2,1):]]):
return True
else:
return False
def __bag_boost_predict(self,X,tree_list):
res = []
for i in tree_list:
depth = i.max_depth
pred = i.predict(X)
res.append([depth,pred])
res = pd.DataFrame(res,columns = ['depth','pred'])
res = res.groupby('depth')['pred'].apply(np.mean).reset_index()
res = np.sum(res['pred'].to_numpy())
return res
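# Illustrative note: __bag_boost_predict averages the predictions of all trees that
# share the same depth (the bagging step) and then sums those per-depth averages
# (the boosting step). For four hypothetical trees of depths [1, 1, 2, 2] with
# predictions p1..p4, the result is (p1 + p2)/2 + (p3 + p4)/2.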
#Forest Growing Methods
def bag_forest(self,X,y):
""" Forest growing algorithm that uses the class attribute max_depth as
a hyperparameter.
Adds trees of increasing depth to a bagged ensemble until max_depth is
reached. The number of trees to add at each depth level is determined by
checking if the training error converges.
"""
self.X = X
self.y = y
threshold = self.threshold
tail = self.tail
train = X.copy()
train = train.reset_index().drop('index',axis = 1)
train['y'] = list(y)
features = X.columns
tree_list = []
max_depth = self.max_depth
for depth in range (1,max_depth+1):
early_stop_pred = []
early_stop_train_err = []
converged = False
while converged == False:
train1 = train.sample(n = len(train), replace = True)
y1 = train1['y']
X1 = train1[features]
regressor = DecisionTreeRegressor(max_depth = depth)
regressor.fit(X1,y1)
tree_list.append(regressor)
pred = regressor.predict(X[features])
early_stop_pred.append(pred)
early_stop_train_err.append(sklearn.metrics.mean_squared_error(y,(np.mean(early_stop_pred,axis = 0))))
converged = self.__converge_test(early_stop_train_err,threshold,tail)
self.forest = tree_list
return
def bagboost_forest(self,X,y):
""" Bag-boosting forest growing algorithm, no hyperparameters needed. The number of
trees to grow at each boosting iteration is determined by the convergence of
the training error. Out-of-bag error is used to determine how many boosting iterations to
conduct.
"""
threshold = self.threshold
tail = self.tail
self.X = X
self.y = y
y =
|
pd.Series(y)
|
pandas.Series
|
from pathlib import Path
import pandas as pd
import spacy
from assertpy import assert_that
from src.definitions import PROJECT_ROOT
from src.main.preprocess_data import preprocess_data, parse_passage
def test_preprocess_data(tmp_path: Path):
preprocess_data(
data_root=PROJECT_ROOT / "data/test/raw",
output_dir=tmp_path,
)
actual = pd.read_csv(tmp_path / "labeled_passages.csv")
def word_as_dict(word: str) -> dict:
return actual[actual.words == word].iloc[0].to_dict()
assert_that(word_as_dict("methanol")).is_equal_to(
{
"passage_id": 0,
"pubtator_id": 1,
"words": "methanol",
"POS": "NOUN",
"labels": "B-Chemical",
}
)
assert_that(word_as_dict("poisoning")).is_equal_to(
{
"passage_id": 0,
"pubtator_id": 1,
"words": "poisoning",
"POS": "NOUN",
"labels": "B-Disease",
}
)
assert_that(word_as_dict("pyridine")).is_equal_to(
{
"passage_id": 0,
"pubtator_id": 2,
"words": "pyridine",
"POS": "NOUN",
"labels": "B-Chemical",
}
)
assert_that(word_as_dict("nucleotide")).is_equal_to(
{
"passage_id": 0,
"pubtator_id": 2,
"words": "nucleotide",
"POS": "NOUN",
"labels": "I-Chemical",
}
)
expected = pd.read_csv(PROJECT_ROOT / "data/test/labeled_passages.csv")
pd.testing.assert_frame_equal(left=actual, right=expected)
def test_parse_passage_can_handle_global_offset():
nlp = spacy.load("en_core_web_sm")
actual = parse_passage(
passage={
"offset": 20, # This is the parameter under test
"text": "Adsorption of rRNA and poly(A)-containing RNA to filters.",
"annotations": [
{
"infons": {"identifier": "MESH:D011061", "type": "Chemical"},
# TODO: configure tokenization to split on '-'
"text": "poly(A)",
"locations": [{"offset": 43, "length": 7}],
},
],
},
pubtator_id="0",
passage_id=0,
nlp=nlp,
)
print(actual)
expected = pd.DataFrame(
{
"pubtator_id": ["0"] * 9,
"passage_id": [0] * 9,
"words": [
"Adsorption",
"of",
"rRNA",
"and",
"poly(A)-containing",
"RNA",
"to",
"filters",
".",
],
"POS": [
"NOUN",
"ADP",
"ADJ",
"CCONJ",
"VERB",
"PROPN",
"ADP",
"NOUN",
"PUNCT",
],
"labels": ["O"] * 4 + ["B-Chemical"] + ["O"] * 4,
}
)
|
pd.testing.assert_frame_equal(left=actual, right=expected)
|
pandas.testing.assert_frame_equal
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# make_dataset.py
#
# Copyright (c) 2018 <NAME> <<EMAIL>>
from dotenv import find_dotenv, load_dotenv
from pathlib2 import Path
import click
import logging
import warnings
with warnings.catch_warnings():
# ignore warnings that are safe to ignore according to
# https://github.com/ContinuumIO/anaconda-issues/issues/6678
# #issuecomment-337276215
warnings.simplefilter("ignore")
import pandas as pd
from src.scoring import calc_scores
from src.scoring import get_player_scoring_dict
from src.scoring import get_team_scoring_dict
@click.command()
@click.argument('from_season', type=click.INT)
@click.argument('to_season', type=click.INT)
@click.argument('scoring_method', type=click.STRING)
def main(from_season=2009, to_season=2017, scoring_method='nfl.com'):
"""Combine and score data in <project_dir>/data/raw and output to
<project_dir>/data/processed/scored-data.csv
"""
logger = logging.getLogger(__name__)
logger.info('making final data set from raw data')
project_dir = Path(__file__).resolve().parents[2]
scores_csv_path = (
project_dir / 'data' / 'processed' /
'scores-summary_{}-to-{}.csv'.format(from_season, to_season)
)
raw_dir = project_dir / 'data' / 'raw'
df_list = []
for season_year in range(from_season, to_season + 1):
season_dir = raw_dir / str(season_year)
for csv_file in season_dir.glob('*.csv'):
df = pd.read_csv(str(csv_file))
df['season'] = season_year
df_list.append(df)
full_df = pd.concat(df_list, sort=False).reset_index(drop=True)
team_scoring_dict = get_team_scoring_dict()
player_scoring_dict = get_player_scoring_dict(method=scoring_method)
full_df = calc_scores(full_df, team_scoring_dict, player_scoring_dict)
full_df['week'] = full_df[['season', 'week']].apply(
lambda x: '{}-{:02d}'.format(x[0], x[1]), axis=1
)
summary_df =
|
pd.pivot_table(data=full_df, index='player', columns='week', values='total_score')
|
pandas.pivot_table
|
import os
import sys
import uuid
import math
import pickle
import pathlib
import getpass
import logging
from platform import uname
import pandas as pd
import numpy as np
import datetime as dt
from datetime import datetime
from collections import OrderedDict
from scipy.integrate import cumtrapz
from functools import reduce
import catboost
from catboost import CatBoostRegressor
from sklearn.svm import SVR
from sklearn.impute import KNNImputer
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.feature_selection import SelectKBest, chi2, SelectFromModel
from sklearn.metrics import mean_absolute_error, r2_score, mean_squared_error, accuracy_score
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV, RandomizedSearchCV
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor, GradientBoostingRegressor, ExtraTreesRegressor, BaggingClassifier
from xgboost import XGBRegressor
import warnings
warnings.filterwarnings('ignore')
warnings.filterwarnings(action='ignore',category=DeprecationWarning)
warnings.filterwarnings(action='ignore',category=FutureWarning)
class Config(object):
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.WARNING)
FILE_HANDLER = logging.FileHandler('logfile.log')
FORMATTER = logging.Formatter('%(asctime)s : %(levelname)s : %(name)s : %(message)s')
FILE_HANDLER.setFormatter(FORMATTER)
LOGGER.addHandler(FILE_HANDLER)
DATA = dict(
# BASE_DIR = pathlib.Path().resolve(),
DATASET_DIR = pathlib.Path().resolve() / "data/dengue",
EXPORT_DIR = pathlib.Path().resolve() / "data/dengue/exports",
)
ANALYSIS_CONFIG = dict(
OUTLIERS_COLS = ["precipitation_amt_mm", "reanalysis_precip_amt_kg_per_m2", "reanalysis_sat_precip_amt_mm", "station_precip_mm"]
)
MODELLING_CONFIG = dict(
TRAIN_COLS = ['year', 'weekofyear', 'ndvi_ne', 'ndvi_nw', 'ndvi_se',
'ndvi_sw', 'precipitation_amt_mm', 'reanalysis_air_temp_k',
'reanalysis_avg_temp_k', 'reanalysis_dew_point_temp_k',
'reanalysis_max_air_temp_k', 'reanalysis_min_air_temp_k',
'reanalysis_precip_amt_kg_per_m2',
'reanalysis_relative_humidity_percent', 'reanalysis_sat_precip_amt_mm',
'reanalysis_specific_humidity_g_per_kg', 'reanalysis_tdtr_k',
'station_avg_temp_c', 'station_diur_temp_rng_c', 'station_max_temp_c',
'station_min_temp_c', 'station_precip_mm'],
FEATURE_ENGINEER_COLS = ['low_season', 'rampup_season', 'high_season',
'reanalysis_specific_humidity_g_per_kg_1lag',
'reanalysis_specific_humidity_g_per_kg_2lag',
'reanalysis_specific_humidity_g_per_kg_3lag',
'reanalysis_dew_point_temp_k_1lag', 'reanalysis_dew_point_temp_k_2lag',
'reanalysis_dew_point_temp_k_3lag', 'reanalysis_min_air_temp_k_1lag',
'reanalysis_min_air_temp_k_2lag', 'reanalysis_min_air_temp_k_3lag',
'reanalysis_max_air_temp_k_1lag', 'reanalysis_max_air_temp_k_2lag',
'reanalysis_max_air_temp_k_3lag', 'station_min_temp_c_1lag',
'station_min_temp_c_2lag', 'station_min_temp_c_3lag',
'station_max_temp_c_1lag', 'station_max_temp_c_2lag',
'station_max_temp_c_3lag', 'reanalysis_air_temp_k_1lag',
'reanalysis_air_temp_k_2lag', 'reanalysis_air_temp_k_3lag',
'reanalysis_relative_humidity_percent_1lag',
'reanalysis_relative_humidity_percent_2lag',
'reanalysis_relative_humidity_percent_3lag'],
TUNING_METHOD = "random_search",
FEATURE_SELECTION_COLUMNS = ["RF", "Extratrees", "Kbest"],
)
class Analysis(Config):
data = {}
def __init__(self, city=["*"]):
self.city = city
def get_data(self):
logging.info("----------------------------------------------------------- PREPROCESSING ------------------------------------------------------------")
logging.info("Reading TRAIN Dataset:")
self.data["train_df"] =
|
pd.read_csv(self.DATA["DATASET_DIR"] / 'merged_train.csv', index_col=0)
|
pandas.read_csv
|
import pandas as pd
import numpy as np
from collections import OrderedDict
def load_wids_xy_data(path, target='is_female'):
"""
This function will format and condition the WIDS kaggle categorical data
- will drop unnecessary columns
- will fill NA's
"""
print('loading training data ...')
df = pd.read_csv(path + '/train.csv', low_memory=False)
print('loading test data ...')
df_test = pd.read_csv(path + '/test.csv', low_memory=False)
print('complete ...')
print('formatting ...')
# dropping sparsely populated columns
drop_cols = ['LN2_RIndLngBEOth', 'LN2_WIndLngBEOth']
drop_cols += [col for col in df.columns if 'REC' in col]
drop_cols += [col for col in df.columns if 'OTHERS' in col]
train_drop_cols = drop_cols + ['train_id']
test_drop_cols = drop_cols + ['test_id']
df.drop(columns=train_drop_cols, inplace=True)
df_test.drop(columns=test_drop_cols, inplace=True)
columns = [col for col in df.columns if col not in (['is_female'] + [target])]
y = df[target].values
X = df[columns].copy()
X_test = df_test[columns].copy()
print('imputing missing values ...')
X.fillna(-1, inplace=True)
X_test.fillna(-1, inplace=True)
if target != 'is_female':
y_test = df_test[target].values
print(X.shape, y.shape, X_test.shape, y_test.shape)
return X, y, X_test, y_test
else:
print(X.shape, y.shape, X_test.shape)
return X, y, X_test
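# Minimal usage sketch; the path below is hypothetical:
#   X, y, X_test = load_wids_xy_data('data/wids', target='is_female')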
def get_mappers(inputX, cat_cols, emb_cols):
"""
This function will take in a X pandas dataframe, turn all the data
into categorical types, and re-map the values into integers. These
mappings will be stored in 'mappers', this will also return
embedding sizes used in the neural network:
Example of embedding sizes:
City
-----
- Los Angeles
- New York
- Houston
- Portland
- Atlanta
This field would have an embedding size of (5, 2)
- 5 unique values
- 2 Embedding vector size (roughly half cardinality)
X_mapped, mappers, emb_szs, idx2col, col2idx = get_mappers(X, cat_cols, emb_cols)
"""
X = inputX.copy()
mappers = {}
columns = X.columns
print('converting to category ...')
for idx, col in enumerate(cat_cols):
if idx % 100 == 0:
print(idx)
X[col] = X[col].astype('category')
mappers[col] = {labels: idx for idx, labels in enumerate(X[col].cat.categories)}
print('calculating cardinality')
categorical_stats = OrderedDict()
for col in X.columns:
categorical_stats[col] = len(X[col].cat.categories) + 1
embedding_sizes = OrderedDict()
for ky in emb_cols:
vl = categorical_stats[ky]
embedding_sizes[ky] = (vl, min(50, (vl + 1) // 2))
print('remapping columns to int')
for col in columns:
X[col] = X[col].map(mappers[col])
one_hot_cols = list(set(cat_cols).difference(set(emb_cols)))
X = pd.get_dummies(X, columns=one_hot_cols).copy()
emb_szs = embedding_sizes
print('complete')
idx2col = {idx: col for idx, col in enumerate(X.columns)}
col2idx = {col: idx for idx, col in enumerate(X.columns)}
return X, mappers, emb_szs, idx2col, col2idx
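# Worked example of the embedding-size rule above: a column with 5 unique categories
# gets categorical_stats = 5 + 1 = 6 (the +1 reserves a slot for unseen values), so
# its embedding size is (6, min(50, (6 + 1) // 2)) = (6, 3).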
def get_trained_embeddings(mappers, model):
keys = mappers.keys()
emb_mtx = {}
for field, emb in zip(keys, model.embs):
emb_mtx[field] = emb.weight.data.numpy()
return emb_mtx
def get_emb_df(X, emb_mtx, mappers):
mini_dfs = []
print('applying embeddings')
for col in X.columns.values:
idxs = X[col].map(mappers[col])
# map missing/unseen values to the extra row that will hold the global mean (appended below)
idxs[idxs.isna()] = max(idxs) + 1
idxs = np.array(idxs, dtype=int)
# get embedding matrix
mtx = emb_mtx[col]
# calculate global mean for missing values
glb_mean = np.mean(mtx, axis=0)
# add global mean to bottom of matrix
mtx = np.concatenate([mtx, glb_mean.reshape(1, -1)], axis=0)
# create dataframe
jf =
|
pd.DataFrame(mtx[idxs, :])
|
pandas.DataFrame
|
import sys
import random
import os
from lifelines import KaplanMeierFitter
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from lifelines import CoxPHFitter
from sklearn.metrics import average_precision_score, precision_recall_curve, roc_auc_score, roc_curve, auc, \
brier_score_loss, precision_score, recall_score, f1_score
from sklearn.linear_model import LogisticRegressionCV
from sklearn.metrics import roc_auc_score, make_scorer
from CI_Configs import runs
from UKBB_Functions import Filter_CZ,to_pickle,from_pickle
from sklearn.utils import resample
from LabData import config_global as config
from LabUtils.addloglevels import sethandlers
from LabQueue.qp import fakeqp
import os
USE_FAKE_QUE=True
CALC_CI_ONLY = False
DEBUG = True
run_name = "SA_Antro_neto_whr"
if USE_FAKE_QUE:
qp=fakeqp
else:
qp=config.qp
sethandlers(file_dir=config.log_dir)
os.chdir('/net/mraid08/export/genie/LabData/Analyses/Yochai/Jobs')
def calc_TTE(row):
"""
Returns the time from the first visit to the first diabetes diagnosis. If diabetes was not
diagnosed, or the diagnosis time is not available, returns the time of the last visit instead."""
if pd.isnull(row["TTE"]):
return row["21003-4.0"]
else:
return row["TTE"]
def plot_ROC_curve(y_test_val, y_pred_val, AUC):
fpr, tpr, _ = roc_curve(y_test_val, y_pred_val)
fig = plt.figure(figsize=(16, 9))
lw = 2
plt.plot(fpr, tpr, color='darkorange',
lw=lw, label='ROC curve')
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating AUC={0:0.2f}'.format(AUC))
plt.legend(loc="lower right")
plt.show()
# pdf.savefig(fig, dpi=DPI)
# plt.close(fig)
def plot_precision_recall(y_test_val, y_pred_val, APS):
precision, recall, _ = precision_recall_curve(y_test_val, y_pred_val)
fig = plt.figure(figsize=(16, 9))
plt.step(recall, precision, color='b', alpha=0.2,
where='post')
plt.fill_between(recall, precision, step='post', alpha=0.2,
color='b')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('2-class Precision-Recall curve: AP={0:0.2f}'.format(APS))
plt.show()
# Plotting ratio graph for precision recall
rel_prec = precision / precision[0]
# fig = plt.figure()
# plt.step(recall, rel_prec, color='b', alpha=0.2, where='post')
# plt.fill_between(recall, rel_prec, step='post', alpha=0.2, color='b')
# plt.xlabel('Recall')
# plt.ylabel('Relative Precision')
# # plt.ylim([0.0, 1.05 * np.percentile(rel_prec,99.97)])
# plt.ylim([0.0, 1.05 * max(rel_prec)])
# plt.xlim([0.0, 1.0])
# plt.title('2-class Relative-Precision-Recall curve: AP={0:0.2f}'.format(APS))
# plt.show()
# # Plotting ratio graph for precision recallwith removed maximum value
fig = plt.figure(figsize=(16, 9))
plt.step(recall, rel_prec, color='b', alpha=0.2, where='post')
plt.fill_between(recall, rel_prec, step='post', alpha=0.2,
color='b')
plt.xlabel('Recall')
plt.ylabel('Relative Precision')
plt.ylim([0.0, 1.05 * max(np.delete(rel_prec, np.argmax(rel_prec)))])
plt.xlim([0.0, 1.0])
plt.title('2-class Relative-Precision-Recall trimmed max: AP={0:0.2f}'.format(APS))
plt.show()
# Show graph of True positive Vs.quantiles of predicted probabilities.
def get_rel_score(row):
"""
A function that is used in apply on Dataframes
Returns the predicted Survival rate at the visit time
"""
return row[row.loc["21003-4.0"]]
def get_event_n_duration(path):
"""
Calculates the time passed from the first visit to the event or, if no event occurs, to the last known visit.
Returns durations, event_observed, Diab_age_df.loc[:, ['TTE', "2443-3.0"]], Diab_age_df
"""
diab_age_data_path="/net/mraid08/export/jafar/UKBioBank/Data/ukb29741.csv"
diab_data_col=pd.read_csv(diab_age_data_path, nrows=0).columns.values
data_col = pd.read_csv(path, nrows=0).columns.values
diab_age_col = [x for x in diab_data_col if x.startswith("2976-")] # Aged when diabetes first diagnosed
# diab_col = [x for x in data_col if x.startswith("2443-")] # 1 if diabetes diagnosed
Init_age_col = "21003-0.0"
all_ages_cols = [col for col in data_col if col.startswith("21003-")]
all_ages_df = pd.read_csv(path, usecols=["eid"] + all_ages_cols, index_col="eid")
Diab_age_df = pd.read_csv(diab_age_data_path, usecols=diab_age_col + ["eid"], index_col="eid")
Diab_age_df["Min_diab_age"] = Diab_age_df.min(axis=1)
Diab_age_df = Diab_age_df.join(all_ages_df[Init_age_col],how="right")
Diab_age_df["TTE"] = Diab_age_df["Min_diab_age"] - Diab_age_df[
"21003-0.0"] # Calculating time from first visit to diab onset
neg_diab_age_ind = Diab_age_df.loc[
Diab_age_df["TTE"] < 0, "TTE"].index # Getting indexes of events with negative values, to filter them out
diab_ind = [ind for ind in Diab_age_df.index if ind not in neg_diab_age_ind]
Diab_age_df = Diab_age_df.loc[diab_ind, :]
diab = pd.read_csv(path, usecols=["eid", "2443-3.0"], index_col="eid")
Diab_age_df = Diab_age_df.join(diab)
Diab_age_df = Diab_age_df.join(all_ages_df["21003-4.0"]) # Time between first and last visit
Diab_age_df['TTE'] = Diab_age_df.apply(calc_TTE, axis=1)
durations = Diab_age_df['TTE'].values
event_observed = Diab_age_df['2443-3.0'].values
# return durations, event_observed, Diab_age_df.loc[:, ['TTE', "2443-3.0", "21003-4.0", "21003-3.0"]], Diab_age_df
return durations, event_observed, Diab_age_df.loc[:, ['TTE', "2443-3.0"]], Diab_age_df
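# Minimal usage sketch; `path` is assumed to point at the UKBB csv referenced above,
# and KaplanMeierFitter (imported at the top of this module) is used for illustration:
#   durations, event_observed, tte_df, diab_age_df = get_event_n_duration(path)
#   kmf = KaplanMeierFitter()
#   kmf.fit(durations, event_observed=event_observed)
#   kmf.plot_survival_function()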
def fit_n_plot_cox_train_result(Train_dummy, penalizer=0.2, var_thresh=0,plot=False):
# drop_col_diab=Train_dummy.loc[Train_dummy["2443-3.0"]==1,:].var()/Train_dummy.loc[Train_dummy["2443-3.0"]==1,:].median()<var_thresh
# drop_col_non_diab=Train_dummy.loc[Train_dummy["2443-3.0"]==1,:].var()/Train_dummy.loc[Train_dummy["2443-3.0"]==1,:].median()<var_thresh
retry=True
retry_ind=0
drop_col_diab = Train_dummy.loc[Train_dummy["2443-3.0"] == 1, :].var() <= var_thresh
drop_col_non_diab = Train_dummy.loc[Train_dummy["2443-3.0"] == 0, :].var() <= var_thresh
# drop_col_non_diab = Train_dummy.loc[Train_dummy["2443-3.0"] == 1, :].var() < var_thresh
drop_col = drop_col_diab + drop_col_non_diab
drop_columns = drop_col[drop_col == True].index.values
# print("drop_col: ",drop_columns)
use_cols = drop_col[drop_col == False].index.values
use_cols_list = list(use_cols)
if "2443-3.0" not in use_cols_list:
use_cols_list.append("2443-3.0")
if "TTE" not in use_cols_list:
use_cols_list.append("TTE")
while retry:
try:
cph = CoxPHFitter(penalizer=penalizer) ## Instantiate the class to create a cph object
cph.fit(Train_dummy[use_cols_list], duration_col="TTE", event_col='2443-3.0',
show_progress=True) ## Fit the data to train the model
success=True
retry=False
except:
print(("Failed fitting,increased panelizer is: ","{:.2f}".format(penalizer)))
retry=True
retry_ind+=1
if retry_ind==10:
print(("Failed last penalizer:", "{:.2f}".format(penalizer)," Exiting"))
return None,None,None
penalizer=penalizer*random.choice([0.01,0.1,10,100])
print(("success:",success," last penalizer:","{:.2f}".format(penalizer)))
if plot:
cph.print_summary() ## HAve a look at the significance of the features
tr_rows = Train_dummy.iloc[0:10, :-2]
cph.predict_survival_function(tr_rows).plot(figsize=[16, 9])
shp = max(5, int(0.2 * len(use_cols_list)))
f, axs = plt.subplots(1, 1, figsize=(min(15, shp), shp))
cph.plot(ax=axs)
if "2443-3.0" in use_cols_list:
use_cols_list.remove("2443-3.0")
if "TTE" in use_cols_list:
use_cols_list.remove("TTE")
return cph, use_cols_list, penalizer
def train_cox(Train_data,penalizer=0.2, var_thresh=0.0,CI=[]):
train_length=Train_data.shape[0]
if CI=="CI":
Train_data_boot = Train_data.sample(n=train_length, replace=True)
else:
Train_data_boot = Train_data
if "21003-4.0" in Train_data_boot.columns:
Train_data_boot = Train_data_boot.drop(["21003-4.0"], axis=1) # Time between visits,To avoid colinearity with TTE
if "21003-3.0" in Train_data_boot.columns:
Train_data_boot = Train_data_boot.drop(["21003-3.0"], axis=1) # Age at repeated visit To avoid colinearity with TTE
Train_dummy = pd.get_dummies(Train_data_boot, drop_first=True)
cph, use_cols, penalizer = fit_n_plot_cox_train_result(Train_dummy, penalizer=penalizer, var_thresh=var_thresh,plot=False)
return cph, use_cols, Train_dummy,penalizer
def predict_test_results(Test_data, Results_path, cph, col_names,SN=1,penalizer=[],
var_thresh=[],CI=[]):
"""
:param Test_file_path:
:param Results_path:
:param cph:
:param col_names:
:param SN:
:param penalizer:
:param var_thresh:
:return: Tot_test_pred, Y_tot, Test_dummy, y_test_val, y_pred_val, AUC, APS
"""
Test_length=Test_data.shape[0]
if CI=="CI":
Test_data_boot= Test_data.sample(n=Test_length,replace=True)
else:
Test_data_boot = Test_data
drop_cols=[x for x in ["21003-4.0", "21003-3.0", "2443-3.0", "TTE"] if x in Test_data_boot.columns]
# drop_cols=[x for x in ["21003-4.0", "21003-3.0", "2443-3.0"] if x in Test_data_boot.columns]
if len(drop_cols)>0:
Test_data_clean = Test_data_boot.drop(drop_cols, axis=1)
Test_dummy = pd.get_dummies(Test_data_clean, drop_first=True)
# Test_dummy_rel=Test_dummy.iloc[:,:-2]
test_predicted = cph.predict_survival_function(Test_dummy)
# test_predicted =cph.score(Test_dummy)
dummy_idx = np.arange(0, Test_dummy.shape[0])
Test_dummy.index=dummy_idx
Test_data_boot.index=dummy_idx
test_predicted.columns=dummy_idx
Tot_test_pred = test_predicted.T.join(Test_data_boot.loc[:, "21003-4.0"])
Tot_test_pred["21003-4.0"] = Tot_test_pred["21003-4.0"].astype(str)
col = [str(x) for x in Tot_test_pred.columns.values]
new_col_dict = dict(list(zip(Tot_test_pred.columns.values, col)))
Tot_test_pred.rename(columns=new_col_dict, inplace=True)
Tot_test_pred["pred"] = Tot_test_pred.apply(get_rel_score, axis=1)
Tot_test_pred.index=np.arange(0,Tot_test_pred.shape[0])
Test_data_boot.index=np.arange(0,Test_data_boot.shape[0])
Y_tot = Tot_test_pred.join(Test_data_boot.loc[:,"2443-3.0"]).loc[:,["pred","2443-3.0"]].dropna(axis=1)
# print("*************~~~~~ Ytot ~~~~~~~~************")
# print("KeyError: u'the label [2443-3.0] is not in the [columns]'")
# print (Y_tot)
# print("*************~~~~~++++++~~~~~~~~************")
y_test_val = Y_tot.loc[:,"2443-3.0"].values
y_pred_val = 1 - Y_tot.loc[:,"pred"].values
AUC = roc_auc_score(y_test_val, y_pred_val)
# plot_ROC_curve(y_test_val, y_pred_val, AUC)
APS = average_precision_score(y_test_val, np.array(y_pred_val))
# plot_precision_recall(y_test_val, y_pred_val, APS)
results_df = pd.DataFrame.from_dict({"APS": [APS], "AUC": [AUC], "SN": [SN],"penalizer":[penalizer],"var_thresh":[var_thresh]})
results_df = results_df.set_index("SN", drop=True)
prediction_DF = pd.DataFrame.from_dict({"y_test_val": y_test_val, "y_pred_val": y_pred_val})
results_df.to_csv(os.path.join(Results_path, "AUC_APS_results_" + str(int(SN)) + ".csv"),index=True)
prediction_DF.to_csv(os.path.join(Results_path, "y_pred_results_" + str(int(SN)) + ".csv"))
# return Tot_test_pred, Y_tot, Test_dummy, y_test_val, y_pred_val, AUC, APS
def Train_Test_Cox(run, Train_data, Test_data, Results_path, batch, CI=[]):
for ind in np.arange(run.batch_size):
SN = (batch * run.batch_size + ind)
penalizer = random.uniform(0, 100)
var_thresh = 0
cph, use_cols, Train_dummy, penalizer = train_cox(
Train_data=Train_data,penalizer=penalizer,
var_thresh=var_thresh,CI=CI)
# print (use_cols)
# Tot_test_pred, Y_tot, Test_dummy, y_test_val, y_pred_val,AUC, APS =\
# predict_test_results(run,val_file_path=val_file_path,Results_path=Results_path,
# cph=cph,col_names=use_cols,SN=SN,penalizer=penalizer,
# var_thresh=var_thresh,CI=CI)
if cph is not None:
predict_test_results(Test_data=Test_data, Results_path=Results_path,
cph=cph,col_names=use_cols,SN=SN,penalizer=penalizer,
var_thresh=var_thresh,CI=CI)
# return cph, Train_dummy, Tot_test_pred, Y_tot, Test_dummy, y_test_val, y_pred_val, AUC, APS
def optimal_params(path):
runs_files_results_names=os.listdir(path)
runs_files_results_path=[os.path.join(path,x) for x in runs_files_results_names if x.startswith("AUC_APS")]
runs_results_list=[pd.read_csv(x,index_col="SN") for x in runs_files_results_path]
runs_Results_df=pd.concat(runs_results_list)
runs_Results_df.sort_values(by="AUC",inplace=True, ascending=False)
runs_Results_df.to_csv(os.path.join(path,"runs_results_summary.csv"))
params = runs_Results_df.iloc[0,:]  # top row after sorting by AUC descending
return params
def load_data(run,force_new_data_load=False):
pickle_data_path=os.path.join(run.model_paths,"data_dict.pkl")
try:
compute_data=False
data_dict=from_pickle(pickle_data_path)
print(("Loaded data from:", pickle_data_path))
except:
print("Couldn't load data, computing it instead")
compute_data=True
if compute_data:
data_path_dict={"train":run.train_file_path,"val":run.val_file_path,"test":run.test_file_path}
data_dict={"train":
|
pd.DataFrame()
|
pandas.DataFrame
|
"""Tests for the sdv.constraints.tabular module."""
import pandas as pd
from sdv.constraints.tabular import (
ColumnFormula, CustomConstraint, GreaterThan, UniqueCombinations)
def dummy_transform():
pass
def dummy_reverse_transform():
pass
def dummy_is_valid():
pass
class TestCustomConstraint():
def test___init__(self):
"""Test the ``CustomConstraint.__init__`` method.
The ``transform``, ``reverse_transform`` and ``is_valid`` methods
should be replaced by the given ones, importing them if necessary.
Setup:
- Create dummy functions (created above this class).
Input:
- dummy transform and revert_transform + is_valid FQN
Output:
- Instance with all the methods replaced by the dummy versions.
"""
is_valid_fqn = __name__ + '.dummy_is_valid'
# Run
instance = CustomConstraint(
transform=dummy_transform,
reverse_transform=dummy_reverse_transform,
is_valid=is_valid_fqn
)
# Assert
assert instance.transform == dummy_transform
assert instance.reverse_transform == dummy_reverse_transform
assert instance.is_valid == dummy_is_valid
class TestUniqueCombinations():
def test___init__(self):
"""Test the ``UniqueCombinations.__init__`` method.
It is expected to create a new Constraint instance, receiving the names of
the columns that need to produce unique combinations.
Side effects:
- instance._columns == columns
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns)
# Assert
assert instance._columns == columns
def test__valid_separator_valid(self):
"""Test ``_valid_separator`` for a valid separator.
If the separator and data are valid, result is ``True``.
Input:
- Table data (pandas.DataFrame)
Output:
- True (bool).
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance._separator = '#'
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
is_valid = instance._valid_separator(table_data)
# Assert
assert is_valid
def test__valid_separator_non_valid_separator_contained(self):
"""Test ``_valid_separator`` passing a column that contains the separator.
If any of the columns contains the separator string, result is ``False``.
Input:
- Table data (pandas.DataFrame) with a column that contains the separator string ('#')
Output:
- False (bool).
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance._separator = '#'
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', '#', 'f'],
'c': ['g', 'h', 'i']
})
is_valid = instance._valid_separator(table_data)
# Assert
assert not is_valid
def test__valid_separator_non_valid_name_joined_exists(self):
"""Test ``_valid_separator`` passing a column whose name is obtained after joining
the column names using the separator.
If the column name obtained after joining the column names using the separator
already exists, result is ``False``.
Input:
- Table data (pandas.DataFrame) with a column name that will be obtained by joining
the column names and the separator.
Output:
- False (bool).
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance._separator = '#'
# Run
table_data = pd.DataFrame({
'b#c': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
is_valid = instance._valid_separator(table_data)
# Assert
assert not is_valid
def test_fit(self):
"""Test the ``UniqueCombinations.fit`` method.
The ``UniqueCombinations.fit`` method is expected to:
- Call ``UniqueCombinations._valid_separator``.
- Find a valid separator for the data and generate the joint column name.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
instance.fit(table_data)
# Asserts
expected_combinations = set(table_data[columns].itertuples(index=False))
assert instance._separator == '#'
assert instance._joint_column == 'b#c'
assert instance._combinations == expected_combinations
def test_is_valid_true(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_false(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['D', 'E', 'F'],
'c': ['g', 'h', 'i']
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out =
|
pd.Series([False, False, False])
|
pandas.Series
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 2 11:39:57 2018
@author: <NAME>
@Contains : Pre-processing functions
"""
import pandas as pd
import numpy as np
import json
def mapprice(v):
if
|
pd.isnull(v)
|
pandas.isnull
|
import numpy as np
import pandas as pd
from numpy import inf, nan
from numpy.testing import assert_array_almost_equal, assert_array_equal
from pandas import DataFrame, Series, Timestamp
from pandas.testing import assert_frame_equal, assert_series_equal
from shapely.geometry.point import Point
from pymove import MoveDataFrame
from pymove.utils import integration
from pymove.utils.constants import (
ADDRESS,
CITY,
DATETIME,
DIST_EVENT,
DIST_HOME,
DIST_POI,
EVENT_ID,
EVENT_TYPE,
GEOMETRY,
HOME,
ID_POI,
LATITUDE,
LONGITUDE,
NAME_POI,
POI,
TRAJ_ID,
TYPE_POI,
VIOLATING,
)
list_random_banks = [
[39.984094, 116.319236, 1, 'bank'],
[39.984198, 116.319322, 2, 'randomvalue'],
[39.984224, 116.319402, 3, 'bancos_postos'],
[39.984211, 116.319389, 4, 'randomvalue'],
[39.984217, 116.319422, 5, 'bancos_PAE'],
[39.984710, 116.319865, 6, 'bancos_postos'],
[39.984674, 116.319810, 7, 'bancos_agencias'],
[39.984623, 116.319773, 8, 'bancos_filiais'],
[39.984606, 116.319732, 9, 'banks'],
[39.984555, 116.319728, 10, 'banks']
]
list_random_bus_station = [
[39.984094, 116.319236, 1, 'transit_station'],
[39.984198, 116.319322, 2, 'randomvalue'],
[39.984224, 116.319402, 3, 'transit_station'],
[39.984211, 116.319389, 4, 'pontos_de_onibus'],
[39.984217, 116.319422, 5, 'transit_station'],
[39.984710, 116.319865, 6, 'randomvalue'],
[39.984674, 116.319810, 7, 'bus_station'],
[39.984623, 116.319773, 8, 'bus_station'],
]
list_random_bar_restaurant = [
[39.984094, 116.319236, 1, 'restaurant'],
[39.984198, 116.319322, 2, 'restaurant'],
[39.984224, 116.319402, 3, 'randomvalue'],
[39.984211, 116.319389, 4, 'bar'],
[39.984217, 116.319422, 5, 'bar'],
[39.984710, 116.319865, 6, 'bar-restaurant'],
[39.984674, 116.319810, 7, 'random123'],
[39.984623, 116.319773, 8, '123'],
]
list_random_parks = [
[39.984094, 116.319236, 1, 'pracas_e_parques'],
[39.984198, 116.319322, 2, 'park'],
[39.984224, 116.319402, 3, 'parks'],
[39.984211, 116.319389, 4, 'random'],
[39.984217, 116.319422, 5, '123'],
[39.984710, 116.319865, 6, 'park'],
[39.984674, 116.319810, 7, 'parks'],
[39.984623, 116.319773, 8, 'pracas_e_parques'],
]
list_random_police = [
[39.984094, 116.319236, 1, 'distritos_policiais'],
[39.984198, 116.319322, 2, 'police'],
[39.984224, 116.319402, 3, 'police'],
[39.984211, 116.319389, 4, 'distritos_policiais'],
[39.984217, 116.319422, 5, 'random'],
[39.984710, 116.319865, 6, 'randomvalue'],
[39.984674, 116.319810, 7, '123'],
[39.984623, 116.319773, 8, 'bus_station'],
]
list_move = [
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984559000000004, 116.326696, Timestamp('2008-10-23 10:37:26'), 1],
[40.002899, 116.32151999999999, Timestamp('2008-10-23 10:50:16'), 1],
[40.016238, 116.30769099999999, Timestamp('2008-10-23 11:03:06'), 1],
[40.013814, 116.306525, Timestamp('2008-10-23 11:58:33'), 2],
[40.009735, 116.315069, Timestamp('2008-10-23 23:50:45'), 2],
[39.993527, 116.32648300000001, Timestamp('2008-10-24 00:02:14'), 2],
[39.978575, 116.326975, Timestamp('2008-10-24 00:22:01'), 3],
[39.981668, 116.310769, Timestamp('2008-10-24 01:57:57'), 3],
]
list_pois = [
[39.984094, 116.319236, 1, 'policia', 'distrito_pol_1'],
[39.991013, 116.326384, 2, 'policia', 'policia_federal'],
[40.01, 116.312615, 3, 'comercio', 'supermercado_aroldo'],
[40.013821, 116.306531, 4, 'show', 'forro_tropykalia'],
[40.008099, 116.31771100000002, 5, 'risca-faca',
'rinha_de_galo_world_cup'],
[39.985704, 116.326877, 6, 'evento', 'adocao_de_animais'],
[39.979393, 116.3119, 7, 'show', 'dia_do_municipio']
]
# Union tests
def test_union_poi_bank():
pois_df = DataFrame(
data=list_random_banks,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'banks'],
[39.984198, 116.319322, 2, 'randomvalue'],
[39.984224, 116.319402, 3, 'banks'],
[39.984211, 116.319389, 4, 'randomvalue'],
[39.984217, 116.319422, 5, 'banks'],
[39.984710, 116.319865, 6, 'banks'],
[39.984674, 116.319810, 7, 'banks'],
[39.984623, 116.319773, 8, 'banks'],
[39.984606, 116.319732, 9, 'banks'],
[39.984555, 116.319728, 10, 'banks']
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
)
integration.union_poi_bank(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
def test_union_poi_bus_station():
pois_df = DataFrame(
data=list_random_bus_station,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'bus_station'],
[39.984198, 116.319322, 2, 'randomvalue'],
[39.984224, 116.319402, 3, 'bus_station'],
[39.984211, 116.319389, 4, 'bus_station'],
[39.984217, 116.319422, 5, 'bus_station'],
[39.984710, 116.319865, 6, 'randomvalue'],
[39.984674, 116.319810, 7, 'bus_station'],
[39.984623, 116.319773, 8, 'bus_station'],
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
integration.union_poi_bus_station(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
def test_union_poi_bar_restaurant():
pois_df = DataFrame(
data=list_random_bar_restaurant,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'bar-restaurant'],
[39.984198, 116.319322, 2, 'bar-restaurant'],
[39.984224, 116.319402, 3, 'randomvalue'],
[39.984211, 116.319389, 4, 'bar-restaurant'],
[39.984217, 116.319422, 5, 'bar-restaurant'],
[39.984710, 116.319865, 6, 'bar-restaurant'],
[39.984674, 116.319810, 7, 'random123'],
[39.984623, 116.319773, 8, '123'],
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
integration.union_poi_bar_restaurant(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
def test_union_poi_parks():
pois_df = DataFrame(
data=list_random_parks,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'parks'],
[39.984198, 116.319322, 2, 'parks'],
[39.984224, 116.319402, 3, 'parks'],
[39.984211, 116.319389, 4, 'random'],
[39.984217, 116.319422, 5, '123'],
[39.984710, 116.319865, 6, 'parks'],
[39.984674, 116.319810, 7, 'parks'],
[39.984623, 116.319773, 8, 'parks'],
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
integration.union_poi_parks(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
def test_union_poi_police():
pois_df = DataFrame(
data=list_random_police,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'police'],
[39.984198, 116.319322, 2, 'police'],
[39.984224, 116.319402, 3, 'police'],
[39.984211, 116.319389, 4, 'police'],
[39.984217, 116.319422, 5, 'random'],
[39.984710, 116.319865, 6, 'randomvalue'],
[39.984674, 116.319810, 7, '123'],
[39.984623, 116.319773, 8, 'bus_station'],
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
integration.union_poi_police(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
def test_join_colletive_areas():
move_df = MoveDataFrame(
data=list_move,
)
move_df['geometry'] = move_df.apply(lambda x: Point(x['lon'], x['lat']), axis=1)
expected = move_df.copy()
indexes_ac = np.linspace(0, move_df.shape[0], 5, dtype=int)
area_c = move_df[move_df.index.isin(indexes_ac)].copy()
integration.join_collective_areas(move_df, area_c, inplace=True)
expected[VIOLATING] = [True, False, True, False, True, False, True, False, False]
assert_frame_equal(move_df, expected)
def test__reset_and_creates_id_and_lat_lon():
move_df = MoveDataFrame(list_move)
pois = DataFrame(
data=list_pois,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI, NAME_POI],
index=[0, 1, 2, 3, 4, 5, 6]
)
dists, ids, tags, lats, lons = (
integration._reset_and_creates_id_and_lat_lon(
move_df, pois, True, True
)
)
id_expected = np.full(9, '', dtype='object_')
tag_expected = np.full(9, '', dtype='object_')
dist_expected = np.full(
9, np.Infinity, dtype=np.float64
)
lat_expected = np.full(7, np.Infinity, dtype=np.float64)
lon_expected = np.full(7, np.Infinity, dtype=np.float64)
assert_array_almost_equal(dists, dist_expected)
assert_array_equal(ids, id_expected)
assert_array_equal(tags, tag_expected)
assert_array_almost_equal(lats, lat_expected)
assert_array_almost_equal(lons, lon_expected)
dists, ids, tags, lats, lons = (
integration._reset_and_creates_id_and_lat_lon(
move_df, pois, True, False
)
)
assert_array_almost_equal(dists, dist_expected)
assert_array_equal(ids, id_expected)
assert_array_equal(tags, tag_expected)
assert_array_almost_equal(lats, lat_expected)
assert_array_almost_equal(lons, lon_expected)
dists, ids, tags, lats, lons = (
integration._reset_and_creates_id_and_lat_lon(
move_df, pois, False, True
)
)
lat_expected = np.full(9, np.Infinity, dtype=np.float64)
lon_expected = np.full(9, np.Infinity, dtype=np.float64)
assert_array_almost_equal(dists, dist_expected)
assert_array_equal(ids, id_expected)
assert_array_equal(tags, tag_expected)
assert_array_almost_equal(lats, lat_expected)
assert_array_almost_equal(lons, lon_expected)
dists, ids, tags, lats, lons = (
integration._reset_and_creates_id_and_lat_lon(
move_df, pois, False, False
)
)
assert_array_almost_equal(dists, dist_expected)
assert_array_equal(ids, id_expected)
assert_array_equal(tags, tag_expected)
assert_array_almost_equal(lats, lat_expected)
assert_array_almost_equal(lons, lon_expected)
def test__reset_set_window__and_creates_event_id_type():
list_events = [
[39.984094, 116.319236, 1,
Timestamp('2008-10-24 01:57:57'), 'show do tropykalia'],
[39.991013, 116.326384, 2,
Timestamp('2008-10-24 00:22:01'), 'evento da prefeitura'],
[40.01, 116.312615, 3,
Timestamp('2008-10-25 00:21:01'), 'show do seu joao'],
[40.013821, 116.306531, 4,
Timestamp('2008-10-26 00:22:01'), 'missa']
]
move_df = MoveDataFrame(list_move)
pois = DataFrame(
data=list_events,
columns=[LATITUDE, LONGITUDE, EVENT_ID, DATETIME, EVENT_TYPE],
index=[0, 1, 2, 3]
)
list_win_start = [
'2008-10-22T17:23:05.000000000', '2008-10-22T22:07:26.000000000',
'2008-10-22T22:20:16.000000000', '2008-10-22T22:33:06.000000000',
'2008-10-22T23:28:33.000000000', '2008-10-23T11:20:45.000000000',
'2008-10-23T11:32:14.000000000', '2008-10-23T11:52:01.000000000',
'2008-10-23T13:27:57.000000000'
]
win_start_expected = Series(pd.to_datetime(list_win_start), name=DATETIME)
list_win_end = [
'2008-10-23T18:23:05.000000000', '2008-10-23T23:07:26.000000000',
'2008-10-23T23:20:16.000000000', '2008-10-23T23:33:06.000000000',
'2008-10-24T00:28:33.000000000', '2008-10-24T12:20:45.000000000',
'2008-10-24T12:32:14.000000000', '2008-10-24T12:52:01.000000000',
'2008-10-24T14:27:57.000000000'
]
win_end_expected = Series(pd.to_datetime(list_win_end), name=DATETIME)
dist_expected = np.full(
9, np.Infinity, dtype=np.float64
)
type_expected = np.full(9, '', dtype='object_')
id_expected = np.full(9, '', dtype='object_')
window_starts, window_ends, current_distances, event_id, event_type = (
integration._reset_set_window__and_creates_event_id_type(
move_df, pois, 45000, DATETIME
)
)
assert_series_equal(window_starts, win_start_expected)
assert_series_equal(window_ends, win_end_expected)
assert_array_almost_equal(current_distances, dist_expected)
assert_array_equal(event_id, id_expected)
assert_array_equal(event_type, type_expected)
def test_reset_set_window_and_creates_event_id_type_all():
list_move = [
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984559000000004, 116.326696, Timestamp('2008-10-23 10:37:26'), 1],
[40.002899, 116.32151999999999, Timestamp('2008-10-23 10:50:16'), 1],
[40.016238, 116.30769099999999, Timestamp('2008-10-23 11:03:06'), 1],
[40.013814, 116.306525, Timestamp('2008-10-23 11:58:33'), 2],
[40.009735, 116.315069, Timestamp('2008-10-23 23:50:45'), 2],
[39.993527, 116.32648300000001, Timestamp('2008-10-24 00:02:14'), 2],
[39.978575, 116.326975, Timestamp('2008-10-24 00:22:01'), 3],
[39.981668, 116.310769, Timestamp('2008-10-24 01:57:57'), 3],
]
move_df = MoveDataFrame(list_move)
list_events = [
[39.984094, 116.319236, 1, Timestamp('2008-10-24 01:57:57'),
'show do tropykalia'],
[39.991013, 116.326384, 2, Timestamp('2008-10-24 00:22:01'),
'evento da prefeitura'],
[40.01, 116.312615, 3, Timestamp('2008-10-25 00:21:01'),
'show do seu joao'],
[40.013821, 116.306531, 4, Timestamp('2008-10-26 00:22:01'),
'missa']
]
pois = DataFrame(
data=list_events,
columns=[LATITUDE, LONGITUDE, EVENT_ID, DATETIME, EVENT_TYPE],
index=[0, 1, 2, 3]
)
list_win_start = [
'2008-10-23T03:53:05.000000000', '2008-10-23T08:37:26.000000000',
'2008-10-23T08:50:16.000000000', '2008-10-23T09:03:06.000000000',
'2008-10-23T09:58:33.000000000', '2008-10-23T21:50:45.000000000',
'2008-10-23T22:02:14.000000000', '2008-10-23T22:22:01.000000000',
'2008-10-23T23:57:57.000000000'
]
win_start_expected = Series(pd.to_datetime(list_win_start), name=DATETIME)
list_win_end = [
'2008-10-23T07:53:05.000000000', '2008-10-23T12:37:26.000000000',
'2008-10-23T12:50:16.000000000', '2008-10-23T13:03:06.000000000',
'2008-10-23T13:58:33.000000000', '2008-10-24T01:50:45.000000000',
'2008-10-24T02:02:14.000000000', '2008-10-24T02:22:01.000000000',
'2008-10-24T03:57:57.000000000'
]
win_end_expected = Series(pd.to_datetime(list_win_end), name=DATETIME)
dist_expected = np.full(9, None, dtype=np.ndarray)
type_expected = np.full(9, None, dtype=np.ndarray)
id_expected = np.full(9, None, dtype=np.ndarray)
window_starts, window_ends, current_distances, event_id, event_type = (
integration._reset_set_window_and_creates_event_id_type_all(
move_df, pois, 7200, DATETIME
)
)
assert_series_equal(window_starts, win_start_expected)
assert_series_equal(window_ends, win_end_expected)
assert_array_equal(current_distances, dist_expected)
assert_array_equal(event_id, id_expected)
assert_array_equal(event_type, type_expected)
def test_join_with_pois():
move_df = MoveDataFrame(list_move)
pois = DataFrame(
data=list_pois,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI, NAME_POI],
index=[0, 1, 2, 3, 4, 5, 6]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1, 1,
0.0, 'distrito_pol_1'],
[39.984559000000004, 116.326696, Timestamp('2008-10-23 10:37:26'),
1, 6, 128.24869775642176, 'adocao_de_animais'],
[40.002899, 116.32151999999999, Timestamp('2008-10-23 10:50:16'),
1, 5, 663.0104596559174, 'rinha_de_galo_world_cup'],
[40.016238, 116.30769099999999, Timestamp('2008-10-23 11:03:06'),
1, 4, 286.3387434682031, 'forro_tropykalia'],
[40.013814, 116.306525, Timestamp('2008-10-23 11:58:33'), 2, 4,
0.9311014399622559, 'forro_tropykalia'],
[40.009735, 116.315069, Timestamp('2008-10-23 23:50:45'), 2, 3,
211.06912863495492, 'supermercado_aroldo'],
[39.993527, 116.32648300000001, Timestamp('2008-10-24 00:02:14'),
2, 2, 279.6712398549538, 'policia_federal'],
[39.978575, 116.326975, Timestamp('2008-10-24 00:22:01'), 3, 6,
792.7526066105717, 'adocao_de_animais'],
[39.981668, 116.310769, Timestamp('2008-10-24 01:57:57'), 3, 7,
270.7018856738821, 'dia_do_municipio']
],
columns=[LATITUDE, LONGITUDE, DATETIME, TRAJ_ID, ID_POI, DIST_POI, NAME_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8]
)
integration.join_with_pois(move_df, pois, inplace=True)
assert_frame_equal(move_df, expected, check_dtype=False)
def test_join_with_pois_by_category():
move_df = MoveDataFrame(list_move)
pois = DataFrame(
data=list_pois,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI, NAME_POI],
index=[0, 1, 2, 3, 4, 5, 6]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1, 1,
0.0, 3, 2935.3102772960456, 7, 814.8193850933852, 5,
2672.393533820207, 6, 675.1730686007362],
[39.984559000000004, 116.326696, Timestamp('2008-10-23 10:37:26'),
1, 1, 637.6902157810676, 3, 3072.6963790707114, 7,
1385.3649632111096, 5, 2727.1360691122813, 6, 128.24869775642176],
[40.002899, 116.32151999999999, Timestamp('2008-10-23 10:50:16'),
1, 2, 1385.0871812075436, 3, 1094.8606633486436, 4,
1762.0085654338782, 5, 663.0104596559174, 6, 1965.702358742657],
[40.016238, 116.30769099999999, Timestamp('2008-10-23 11:03:06'),
1, 2, 3225.288830967221, 3, 810.5429984051405, 4,
286.3387434682031, 5, 1243.8915481769327, 6, 3768.0652637796675],
[40.013814, 116.306525, Timestamp('2008-10-23 11:58:33'), 2, 2,
3047.8382223981853, 3, 669.9731550451877, 4, 0.9311014399622559,
5, 1145.172578151837, 6, 3574.252994707609],
[40.009735, 116.315069, Timestamp('2008-10-23 23:50:45'), 2, 2,
2294.0758201547073, 3, 211.06912863495492, 4, 857.4175399672413,
5, 289.35378153627966, 6, 2855.1657930463994],
[39.993527, 116.32648300000001, Timestamp('2008-10-24 00:02:14'),
2, 2, 279.6712398549538, 3, 2179.5701631051966, 7,
2003.4096341742952, 5, 1784.3132149978549, 6, 870.5252810680124],
[39.978575, 116.326975, Timestamp('2008-10-24 00:22:01'), 3, 1,
900.7798955139455, 3, 3702.2394204188754, 7, 1287.7039084016499,
5, 3376.4438614084356, 6, 792.7526066105717],
[39.981668, 116.310769, Timestamp('2008-10-24 01:57:57'), 3, 1,
770.188754517813, 3, 3154.296880053552, 7, 270.7018856738821, 5,
2997.898227057909, 6, 1443.9247752786023]
],
columns=[
LATITUDE, LONGITUDE, DATETIME, TRAJ_ID, 'id_policia', 'dist_policia',
'id_comercio', 'dist_comercio', 'id_show', 'dist_show', 'id_risca-faca',
'dist_risca-faca', 'id_evento', 'dist_evento'
],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8]
)
integration.join_with_pois_by_category(move_df, pois, inplace=True)
assert_frame_equal(move_df, expected, check_dtype=False)
def test_join_with_events():
list_events = [
[39.984094, 116.319236, 1,
Timestamp('2008-10-24 01:57:57'), 'show do tropykalia'],
[39.991013, 116.326384, 2,
Timestamp('2008-10-24 00:22:01'), 'evento da prefeitura'],
[40.01, 116.312615, 3,
Timestamp('2008-10-25 00:21:01'), 'show do seu joao'],
[40.013821, 116.306531, 4,
Timestamp('2008-10-26 00:22:01'), 'missa']
]
move_df = MoveDataFrame(list_move)
pois = DataFrame(
data=list_events,
columns=[LATITUDE, LONGITUDE, EVENT_ID, DATETIME, EVENT_TYPE],
index=[0, 1, 2, 3]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1,
'', inf, ''],
[39.984559000000004, 116.326696, Timestamp('2008-10-23 10:37:26'),
1, '', inf, ''],
[40.002899, 116.32151999999999, Timestamp('2008-10-23 10:50:16'),
1, '', inf, ''],
[40.016238, 116.30769099999999, Timestamp('2008-10-23 11:03:06'),
1, '', inf, ''],
[40.013814, 116.306525, Timestamp('2008-10-23 11:58:33'), 2, 2,
3047.8382223981853, 'evento da prefeitura'],
[40.009735, 116.315069, Timestamp('2008-10-23 23:50:45'), 2, 2,
2294.0758201547073, 'evento da prefeitura'],
[39.993527, 116.32648300000001, Timestamp('2008-10-24 00:02:14'),
2, 2, 279.6712398549538, 'evento da prefeitura'],
[39.978575, 116.326975, Timestamp('2008-10-24 00:22:01'), 3, 1,
900.7798955139455, 'show do tropykalia'],
[39.981668, 116.310769, Timestamp('2008-10-24 01:57:57'), 3, 1,
770.188754517813, 'show do tropykalia']
],
columns=[
LATITUDE, LONGITUDE, DATETIME, TRAJ_ID, EVENT_ID, DIST_EVENT, EVENT_TYPE
],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8]
)
integration.join_with_events(move_df, pois, time_window=45000, inplace=True)
assert_frame_equal(move_df, expected, check_dtype=False)
def test_join_with_event_by_dist_and_time():
list_move = [
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984559000000004, 116.326696, Timestamp('2008-10-23 10:37:26'), 1],
[40.002899, 116.32151999999999, Timestamp('2008-10-23 10:50:16'), 1],
[40.016238, 116.30769099999999, Timestamp('2008-10-23 11:03:06'), 1],
[40.013814, 116.306525,
completion: Timestamp('2008-10-23 11:58:33') | api: pandas.Timestamp
"""Visualizations for neural network clustering results."""
import warnings
import re
import json
import math
from pprint import pprint
import itertools as it
from pathlib import Path
from collections import Counter
from functools import lru_cache
import networkx as nx
import numpy as np
import pandas as pd
import matplotlib.pylab as plt
from matplotlib.pyplot import cm
from matplotlib.ticker import MaxNLocator
from sklearn.cluster import SpectralClustering
from tqdm import tqdm
from scipy import sparse
# Assumption: clustering_experiment (the sacred Experiment used by run_spectral_cluster
# below) lives in spectral_cluster_model alongside weights_to_graph.
from spectral_cluster_model import clustering_experiment, weights_to_graph
# extract_cnn_weights is needed by plot_eigenvalues when it is given CNN weights.
from cnn.extractor import extract_cnn_weights
from utils import (suppress, all_logging_disabled,
load_weights, get_weights_paths,
extract_classification_metrics,
enumerate2, splitter,
heatmap_fixed)
RANDOM_STATE = 42
__all__ = ['draw_clustered_mlp']
# TODO: make default set with None
def run_spectral_cluster(weights_path, with_shuffle=True,
n_clusters=4, shuffle_method='layer',
n_samples=None, n_workers=None,
with_shuffled_ncuts=False,
random_state=RANDOM_STATE,
):
"""if 'mlp' in str(weights_path):
named_configs = ['mlp_config']
elif 'cnn' in str(weights_path):
named_configs = ['cnn_config']
else:
raise ValueError('Either mlp or cnn should be in path to determine the config.')"""
named_configs = ['mlp_config']
config_updates = {'weights_path': weights_path,
'with_labels': True,
'with_shuffle': with_shuffle,
'seed': random_state,
'num_clusters': n_clusters,
'shuffle_method': shuffle_method,
'with_shuffled_ncuts': with_shuffled_ncuts}
if n_samples is not None:
config_updates['num_samples'] = n_samples
if n_workers is not None:
config_updates['n_workers'] = n_workers
with suppress(), all_logging_disabled():
experiment_run = clustering_experiment.run(config_updates=config_updates,named_configs=named_configs)
metrics = experiment_run.result
clustering_labels = metrics.pop('labels')
node_mask = metrics.pop('node_mask')
metrics.pop('shuffle_method', None)
labels = np.full(len(node_mask), -1)
labels[node_mask] = clustering_labels
classification_metrics = extract_classification_metrics(Path(weights_path).parent)
metrics.update(classification_metrics['unpruned']
if 'unpruned' in str(weights_path)
else classification_metrics['pruned'])
return labels, metrics
def run_double_spectral_cluster(weight_directory, with_shuffle=True,
n_clusters=4, shuffle_method='layer',
n_samples=None, n_workers=None,
with_shuffled_ncuts=False,
random_state=RANDOM_STATE,
):
weight_paths = get_weights_paths(weight_directory)
return {is_unpruned: run_spectral_cluster(weight_path, with_shuffle,
n_clusters, shuffle_method,
n_samples, n_workers,
with_shuffled_ncuts,
random_state)
for is_unpruned, weight_path in weight_paths.items()}
def extract_layer_widths(weights):
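    # Example: weight matrices shaped [(784, 256), (256, 64), (64, 10)] yield layer
    # widths (784, 256, 64, 10): the first matrix contributes both of its dimensions,
    # each later matrix contributes only its column count.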
weight_shapes = (layer_weights.shape for layer_weights in weights)
layer_widths = []
layer_widths.extend(next(weight_shapes))
layer_widths.extend(shape[1] for shape in weight_shapes)
return tuple(layer_widths)
def get_color_mapper(n_clusters):
color_mapper = dict(enumerate(iter(cm.rainbow(np.linspace(0, 1, n_clusters)))))
color_mapper[-1] = 'gray'
return color_mapper
def set_square_nodes_positions(layer_width, nodes_sorted, space=3):
side = int(math.sqrt(layer_width))
assert side ** 2 == layer_width
offset_x = np.linspace(0, side*space, num=side, dtype=int)
starting_x = offset_x[-1]
xs = (np.zeros((side, side)) + offset_x[None, :]).reshape(-1)
center_node = side // 2
normalized_ys_row = ((np.arange(side) - center_node)
/ center_node)
normalized_ys = np.tile(normalized_ys_row[:, None], side).flatten()
return xs, normalized_ys, starting_x, side
def set_nodes_positions(nodes, layer_widths, clustering_labels,
is_first_square=True, dx=50, dy=5, jitter=10):
"""Set postions of nodes of a neural network for networkx drawing."""
pos = {}
    labeled_nodes_by_layer = splitter(zip(nodes, clustering_labels),
                                      layer_widths)
    layer_data = enumerate(zip(layer_widths, labeled_nodes_by_layer))
starting_x = 0
# TODO - refactor!
    for layer_index, (layer_width, labeled_nodes) in layer_data:
        nodes, labels = zip(*labeled_nodes)
# first layer is the input (image)
# so let's draw it as a square!
if is_first_square and layer_index == 0:
nodes_sorted = nodes
(xs, normalized_ys,
shift_x, side) = set_square_nodes_positions(layer_width, nodes_sorted)
starting_x += shift_x
height = dy * shift_x
else:
            nodes_sorted = [node for _, node in sorted(zip(labels, nodes))]
starting_x += dx
xs = np.full(layer_width, starting_x, dtype=float)
xs += 2*jitter * np.random.random(layer_width) - jitter
xs = xs.round().astype(int)
center_node = layer_width // 2
normalized_ys = ((np.arange(layer_width) - center_node)
/ center_node)
height = dy * layer_width
ys = normalized_ys * height
ys = ys.round().astype(int)
pos.update({node: (x, y) for node, (x, y) in zip(nodes_sorted, zip(xs, ys))})
return pos
def draw_metrics(metrics, ax, ndigits=5):
"""Plot spectral clustering metrics as a table."""
metrics_series = pd.Series(metrics)
ax.table(cellText=metrics_series.values[:, None].round(ndigits),
colWidths = [0.25],
rowLabels=metrics_series.index,
colLabels=[''],
cellLoc = 'center', rowLoc = 'center',
loc='bottom')
def draw_clustered_mlp(weights_path,
clustering_result,
n_clusters=4,
is_first_square=True,
ax=None):
"""Draw MLP with its spectral clustering."""
weights = load_weights(weights_path)
    layer_widths = extract_layer_widths(weights)
labels, metrics = clustering_result
G = nx.from_scipy_sparse_matrix(weights_to_graph(weights))
pos = set_nodes_positions(G.nodes, layer_widths, labels, is_first_square)
color_mapper = get_color_mapper(n_clusters)
color_map = [color_mapper[label] for label in labels]
if ax is None:
_, ax = plt.subplots(1)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
nx.draw(G, pos=pos,
node_color=color_map,
width=0, node_size=10,
ax=ax)
draw_metrics(metrics, ax)
return ax, labels, metrics
def nodify(*args):
return '-'.join(str(arg) for arg in args)
def build_cluster_graph(weights_path,
labels,
normalize_in_out=True):
weights = load_weights(weights_path)
    layer_widths = extract_layer_widths(weights)
G = nx.DiGraph()
(label_by_layer,
current_label_by_layer,
next_label_by_layer) = it.tee(splitter(labels, layer_widths), 3)
next_label_by_layer = it.islice(next_label_by_layer, 1, None)
for layer_index, layer_labels in enumerate(label_by_layer):
unique_labels = sorted(label for label in np.unique(layer_labels) if label != -1)
for label in unique_labels:
node_name = nodify(layer_index, label)
G.add_node(node_name)
edges = {}
for layer_index, (current_labels, next_labels, layer_weights) in enumerate(zip(current_label_by_layer,
next_label_by_layer,
weights)):
label_edges = it.product((label for label in np.unique(current_labels) if label != -1),
(label for label in np.unique(next_labels) if label != -1))
for current_label, next_label in label_edges:
current_mask = (current_label == current_labels)
next_mask = (next_label == next_labels)
between_weights = layer_weights[current_mask, :][:, next_mask]
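            # Edge weight between two clusters: total |weight| of the connections
            # linking them, optionally normalized by the number of unit pairs so
            # larger clusters do not dominate purely by size.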
if normalize_in_out:
n_weight_in, n_weight_out = between_weights.shape
n_weights = n_weight_in * n_weight_out
normalization_factor = n_weights
else:
normalization_factor = 1
edge_weight = np.abs(between_weights).sum() / normalization_factor
current_node = nodify(layer_index, current_label)
next_node = nodify(layer_index + 1, next_label)
edges[current_node, next_node] = edge_weight
for nodes, weight in edges.items():
G.add_edge(*nodes, weight=weight)
pos = nx.spring_layout(G) # compute graph layout
plt.axis("off")
nx.draw_networkx_nodes(G, pos, node_size=10)
nx.draw_networkx_edges(G, pos, alpha=0.3)
    plt.show()
return G
def draw_cluster_by_layer(weights_path,
clustering_result,
n_clusters=4,
with_text=False,
size_factor=4,
width_factor=30,
ax=None):
G = build_cluster_graph(weights_path,
clustering_result)
labels, _ = clustering_result
weights = load_weights(weights_path)
    layer_widths = extract_layer_widths(weights)
color_mapper = get_color_mapper(n_clusters)
node_size = {}
(label_by_layer,
current_label_by_layer,
next_label_by_layer) = it.tee(splitter(labels, layer_widths), 3)
next_label_by_layer = it.islice(next_label_by_layer, 1, None)
for layer_index, layer_labels in enumerate(label_by_layer):
unique_labels = sorted(label for label in np.unique(layer_labels) if label != -1)
for label in unique_labels:
node_name = nodify(layer_index, label)
node_size[node_name] = (layer_labels == label).sum()
pos = nx.drawing.nx_agraph.graphviz_layout(G, prog='dot')
width = [G[u][v]['weight'] * width_factor for u,v in G.edges()]
node_color = [color_mapper[int(v.split('-')[1])] for v in G.nodes()]
node_size = [node_size[v] * size_factor for v in G.nodes()]
if ax is None:
_, ax = plt.subplots(1)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
nx.draw(G, pos,
with_labels=True,
node_color=node_color,
node_size=node_size,
# font_color='white',
width=width,
ax=ax)
    if with_text:
        # `edges` was local to build_cluster_graph, so print the graph's edge weights instead.
        pprint(nx.get_edge_attributes(G, 'weight'))
return ax
def plot_eigenvalues_old(weights_path, n_eigenvalues=None, ax=None, **kwargs):
warnings.warn('deprecated', DeprecationWarning)
loaded_weights = load_weights(weights_path)
G = nx.from_scipy_sparse_matrix(weights_to_graph(loaded_weights))
G_nn = G.subgraph(max(nx.connected_components(G), key=len))
assert nx.is_connected(G_nn)
    norm_laplacian_matrix = nx.normalized_laplacian_matrix(G_nn)
    eigen_values = np.sort(np.linalg.eigvals(norm_laplacian_matrix.A))
    if n_eigenvalues is None:
start, end = 0, len(G_nn)
elif isinstance(n_eigenvalues, int):
start, end = 0, n_eigenvalues
elif isinstance(n_eigenvalues, tuple):
start, end = n_eigenvalues
else:
raise TypeError('n_eigenvalues should be either None or int or tuple or slice.')
eigen_values = eigen_values[start:end]
if ax is None:
_, ax = plt.subplots(1)
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
if 'linestyle' not in kwargs:
kwargs['linestyle'] = 'none'
kwargs['marker'] = '*'
kwargs['markersize'] = 5
return ax.plot(range(start + 1, end + 1),
eigen_values,
**kwargs)
def plot_eigenvalues(weights_path, n_eigenvalues=None, ax=None, **kwargs):
weights = load_weights(weights_path)
if 'cnn' in str(weights_path):
weights, _ = extract_cnn_weights(weights, with_avg=True) #(max_weight_convention=='one_on_n'))
print([w.shape for w in weights])
# TODO: take simpler solution from delete_isolated_ccs_refactored
adj_mat = weights_to_graph(weights)
_, components = sparse.csgraph.connected_components(adj_mat)
most_common_component_counts = Counter(components).most_common(2)
main_component_id = most_common_component_counts[0][0]
assert (len(most_common_component_counts) == 1
or most_common_component_counts[1][1] == 1)
main_component_mask = (components == main_component_id)
selected_adj_mat = adj_mat[main_component_mask, :][:, main_component_mask]
    norm_laplacian_matrix = sparse.csgraph.laplacian(selected_adj_mat, normed=True)
    if n_eigenvalues is None:
start, end = 0, selected_adj_mat.shape[0] - 2
elif isinstance(n_eigenvalues, int):
start, end = 0, n_eigenvalues
elif isinstance(n_eigenvalues, tuple):
start, end = n_eigenvalues
else:
raise TypeError('n_eigenvalues should be either None or int or tuple or slice.')
"""
    eigen_values, _ = sparse.linalg.eigs(norm_laplacian_matrix, k=end,
                                         which='SM')
which='SM')
"""
sigma = 1
    OP = norm_laplacian_matrix - sigma*sparse.eye(norm_laplacian_matrix.shape[0])
    OPinv = sparse.linalg.LinearOperator(matvec=lambda v: sparse.linalg.minres(OP, v, tol=1e-5)[0],
                                         shape=norm_laplacian_matrix.shape,
                                         dtype=norm_laplacian_matrix.dtype)
    eigen_values, _ = sparse.linalg.eigsh(norm_laplacian_matrix, sigma=sigma,
                                          k=end, which='LM', tol=1e-5, OPinv=OPinv)
eigen_values = np.sort(eigen_values)
eigen_values = eigen_values[start:end]
if ax is None:
_, ax = plt.subplots(1)
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
if 'linestyle' not in kwargs:
kwargs['linestyle'] = 'none'
kwargs['marker'] = '*'
kwargs['markersize'] = 5
return ax.plot(range(start + 1, end + 1),
eigen_values,
**kwargs)
def plot_eigenvalue_report(weight_directory,
unpruned_n_eigenvalues=None, pruned_n_eigenvalues=None,
figsize=(10, 5)):
weight_paths = get_weights_paths(weight_directory)
is_slice = (unpruned_n_eigenvalues is not None
or pruned_n_eigenvalues is not None)
n_rows = 2 if is_slice else 1
_, axes = plt.subplots(n_rows, 2, squeeze=False, figsize=figsize)
axes[0][0].set_title('Unpruned')
plot_eigenvalues(weight_paths[True],
ax=axes[0][0])
if is_slice:
plot_eigenvalues(weight_paths[True], unpruned_n_eigenvalues,
ax=axes[1][0])
axes[0][1].set_title('Pruned')
plot_eigenvalues(weight_paths[False],
ax=axes[0][1])
if is_slice:
plot_eigenvalues(weight_paths[False], pruned_n_eigenvalues,
ax=axes[1][1])
def draw_mlp_clustering_report(weight_directory,
double_clustering_results,
n_cluster=4,
title=None, figsize=(20, 30)):
weight_paths = get_weights_paths(weight_directory)
fig, axes = plt.subplots(2, 2, figsize=figsize)
if title is not None:
fig.suptitle(title)
axes[0][0].set_title('Unpruned')
draw_clustered_mlp(weight_paths[True], # True represents **un**pruned
double_clustering_results[True],
n_clusters=n_cluster,
ax=axes[0][0])
draw_cluster_by_layer(weight_paths[True],
double_clustering_results[True],
n_clusters=n_cluster,
ax=axes[1][0])
axes[0][1].set_title('Pruned')
draw_clustered_mlp(weight_paths[False],
double_clustering_results[False],
n_clusters=n_cluster,
ax=axes[0][1])
draw_cluster_by_layer(weight_paths[False],
double_clustering_results[False],
n_clusters=n_cluster,
ax=axes[1][1])
def plot_learning_curve(weight_directory, n_clusters=4, with_shuffle=False,
shuffle_method='layer', start=5, step=5,
primary_y=('ncut',),
secondary_y=('percentile', 'train_loss', 'test_loss', 'ave_in_out'),
with_tqdm=False,
ax=None):
progress_iter = tqdm if with_tqdm else iter
weight_directory_path = Path(weight_directory)
results = []
for type_ in ('unpruned', 'pruned'):
weight_paths = list(sorted(weight_directory_path.glob(f'*-{type_}*.ckpt')))[start-1::step]
_, type_results = zip(*(run_spectral_cluster(weight_path,
n_clusters=n_clusters,
with_shuffle=with_shuffle,
shuffle_method=shuffle_method)
for weight_path in progress_iter(weight_paths)))
for epoch, result in enumerate2(type_results, start=start, step=step):
result['is_pruned'] = (type_ == 'pruned')
result['epoch'] = epoch
# The result from `run_spectral_cluster` comes with the
# loss and accuracy metrics for the *final* model
# because it gets them from the `metrics.json` file.
# So for all the checkpoint models of `unpruned` we have
# the same metrics, as well as for `pruned`.
# Therefore we remove them right now, and later
# (see `evaluation_metrics` in this function)
# we will extract them from `cout.txt`.
del (result['train_loss'], result['train_acc'],
result['test_loss'], result['test_acc'])
results.extend(type_results)
df = pd.DataFrame(results)
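    # Pruned checkpoints continue the epoch axis where the unpruned run stopped,
    # so shift their epoch numbers past the last unpruned epoch.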
df.loc[df['is_pruned'], 'epoch'] += df[~df['is_pruned']]['epoch'].iloc[-1]
df = df.set_index('epoch')
metrics_file = (weight_directory_path / 'metrics.json')
raw_metrics = json.loads(metrics_file.read_text())
# TODO: refactor me!
    # The parsing of the metrics.json file could be done more elegantly
    # and factored out into a separate function
evaluation_metrics = []
real_epoch_start = start
for type_ in ('pruned', 'unpruned'):
        raw_evaluation_metrics = it.islice(zip(raw_metrics[type_]['loss'],
                                               raw_metrics[type_]['acc'],
                                               raw_metrics[type_]['val_loss'],
                                               raw_metrics[type_]['val_acc']),
                                           start-1, None, step)
evaluation_metrics += [{'epoch': epoch,
'train_loss': float(train_loss), 'train_acc': float(train_acc),
'test_loss': float(test_loss), 'test_acc': float(test_acc)}
for epoch, (train_loss, train_acc, test_loss, test_acc)
                               in enumerate2(raw_evaluation_metrics,
start=real_epoch_start, step=step)]
real_epoch_start += step * len(evaluation_metrics)
####
evaluation_metrics_df =
completion: pd.DataFrame(evaluation_metrics) | api: pandas.DataFrame
import pandas as pd
import json
def count_unique(df, col_name):
""" Count unique values in a df column """
count = df[col_name].nunique()
return count
def get_unique_column_values(df,col_name):
""" Returns unique values """
return df[col_name].unique()
def get_column_stats(df, column_name, to_dict = False):
if to_dict:
return df[column_name].value_counts().to_dict()
else:
# return df[column_name].value_counts()
c = df[column_name].value_counts(dropna=False)
p = df[column_name].value_counts(dropna=False, normalize=True)*100
m = pd.concat([c,p], axis=1, keys=['counts', '%'])
return m
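# Illustrative usage (hypothetical df): get_column_stats(df, 'relevance') returns a
# frame with one row per distinct value (NaN included, since dropna=False) and two
# columns, 'counts' and '%'; with to_dict=True it returns a plain {value: count} dict.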
def get_pandas_percentile(df, key):
    """ Return decile summary statistics for a df column """
    return df[key].describe(percentiles=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1])
def print_analysis(_list, key="relevance", _type="relevance"):
df = pd.DataFrame(_list)
df.columns = [key]
print("\n-----------------------------------")
print("Total unique {} responses".format(_type))
print(count_unique(df,key))
print("\n-----------------------------------")
print("Stats for {} responses".format(_type))
print(get_column_stats(df,key))
print("\n-----------------------------------")
print("Number of {} responses".format(_type))
print(df[key].describe())
return
def json_load(file_path):
with open(file_path, "r") as fb:
data = json.load(fb)
return data
def convert_list_json_dic(ranks_json):
image_ranks_dic = {}
for i in range(len(ranks_json)):
image_ranks_dic[ranks_json[i]["image_id"]] = ranks_json[i]
return image_ranks_dic
def main(dialogs_jsonpath, dense_annotations_jsonpath, data_type = "val"):
dialogs_reader = json_load(dialogs_jsonpath)
annotations_json = json_load(dense_annotations_jsonpath)
dialogs = dialogs_reader["data"]["dialogs"] # list of dialogs
annotations_json = convert_list_json_dic(annotations_json)
# print(annotations_json)
gt_relevance_list = []
for dial_index in range(len(dialogs)):
image_id = dialogs[dial_index]["image_id"]
dialog_for_image = dialogs[dial_index]["dialog"]
        # This condition is needed for the train set
if image_id in annotations_json:
dense_annotations = annotations_json[image_id]
gt_round_id = dense_annotations["round_id"] -1 # Converting to 0 index
gt_image_id = dense_annotations["image_id"]
if data_type == "train":
# print(dense_annotations.keys())
gt_relevance = dense_annotations["relevance"]
else:
gt_relevance = dense_annotations["gt_relevance"]
_dialog = dialog_for_image[gt_round_id]
gt_index = _dialog["gt_index"]
_gt_relevance = gt_relevance[gt_index]
gt_relevance_list.append(_gt_relevance)
# print("Length of gt relevance:", len(gt_relevance))
print(len(gt_relevance_list))
df =
completion: pd.DataFrame(gt_relevance_list) | api: pandas.DataFrame
import os
import pandas as pd
import numpy as np
def check_and_make_path(to_make):
if to_make == '':
return
if not os.path.exists(to_make):
os.makedirs(to_make)
class MATHransformer:
input_file: str
format_output_path: str
use_good_data: bool
good_data_format: str
good_data_list: list
def __init__(self, input_file, output_base_path):
self.input_file = input_file
self.format_output_path = os.path.join(output_base_path, "1.format")
self.node_list_output_path = os.path.join(output_base_path, "nodes_set")
        # Create the output directory
check_and_make_path(self.format_output_path)
def transform(self, trans_type=None, use_good_data=False):
print("transforming MATH...")
self.use_good_data = use_good_data
if trans_type is None:
trans_type = ['month', 'year']
if 'month' in trans_type:
self.handle_by_month()
if 'year' in trans_type:
self.handle_by_year()
print("transforming MATH complete\n")
def handle_by_year(self):
        # Process by year
dataframe = pd.read_csv(self.input_file, sep=" ", names=['from_id', 'to_id', 'timestamp'])
dataframe['timestamp'] = pd.to_datetime(dataframe['timestamp'], unit='s').dt.strftime('%Y')
candidate = ['2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016']
good_data = ['2010', '2011', '2012', '2013', '2014', '2015']
if self.use_good_data:
self.good_data_format = '%Y'
self.good_data_list = good_data
for year in (good_data if self.use_good_data else candidate):
tem = dataframe[['from_id', 'to_id']][dataframe['timestamp'] == year]
tem['from_id'] = tem['from_id'].map(lambda x: "U" + str(x)) # user
tem['to_id'] = tem['to_id'].map(lambda x: "U" + str(x)) # user
            # Aggregate duplicate edges into a weight column
            tem = tem.groupby(['from_id', 'to_id']).size().reset_index().rename(columns={0: 'weight'})
            tem.to_csv(os.path.join(self.format_output_path, str(year) + ".csv"), sep='\t', header=True, index=False)
def handle_by_month(self):
        # Process by month
dataframe = pd.read_csv(self.input_file, sep=" ", names=['from_id', 'to_id', 'timestamp'])
dataframe['timestamp'] = pd.to_datetime(dataframe['timestamp'], unit='s').dt.strftime('%Y-%m')
candidate = ['2009-09', '2009-10', '2009-11', '2009-12', '2010-01', '2010-02', '2010-03', '2010-04', '2010-05',
'2010-06', '2010-07', '2010-08', '2010-09', '2010-10', '2010-11', '2010-12', '2011-01', '2011-02',
'2011-03', '2011-04', '2011-05', '2011-06', '2011-07', '2011-08', '2011-09', '2011-10', '2011-11',
'2011-12', '2012-01', '2012-02', '2012-03', '2012-04', '2012-05', '2012-06', '2012-07', '2012-08',
'2012-09', '2012-10', '2012-11', '2012-12', '2013-01', '2013-02', '2013-03', '2013-04', '2013-05',
'2013-06', '2013-07', '2013-08', '2013-09', '2013-10', '2013-11', '2013-12', '2014-01', '2014-02',
'2014-03', '2014-04', '2014-05', '2014-06', '2014-07', '2014-08', '2014-09', '2014-10', '2014-11',
'2014-12', '2015-01', '2015-02', '2015-03', '2015-04', '2015-05', '2015-06', '2015-07', '2015-08',
'2015-09', '2015-10', '2015-11', '2015-12', '2016-01', '2016-02', '2016-03']
good_data = ['2012-01', '2012-02', '2012-03', '2012-04', '2012-05', '2012-06', '2012-07', '2012-08', '2012-09',
'2012-10', '2012-11', '2012-12']
good_data = ['2009-10', '2009-11', '2009-12', '2010-01', '2010-02', '2010-03', '2010-04', '2010-05', '2010-06',
'2010-07', '2010-08', '2010-09', '2010-10', '2010-11', '2010-12', '2011-01', '2011-02', '2011-03',
'2011-04', '2011-05', '2011-06', '2011-07', '2011-08', '2011-09', '2011-10', '2011-11', '2011-12',
'2012-01', '2012-02', '2012-03', '2012-04', '2012-05', '2012-06', '2012-07', '2012-08', '2012-09',
'2012-10', '2012-11', '2012-12', '2013-01', '2013-02', '2013-03', '2013-04', '2013-05', '2013-06',
'2013-07', '2013-08', '2013-09', '2013-10', '2013-11', '2013-12', '2014-01', '2014-02', '2014-03',
'2014-04', '2014-05', '2014-06', '2014-07', '2014-08', '2014-09', '2014-10', '2014-11', '2014-12',
'2015-01', '2015-02', '2015-03', '2015-04', '2015-05', '2015-06', '2015-07', '2015-08', '2015-09',
'2015-10', '2015-11', '2015-12', '2016-01', '2016-02']
if self.use_good_data:
self.good_data_format = '%Y-%m'
self.good_data_list = good_data
for month in (good_data if self.use_good_data else candidate):
tem = dataframe[['from_id', 'to_id']][dataframe['timestamp'] == month]
tem['from_id'] = tem['from_id'].map(lambda x: "U" + str(x)) # user
tem['to_id'] = tem['to_id'].map(lambda x: "U" + str(x)) # user
            # Aggregate duplicate edges into a weight column
            tem = tem.groupby(['from_id', 'to_id']).size().reset_index().rename(columns={0: 'weight'})
            tem.to_csv(os.path.join(self.format_output_path, str(month) + ".csv"), sep='\t', header=True, index=False)
    def test_granularity(self, time_format='%Y-%m-%d %H:%M:%S'):
dataframe = pd.read_csv(self.input_file, sep=" ", names=['from_id', 'to_id', 'timestamp'])
print("top 10 rows:\n")
print(dataframe[0:10])
print("shape:")
print(dataframe.shape)
dataframe['timestamp'] = pd.to_datetime(dataframe['timestamp'], unit='s').dt.strftime(time_format)
print(np.sort(dataframe['timestamp'].unique()))
def get_full_node_set(self):
print("get full node set")
nodes_set_path = self.node_list_output_path
check_and_make_path(nodes_set_path)
dataframe = pd.read_csv(self.input_file, sep=" ", names=['from_id', 'to_id', 'timestamp'])
if self.use_good_data:
dataframe['timestamp'] = pd.to_datetime(dataframe['timestamp'], unit='s').dt.strftime(self.good_data_format)
dataframe = dataframe[np.isin(dataframe['timestamp'], self.good_data_list, invert=False)]
# nodes_set = np.sort(pd.concat([dataframe['from_id'], dataframe['to_id']], axis=0, ignore_index=True).unique())
nodes_set =
completion: pd.concat([dataframe['from_id'], dataframe['to_id']], axis=0, ignore_index=True) | api: pandas.concat
import os, os.path
import pandas as pd
from requests.auth import HTTPBasicAuth
from tqdm.auto import tqdm
import requests
import time
import datetime
from apikeys import key
def currency_pair_exists(currency_pair):
'''
    Check if currency pair exists
:param str currency_pair: Currency pair (ex btcusd)
'''
url = f"https://www.bitstamp.net/api/v2/ohlc/{currency_pair}/?step=60&limit=1"
headers = {"Accept": "application/json"}
auth = HTTPBasicAuth('apikey', key.apikey)
response = requests.get(url, headers=headers , auth=auth)
if response.text == "":
return False
try:
response.json()["data"]
except TypeError:
return False
return True
def get_data(currency_pair, end=None, start=None, step=60, limit=1000):
'''
Get bitstamp historic data
:param str currency_pair: Currency pair (ex btcusd)
:param str end: Final date
:param int step: Seconds step, 60, 180, 300, 900, 1800, 3600, 7200, 14400, 21600, 43200, 86400, 259200
:param int limit: How many steps
'''
if end:
end = int(time.mktime(datetime.datetime.strptime(end, "%d/%m/%Y %H %M %S").timetuple()))
else:
end = int(datetime.datetime.now().timestamp())
url = f"https://www.bitstamp.net/api/v2/ohlc/{currency_pair}/?step={step}&limit={limit}&end={end}"
if start:
url = f"https://www.bitstamp.net/api/v2/ohlc/{currency_pair}/?step={step}&limit={limit}&start={start}"
headers = {"Accept": "application/json"}
auth = HTTPBasicAuth('apikey', key.apikey)
return requests.get(url, headers=headers , auth=auth)
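# Illustrative sketch, not part of the original module: tabulate the candles returned
# by get_data. It assumes the Bitstamp payload exposes a "data" -> "ohlc" list of
# candle dicts (the same "data" key is checked in currency_pair_exists above).
def ohlc_response_to_dataframe(response):
    '''
    Hypothetical helper: convert a get_data response into a DataFrame
    :param response: requests.Response returned by get_data
    '''
    candles = response.json()["data"]["ohlc"]
    df = pd.DataFrame(candles)
    # Bitstamp returns numbers as strings; timestamps are Unix seconds
    df = df.apply(pd.to_numeric, errors="coerce")
    df["timestamp"] = pd.to_datetime(df["timestamp"], unit="s")
    return df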
def check_availability(currency_pair):
'''
    Return the first and last available dates in the stored dataset for currency_pair, along with the dataset itself, if available
:param str currency_pair: Currency pair (ex btcusd)
:raise ValueError: if currency_pair not in database
'''
path = f"database/{currency_pair}.pkl"
if not os.path.isfile(path):
raise ValueError("Currency pair not found in the database")
df =
completion: pd.read_pickle(path) | api: pandas.read_pickle
import operator
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import SparseArray
@pytest.mark.parametrize("fill_value", [0, np.nan])
@pytest.mark.parametrize("op", [operator.pos, operator.neg])
def test_unary_op(op, fill_value):
arr = np.array([0, 1, np.nan, 2])
sparray = SparseArray(arr, fill_value=fill_value)
result = op(sparray)
expected = SparseArray(op(arr), fill_value=op(fill_value))
tm.assert_sp_array_equal(result, expected)
@pytest.mark.parametrize("fill_value", [True, False])
def test_invert(fill_value):
arr = np.array([True, False, False, True])
sparray = SparseArray(arr, fill_value=fill_value)
result = ~sparray
expected = SparseArray(~arr, fill_value=not fill_value)
tm.assert_sp_array_equal(result, expected)
result = ~pd.Series(sparray)
expected = pd.Series(expected)
tm.assert_series_equal(result, expected)
result = ~pd.DataFrame({"A": sparray})
expected = pd.DataFrame({"A": expected})
tm.assert_frame_equal(result, expected)
class TestUnaryMethods:
def test_neg_operator(self):
arr =
completion: SparseArray([-1, -2, np.nan, 3], fill_value=np.nan, dtype=np.int8) | api: pandas.core.arrays.SparseArray
"""
A warehouse for constant values required to initialize the PUDL Database.
This constants module stores and organizes a bunch of constant values which are
used throughout PUDL to populate static lists within the data packages or for
data cleaning purposes.
"""
import pandas as pd
import sqlalchemy as sa
######################################################################
# Constants used within the init.py module.
######################################################################
prime_movers = [
'steam_turbine',
'gas_turbine',
'hydro',
'internal_combustion',
'solar_pv',
'wind_turbine'
]
"""list: A list of the types of prime movers"""
rto_iso = {
'CAISO': 'California ISO',
'ERCOT': 'Electric Reliability Council of Texas',
'MISO': 'Midcontinent ISO',
'ISO-NE': 'ISO New England',
'NYISO': 'New York ISO',
'PJM': 'PJM Interconnection',
'SPP': 'Southwest Power Pool'
}
"""dict: A dictionary containing ISO/RTO abbreviations (keys) and names (values)
"""
us_states = {
'AK': 'Alaska',
'AL': 'Alabama',
'AR': 'Arkansas',
'AS': 'American Samoa',
'AZ': 'Arizona',
'CA': 'California',
'CO': 'Colorado',
'CT': 'Connecticut',
'DC': 'District of Columbia',
'DE': 'Delaware',
'FL': 'Florida',
'GA': 'Georgia',
'GU': 'Guam',
'HI': 'Hawaii',
'IA': 'Iowa',
'ID': 'Idaho',
'IL': 'Illinois',
'IN': 'Indiana',
'KS': 'Kansas',
'KY': 'Kentucky',
'LA': 'Louisiana',
'MA': 'Massachusetts',
'MD': 'Maryland',
'ME': 'Maine',
'MI': 'Michigan',
'MN': 'Minnesota',
'MO': 'Missouri',
'MP': 'Northern Mariana Islands',
'MS': 'Mississippi',
'MT': 'Montana',
'NA': 'National',
'NC': 'North Carolina',
'ND': 'North Dakota',
'NE': 'Nebraska',
'NH': 'New Hampshire',
'NJ': 'New Jersey',
'NM': 'New Mexico',
'NV': 'Nevada',
'NY': 'New York',
'OH': 'Ohio',
'OK': 'Oklahoma',
'OR': 'Oregon',
'PA': 'Pennsylvania',
'PR': 'Puerto Rico',
'RI': 'Rhode Island',
'SC': 'South Carolina',
'SD': 'South Dakota',
'TN': 'Tennessee',
'TX': 'Texas',
'UT': 'Utah',
'VA': 'Virginia',
'VI': 'Virgin Islands',
'VT': 'Vermont',
'WA': 'Washington',
'WI': 'Wisconsin',
'WV': 'West Virginia',
'WY': 'Wyoming'
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values)
"""
canada_prov_terr = {
'AB': 'Alberta',
'BC': 'British Columbia',
'CN': 'Canada',
'MB': 'Manitoba',
'NB': 'New Brunswick',
'NS': 'Nova Scotia',
'NL': 'Newfoundland and Labrador',
'NT': 'Northwest Territories',
'NU': 'Nunavut',
'ON': 'Ontario',
'PE': 'Prince Edwards Island',
'QC': 'Quebec',
'SK': 'Saskatchewan',
'YT': 'Yukon Territory',
}
"""dict: A dictionary containing Canadian provinces' and territories'
abbreviations (keys) and names (values)
"""
cems_states = {k: v for k, v in us_states.items() if v not in
{'Alaska',
'American Samoa',
'Guam',
'Hawaii',
'Northern Mariana Islands',
'National',
'Puerto Rico',
'Virgin Islands'}
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values) that are present in the CEMS dataset
"""
# This is imperfect for states that have split timezones. See:
# https://en.wikipedia.org/wiki/List_of_time_offsets_by_U.S._state_and_territory
# For states that are split, I went with where there seem to be more people
# List of timezones in pytz.common_timezones
# Canada: https://en.wikipedia.org/wiki/Time_in_Canada#IANA_time_zone_database
state_tz_approx = {
"AK": "US/Alaska", # Alaska; Not in CEMS
"AL": "US/Central", # Alabama
"AR": "US/Central", # Arkansas
"AS": "Pacific/Pago_Pago", # American Samoa; Not in CEMS
"AZ": "US/Arizona", # Arizona
"CA": "US/Pacific", # California
"CO": "US/Mountain", # Colorado
"CT": "US/Eastern", # Connecticut
"DC": "US/Eastern", # District of Columbia
"DE": "US/Eastern", # Delaware
"FL": "US/Eastern", # Florida (split state)
"GA": "US/Eastern", # Georgia
"GU": "Pacific/Guam", # Guam; Not in CEMS
"HI": "US/Hawaii", # Hawaii; Not in CEMS
"IA": "US/Central", # Iowa
"ID": "US/Mountain", # Idaho (split state)
"IL": "US/Central", # Illinois
"IN": "US/Eastern", # Indiana (split state)
"KS": "US/Central", # Kansas (split state)
"KY": "US/Eastern", # Kentucky (split state)
"LA": "US/Central", # Louisiana
"MA": "US/Eastern", # Massachusetts
"MD": "US/Eastern", # Maryland
"ME": "US/Eastern", # Maine
"MI": "America/Detroit", # Michigan (split state)
"MN": "US/Central", # Minnesota
"MO": "US/Central", # Missouri
"MP": "Pacific/Saipan", # Northern Mariana Islands; Not in CEMS
"MS": "US/Central", # Mississippi
"MT": "US/Mountain", # Montana
"NC": "US/Eastern", # North Carolina
"ND": "US/Central", # North Dakota (split state)
"NE": "US/Central", # Nebraska (split state)
"NH": "US/Eastern", # New Hampshire
"NJ": "US/Eastern", # New Jersey
"NM": "US/Mountain", # New Mexico
"NV": "US/Pacific", # Nevada
"NY": "US/Eastern", # New York
"OH": "US/Eastern", # Ohio
"OK": "US/Central", # Oklahoma
"OR": "US/Pacific", # Oregon (split state)
"PA": "US/Eastern", # Pennsylvania
"PR": "America/Puerto_Rico", # Puerto Rico; Not in CEMS
"RI": "US/Eastern", # Rhode Island
"SC": "US/Eastern", # South Carolina
"SD": "US/Central", # South Dakota (split state)
"TN": "US/Central", # Tennessee
"TX": "US/Central", # Texas
"UT": "US/Mountain", # Utah
"VA": "US/Eastern", # Virginia
"VI": "America/Puerto_Rico", # Virgin Islands; Not in CEMS
"VT": "US/Eastern", # Vermont
"WA": "US/Pacific", # Washington
"WI": "US/Central", # Wisconsin
"WV": "US/Eastern", # West Virginia
"WY": "US/Mountain", # Wyoming
# Canada (none of these are in CEMS)
"AB": "America/Edmonton", # Alberta
"BC": "America/Vancouver", # British Columbia (split province)
"MB": "America/Winnipeg", # Manitoba
"NB": "America/Moncton", # New Brunswick
"NS": "America/Halifax", # Nova Scotia
"NL": "America/St_Johns", # Newfoundland and Labrador (split province)
"NT": "America/Yellowknife", # Northwest Territories (split province)
"NU": "America/Iqaluit", # Nunavut (split province)
"ON": "America/Toronto", # Ontario (split province)
"PE": "America/Halifax", # Prince Edwards Island
"QC": "America/Montreal", # Quebec (split province)
"SK": "America/Regina", # Saskatchewan (split province)
"YT": "America/Whitehorse", # Yukon Territory
}
"""dict: A dictionary containing US and Canadian state/territory abbreviations
(keys) and timezones (values)
"""
ferc1_power_purchase_type = {
'RQ': 'requirement',
'LF': 'long_firm',
'IF': 'intermediate_firm',
'SF': 'short_firm',
'LU': 'long_unit',
'IU': 'intermediate_unit',
'EX': 'electricity_exchange',
'OS': 'other_service',
'AD': 'adjustment'
}
"""dict: A dictionary of abbreviations (keys) and types (values) for power
purchase agreements from FERC Form 1.
"""
# Dictionary mapping DBF files (w/o .DBF file extension) to DB table names
ferc1_dbf2tbl = {
'F1_1': 'f1_respondent_id',
'F1_2': 'f1_acb_epda',
'F1_3': 'f1_accumdepr_prvsn',
'F1_4': 'f1_accumdfrrdtaxcr',
'F1_5': 'f1_adit_190_detail',
'F1_6': 'f1_adit_190_notes',
'F1_7': 'f1_adit_amrt_prop',
'F1_8': 'f1_adit_other',
'F1_9': 'f1_adit_other_prop',
'F1_10': 'f1_allowances',
'F1_11': 'f1_bal_sheet_cr',
'F1_12': 'f1_capital_stock',
'F1_13': 'f1_cash_flow',
'F1_14': 'f1_cmmn_utlty_p_e',
'F1_15': 'f1_comp_balance_db',
'F1_16': 'f1_construction',
'F1_17': 'f1_control_respdnt',
'F1_18': 'f1_co_directors',
'F1_19': 'f1_cptl_stk_expns',
'F1_20': 'f1_csscslc_pcsircs',
'F1_21': 'f1_dacs_epda',
'F1_22': 'f1_dscnt_cptl_stk',
'F1_23': 'f1_edcfu_epda',
'F1_24': 'f1_elctrc_erg_acct',
'F1_25': 'f1_elctrc_oper_rev',
'F1_26': 'f1_elc_oper_rev_nb',
'F1_27': 'f1_elc_op_mnt_expn',
'F1_28': 'f1_electric',
'F1_29': 'f1_envrnmntl_expns',
'F1_30': 'f1_envrnmntl_fclty',
'F1_31': 'f1_fuel',
'F1_32': 'f1_general_info',
'F1_33': 'f1_gnrt_plant',
'F1_34': 'f1_important_chg',
'F1_35': 'f1_incm_stmnt_2',
'F1_36': 'f1_income_stmnt',
'F1_37': 'f1_miscgen_expnelc',
'F1_38': 'f1_misc_dfrrd_dr',
'F1_39': 'f1_mthly_peak_otpt',
'F1_40': 'f1_mtrl_spply',
'F1_41': 'f1_nbr_elc_deptemp',
'F1_42': 'f1_nonutility_prop',
'F1_43': 'f1_note_fin_stmnt', # 37% of DB
'F1_44': 'f1_nuclear_fuel',
'F1_45': 'f1_officers_co',
'F1_46': 'f1_othr_dfrrd_cr',
'F1_47': 'f1_othr_pd_in_cptl',
'F1_48': 'f1_othr_reg_assets',
'F1_49': 'f1_othr_reg_liab',
'F1_50': 'f1_overhead',
'F1_51': 'f1_pccidica',
'F1_52': 'f1_plant_in_srvce',
'F1_53': 'f1_pumped_storage',
'F1_54': 'f1_purchased_pwr',
'F1_55': 'f1_reconrpt_netinc',
'F1_56': 'f1_reg_comm_expn',
'F1_57': 'f1_respdnt_control',
'F1_58': 'f1_retained_erng',
'F1_59': 'f1_r_d_demo_actvty',
'F1_60': 'f1_sales_by_sched',
'F1_61': 'f1_sale_for_resale',
'F1_62': 'f1_sbsdry_totals',
'F1_63': 'f1_schedules_list',
'F1_64': 'f1_security_holder',
'F1_65': 'f1_slry_wg_dstrbtn',
'F1_66': 'f1_substations',
'F1_67': 'f1_taxacc_ppchrgyr',
'F1_68': 'f1_unrcvrd_cost',
'F1_69': 'f1_utltyplnt_smmry',
'F1_70': 'f1_work',
'F1_71': 'f1_xmssn_adds',
'F1_72': 'f1_xmssn_elc_bothr',
'F1_73': 'f1_xmssn_elc_fothr',
'F1_74': 'f1_xmssn_line',
'F1_75': 'f1_xtraordnry_loss',
'F1_76': 'f1_codes_val',
'F1_77': 'f1_sched_lit_tbl',
'F1_78': 'f1_audit_log',
'F1_79': 'f1_col_lit_tbl',
'F1_80': 'f1_load_file_names',
'F1_81': 'f1_privilege',
'F1_82': 'f1_sys_error_log',
'F1_83': 'f1_unique_num_val',
'F1_84': 'f1_row_lit_tbl',
'F1_85': 'f1_footnote_data',
'F1_86': 'f1_hydro',
'F1_87': 'f1_footnote_tbl', # 52% of DB
'F1_88': 'f1_ident_attsttn',
'F1_89': 'f1_steam',
'F1_90': 'f1_leased',
'F1_91': 'f1_sbsdry_detail',
'F1_92': 'f1_plant',
'F1_93': 'f1_long_term_debt',
'F1_106_2009': 'f1_106_2009',
'F1_106A_2009': 'f1_106a_2009',
'F1_106B_2009': 'f1_106b_2009',
'F1_208_ELC_DEP': 'f1_208_elc_dep',
'F1_231_TRN_STDYCST': 'f1_231_trn_stdycst',
'F1_324_ELC_EXPNS': 'f1_324_elc_expns',
'F1_325_ELC_CUST': 'f1_325_elc_cust',
'F1_331_TRANSISO': 'f1_331_transiso',
'F1_338_DEP_DEPL': 'f1_338_dep_depl',
'F1_397_ISORTO_STL': 'f1_397_isorto_stl',
'F1_398_ANCL_PS': 'f1_398_ancl_ps',
'F1_399_MTH_PEAK': 'f1_399_mth_peak',
'F1_400_SYS_PEAK': 'f1_400_sys_peak',
'F1_400A_ISO_PEAK': 'f1_400a_iso_peak',
'F1_429_TRANS_AFF': 'f1_429_trans_aff',
'F1_ALLOWANCES_NOX': 'f1_allowances_nox',
'F1_CMPINC_HEDGE_A': 'f1_cmpinc_hedge_a',
'F1_CMPINC_HEDGE': 'f1_cmpinc_hedge',
'F1_EMAIL': 'f1_email',
'F1_RG_TRN_SRV_REV': 'f1_rg_trn_srv_rev',
'F1_S0_CHECKS': 'f1_s0_checks',
'F1_S0_FILING_LOG': 'f1_s0_filing_log',
'F1_SECURITY': 'f1_security'
# 'F1_PINS': 'f1_pins', # private data, not publicized.
# 'F1_FREEZE': 'f1_freeze', # private data, not publicized
}
"""dict: A dictionary mapping FERC Form 1 DBF files(w / o .DBF file extension)
(keys) to database table names (values).
"""
ferc1_huge_tables = {
'f1_footnote_tbl',
'f1_footnote_data',
'f1_note_fin_stmnt',
}
"""set: A set containing large FERC Form 1 tables.
"""
# Invert the map above so we can go either way as needed
ferc1_tbl2dbf = {v: k for k, v in ferc1_dbf2tbl.items()}
"""dict: A dictionary mapping database table names (keys) to FERC Form 1 DBF
files(w / o .DBF file extension) (values).
"""
# This dictionary maps the strings which are used to denote field types in the
# DBF objects to the corresponding generic SQLAlchemy Column types:
# These definitions come from a combination of the dbfread example program
# dbf2sqlite and this DBF file format documentation page:
# http://www.dbase.com/KnowledgeBase/int/db7_file_fmt.htm
# Un-mapped types left as 'XXX' which should obviously make an error...
dbf_typemap = {
'C': sa.String,
'D': sa.Date,
'F': sa.Float,
'I': sa.Integer,
'L': sa.Boolean,
'M': sa.Text, # 10 digit .DBT block number, stored as a string...
'N': sa.Float,
'T': sa.DateTime,
'0': sa.Integer, # based on dbf2sqlite mapping
'B': 'XXX', # .DBT block number, binary string
'@': 'XXX', # Timestamp... Date = Julian Day, Time is in milliseconds?
'+': 'XXX', # Autoincrement (e.g. for IDs)
'O': 'XXX', # Double, 8 bytes
'G': 'XXX', # OLE 10 digit/byte number of a .DBT block, stored as string
}
"""dict: A dictionary mapping field types in the DBF objects (keys) to the
corresponding generic SQLAlchemy Column types.
"""
# This is the set of tables which have been successfully integrated into PUDL:
ferc1_pudl_tables = (
'fuel_ferc1', # Plant-level data, linked to plants_steam_ferc1
'plants_steam_ferc1', # Plant-level data
'plants_small_ferc1', # Plant-level data
'plants_hydro_ferc1', # Plant-level data
'plants_pumped_storage_ferc1', # Plant-level data
'purchased_power_ferc1', # Inter-utility electricity transactions
'plant_in_service_ferc1', # Row-mapped plant accounting data.
# 'accumulated_depreciation_ferc1' # Requires row-mapping to be useful.
)
"""tuple: A tuple containing the FERC Form 1 tables that can be successfully
integrated into PUDL.
"""
table_map_ferc1_pudl = {
'fuel_ferc1': 'f1_fuel',
'plants_steam_ferc1': 'f1_steam',
'plants_small_ferc1': 'f1_gnrt_plant',
'plants_hydro_ferc1': 'f1_hydro',
'plants_pumped_storage_ferc1': 'f1_pumped_storage',
'plant_in_service_ferc1': 'f1_plant_in_srvce',
'purchased_power_ferc1': 'f1_purchased_pwr',
# 'accumulated_depreciation_ferc1': 'f1_accumdepr_prvsn'
}
"""dict: A dictionary mapping PUDL table names (keys) to the corresponding FERC
Form 1 DBF table names.
"""
# This is the list of EIA923 tables that can be successfully pulled into PUDL
eia923_pudl_tables = ('generation_fuel_eia923',
'boiler_fuel_eia923',
'generation_eia923',
'coalmine_eia923',
'fuel_receipts_costs_eia923')
"""tuple: A tuple containing the EIA923 tables that can be successfully
integrated into PUDL.
"""
epaipm_pudl_tables = (
'transmission_single_epaipm',
'transmission_joint_epaipm',
'load_curves_epaipm',
'plant_region_map_epaipm',
)
"""tuple: A tuple containing the EPA IPM tables that can be successfully
integrated into PUDL.
"""
# List of entity tables
entity_tables = ['utilities_entity_eia',
'plants_entity_eia',
'generators_entity_eia',
'boilers_entity_eia',
'regions_entity_epaipm', ]
"""list: A list of PUDL entity tables.
"""
xlsx_maps_pkg = 'pudl.package_data.meta.xlsx_maps'
"""string: The location of the xlsx maps within the PUDL package data."""
##############################################################################
# EIA 923 Spreadsheet Metadata
##############################################################################
##############################################################################
# EIA 860 Spreadsheet Metadata
##############################################################################
# This is the list of EIA860 tables that can be successfully pulled into PUDL
eia860_pudl_tables = (
'boiler_generator_assn_eia860',
'utilities_eia860',
'plants_eia860',
'generators_eia860',
'ownership_eia860'
)
"""tuple: A tuple enumerating EIA 860 tables for which PUDL's ETL works."""
# The set of FERC Form 1 tables that have the same composite primary keys: [
# respondent_id, report_year, report_prd, row_number, spplmnt_num ].
# TODO: THIS ONLY PERTAINS TO 2015 AND MAY NEED TO BE ADJUSTED BY YEAR...
ferc1_data_tables = (
'f1_acb_epda', 'f1_accumdepr_prvsn', 'f1_accumdfrrdtaxcr',
'f1_adit_190_detail', 'f1_adit_190_notes', 'f1_adit_amrt_prop',
'f1_adit_other', 'f1_adit_other_prop', 'f1_allowances', 'f1_bal_sheet_cr',
'f1_capital_stock', 'f1_cash_flow', 'f1_cmmn_utlty_p_e',
'f1_comp_balance_db', 'f1_construction', 'f1_control_respdnt',
'f1_co_directors', 'f1_cptl_stk_expns', 'f1_csscslc_pcsircs',
'f1_dacs_epda', 'f1_dscnt_cptl_stk', 'f1_edcfu_epda', 'f1_elctrc_erg_acct',
'f1_elctrc_oper_rev', 'f1_elc_oper_rev_nb', 'f1_elc_op_mnt_expn',
'f1_electric', 'f1_envrnmntl_expns', 'f1_envrnmntl_fclty', 'f1_fuel',
'f1_general_info', 'f1_gnrt_plant', 'f1_important_chg', 'f1_incm_stmnt_2',
'f1_income_stmnt', 'f1_miscgen_expnelc', 'f1_misc_dfrrd_dr',
'f1_mthly_peak_otpt', 'f1_mtrl_spply', 'f1_nbr_elc_deptemp',
'f1_nonutility_prop', 'f1_note_fin_stmnt', 'f1_nuclear_fuel',
'f1_officers_co', 'f1_othr_dfrrd_cr', 'f1_othr_pd_in_cptl',
'f1_othr_reg_assets', 'f1_othr_reg_liab', 'f1_overhead', 'f1_pccidica',
'f1_plant_in_srvce', 'f1_pumped_storage', 'f1_purchased_pwr',
'f1_reconrpt_netinc', 'f1_reg_comm_expn', 'f1_respdnt_control',
'f1_retained_erng', 'f1_r_d_demo_actvty', 'f1_sales_by_sched',
'f1_sale_for_resale', 'f1_sbsdry_totals', 'f1_schedules_list',
'f1_security_holder', 'f1_slry_wg_dstrbtn', 'f1_substations',
'f1_taxacc_ppchrgyr', 'f1_unrcvrd_cost', 'f1_utltyplnt_smmry', 'f1_work',
'f1_xmssn_adds', 'f1_xmssn_elc_bothr', 'f1_xmssn_elc_fothr',
'f1_xmssn_line', 'f1_xtraordnry_loss',
'f1_hydro', 'f1_steam', 'f1_leased', 'f1_sbsdry_detail',
'f1_plant', 'f1_long_term_debt', 'f1_106_2009', 'f1_106a_2009',
'f1_106b_2009', 'f1_208_elc_dep', 'f1_231_trn_stdycst', 'f1_324_elc_expns',
'f1_325_elc_cust', 'f1_331_transiso', 'f1_338_dep_depl',
'f1_397_isorto_stl', 'f1_398_ancl_ps', 'f1_399_mth_peak',
'f1_400_sys_peak', 'f1_400a_iso_peak', 'f1_429_trans_aff',
'f1_allowances_nox', 'f1_cmpinc_hedge_a', 'f1_cmpinc_hedge',
'f1_rg_trn_srv_rev')
"""tuple: A tuple containing the FERC Form 1 tables that have the same composite
primary keys: [respondent_id, report_year, report_prd, row_number,
spplmnt_num].
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 pages 204-207, Electric Plant in Service.
# Descriptions from: https://www.law.cornell.edu/cfr/text/18/part-101
ferc_electric_plant_accounts = pd.DataFrame.from_records([
# 1. Intangible Plant
(2, '301', 'Intangible: Organization'),
(3, '302', 'Intangible: Franchises and consents'),
(4, '303', 'Intangible: Miscellaneous intangible plant'),
(5, 'subtotal_intangible', 'Subtotal: Intangible Plant'),
# 2. Production Plant
# A. steam production
(8, '310', 'Steam production: Land and land rights'),
(9, '311', 'Steam production: Structures and improvements'),
(10, '312', 'Steam production: Boiler plant equipment'),
(11, '313', 'Steam production: Engines and engine-driven generators'),
(12, '314', 'Steam production: Turbogenerator units'),
(13, '315', 'Steam production: Accessory electric equipment'),
(14, '316', 'Steam production: Miscellaneous power plant equipment'),
(15, '317', 'Steam production: Asset retirement costs for steam production\
plant'),
(16, 'subtotal_steam_production', 'Subtotal: Steam Production Plant'),
# B. nuclear production
(18, '320', 'Nuclear production: Land and land rights (Major only)'),
(19, '321', 'Nuclear production: Structures and improvements (Major\
only)'),
(20, '322', 'Nuclear production: Reactor plant equipment (Major only)'),
(21, '323', 'Nuclear production: Turbogenerator units (Major only)'),
(22, '324', 'Nuclear production: Accessory electric equipment (Major\
only)'),
(23, '325', 'Nuclear production: Miscellaneous power plant equipment\
(Major only)'),
(24, '326', 'Nuclear production: Asset retirement costs for nuclear\
production plant (Major only)'),
(25, 'subtotal_nuclear_produciton', 'Subtotal: Nuclear Production Plant'),
# C. hydraulic production
(27, '330', 'Hydraulic production: Land and land rights'),
(28, '331', 'Hydraulic production: Structures and improvements'),
(29, '332', 'Hydraulic production: Reservoirs, dams, and waterways'),
(30, '333', 'Hydraulic production: Water wheels, turbines and generators'),
(31, '334', 'Hydraulic production: Accessory electric equipment'),
(32, '335', 'Hydraulic production: Miscellaneous power plant equipment'),
(33, '336', 'Hydraulic production: Roads, railroads and bridges'),
(34, '337', 'Hydraulic production: Asset retirement costs for hydraulic\
production plant'),
(35, 'subtotal_hydraulic_production', 'Subtotal: Hydraulic Production\
Plant'),
# D. other production
(37, '340', 'Other production: Land and land rights'),
(38, '341', 'Other production: Structures and improvements'),
(39, '342', 'Other production: Fuel holders, producers, and accessories'),
(40, '343', 'Other production: Prime movers'),
(41, '344', 'Other production: Generators'),
(42, '345', 'Other production: Accessory electric equipment'),
(43, '346', 'Other production: Miscellaneous power plant equipment'),
(44, '347', 'Other production: Asset retirement costs for other production\
plant'),
(None, '348', 'Other production: Energy Storage Equipment'),
(45, 'subtotal_other_production', 'Subtotal: Other Production Plant'),
(46, 'subtotal_production', 'Subtotal: Production Plant'),
# 3. Transmission Plant,
(48, '350', 'Transmission: Land and land rights'),
(None, '351', 'Transmission: Energy Storage Equipment'),
(49, '352', 'Transmission: Structures and improvements'),
(50, '353', 'Transmission: Station equipment'),
(51, '354', 'Transmission: Towers and fixtures'),
(52, '355', 'Transmission: Poles and fixtures'),
(53, '356', 'Transmission: Overhead conductors and devices'),
(54, '357', 'Transmission: Underground conduit'),
(55, '358', 'Transmission: Underground conductors and devices'),
(56, '359', 'Transmission: Roads and trails'),
(57, '359.1', 'Transmission: Asset retirement costs for transmission\
plant'),
(58, 'subtotal_transmission', 'Subtotal: Transmission Plant'),
# 4. Distribution Plant
(60, '360', 'Distribution: Land and land rights'),
(61, '361', 'Distribution: Structures and improvements'),
(62, '362', 'Distribution: Station equipment'),
(63, '363', 'Distribution: Storage battery equipment'),
(64, '364', 'Distribution: Poles, towers and fixtures'),
(65, '365', 'Distribution: Overhead conductors and devices'),
(66, '366', 'Distribution: Underground conduit'),
(67, '367', 'Distribution: Underground conductors and devices'),
(68, '368', 'Distribution: Line transformers'),
(69, '369', 'Distribution: Services'),
(70, '370', 'Distribution: Meters'),
(71, '371', 'Distribution: Installations on customers\' premises'),
(72, '372', 'Distribution: Leased property on customers\' premises'),
(73, '373', 'Distribution: Street lighting and signal systems'),
(74, '374', 'Distribution: Asset retirement costs for distribution plant'),
(75, 'subtotal_distribution', 'Subtotal: Distribution Plant'),
# 5. Regional Transmission and Market Operation Plant
(77, '380', 'Regional transmission: Land and land rights'),
(78, '381', 'Regional transmission: Structures and improvements'),
(79, '382', 'Regional transmission: Computer hardware'),
(80, '383', 'Regional transmission: Computer software'),
(81, '384', 'Regional transmission: Communication Equipment'),
(82, '385', 'Regional transmission: Miscellaneous Regional Transmission\
and Market Operation Plant'),
(83, '386', 'Regional transmission: Asset Retirement Costs for Regional\
Transmission and Market Operation\
Plant'),
(84, 'subtotal_regional_transmission', 'Subtotal: Transmission and Market\
Operation Plant'),
(None, '387', 'Regional transmission: [Reserved]'),
# 6. General Plant
(86, '389', 'General: Land and land rights'),
(87, '390', 'General: Structures and improvements'),
(88, '391', 'General: Office furniture and equipment'),
(89, '392', 'General: Transportation equipment'),
(90, '393', 'General: Stores equipment'),
(91, '394', 'General: Tools, shop and garage equipment'),
(92, '395', 'General: Laboratory equipment'),
(93, '396', 'General: Power operated equipment'),
(94, '397', 'General: Communication equipment'),
(95, '398', 'General: Miscellaneous equipment'),
(96, 'subtotal_general', 'Subtotal: General Plant'),
(97, '399', 'General: Other tangible property'),
(98, '399.1', 'General: Asset retirement costs for general plant'),
(99, 'total_general', 'TOTAL General Plant'),
(100, '101_and_106', 'Electric plant in service (Major only)'),
(101, '102_purchased', 'Electric plant purchased'),
(102, '102_sold', 'Electric plant sold'),
(103, '103', 'Experimental plant unclassified'),
(104, 'total_electric_plant', 'TOTAL Electric Plant in Service')],
columns=['row_number', 'ferc_account_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 pages 204 - 207, Electric Plant in
Service.
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 page 219, ACCUMULATED PROVISION FOR DEPRECIATION
# OF ELECTRIC UTILITY PLANT (Account 108).
ferc_accumulated_depreciation = pd.DataFrame.from_records([
# Section A. Balances and Changes During Year
(1, 'balance_beginning_of_year', 'Balance Beginning of Year'),
(3, 'depreciation_expense', '(403) Depreciation Expense'),
(4, 'depreciation_expense_asset_retirement', \
'(403.1) Depreciation Expense for Asset Retirement Costs'),
(5, 'expense_electric_plant_leased_to_others', \
'(413) Exp. of Elec. Plt. Leas. to Others'),
(6, 'transportation_expenses_clearing',\
'Transportation Expenses-Clearing'),
(7, 'other_clearing_accounts', 'Other Clearing Accounts'),
(8, 'other_accounts_specified',\
'Other Accounts (Specify, details in footnote):'),
# blank: might also be other charges like line 17.
(9, 'other_charges', 'Other Charges:'),
(10, 'total_depreciation_provision_for_year',\
'TOTAL Deprec. Prov for Year (Enter Total of lines 3 thru 9)'),
(11, 'net_charges_for_plant_retired', 'Net Charges for Plant Retired:'),
(12, 'book_cost_of_plant_retired', 'Book Cost of Plant Retired'),
(13, 'cost_of_removal', 'Cost of Removal'),
(14, 'salvage_credit', 'Salvage (Credit)'),
(15, 'total_net_charges_for_plant_retired',\
'TOTAL Net Chrgs. for Plant Ret. (Enter Total of lines 12 thru 14)'),
(16, 'other_debit_or_credit_items',\
'Other Debit or Cr. Items (Describe, details in footnote):'),
# blank: can be "Other Charges", e.g. in 2012 for PSCo.
(17, 'other_charges_2', 'Other Charges 2'),
(18, 'book_cost_or_asset_retirement_costs_retired',\
'Book Cost or Asset Retirement Costs Retired'),
(19, 'balance_end_of_year', \
'Balance End of Year (Enter Totals of lines 1, 10, 15, 16, and 18)'),
# Section B. Balances at End of Year According to Functional Classification
(20, 'steam_production_end_of_year', 'Steam Production'),
(21, 'nuclear_production_end_of_year', 'Nuclear Production'),
(22, 'hydraulic_production_end_of_year',\
'Hydraulic Production-Conventional'),
(23, 'pumped_storage_end_of_year', 'Hydraulic Production-Pumped Storage'),
(24, 'other_production', 'Other Production'),
(25, 'transmission', 'Transmission'),
(26, 'distribution', 'Distribution'),
(27, 'regional_transmission_and_market_operation',
'Regional Transmission and Market Operation'),
(28, 'general', 'General'),
(29, 'total', 'TOTAL (Enter Total of lines 20 thru 28)')],
columns=['row_number', 'line_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 page 219, Accumulated Provision for
Depreciation of electric utility plant(Account 108).
"""
######################################################################
# Constants from EIA From 923 used within init.py module
######################################################################
# From Page 7 of EIA Form 923, Census Region a US state is located in
census_region = {
'NEW': 'New England',
'MAT': 'Middle Atlantic',
'SAT': 'South Atlantic',
'ESC': 'East South Central',
'WSC': 'West South Central',
'ENC': 'East North Central',
'WNC': 'West North Central',
'MTN': 'Mountain',
'PACC': 'Pacific Contiguous (OR, WA, CA)',
'PACN': 'Pacific Non-Contiguous (AK, HI)',
}
"""dict: A dictionary mapping Census Region abbreviations (keys) to Census
Region names (values).
"""
# From Page 7 of EIA Form 923
# Static list of NERC (North American Electric Reliability Corporation)
# regions, used for where plant is located
nerc_region = {
'NPCC': 'Northeast Power Coordinating Council',
'ASCC': 'Alaska Systems Coordinating Council',
'HICC': 'Hawaiian Islands Coordinating Council',
'MRO': 'Midwest Reliability Organization',
'SERC': 'SERC Reliability Corporation',
'RFC': 'Reliability First Corporation',
'SPP': 'Southwest Power Pool',
'TRE': 'Texas Regional Entity',
'FRCC': 'Florida Reliability Coordinating Council',
'WECC': 'Western Electricity Coordinating Council'
}
"""dict: A dictionary mapping NERC Region abbreviations (keys) to NERC
Region names (values).
"""
# From Page 7 of EIA Form 923 EIA’s internal consolidated NAICS sectors.
# For internal purposes, EIA consolidates NAICS categories into seven groups.
sector_eia = {
# traditional regulated electric utilities
'1': 'Electric Utility',
# Independent power producers which are not cogenerators
'2': 'NAICS-22 Non-Cogen',
# Independent power producers which are cogenerators, but whose
# primary business purpose is the sale of electricity to the public
'3': 'NAICS-22 Cogen',
# Commercial non-cogeneration facilities that produce electric power,
    # are connected to the grid, and can sell power to the public
'4': 'Commercial NAICS Non-Cogen',
# Commercial cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'5': 'Commercial NAICS Cogen',
# Industrial non-cogeneration facilities that produce electric power, are
    # connected to the grid, and can sell power to the public
'6': 'Industrial NAICS Non-Cogen',
# Industrial cogeneration facilities that produce electric power, are
    # connected to the grid, and can sell power to the public
'7': 'Industrial NAICS Cogen'
}
"""dict: A dictionary mapping EIA numeric codes (keys) to EIA’s internal
consolidated NAICS sectors (values).
"""
# EIA 923: EIA Type of prime mover:
prime_movers_eia923 = {
'BA': 'Energy Storage, Battery',
'BT': 'Turbines Used in a Binary Cycle. Including those used for geothermal applications',
'CA': 'Combined-Cycle -- Steam Part',
'CC': 'Combined-Cycle, Total Unit',
'CE': 'Energy Storage, Compressed Air',
'CP': 'Energy Storage, Concentrated Solar Power',
'CS': 'Combined-Cycle Single-Shaft Combustion Turbine and Steam Turbine share of single',
'CT': 'Combined-Cycle Combustion Turbine Part',
'ES': 'Energy Storage, Other (Specify on Schedule 9, Comments)',
'FC': 'Fuel Cell',
'FW': 'Energy Storage, Flywheel',
'GT': 'Combustion (Gas) Turbine. Including Jet Engine design',
'HA': 'Hydrokinetic, Axial Flow Turbine',
'HB': 'Hydrokinetic, Wave Buoy',
'HK': 'Hydrokinetic, Other',
'HY': 'Hydraulic Turbine. Including turbines associated with delivery of water by pipeline.',
'IC': 'Internal Combustion (diesel, piston, reciprocating) Engine',
'PS': 'Energy Storage, Reversible Hydraulic Turbine (Pumped Storage)',
'OT': 'Other',
'ST': 'Steam Turbine. Including Nuclear, Geothermal, and Solar Steam (does not include Combined Cycle).',
'PV': 'Photovoltaic',
'WT': 'Wind Turbine, Onshore',
'WS': 'Wind Turbine, Offshore'
}
"""dict: A dictionary mapping EIA 923 prime mover codes (keys) and prime mover
names / descriptions (values).
"""
# EIA 923: The fuel code reported to EIA. Two or three letter alphanumeric:
fuel_type_eia923 = {
'AB': 'Agricultural By-Products',
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BIT': 'Bituminous Coal',
'BLQ': 'Black Liquor',
'CBL': 'Coal, Blended',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'GEO': 'Geothermal',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LFG': 'Landfill Gas',
'LIG': 'Lignite Coal',
'MSB': 'Biogenic Municipal Solid Waste',
'MSN': 'Non-biogenic Municipal Solid Waste',
'MSW': 'Municipal Solid Waste',
'MWH': 'Electricity used for energy storage',
'NG': 'Natural Gas',
'NUC': 'Nuclear. Including Uranium, Plutonium, and Thorium.',
'OBG': 'Other Biomass Gas. Including digester gas, methane, and other biomass gases.',
'OBL': 'Other Biomass Liquids',
'OBS': 'Other Biomass Solids',
'OG': 'Other Gas',
'OTH': 'Other Fuel',
'PC': 'Petroleum Coke',
'PG': 'Gaseous Propane',
'PUR': 'Purchased Steam',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SGC': 'Coal-Derived Synthesis Gas',
'SGP': 'Synthesis Gas from Petroleum Coke',
'SLW': 'Sludge Waste',
'SUB': 'Subbituminous Coal',
'SUN': 'Solar',
'TDF': 'Tire-derived Fuels',
'WAT': 'Water at a Conventional Hydroelectric Turbine and water used in Wave Buoy Hydrokinetic Technology, current Hydrokinetic Technology, Tidal Hydrokinetic Technology, and Pumping Energy for Reversible (Pumped Storage) Hydroelectric Turbines.',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
'WDL': 'Wood Waste Liquids, excluding Black Liquor. Including red liquor, sludge wood, spent sulfite liquor, and other wood-based liquids.',
    'WDS': 'Wood/Wood Waste Solids. Including paper pellets, railroad ties, utility poles, wood chips, bark, and other wood waste solids.',
'WH': 'Waste Heat not directly attributed to a fuel source',
'WND': 'Wind',
    'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.'
}
"""dict: A dictionary mapping EIA 923 fuel type codes (keys) and fuel type
names / descriptions (values).
"""
# Fuel type strings for EIA 923 generator fuel table
fuel_type_eia923_gen_fuel_coal_strings = [
'ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with coal fuel.
"""
fuel_type_eia923_gen_fuel_oil_strings = [
'dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: The list of EIA 923 Generation Fuel strings associated with oil fuel.
"""
fuel_type_eia923_gen_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: The list of EIA 923 Generation Fuel strings associated with gas fuel.
"""
fuel_type_eia923_gen_fuel_solar_strings = ['sun', ]
"""list: The list of EIA 923 Generation Fuel strings associated with solar
power.
"""
fuel_type_eia923_gen_fuel_wind_strings = ['wnd', ]
"""list: The list of EIA 923 Generation Fuel strings associated with wind
power.
"""
fuel_type_eia923_gen_fuel_hydro_strings = ['wat', ]
"""list: The list of EIA 923 Generation Fuel strings associated with hydro
power.
"""
fuel_type_eia923_gen_fuel_nuclear_strings = ['nuc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with nuclear
power.
"""
fuel_type_eia923_gen_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'msw', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds']
"""list: The list of EIA 923 Generation Fuel strings associated with solid waste
fuel.
"""
fuel_type_eia923_gen_fuel_other_strings = ['geo', 'mwh', 'oth', 'pur', 'wh', ]
"""list: The list of EIA 923 Generation Fuel strings associated with geothermal
power.
"""
fuel_type_eia923_gen_fuel_simple_map = {
'coal': fuel_type_eia923_gen_fuel_coal_strings,
'oil': fuel_type_eia923_gen_fuel_oil_strings,
'gas': fuel_type_eia923_gen_fuel_gas_strings,
'solar': fuel_type_eia923_gen_fuel_solar_strings,
'wind': fuel_type_eia923_gen_fuel_wind_strings,
'hydro': fuel_type_eia923_gen_fuel_hydro_strings,
'nuclear': fuel_type_eia923_gen_fuel_nuclear_strings,
'waste': fuel_type_eia923_gen_fuel_waste_strings,
'other': fuel_type_eia923_gen_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Generation Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# Fuel type strings for EIA 923 boiler fuel table
fuel_type_eia923_boiler_fuel_coal_strings = [
'ant', 'bit', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
coal.
"""
fuel_type_eia923_boiler_fuel_oil_strings = ['dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
oil.
"""
fuel_type_eia923_boiler_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
gas.
"""
fuel_type_eia923_boiler_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
waste.
"""
fuel_type_eia923_boiler_fuel_other_strings = ['oth', 'pur', 'wh', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
other.
"""
fuel_type_eia923_boiler_fuel_simple_map = {
'coal': fuel_type_eia923_boiler_fuel_coal_strings,
'oil': fuel_type_eia923_boiler_fuel_oil_strings,
'gas': fuel_type_eia923_boiler_fuel_gas_strings,
'waste': fuel_type_eia923_boiler_fuel_waste_strings,
'other': fuel_type_eia923_boiler_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Boiler Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# PUDL consolidation of EIA923 AER fuel type strings into same categories as
# 'energy_source_eia923' plus additional renewable and nuclear categories.
# These classifications are not currently used, as the EIA fuel type and energy
# source designations provide more detailed information.
aer_coal_strings = ['col', 'woc', 'pc']
"""list: A list of EIA 923 AER fuel type strings associated with coal.
"""
aer_gas_strings = ['mlg', 'ng', 'oog']
"""list: A list of EIA 923 AER fuel type strings associated with gas.
"""
aer_oil_strings = ['dfo', 'rfo', 'woo']
"""list: A list of EIA 923 AER fuel type strings associated with oil.
"""
aer_solar_strings = ['sun']
"""list: A list of EIA 923 AER fuel type strings associated with solar power.
"""
aer_wind_strings = ['wnd']
"""list: A list of EIA 923 AER fuel type strings associated with wind power.
"""
aer_hydro_strings = ['hps', 'hyc']
"""list: A list of EIA 923 AER fuel type strings associated with hydro power.
"""
aer_nuclear_strings = ['nuc']
"""list: A list of EIA 923 AER fuel type strings associated with nuclear power.
"""
aer_waste_strings = ['www']
"""list: A list of EIA 923 AER fuel type strings associated with waste.
"""
aer_other_strings = ['geo', 'orw', 'oth']
"""list: A list of EIA 923 AER fuel type strings associated with other fuel.
"""
aer_fuel_type_strings = {
'coal': aer_coal_strings,
'gas': aer_gas_strings,
'oil': aer_oil_strings,
'solar': aer_solar_strings,
'wind': aer_wind_strings,
'hydro': aer_hydro_strings,
'nuclear': aer_nuclear_strings,
'waste': aer_waste_strings,
'other': aer_other_strings
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923: A partial aggregation of the reported fuel type codes into
# larger categories used by EIA in, for example,
# the Annual Energy Review (AER). Two or three letter alphanumeric.
# See the Fuel Code table (Table 5), below:
fuel_type_aer_eia923 = {
'SUN': 'Solar PV and thermal',
'COL': 'Coal',
'DFO': 'Distillate Petroleum',
'GEO': 'Geothermal',
'HPS': 'Hydroelectric Pumped Storage',
'HYC': 'Hydroelectric Conventional',
'MLG': 'Biogenic Municipal Solid Waste and Landfill Gas',
'NG': 'Natural Gas',
'NUC': 'Nuclear',
'OOG': 'Other Gases',
'ORW': 'Other Renewables',
'OTH': 'Other (including nonbiogenic MSW)',
'PC': 'Petroleum Coke',
'RFO': 'Residual Petroleum',
'WND': 'Wind',
'WOC': 'Waste Coal',
'WOO': 'Waste Oil',
'WWW': 'Wood and Wood Waste'
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
fuel_type_eia860_coal_strings = ['ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc',
'sub', 'wc', 'coal', 'petroleum coke', 'col',
'woc']
"""list: A list of strings from EIA 860 associated with fuel type coal.
"""
fuel_type_eia860_oil_strings = ['dfo', 'jf', 'ker', 'rfo', 'wo', 'woo',
'petroleum']
"""list: A list of strings from EIA 860 associated with fuel type oil.
"""
fuel_type_eia860_gas_strings = ['bfg', 'lfg', 'mlg', 'ng', 'obg', 'og', 'pg',
'sgc', 'sgp', 'natural gas', 'other gas',
'oog', 'sg']
"""list: A list of strings from EIA 860 associated with fuel type gas.
"""
fuel_type_eia860_solar_strings = ['sun', 'solar']
"""list: A list of strings from EIA 860 associated with solar power.
"""
fuel_type_eia860_wind_strings = ['wnd', 'wind', 'wt']
"""list: A list of strings from EIA 860 associated with wind power.
"""
fuel_type_eia860_hydro_strings = ['wat', 'hyc', 'hps', 'hydro']
"""list: A list of strings from EIA 860 associated with hydro power.
"""
fuel_type_eia860_nuclear_strings = ['nuc', 'nuclear']
"""list: A list of strings from EIA 860 associated with nuclear power.
"""
fuel_type_eia860_waste_strings = ['ab', 'blq', 'bm', 'msb', 'msn', 'obl',
'obs', 'slw', 'tdf', 'wdl', 'wds', 'biomass',
'msw', 'www']
"""list: A list of strings from EIA 860 associated with fuel type waste.
"""
fuel_type_eia860_other_strings = ['mwh', 'oth', 'pur', 'wh', 'geo', 'none',
'orw', 'other']
"""list: A list of strings from EIA 860 associated with fuel type other.
"""
fuel_type_eia860_simple_map = {
'coal': fuel_type_eia860_coal_strings,
'oil': fuel_type_eia860_oil_strings,
'gas': fuel_type_eia860_gas_strings,
'solar': fuel_type_eia860_solar_strings,
'wind': fuel_type_eia860_wind_strings,
'hydro': fuel_type_eia860_hydro_strings,
'nuclear': fuel_type_eia860_nuclear_strings,
'waste': fuel_type_eia860_waste_strings,
'other': fuel_type_eia860_other_strings,
}
"""dict: A dictionary mapping EIA 860 fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923/860: Lumping of energy source categories.
energy_source_eia_simple_map = {
'coal': ['ANT', 'BIT', 'LIG', 'PC', 'SUB', 'WC', 'RC'],
'oil': ['DFO', 'JF', 'KER', 'RFO', 'WO'],
'gas': ['BFG', 'LFG', 'NG', 'OBG', 'OG', 'PG', 'SG', 'SGC', 'SGP'],
'solar': ['SUN'],
'wind': ['WND'],
'hydro': ['WAT'],
'nuclear': ['NUC'],
'waste': ['AB', 'BLQ', 'MSW', 'OBL', 'OBS', 'SLW', 'TDF', 'WDL', 'WDS'],
'other': ['GEO', 'MWH', 'OTH', 'PUR', 'WH']
}
"""dict: A dictionary mapping EIA fuel types (keys) to fuel codes (values).
"""
fuel_group_eia923_simple_map = {
'coal': ['coal', 'petroleum coke'],
'oil': ['petroleum'],
'gas': ['natural gas', 'other gas']
}
"""dict: A dictionary mapping EIA 923 simple fuel types("oil", "coal", "gas")
(keys) to fuel types (values).
"""
# EIA 923: The type of physical units fuel consumption is reported in.
# All consumption is reported in short tons for solids, thousands of cubic
# feet for gases, or barrels for liquids.
fuel_units_eia923 = {
'mcf': 'Thousands of cubic feet (for gases)',
'short_tons': 'Short tons (for solids)',
'barrels': 'Barrels (for liquids)'
}
"""dict: A dictionary mapping EIA 923 fuel units (keys) to fuel unit
descriptions (values).
"""
# EIA 923: Designates the purchase type under which receipts occurred
# in the reporting month. One or two character alphanumeric:
contract_type_eia923 = {
'C': 'Contract - Fuel received under a purchase order or contract with a term of one year or longer. Contracts with a shorter term are considered spot purchases ',
'NC': 'New Contract - Fuel received under a purchase order or contract with duration of one year or longer, under which deliveries were first made during the reporting month',
'N': 'New Contract - see NC code. This abbreviation existed only in 2008 before being replaced by NC.',
'S': 'Spot Purchase',
'T': 'Tolling Agreement – Fuel received under a tolling agreement (bartering arrangement of fuel for generation)'
}
"""dict: A dictionary mapping EIA 923 contract codes (keys) to contract
descriptions (values) for each month in the Fuel Receipts and Costs table.
"""
# EIA 923: The fuel code associated with the fuel receipt.
# Defined on Page 7 of EIA Form 923
# Two or three character alphanumeric:
energy_source_eia923 = {
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BM': 'Biomass',
'BIT': 'Bituminous Coal',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LIG': 'Lignite Coal',
'NG': 'Natural Gas',
'PC': 'Petroleum Coke',
    'PG': 'Gaseous Propane',
'OG': 'Other Gas',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SG': 'Synthesis Gas from Petroleum Coke',
'SGP': 'Petroleum Coke Derived Synthesis Gas',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SUB': 'Subbituminous Coal',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
    'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.',
}
"""dict: A dictionary mapping fuel codes (keys) to fuel descriptions (values)
for each fuel receipt from the EIA 923 Fuel Receipts and Costs table.
"""
# EIA 923 Fuel Group, from Page 7 EIA Form 923
# Groups fossil fuel energy sources into the fuel groups used in the
# Electric Power Monthly: Coal, Natural Gas, Petroleum, Petroleum Coke.
fuel_group_eia923 = (
'coal',
'natural_gas',
'petroleum',
'petroleum_coke',
'other_gas'
)
"""tuple: A tuple containing EIA 923 fuel groups.
"""
# EIA 923: Type of Coal Mine as defined on Page 7 of EIA Form 923
coalmine_type_eia923 = {
'P': 'Preparation Plant',
'S': 'Surface',
'U': 'Underground',
'US': 'Both an underground and surface mine with most coal extracted from underground',
'SU': 'Both an underground and surface mine with most coal extracted from surface',
}
"""dict: A dictionary mapping EIA 923 coal mine type codes (keys) to
descriptions (values).
"""
# EIA 923: State abbreviation related to coal mine location.
# Country abbreviations are also used in this category, but they are
# non-standard because of collisions with US state names. Instead of using
# the provided non-standard names, we convert to ISO-3166-1 three letter
# country codes https://en.wikipedia.org/wiki/ISO_3166-1_alpha-3
coalmine_country_eia923 = {
'AU': 'AUS', # Australia
'CL': 'COL', # Colombia
'CN': 'CAN', # Canada
'IS': 'IDN', # Indonesia
'PL': 'POL', # Poland
'RS': 'RUS', # Russia
'UK': 'GBR', # United Kingdom of Great Britain
'VZ': 'VEN', # Venezuela
'OT': 'other_country',
'IM': 'unknown'
}
"""dict: A dictionary mapping coal mine country codes (keys) to ISO-3166-1 three
letter country codes (values).
"""
# EIA 923: Mode for the longest / second longest distance.
transport_modes_eia923 = {
'RR': 'Rail: Shipments of fuel moved to consumers by rail \
(private or public/commercial). Included is coal hauled to or \
away from a railroad siding by truck if the truck did not use public\
roads.',
'RV': 'River: Shipments of fuel moved to consumers via river by barge. \
Not included are shipments to Great Lakes coal loading docks, \
tidewater piers, or coastal ports.',
'GL': 'Great Lakes: Shipments of coal moved to consumers via \
the Great Lakes. These shipments are moved via the Great Lakes \
coal loading docks, which are identified by name and location as \
follows: Conneaut Coal Storage & Transfer, Conneaut, Ohio; \
NS Coal Dock (Ashtabula Coal Dock), Ashtabula, Ohio; \
Sandusky Coal Pier, Sandusky, Ohio; Toledo Docks, Toledo, Ohio; \
KCBX Terminals Inc., Chicago, Illinois; \
Superior Midwest Energy Terminal, Superior, Wisconsin',
'TP': 'Tidewater Piers and Coastal Ports: Shipments of coal moved to \
Tidewater Piers and Coastal Ports for further shipments to consumers \
via coastal water or ocean. The Tidewater Piers and Coastal Ports \
are identified by name and location as follows: Dominion Terminal \
Associates, Newport News, Virginia; McDuffie Coal Terminal, Mobile, \
Alabama; IC Railmarine Terminal, Convent, Louisiana; \
International Marine Terminals, Myrtle Grove, Louisiana; \
Cooper/T. Smith Stevedoring Co. Inc., Darrow, Louisiana; \
Seward Terminal Inc., Seward, Alaska; Los Angeles Export Terminal, \
Inc., Los Angeles, California; Levin-Richmond Terminal Corp., \
Richmond, California; Baltimore Terminal, Baltimore, Maryland; \
Norfolk Southern Lamberts Point P-6, Norfolk, Virginia; \
Chesapeake Bay Piers, Baltimore, Maryland; Pier IX Terminal Company, \
Newport News, Virginia; Electro-Coal Transport Corp., Davant, \
Louisiana',
'WT': 'Water: Shipments of fuel moved to consumers by other waterways.',
'TR': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'tr': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'TC': 'Tramway/Conveyor: Shipments of fuel moved to consumers \
by tramway or conveyor.',
'SP': 'Slurry Pipeline: Shipments of coal moved to consumers \
by slurry pipeline.',
'PL': 'Pipeline: Shipments of fuel moved to consumers by pipeline'
}
"""dict: A dictionary mapping primary and secondary transportation mode codes
(keys) to descriptions (values).
"""
# we need to include all of the columns which we want to keep for either the
# entity or annual tables. The order here matters. We need to harvest the plant
# location before harvesting the location of the utilities, for example.
entities = {
'plants': [
# base cols
['plant_id_eia'],
# static cols
['balancing_authority_code_eia', 'balancing_authority_name_eia',
'city', 'county', 'ferc_cogen_status',
'ferc_exempt_wholesale_generator', 'ferc_small_power_producer',
'grid_voltage_2_kv', 'grid_voltage_3_kv', 'grid_voltage_kv',
'iso_rto_code', 'latitude', 'longitude', 'service_area',
'plant_name_eia', 'primary_purpose_naics_id',
'sector_id', 'sector_name', 'state', 'street_address', 'zip_code'],
# annual cols
['ash_impoundment', 'ash_impoundment_lined', 'ash_impoundment_status',
'datum', 'energy_storage', 'ferc_cogen_docket_no', 'water_source',
'ferc_exempt_wholesale_generator_docket_no',
'ferc_small_power_producer_docket_no',
'liquefied_natural_gas_storage',
'natural_gas_local_distribution_company', 'natural_gas_storage',
'natural_gas_pipeline_name_1', 'natural_gas_pipeline_name_2',
'natural_gas_pipeline_name_3', 'nerc_region', 'net_metering',
'pipeline_notes', 'regulatory_status_code',
'transmission_distribution_owner_id',
'transmission_distribution_owner_name',
'transmission_distribution_owner_state', 'utility_id_eia'],
# need type fixing
{},
],
'generators': [
# base cols
['plant_id_eia', 'generator_id'],
# static cols
['prime_mover_code', 'duct_burners', 'operating_date',
'topping_bottoming_code', 'solid_fuel_gasification',
'pulverized_coal_tech', 'fluidized_bed_tech', 'subcritical_tech',
'supercritical_tech', 'ultrasupercritical_tech', 'stoker_tech',
'other_combustion_tech', 'bypass_heat_recovery',
'rto_iso_lmp_node_id', 'rto_iso_location_wholesale_reporting_id',
'associated_combined_heat_power', 'original_planned_operating_date',
'operating_switch', 'previously_canceled'],
# annual cols
['capacity_mw', 'fuel_type_code_pudl', 'multiple_fuels',
'ownership_code', 'owned_by_non_utility', 'deliver_power_transgrid',
'summer_capacity_mw', 'winter_capacity_mw', 'summer_capacity_estimate',
'winter_capacity_estimate', 'minimum_load_mw', 'distributed_generation',
'technology_description', 'reactive_power_output_mvar',
'energy_source_code_1', 'energy_source_code_2',
'energy_source_code_3', 'energy_source_code_4',
'energy_source_code_5', 'energy_source_code_6',
'energy_source_1_transport_1', 'energy_source_1_transport_2',
'energy_source_1_transport_3', 'energy_source_2_transport_1',
'energy_source_2_transport_2', 'energy_source_2_transport_3',
'startup_source_code_1', 'startup_source_code_2',
'startup_source_code_3', 'startup_source_code_4',
'time_cold_shutdown_full_load_code', 'syncronized_transmission_grid',
'turbines_num', 'operational_status_code', 'operational_status',
'planned_modifications', 'planned_net_summer_capacity_uprate_mw',
'planned_net_winter_capacity_uprate_mw', 'planned_new_capacity_mw',
'planned_uprate_date', 'planned_net_summer_capacity_derate_mw',
'planned_net_winter_capacity_derate_mw', 'planned_derate_date',
'planned_new_prime_mover_code', 'planned_energy_source_code_1',
'planned_repower_date', 'other_planned_modifications',
'other_modifications_date', 'planned_retirement_date',
'carbon_capture', 'cofire_fuels', 'switch_oil_gas',
'turbines_inverters_hydrokinetics', 'nameplate_power_factor',
'uprate_derate_during_year', 'uprate_derate_completed_date',
'current_planned_operating_date', 'summer_estimated_capability_mw',
'winter_estimated_capability_mw', 'retirement_date',
'utility_id_eia', 'data_source'],
# need type fixing
{}
],
# utilities must come after plants. plant location needs to be
# removed before the utility locations are compiled
'utilities': [
# base cols
['utility_id_eia'],
# static cols
['utility_name_eia'],
# annual cols
['street_address', 'city', 'state', 'zip_code', 'entity_type',
'plants_reported_owner', 'plants_reported_operator',
'plants_reported_asset_manager', 'plants_reported_other_relationship',
'attention_line', 'address_2', 'zip_code_4',
'contact_firstname', 'contact_lastname', 'contact_title',
'contact_firstname_2', 'contact_lastname_2', 'contact_title_2',
'phone_extension_1', 'phone_extension_2', 'phone_number_1',
'phone_number_2'],
# need type fixing
{'utility_id_eia': 'int64', }, ],
'boilers': [
# base cols
['plant_id_eia', 'boiler_id'],
# static cols
['prime_mover_code'],
# annual cols
[],
# need type fixing
{},
]
}
"""dict: A dictionary containing table name strings (keys) and lists of columns
to keep for those tables (values).
"""
epacems_tables = ("hourly_emissions_epacems")
"""tuple: A tuple containing tables of EPA CEMS data to pull into PUDL.
"""
files_dict_epaipm = {
'transmission_single_epaipm': '*table_3-21*',
'transmission_joint_epaipm': '*transmission_joint_ipm*',
'load_curves_epaipm': '*table_2-2_*',
'plant_region_map_epaipm': '*needs_v6*',
}
"""dict: A dictionary of EPA IPM tables and strings that files of those tables
contain.
"""
epaipm_url_ext = {
'transmission_single_epaipm': 'table_3-21_annual_transmission_capabilities_of_u.s._model_regions_in_epa_platform_v6_-_2021.xlsx',
'load_curves_epaipm': 'table_2-2_load_duration_curves_used_in_epa_platform_v6.xlsx',
'plant_region_map_epaipm': 'needs_v6_november_2018_reference_case_0.xlsx',
}
"""dict: A dictionary of EPA IPM tables and associated URLs extensions for
downloading that table's data.
"""
epaipm_region_names = [
'ERC_PHDL', 'ERC_REST', 'ERC_FRNT', 'ERC_GWAY', 'ERC_WEST',
'FRCC', 'NENG_CT', 'NENGREST', 'NENG_ME', 'MIS_AR', 'MIS_IL',
'MIS_INKY', 'MIS_IA', 'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI',
'MIS_D_MS', 'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA',
'MIS_WUMS', 'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D', 'NY_Z_F',
'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K', 'PJM_West', 'PJM_AP', 'PJM_ATSI',
'PJM_COMD', 'PJM_Dom', 'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC',
'PJM_WMAC', 'S_C_KY', 'S_C_TVA', 'S_D_AECI', 'S_SOU', 'S_VACA',
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE',
'WECC_AZ', 'WEC_BANC', 'WECC_CO', 'WECC_ID', 'WECC_IID',
'WEC_LADW', 'WECC_MT', 'WECC_NM', 'WEC_CALN', 'WECC_NNV',
'WECC_PNW', 'WEC_SDGE', 'WECC_SCE', 'WECC_SNV', 'WECC_UT',
'WECC_WY', 'CN_AB', 'CN_BC', 'CN_NL', 'CN_MB', 'CN_NB', 'CN_NF',
'CN_NS', 'CN_ON', 'CN_PE', 'CN_PQ', 'CN_SK',
]
"""list: A list of EPA IPM region names."""
epaipm_region_aggregations = {
'PJM': [
'PJM_AP', 'PJM_ATSI', 'PJM_COMD', 'PJM_Dom',
'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC', 'PJM_WMAC'
],
'NYISO': [
'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D',
'NY_Z_F', 'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K'
],
'ISONE': ['NENG_CT', 'NENGREST', 'NENG_ME'],
'MISO': [
'MIS_AR', 'MIS_IL', 'MIS_INKY', 'MIS_IA',
'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI', 'MIS_D_MS',
'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA', 'MIS_WUMS'
],
'SPP': [
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE'
],
'WECC_NW': [
'WECC_CO', 'WECC_ID', 'WECC_MT', 'WECC_NNV',
'WECC_PNW', 'WECC_UT', 'WECC_WY'
]
}
"""
dict: A dictionary containing aggregated EPA IPM regions (keys) and lists of
    the constituent IPM region abbreviations (values).
"""
epaipm_rename_dict = {
'transmission_single_epaipm': {
'From': 'region_from',
'To': 'region_to',
'Capacity TTC (MW)': 'firm_ttc_mw',
'Energy TTC (MW)': 'nonfirm_ttc_mw',
'Transmission Tariff (2016 mills/kWh)': 'tariff_mills_kwh',
},
'load_curves_epaipm': {
'day': 'day_of_year',
'region': 'region_id_epaipm',
},
'plant_region_map_epaipm': {
'ORIS Plant Code': 'plant_id_eia',
'Region Name': 'region',
},
}
"""
dict: A dictionary of dictionaries containing EPA IPM tables (keys) and items
    for each table to be renamed along with the replacement name (values).
"""
glue_pudl_tables = ('plants_eia', 'plants_ferc', 'plants', 'utilities_eia',
                    'utilities_ferc', 'utilities', 'utility_plant_assn')
"""tuple: A tuple of the PUDL glue tables linking EIA and FERC plant and
utility entities."""
data_sources = (
'eia860',
'eia861',
'eia923',
'epacems',
'epaipm',
'ferc1',
'ferc714',
# 'pudl'
)
"""tuple: A tuple containing the data sources we are able to pull into PUDL."""
# All the years for which we ought to be able to download these data sources
data_years = {
'eia860': tuple(range(2001, 2020)),
'eia861': tuple(range(1990, 2020)),
'eia923': tuple(range(2001, 2020)),
'epacems': tuple(range(1995, 2021)),
'epaipm': (None, ),
'ferc1': tuple(range(1994, 2020)),
'ferc714': (None, ),
}
"""
dict: A dictionary of data sources (keys) and tuples containing the years
that we expect to be able to download for each data source (values).
"""
# The full set of years we currently expect to be able to ingest, per source:
working_partitions = {
'eia860': {
'years': tuple(range(2004, 2020))
},
'eia860m': {
'year_month': '2020-11'
},
'eia861': {
'years': tuple(range(2001, 2020))
},
'eia923': {
'years': tuple(range(2001, 2020))
},
'epacems': {
'years': tuple(range(1995, 2021)),
'states': tuple(cems_states.keys())},
'ferc1': {
'years': tuple(range(1994, 2020))
},
'ferc714': {},
}
"""
dict: A dictionary of data sources (keys) and dictionaries (values) that map
    partition type names (sub-keys) to the partitions (sub-values), such as
    the tuples of years for each data source that can be ingested into PUDL.
"""
pudl_tables = {
'eia860': eia860_pudl_tables,
'eia861': (
"service_territory_eia861",
"balancing_authority_eia861",
"sales_eia861",
"advanced_metering_infrastructure_eia861",
"demand_response_eia861",
"demand_side_management_eia861",
"distributed_generation_eia861",
"distribution_systems_eia861",
"dynamic_pricing_eia861",
"energy_efficiency_eia861",
"green_pricing_eia861",
"mergers_eia861",
"net_metering_eia861",
"non_net_metering_eia861",
"operational_data_eia861",
"reliability_eia861",
"utility_data_eia861",
),
'eia923': eia923_pudl_tables,
'epacems': epacems_tables,
'epaipm': epaipm_pudl_tables,
'ferc1': ferc1_pudl_tables,
'ferc714': (
"respondent_id_ferc714",
"id_certification_ferc714",
"gen_plants_ba_ferc714",
"demand_monthly_ba_ferc714",
"net_energy_load_ba_ferc714",
"adjacency_ba_ferc714",
"interchange_ba_ferc714",
"lambda_hourly_ba_ferc714",
"lambda_description_ferc714",
"description_pa_ferc714",
"demand_forecast_pa_ferc714",
"demand_hourly_pa_ferc714",
),
'glue': glue_pudl_tables,
}
"""
dict: A dictionary containing data sources (keys) and the list of associated
tables from that datasource that can be pulled into PUDL (values).
"""
base_data_urls = {
'eia860': 'https://www.eia.gov/electricity/data/eia860',
'eia861': 'https://www.eia.gov/electricity/data/eia861/zip',
'eia923': 'https://www.eia.gov/electricity/data/eia923',
'epacems': 'ftp://newftp.epa.gov/dmdnload/emissions/hourly/monthly',
'ferc1': 'ftp://eforms1.ferc.gov/f1allyears',
'ferc714': 'https://www.ferc.gov/docs-filing/forms/form-714/data',
'ferceqr': 'ftp://eqrdownload.ferc.gov/DownloadRepositoryProd/BulkNew/CSV',
'msha': 'https://arlweb.msha.gov/OpenGovernmentData/DataSets',
'epaipm': 'https://www.epa.gov/sites/production/files/2019-03',
'pudl': 'https://catalyst.coop/pudl/'
}
"""
dict: A dictionary containing data sources (keys) and their base data URLs
(values).
"""
need_fix_inting = {
'plants_steam_ferc1': ('construction_year', 'installation_year'),
'plants_small_ferc1': ('construction_year', 'ferc_license_id'),
'plants_hydro_ferc1': ('construction_year', 'installation_year',),
'plants_pumped_storage_ferc1': ('construction_year', 'installation_year',),
'hourly_emissions_epacems': ('facility_id', 'unit_id_epa',),
}
"""
dict: A dictionary containing tables (keys) and tuples of the names of their
    integer-type columns whose null values need fixing (values).
"""
contributors = {
"catalyst-cooperative": {
"title": "Catalyst Cooperative",
"path": "https://catalyst.coop/",
"role": "publisher",
"email": "<EMAIL>",
"organization": "Catalyst Cooperative",
},
"zane-selvans": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://amateurearthling.org/",
"role": "wrangler",
"organization": "Catalyst Cooperative"
},
"christina-gosnell": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"steven-winter": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"alana-wilson": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"karl-dunkle-werner": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://karldw.org/",
"role": "contributor",
"organization": "UC Berkeley",
},
'greg-schivley': {
"title": "<NAME>",
"role": "contributor",
},
}
"""
dict: A dictionary of dictionaries containing contributor identifiers (keys)
    and their attributes (values).
"""
data_source_info = {
"eia860": {
"title": "EIA Form 860",
"path": "https://www.eia.gov/electricity/data/eia860/",
},
"eia861": {
"title": "EIA Form 861",
"path": "https://www.eia.gov/electricity/data/eia861/",
},
"eia923": {
"title": "EIA Form 923",
"path": "https://www.eia.gov/electricity/data/eia923/",
},
"eiawater": {
"title": "EIA Water Use for Power",
"path": "https://www.eia.gov/electricity/data/water/",
},
"epacems": {
"title": "EPA Air Markets Program Data",
"path": "https://ampd.epa.gov/ampd/",
},
"epaipm": {
"title": "EPA Integrated Planning Model",
"path": "https://www.epa.gov/airmarkets/national-electric-energy-data-system-needs-v6",
},
"ferc1": {
"title": "FERC Form 1",
"path": "https://www.ferc.gov/docs-filing/forms/form-1/data.asp",
},
"ferc714": {
"title": "FERC Form 714",
"path": "https://www.ferc.gov/docs-filing/forms/form-714/data.asp",
},
"ferceqr": {
"title": "FERC Electric Quarterly Report",
"path": "https://www.ferc.gov/docs-filing/eqr.asp",
},
"msha": {
"title": "Mining Safety and Health Administration",
"path": "https://www.msha.gov/mine-data-retrieval-system",
},
"phmsa": {
"title": "Pipelines and Hazardous Materials Safety Administration",
"path": "https://www.phmsa.dot.gov/data-and-statistics/pipeline/data-and-statistics-overview",
},
"pudl": {
"title": "The Public Utility Data Liberation Project (PUDL)",
"path": "https://catalyst.coop/pudl/",
"email": "<EMAIL>",
},
}
"""
dict: A dictionary of dictionaries containing data sources (keys) and
    associated attributes (values).
"""
contributors_by_source = {
"pudl": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
"karl-dunkle-werner",
],
"eia923": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
],
"eia860": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"ferc1": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"epacems": [
"catalyst-cooperative",
"karl-dunkle-werner",
"zane-selvans",
],
"epaipm": [
"greg-schivley",
],
}
"""
dict: A dictionary of data sources (keys) and lists of contributors (values).
"""
licenses = {
"cc-by-4.0": {
"name": "CC-BY-4.0",
"title": "Creative Commons Attribution 4.0",
"path": "https://creativecommons.org/licenses/by/4.0/"
},
"us-govt": {
"name": "other-pd",
"title": "U.S. Government Work",
"path": "http://www.usa.gov/publicdomain/label/1.0/",
}
}
"""
dict: A dictionary of dictionaries containing license types and their
attributes.
"""
output_formats = [
'sqlite',
'parquet',
'datapkg',
]
"""list: A list of types of PUDL output formats."""
keywords_by_data_source = {
'pudl': [
'us', 'electricity',
],
'eia860': [
'electricity', 'electric', 'boiler', 'generator', 'plant', 'utility',
'fuel', 'coal', 'natural gas', 'prime mover', 'eia860', 'retirement',
'capacity', 'planned', 'proposed', 'energy', 'hydro', 'solar', 'wind',
'nuclear', 'form 860', 'eia', 'annual', 'gas', 'ownership', 'steam',
'turbine', 'combustion', 'combined cycle', 'eia',
'energy information administration'
],
'eia923': [
'fuel', 'boiler', 'generator', 'plant', 'utility', 'cost', 'price',
'natural gas', 'coal', 'eia923', 'energy', 'electricity', 'form 923',
'receipts', 'generation', 'net generation', 'monthly', 'annual', 'gas',
'fuel consumption', 'MWh', 'energy information administration', 'eia',
'mercury', 'sulfur', 'ash', 'lignite', 'bituminous', 'subbituminous',
'heat content'
],
'epacems': [
'epa', 'us', 'emissions', 'pollution', 'ghg', 'so2', 'co2', 'sox',
'nox', 'load', 'utility', 'electricity', 'plant', 'generator', 'unit',
'generation', 'capacity', 'output', 'power', 'heat content', 'mmbtu',
        'steam', 'cems', 'continuous emissions monitoring system', 'hourly',
'environmental protection agency', 'ampd', 'air markets program data',
],
'ferc1': [
'electricity', 'electric', 'utility', 'plant', 'steam', 'generation',
'cost', 'expense', 'price', 'heat content', 'ferc', 'form 1',
'federal energy regulatory commission', 'capital', 'accounting',
'depreciation', 'finance', 'plant in service', 'hydro', 'coal',
'natural gas', 'gas', 'opex', 'capex', 'accounts', 'investment',
'capacity'
],
'ferc714': [
'electricity', 'electric', 'utility', 'planning area', 'form 714',
'balancing authority', 'demand', 'system lambda', 'ferc',
'federal energy regulatory commission', "hourly", "generation",
"interchange", "forecast", "load", "adjacency", "plants",
],
'epaipm': [
'epaipm', 'integrated planning',
]
}
"""dict: A dictionary of datasets (keys) and keywords (values). """
ENTITY_TYPE_DICT = {
'M': 'Municipal',
'C': 'Cooperative',
'R': 'Retail Power Marketer',
'I': 'Investor Owned',
'P': 'Political Subdivision',
'T': 'Transmission',
'S': 'State',
'W': 'Wholesale Power Marketer',
'F': 'Federal',
'A': 'Municipal Mktg Authority',
'G': 'Community Choice Aggregator',
'D': 'Nonutility DSM Administrator',
'B': 'Behind the Meter',
'Q': 'Independent Power Producer',
'IND': 'Industrial',
'COM': 'Commercial',
'PR': 'Private', # Added by AES for OD table (Arbitrary moniker)
'PO': 'Power Marketer', # Added by AES for OD table
'U': 'Unknown', # Added by AES for OD table
'O': 'Other' # Added by AES for OD table
}
# Confirm these designations -- educated guess based on the form instructions
MOMENTARY_INTERRUPTION_DEF = { # Added by AES for R table
'L': 'Less than 1 minute',
'F': 'Less than or equal to 5 minutes',
'O': 'Other',
}
# https://www.eia.gov/electricity/data/eia411/#tabs_NERC-3
RECOGNIZED_NERC_REGIONS = [
'BASN', # ASSESSMENT AREA Basin (WECC)
'CALN', # ASSESSMENT AREA California (WECC)
'CALS', # ASSESSMENT AREA California (WECC)
'DSW', # ASSESSMENT AREA Desert Southwest (WECC)
'ASCC', # Alaska
'ISONE', # ISO New England (NPCC)
'ERCOT', # lumped under TRE in 2017 Form instructions
'NORW', # ASSESSMENT AREA Northwest (WECC)
'NYISO', # ISO (NPCC)
'PJM', # RTO
'ROCK', # ASSESSMENT AREA Rockies (WECC)
'ECAR', # OLD RE Now part of RFC and SERC
'FRCC', # included in 2017 Form instructions, recently joined with SERC
'HICC', # Hawaii
'MAAC', # OLD RE Now part of RFC
'MAIN', # OLD RE Now part of SERC, RFC, MRO
'MAPP', # OLD/NEW RE Became part of MRO, resurfaced in 2010
'MRO', # RE included in 2017 Form instructions
'NPCC', # RE included in 2017 Form instructions
'RFC', # RE included in 2017 Form instructions
'SERC', # RE included in 2017 Form instructions
'SPP', # RE included in 2017 Form instructions
'TRE', # RE included in 2017 Form instructions (included ERCOT)
'WECC', # RE included in 2017 Form instructions
'WSCC', # OLD RE pre-2002 version of WECC
'MISO', # ISO unclear whether technically a regional entity, but lots of entries
'ECAR_MAAC',
'MAPP_WECC',
'RFC_SERC',
'SPP_WECC',
'MRO_WECC',
'ERCOT_SPP',
'SPP_TRE',
'ERCOT_TRE',
'MISO_TRE',
'VI', # Virgin Islands
'GU', # Guam
'PR', # Puerto Rico
'AS', # American Samoa
'UNK',
]
CUSTOMER_CLASSES = [
"commercial",
"industrial",
"direct_connection",
"other",
"residential",
"total",
"transportation"
]
TECH_CLASSES = [
    'backup',  # Where is this used? It was removed from the DG table because it is not a real component.
'chp_cogen',
'combustion_turbine',
'fuel_cell',
'hydro',
'internal_combustion',
'other',
'pv',
'steam',
'storage_pv',
'all_storage', # need 'all' as prefix so as not to confuse with other storage category
'total',
'virtual_pv',
'wind',
]
REVENUE_CLASSES = [
'retail_sales',
'unbundled',
'delivery_customers',
'sales_for_resale',
'credits_or_adjustments',
'other',
'transmission',
'total',
]
RELIABILITY_STANDARDS = [
'ieee_standard',
'other_standard'
]
FUEL_CLASSES = [
'gas',
'oil',
'other',
'renewable',
'water',
'wind',
'wood',
]
RTO_CLASSES = [
'caiso',
'ercot',
'pjm',
'nyiso',
'spp',
'miso',
'isone',
'other'
]
ESTIMATED_OR_ACTUAL = {'E': 'estimated', 'A': 'actual'}
TRANSIT_TYPE_DICT = {
'CV': 'conveyer',
'PL': 'pipeline',
'RR': 'railroad',
'TK': 'truck',
'WA': 'water',
'UN': 'unknown',
}
"""dict: A dictionary of datasets (keys) and keywords (values). """
column_dtypes = {
"ferc1": { # Obviously this is not yet a complete list...
"construction_year": pd.Int64Dtype(),
"installation_year": pd.Int64Dtype(),
"plant_id_ferc1": pd.Int64Dtype(),
"plant_id_pudl": pd.Int64Dtype(),
"report_date": "datetime64[ns]",
"report_year": pd.Int64Dtype(),
"utility_id_ferc1": pd.Int64Dtype(),
"utility_id_pudl": pd.Int64Dtype(),
},
"ferc714": { # INCOMPLETE
"demand_mwh": float,
"demand_annual_mwh": float,
"eia_code": pd.Int64Dtype(),
"peak_demand_summer_mw": float,
"peak_demand_winter_mw": float,
"report_date": "datetime64[ns]",
"respondent_id_ferc714": pd.Int64Dtype(),
"respondent_name_ferc714": pd.StringDtype(),
"respondent_type": pd.CategoricalDtype(categories=[
"utility", "balancing_authority",
]),
"timezone": pd.CategoricalDtype(categories=[
"America/New_York", "America/Chicago", "America/Denver",
"America/Los_Angeles", "America/Anchorage", "Pacific/Honolulu"]),
"utc_datetime": "datetime64[ns]",
},
"epacems": {
'state': pd.StringDtype(),
'plant_id_eia': pd.Int64Dtype(), # Nullable Integer
'unitid': pd.StringDtype(),
'operating_datetime_utc': "datetime64[ns]",
'operating_time_hours': float,
'gross_load_mw': float,
'steam_load_1000_lbs': float,
'so2_mass_lbs': float,
'so2_mass_measurement_code': pd.StringDtype(),
'nox_rate_lbs_mmbtu': float,
'nox_rate_measurement_code': pd.StringDtype(),
'nox_mass_lbs': float,
'nox_mass_measurement_code': pd.StringDtype(),
'co2_mass_tons': float,
'co2_mass_measurement_code': pd.StringDtype(),
'heat_content_mmbtu': float,
'facility_id': pd.Int64Dtype(), # Nullable Integer
'unit_id_epa': pd.Int64Dtype(), # Nullable Integer
},
"eia": {
'actual_peak_demand_savings_mw': float, # Added by AES for DR table
'address_2': pd.StringDtype(), # Added by AES for 860 utilities table
'advanced_metering_infrastructure': pd.Int64Dtype(), # Added by AES for AMI table
# Added by AES for UD misc table
'alternative_fuel_vehicle_2_activity': pd.BooleanDtype(),
'alternative_fuel_vehicle_activity': pd.BooleanDtype(),
'annual_indirect_program_cost': float,
'annual_total_cost': float,
'ash_content_pct': float,
'ash_impoundment': pd.BooleanDtype(),
'ash_impoundment_lined': pd.BooleanDtype(),
# TODO: convert this field to more descriptive words
'ash_impoundment_status': pd.StringDtype(),
'associated_combined_heat_power': pd.BooleanDtype(),
'attention_line': pd.StringDtype(),
'automated_meter_reading': pd.Int64Dtype(), # Added by AES for AMI table
'backup_capacity_mw': float, # Added by AES for NNM & DG misc table
'balancing_authority_code_eia': pd.CategoricalDtype(),
'balancing_authority_id_eia': pd.Int64Dtype(),
'balancing_authority_name_eia': pd.StringDtype(),
'bga_source': pd.StringDtype(),
'boiler_id': pd.StringDtype(),
'bunded_activity': pd.BooleanDtype(),
'business_model': pd.CategoricalDtype(categories=[
"retail", "energy_services"]),
'buy_distribution_activity': pd.BooleanDtype(),
'buying_transmission_activity': pd.BooleanDtype(),
'bypass_heat_recovery': pd.BooleanDtype(),
'caidi_w_major_event_days_minus_loss_of_service_minutes': float,
'caidi_w_major_event_dats_minutes': float,
'caidi_wo_major_event_days_minutes': float,
'capacity_mw': float,
'carbon_capture': pd.BooleanDtype(),
'chlorine_content_ppm': float,
'circuits_with_voltage_optimization': pd.Int64Dtype(),
'city': pd.StringDtype(),
'cofire_fuels': pd.BooleanDtype(),
'consumed_by_facility_mwh': float,
'consumed_by_respondent_without_charge_mwh': float,
'contact_firstname': pd.StringDtype(),
'contact_firstname_2': pd.StringDtype(),
'contact_lastname': pd.StringDtype(),
'contact_lastname_2': pd.StringDtype(),
'contact_title': pd.StringDtype(),
'contact_title_2': pd.StringDtype(),
'contract_expiration_date': 'datetime64[ns]',
'contract_type_code': pd.StringDtype(),
'county': pd.StringDtype(),
'county_id_fips': pd.StringDtype(), # Must preserve leading zeroes
'credits_or_adjustments': float,
'critical_peak_pricing': pd.BooleanDtype(),
'critical_peak_rebate': pd.BooleanDtype(),
'current_planned_operating_date': 'datetime64[ns]',
'customers': float,
'customer_class': pd.CategoricalDtype(categories=CUSTOMER_CLASSES),
'customer_incentives_cost': float,
'customer_incentives_incremental_cost': float,
'customer_incentives_incremental_life_cycle_cost': float,
'customer_other_costs_incremental_life_cycle_cost': float,
'daily_digital_access_customers': pd.Int64Dtype(),
'data_observed': pd.BooleanDtype(),
'datum': pd.StringDtype(),
'deliver_power_transgrid': pd.BooleanDtype(),
'delivery_customers': float,
'direct_load_control_customers': pd.Int64Dtype(),
'distributed_generation': pd.BooleanDtype(),
'distributed_generation_owned_capacity_mw': float,
'distribution_activity': pd.BooleanDtype(),
'distribution_circuits': pd.Int64Dtype(),
'duct_burners': pd.BooleanDtype(),
'energy_displaced_mwh': float,
'energy_efficiency_annual_cost': float,
'energy_efficiency_annual_actual_peak_reduction_mw': float,
'energy_efficiency_annual_effects_mwh': float,
'energy_efficiency_annual_incentive_payment': float,
'energy_efficiency_incremental_actual_peak_reduction_mw': float,
'energy_efficiency_incremental_effects_mwh': float,
'energy_savings_estimates_independently_verified': pd.BooleanDtype(),
'energy_savings_independently_verified': pd.BooleanDtype(),
'energy_savings_mwh': float,
'energy_served_ami_mwh': float,
'energy_source_1_transport_1': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_1_transport_2': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_1_transport_3': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_2_transport_1': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_2_transport_2': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_2_transport_3': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_code': pd.StringDtype(),
'energy_source_code_1': pd.StringDtype(),
'energy_source_code_2': pd.StringDtype(),
'energy_source_code_3': pd.StringDtype(),
'energy_source_code_4': pd.StringDtype(),
'energy_source_code_5': pd.StringDtype(),
'energy_source_code_6': pd.StringDtype(),
'energy_storage': pd.BooleanDtype(),
'entity_type': pd.CategoricalDtype(categories=ENTITY_TYPE_DICT.values()),
'estimated_or_actual_capacity_data': pd.CategoricalDtype(categories=ESTIMATED_OR_ACTUAL.values()),
'estimated_or_actual_fuel_data': pd.CategoricalDtype(categories=ESTIMATED_OR_ACTUAL.values()),
'estimated_or_actual_tech_data': pd.CategoricalDtype(categories=ESTIMATED_OR_ACTUAL.values()),
'exchange_energy_delivered_mwh': float,
'exchange_energy_recieved_mwh': float,
'ferc_cogen_docket_no': pd.StringDtype(),
'ferc_cogen_status': pd.BooleanDtype(),
'ferc_exempt_wholesale_generator': pd.BooleanDtype(),
'ferc_exempt_wholesale_generator_docket_no': pd.StringDtype(),
'ferc_small_power_producer': pd.BooleanDtype(),
'ferc_small_power_producer_docket_no': pd.StringDtype(),
'fluidized_bed_tech': pd.BooleanDtype(),
'fraction_owned': float,
'fuel_class': pd.StringDtype(),
'fuel_consumed_for_electricity_mmbtu': float,
'fuel_consumed_for_electricity_units': float,
'fuel_consumed_mmbtu': float,
'fuel_consumed_units': float,
'fuel_cost_per_mmbtu': float,
'fuel_group_code': pd.StringDtype(),
'fuel_group_code_simple': pd.StringDtype(),
'fuel_mmbtu_per_unit': float,
'fuel_pct': float,
'fuel_qty_units': float,
        # Are fuel_type and fuel_type_code the same?
        # fuel_type includes 40 code-like values: WAT, SUN, NUC, etc.
'fuel_type': pd.StringDtype(),
# from the boiler_fuel_eia923 table, there are 30 code-like things, like NG, BIT, LIG
'fuel_type_code': pd.StringDtype(),
'fuel_type_code_aer': pd.StringDtype(),
'fuel_type_code_pudl': pd.StringDtype(),
'furnished_without_charge_mwh': float,
'generation_activity': pd.BooleanDtype(),
# this is a mix of integer-like values (2 or 5) and strings like AUGSF
'generator_id': pd.StringDtype(),
'generators_number': float,
'generators_num_less_1_mw': float,
'green_pricing_revenue': float,
'grid_voltage_2_kv': float,
'grid_voltage_3_kv': float,
'grid_voltage_kv': float,
'heat_content_mmbtu_per_unit': float,
'highest_distribution_voltage_kv': float,
'home_area_network': pd.Int64Dtype(),
'inactive_accounts_included': pd.BooleanDtype(),
'incremental_energy_savings_mwh': float,
'incremental_life_cycle_energy_savings_mwh': float,
'incremental_life_cycle_peak_reduction_mwh': float,
'incremental_peak_reduction_mw': float,
'iso_rto_code': pd.StringDtype(),
'latitude': float,
'liquefied_natural_gas_storage': pd.BooleanDtype(),
'load_management_annual_cost': float,
'load_management_annual_actual_peak_reduction_mw': float,
'load_management_annual_effects_mwh': float,
'load_management_annual_incentive_payment': float,
'load_management_annual_potential_peak_reduction_mw': float,
'load_management_incremental_actual_peak_reduction_mw': float,
'load_management_incremental_effects_mwh': float,
'load_management_incremental_potential_peak_reduction_mw': float,
'longitude': float,
'major_program_changes': pd.BooleanDtype(),
'mercury_content_ppm': float,
'merge_address': pd.StringDtype(),
'merge_city': pd.StringDtype(),
'merge_company': pd.StringDtype(),
'merge_date': 'datetime64[ns]',
'merge_state': pd.StringDtype(),
'mine_id_msha': pd.Int64Dtype(),
'mine_id_pudl': pd.Int64Dtype(),
'mine_name': pd.StringDtype(),
'mine_type_code': pd.StringDtype(),
'minimum_load_mw': float,
'moisture_content_pct': float,
'momentary_interruption_definition': pd.CategoricalDtype(categories=MOMENTARY_INTERRUPTION_DEF.values()),
'multiple_fuels': pd.BooleanDtype(),
'nameplate_power_factor': float,
'natural_gas_delivery_contract_type_code': pd.StringDtype(),
'natural_gas_local_distribution_company': pd.StringDtype(),
'natural_gas_pipeline_name_1': pd.StringDtype(),
'natural_gas_pipeline_name_2': pd.StringDtype(),
'natural_gas_pipeline_name_3': pd.StringDtype(),
'natural_gas_storage': pd.BooleanDtype(),
'natural_gas_transport_code': pd.StringDtype(),
'nerc_region': pd.CategoricalDtype(categories=RECOGNIZED_NERC_REGIONS),
'nerc_regions_of_operation': pd.CategoricalDtype(categories=RECOGNIZED_NERC_REGIONS),
'net_generation_mwh': float,
'net_metering': pd.BooleanDtype(),
'net_power_exchanged_mwh': float,
'net_wheeled_power_mwh': float,
'new_parent': pd.StringDtype(),
'non_amr_ami': pd.Int64Dtype(),
'nuclear_unit_id': pd.Int64Dtype(),
'operates_generating_plant': pd.BooleanDtype(),
'operating_date': 'datetime64[ns]',
'operating_switch': pd.StringDtype(),
# TODO: double check this for early 860 years
'operational_status': pd.StringDtype(),
'operational_status_code': pd.StringDtype(),
'original_planned_operating_date': 'datetime64[ns]',
'other': float,
'other_combustion_tech': pd.BooleanDtype(),
'other_costs': float,
'other_costs_incremental_cost': float,
'other_modifications_date': 'datetime64[ns]',
'other_planned_modifications': pd.BooleanDtype(),
'outages_recorded_automatically': pd.BooleanDtype(),
'owned_by_non_utility': pd.BooleanDtype(),
'owner_city': pd.StringDtype(),
'owner_name': pd.StringDtype(),
'owner_state': pd.StringDtype(),
'owner_street_address': pd.StringDtype(),
'owner_utility_id_eia': pd.Int64Dtype(),
'owner_zip_code': pd.StringDtype(),
        # we should transition these into readable codes, not single-letter codes
'ownership_code': pd.StringDtype(),
'phone_extension_1': pd.StringDtype(),
'phone_extension_2': pd.StringDtype(),
'phone_number_1': pd.StringDtype(),
'phone_number_2': pd.StringDtype(),
'pipeline_notes': pd.StringDtype(),
'planned_derate_date': 'datetime64[ns]',
'planned_energy_source_code_1': pd.StringDtype(),
'planned_modifications': pd.BooleanDtype(),
'planned_net_summer_capacity_derate_mw': float,
'planned_net_summer_capacity_uprate_mw': float,
'planned_net_winter_capacity_derate_mw': float,
'planned_net_winter_capacity_uprate_mw': float,
'planned_new_capacity_mw': float,
'planned_new_prime_mover_code': pd.StringDtype(),
'planned_repower_date': 'datetime64[ns]',
'planned_retirement_date': 'datetime64[ns]',
'planned_uprate_date': 'datetime64[ns]',
'plant_id_eia': pd.Int64Dtype(),
'plant_id_epa': pd.Int64Dtype(),
'plant_id_pudl': pd.Int64Dtype(),
'plant_name_eia': pd.StringDtype(),
'plants_reported_asset_manager': pd.BooleanDtype(),
'plants_reported_operator': pd.BooleanDtype(),
'plants_reported_other_relationship': pd.BooleanDtype(),
'plants_reported_owner': pd.BooleanDtype(),
'point_source_unit_id_epa': pd.StringDtype(),
'potential_peak_demand_savings_mw': float,
'pulverized_coal_tech':
|
pd.BooleanDtype()
|
pandas.BooleanDtype
|
# %%%%
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.decomposition import PCA
# %%%%
dta_airbnb = pd.read_csv('dta_airbnb_clean.csv').drop(['Unnamed: 0'],axis=1)
## data without missing values
dta_comp = dta_airbnb.dropna().reset_index() # 8608 rows * 43 columns
del dta_comp['index']
dta_comp.columns
## main variables used for prediction --- 36
dta_reg = dta_comp.drop(columns = ['headline','cancel_policy_code','location', 'location_code',
'type_code','amenity','res_time_code']).copy()
## set dummies for categorical variables
dta_cat = dta_reg[['cancel_policy', 'type','response_time','location_approx']].copy()
dta_cat = dta_cat.rename(columns = {'cancel_policy':'cancel','response_time':'res_time','location_approx':'location'})
dta_reg2 = pd.get_dummies(dta_cat,drop_first = True)
dta_reg = pd.concat([dta_reg,dta_reg2],axis = 1).drop(columns = ['cancel_policy', 'type','response_time','location_approx'])
## expand main variables
dta_reg.columns
# interaction
tmp_inter = dta_reg.iloc[:,1:].copy()
inter = PolynomialFeatures(interaction_only=True)
interact = pd.DataFrame(inter.fit_transform(tmp_inter.to_numpy()))
inter_name = inter.get_feature_names(tmp_inter.columns)
tmp_interact = interact.set_axis(inter_name, axis=1, inplace=False).iloc[:,1:2883] # drop bias and location*location term
# drop interactions within the same dummy variable
def remove_dup(x):
column_title = []
for i in range(len(x)-1):
for j in range(i+1,len(x)):
col = x[i] + ' ' + x[j]
column_title.append(col)
return column_title
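# For example, remove_dup(['a', 'b', 'c']) returns ['a b', 'a c', 'b c'], i.e.
# the space-joined pair names that PolynomialFeatures.get_feature_names
# produces for interaction terms. Within-group dummy interactions are always
# zero (at most one dummy per categorical equals 1), so they are dropped by
# name below.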
cancel = ['cancel_14 days before check in','cancel_30 days before check in','cancel_48 hours after booking','cancel_5 days before check in','cancel_no free cancel']
output1 = remove_dup(cancel)
type_ = ['type_camper','type_entire home','type_room','type_tent']
output2 = remove_dup(type_)
res = ['res_time_within a day','res_time_within a few hours','res_time_within an hour']
output3 = remove_dup(res)
tmp_interact = tmp_interact.drop(columns = output1+output2+output3)
dta_reg = pd.concat([dta_reg.iloc[:,0],tmp_interact.copy()],axis = 1) # 2863
# squared and cubic terms
tmp = dta_reg[['clean_fee', 'service_fee', 'occupancy_fee', 'guest','bedroom', 'beds', 'baths',
'rating','review_number', 'rating_cleanliness', 'rating_accuracy','rating_communication',
'rating_location', 'rating_check_in','rating_value', 'amenity_number','host_review']].copy()
column_name = list(tmp.columns)
# square
square_name = [s + '_square' for s in column_name]
tmp_square = pd.DataFrame(np.square(tmp.to_numpy()))
tmp_square = tmp_square.set_axis(square_name, axis=1, inplace=False)
dta_reg = pd.concat([dta_reg,tmp_square],axis = 1) # 2880
# cubic
cubic_name = [s + '_cubic' for s in column_name]
tmp_cubic = pd.DataFrame(np.power(tmp.to_numpy(),3))
tmp_cubic = tmp_cubic.set_axis(cubic_name, axis=1, inplace=False)
dta_reg =
|
pd.concat([dta_reg,tmp_cubic],axis = 1)
|
pandas.concat
|
# -*- coding: utf-8 -*-
"""
Tests of the ARIMA class
"""
import numpy as np
import pandas as pd
from pmdarima.arima import ARIMA, auto_arima, AutoARIMA, ARMAtoMA
from pmdarima.arima import _validation as val
from pmdarima.compat.pytest import pytest_error_str
from pmdarima.datasets import load_lynx, load_wineind, load_heartrate
from numpy.random import RandomState
from numpy.testing import assert_array_almost_equal, assert_almost_equal, \
assert_allclose
from statsmodels import api as sm
from sklearn.metrics import mean_squared_error
import joblib
import os
import pickle
import pytest
import tempfile
import time
# initialize the random state
rs = RandomState(42)
y = rs.rand(25)
# > set.seed(123)
# > abc <- rnorm(50, 5, 1)
abc = np.array([4.439524, 4.769823, 6.558708, 5.070508,
5.129288, 6.715065, 5.460916, 3.734939,
4.313147, 4.554338, 6.224082, 5.359814,
5.400771, 5.110683, 4.444159, 6.786913,
5.497850, 3.033383, 5.701356, 4.527209,
3.932176, 4.782025, 3.973996, 4.271109,
4.374961, 3.313307, 5.837787, 5.153373,
3.861863, 6.253815, 5.426464, 4.704929,
5.895126, 5.878133, 5.821581, 5.688640,
5.553918, 4.938088, 4.694037, 4.619529,
4.305293, 4.792083, 3.734604, 7.168956,
6.207962, 3.876891, 4.597115, 4.533345,
5.779965, 4.916631])
hr = load_heartrate(as_series=True)
wineind = load_wineind()
lynx = load_lynx()
def test_basic_arma():
arma = ARIMA(order=(0, 0, 0), suppress_warnings=True)
preds = arma.fit_predict(y) # fit/predict for coverage
# No OOB, so assert none
assert arma.oob_preds_ is None
# test some of the attrs
assert_almost_equal(arma.aic(), 11.201, decimal=3) # equivalent in R
# intercept is param 0
intercept = arma.params()[0]
assert_almost_equal(intercept, 0.441, decimal=3) # equivalent in R
assert_almost_equal(arma.aicc(), 11.74676, decimal=5)
assert_almost_equal(arma.bic(), 13.639060053303311, decimal=5)
# get predictions
expected_preds = np.array([0.44079876, 0.44079876, 0.44079876,
0.44079876, 0.44079876, 0.44079876,
0.44079876, 0.44079876, 0.44079876,
0.44079876])
# generate predictions
assert_array_almost_equal(preds, expected_preds)
# Make sure we can get confidence intervals
expected_intervals = np.array([
[-0.10692387, 0.98852139],
[-0.10692387, 0.98852139],
[-0.10692387, 0.98852139],
[-0.10692387, 0.98852139],
[-0.10692387, 0.98852139],
[-0.10692387, 0.98852139],
[-0.10692387, 0.98852139],
[-0.10692387, 0.98852139],
[-0.10692387, 0.98852139],
[-0.10692387, 0.98852139]
])
_, intervals = arma.predict(n_periods=10, return_conf_int=True,
alpha=0.05)
assert_array_almost_equal(intervals, expected_intervals)
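# Illustrative helper (not part of the original test-suite): the fit/forecast pattern
# exercised in test_basic_arma above, collected in one place for quick reference.
# It only uses calls already present in this file (ARIMA.fit and ARIMA.predict).
def _example_arma_forecast(n_periods=5):
    model = ARIMA(order=(0, 0, 0), suppress_warnings=True)
    model.fit(y)
    # point forecast plus 95% confidence intervals, as asserted above
    return model.predict(n_periods=n_periods, return_conf_int=True, alpha=0.05)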
def test_issue_30():
# From the issue:
vec = np.array([33., 44., 58., 49., 46., 98., 97.])
arm = AutoARIMA(out_of_sample_size=1, seasonal=False,
suppress_warnings=True)
arm.fit(vec)
# This is a way to force it:
ARIMA(order=(0, 1, 0), out_of_sample_size=1).fit(vec)
# Want to make sure it works with X arrays as well
X = np.random.RandomState(1).rand(vec.shape[0], 2)
auto_arima(vec, X=X, out_of_sample_size=1,
seasonal=False,
suppress_warnings=True)
# This is a way to force it:
ARIMA(order=(0, 1, 0), out_of_sample_size=1).fit(vec, X=X)
@pytest.mark.parametrize(
# will be m - d
'model', [
ARIMA(order=(2, 0, 0)), # arma
ARIMA(order=(2, 1, 0)), # arima
ARIMA(order=(2, 1, 0), seasonal_order=(1, 0, 0, 12)), # sarimax
]
)
def test_predict_in_sample_conf_int(model):
model.fit(wineind)
expected_m_dim = wineind.shape[0]
preds, confints = model.predict_in_sample(return_conf_int=True, alpha=0.05)
assert preds.shape[0] == expected_m_dim
assert confints.shape == (expected_m_dim, 2)
@pytest.mark.parametrize(
'model', [
ARIMA(order=(2, 0, 0)), # arma
ARIMA(order=(2, 1, 0)), # arima
ARIMA(order=(2, 1, 0), seasonal_order=(1, 0, 0, 12)), # sarimax
]
)
@pytest.mark.parametrize('X', [None, rs.rand(wineind.shape[0], 2)])
@pytest.mark.parametrize('confints', [True, False])
def test_predict_in_sample_X(model, X, confints):
model.fit(wineind, X=X)
res = model.predict_in_sample(X, return_conf_int=confints)
if confints:
assert isinstance(res, tuple) and len(res) == 2
else:
assert isinstance(res, np.ndarray)
def _two_times_mse(y_true, y_pred, **_):
"""A custom loss to test we can pass custom scoring metrics"""
return mean_squared_error(y_true, y_pred) * 2
@pytest.mark.parametrize('as_pd', [True, False])
@pytest.mark.parametrize('scoring', ['mse', _two_times_mse])
def test_with_oob_and_X(as_pd, scoring):
endog = hr
X = np.random.RandomState(1).rand(hr.shape[0], 3)
if as_pd:
X =
|
pd.DataFrame.from_records(X)
|
pandas.DataFrame.from_records
|
import numpy as np
import pandas as pd
from decisionengine.framework.modules import Source
PRODUCES = ["provisioner_resource_spot_prices"]
class AWSSpotPrice(Source.Source):
def __init__(self, *args, **kwargs):
pass
def produces(self, schema_id_list):
return PRODUCES
# The DataBlock given to the source is t=0
def acquire(self):
resource_list = [
{"ResourceName": "AWS1", "SpotPrice": 1.},
{"ResourceName": "AWS2", "SpotPrice": 2.},
{"ResourceName": "AWS3", "SpotPrice": 2.},
{"ResourceName": "AWS4", "SpotPrice": 1.},
{"ResourceName": "AWS5", "SpotPrice": 2.}
]
resource_keys = resource_list[0].keys()
pandas_data = {}
for key in resource_keys:
pandas_data[key] =
|
pd.Series([d[key] for d in resource_list])
|
pandas.Series
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright 2017 jfrfonseca
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Operacoes de recuperacao de dados diarios de fundos"""
"""
# IMPORTS
"""
# Standard library
import json
import logging
# PIP dependencies
import xmltodict
import requests
import pandas as pd
# This package
from py_financas import _utils, constantes
"""
# PAYLOAD
"""
def recupera_informes_diarios_de_hoje(usuario_cvm, senha_cvm,
justificativa='Obtencao de informes diarios de cvm'):
    # Initialize the log object
log = logging.getLogger('py_financas:cvm')
    # Check whether today is a business day
if not _utils.unidades_brasileiras.e_dia_util():
        # If not, stop the function immediately
log.debug("Hoje nao e um dia util. Nao e possivel recuperar os informes diarios de cvm de hoje do sistema CVM")
return pd.DataFrame()
    # Initialize a client with the CVM
cliente_wsdl_cvm = _utils.inicializa_cliente_wsdl_cvm(usuario_cvm, senha_cvm)
    # Request a URL to download the daily fund reports document
url_documento = cliente_wsdl_cvm.service.solicAutorizDownloadArqEntrega(constantes.codigo_informes_diarios_fundos,
justificativa)
log.debug("Obtida a URL para download dos informes diarios de cvm: {}".format(url_documento))
    # Retrieve the document from the URL
documento_fundos_raw = requests.get(url_documento).content
log.debug("Recuperado o documento de informes diarios de cvm do ultimo dia util")
    # Extract the (zipped) document content into an XML string
documento_fundos = _utils.le_arquivo_zip_de_string(documento_fundos_raw)
    # Convert the XML document into a Python dictionary
dict_fundos = xmltodict.parse(documento_fundos)
    # Get the report values. If an inner field is null, return an empty dataframe
valor = dict_fundos['ROOT']
if valor is not None:
valor = valor['INFORMES']
if valor is not None:
valor = valor['INFORME_DIARIO']
else:
return
|
pd.DataFrame()
|
pandas.DataFrame
|
"""
This routine calculates the radar moments for the RPG 94 GHz FMCW radar 'LIMRAD94' and generates a NetCDF4 file.
The generated files can be used as input for the Cloudnet processing chain.
Args:
**date (string): format YYYYMMDD
**path (string): path where NetCDF file will be stored
Example:
.. code::
python spec2mom_limrad94.py date=20181201 path=/tmp/pycharm_project_626/scripts_Willi/cloudnet_input/
"""
import bisect
import copy
import warnings
import datetime
import logging
import numpy as np
import pandas as pd
import sys
import time
from itertools import product
from numba import jit
from tqdm.auto import tqdm
import matplotlib.pyplot as plt
from scipy.interpolate import CubicSpline
from scipy.signal import correlate
from typing import List, Set, Dict, Tuple, Optional, Union
warnings.simplefilter("ignore", UserWarning)
sys.path.append('../../larda/')
from pyLARDA.helpers import z2lin, argnearest, lin2z, ts_to_dt, dt_to_ts
logger = logging.getLogger(__name__)
def replace_fill_value(data, newfill):
"""
    Replaces the fill values of a spectrum container with their time- and range-specific mean noise level.
Args:
data (numpy.array) : 3D spectrum array (time, range, velocity)
newfill (numpy.array) : 2D new fill values for 3rd dimension (velocity)
Returns:
var (numpy.array): spectrum with mean noise
"""
n_ts, n_rg, _ = data.shape
var = data.copy()
masked = np.all(data <= 0.0, axis=2)
for iT in range(n_ts):
for iR in range(n_rg):
if masked[iT, iR]:
var[iT, iR, :] = newfill[iT, iR]
else:
var[iT, iR, var[iT, iR, :] <= 0.0] = newfill[iT, iR]
return var
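# Minimal illustration of replace_fill_value (not part of the original module): a
# spectrum containing only fill values (<= 0) is overwritten bin-by-bin with the
# corresponding 2D newfill value, here a hypothetical mean noise level.
def _demo_replace_fill_value():
    spec = np.full((1, 2, 4), -999.0)               # (time, range, velocity)
    spec[0, 1, :] = [1e-3, 2e-3, -999.0, 5e-4]      # one partly valid spectrum
    newfill = np.array([[1e-6, 2e-6]])              # (time, range) mean noise levels
    # fully masked spectrum -> all bins set to 1e-6; partly valid -> only the bad bin set to 2e-6
    return replace_fill_value(spec, newfill)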
def get_chirp_from_range(rg_offsets, i_rg):
    # return the index of the chirp whose range-gate interval (given by rg_offsets) contains i_rg
for i, ioff in enumerate(rg_offsets[1:]):
if i_rg <= ioff: return i
@jit(nopython=True, fastmath=True)
def estimate_noise_hs74(spectrum, navg=1, std_div=6.0, nnoise_min=1):
"""REFERENCE TO ARM PYART GITHUB REPO: https://github.com/ARM-DOE/pyart/blob/master/pyart/util/hildebrand_sekhon.py
Estimate noise parameters of a Doppler spectrum.
Use the method of estimating the noise level in Doppler spectra outlined
    by Hildebrand and Sekhon, 1974.
Args:
spectrum (array): Doppler spectrum in linear units.
navg (int, optional): The number of spectral bins over which a moving average
has been taken. Corresponds to the **p** variable from equation 9 of the article.
The default value of 1 is appropriate when no moving average has been applied to the spectrum.
std_div (float, optional): Number of standard deviations above mean noise floor to specify the
signal threshold, default: threshold=mean_noise + 6*std(mean_noise)
nnoise_min (int, optional): Minimum number of noise samples to consider the estimation valid.
Returns:
tuple with
- **mean** (*float*): Mean of points in the spectrum identified as noise.
- **threshold** (*float*): Threshold separating noise from signal. The point in the spectrum with
this value or below should be considered as noise, above this value
signal. It is possible that all points in the spectrum are identified
as noise. If a peak is required for moment calculation then the point
with this value should be considered as signal.
- **var** (*float*): Variance of the points in the spectrum identified as noise.
- **nnoise** (*int*): Number of noise points in the spectrum.
References:
        Hildebrand, P. H. and Sekhon, R. S., Objective Determination of the Noise
Level in Doppler Spectra. Journal of Applied Meteorology, 1974, 13, 808-811.
"""
sorted_spectrum = np.sort(spectrum)
nnoise = len(spectrum) # default to all points in the spectrum as noise
rtest = 1 + 1 / navg
sum1 = 0.
sum2 = 0.
for i, pwr in enumerate(sorted_spectrum):
npts = i + 1
if npts < nnoise_min:
continue
sum1 += pwr
sum2 += pwr * pwr
if npts * sum2 < sum1 * sum1 * rtest:
nnoise = npts
else:
# partial spectrum no longer has characteristics of white noise.
sum1 -= pwr
sum2 -= pwr * pwr
break
mean = sum1 / nnoise
var = sum2 / nnoise - mean * mean
threshold = mean + np.sqrt(var) * std_div
return mean, threshold, var, nnoise
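# Illustrative use of the Hildebrand & Sekhon estimator above (not part of the
# original module): a synthetic spectrum of positive noise plus one strong peak.
# All numbers are arbitrary and only demonstrate the call signature.
def _demo_estimate_noise_hs74():
    spec = np.random.RandomState(0).rayleigh(1e-3, 256)
    spec[120:126] += 0.5                            # inject a signal peak
    mean, threshold, var, nnoise = estimate_noise_hs74(spec, navg=1, std_div=6.0)
    return mean, threshold, var, nnoise             # noise statistics of that spectrum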
@jit(nopython=True, fastmath=True)
def find_peak_edges(signal, threshold=-1, imaxima=-1):
"""Returns the indices of left and right edge of the main signal peak in a Doppler spectra.
Args:
signal (numpy.array): 1D array Doppler spectra
threshold: noise threshold
Returns:
[index_left, index_right] (list): indices of signal minimum/maximum velocity
"""
len_sig = len(signal)
index_left, index_right = 0, len_sig
if threshold < 0: threshold = np.min(signal)
if imaxima < 0: imaxima = np.argmax(signal)
for ispec in range(imaxima, len_sig):
if signal[ispec] > threshold: continue
index_right = ispec
break
for ispec in range(imaxima, -1, -1):
if signal[ispec] > threshold: continue
index_left = ispec + 1 # the +1 is important, otherwise a fill_value will corrupt the numba code
break
return threshold, [index_left, index_right]
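# Illustrative use of find_peak_edges (not part of the original module): locate the
# left/right edge of a single synthetic Gaussian peak above a flat noise floor.
def _demo_find_peak_edges():
    vel = np.linspace(-5.0, 5.0, 128)
    spec = 1e-3 + np.exp(-0.5 * ((vel - 1.0) / 0.4) ** 2)   # noise floor + peak
    _, (i_left, i_right) = find_peak_edges(spec, threshold=2e-3)
    return vel[i_left], vel[i_right - 1]                     # velocity bounds of the peak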
@jit(nopython=True, fastmath=True)
def radar_moment_calculation(signal, vel_bins, DoppRes):
"""
Calculation of radar moments: reflectivity, mean Doppler velocity, spectral width,
skewness, and kurtosis of one Doppler spectrum. Optimized for the use of Numba.
Note:
        Divide the signal_sum by 2 because the vertical and horizontal channels are added.
        Subtract half of the Doppler resolution from the mean Doppler velocity.
Args:
- signal (float array): detected signal from a Doppler spectrum
- vel_bins (float array): extracted velocity bins of the signal (same length as signal)
- DoppRes (int): resolution of the Doppler spectra (different for each chirp)
Returns:
dict containing
- **Ze_lin** (*float array*): reflectivity (0.Mom) over range of velocity bins [mm6/m3]
- **VEL** (*float array*): mean velocity (1.Mom) over range of velocity bins [m/s]
- **sw** (*float array*): spectrum width (2.Mom) over range of velocity bins [m/s]
- **skew** (*float array*): skewness (3.Mom) over range of velocity bins
- **kurt** (*float array*): kurtosis (4.Mom) over range of velocity bins
"""
signal_sum = np.sum(signal) # linear full spectrum Ze [mm^6/m^3], scalar
Ze_lin = signal_sum / 2.0
pwr_nrm = signal / signal_sum # determine normalized power (NOT normalized by Vdop bins)
VEL = np.sum(vel_bins * pwr_nrm)
vel_diff = vel_bins - VEL
vel_diff2 = vel_diff * vel_diff
sw = np.sqrt(np.abs(np.sum(pwr_nrm * vel_diff2)))
sw2 = sw * sw
skew = np.sum(pwr_nrm * vel_diff * vel_diff2 / (sw * sw2))
kurt = np.sum(pwr_nrm * vel_diff2 * vel_diff2 / (sw2 * sw2))
VEL = VEL - DoppRes / 2.0
return Ze_lin, VEL, sw, skew, kurt
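# Illustrative moment calculation (not part of the original module): for a symmetric
# peak centred at +1 m/s the mean Doppler velocity comes out near +1 m/s minus half
# of a hypothetical Doppler resolution, as stated in the Note above.
def _demo_radar_moments():
    dopp_res = 0.1                                  # assumed Doppler resolution [m/s]
    vel = np.linspace(-5.0, 5.0, 128)
    spec = np.exp(-0.5 * ((vel - 1.0) / 0.4) ** 2)  # linear spectral power
    return radar_moment_calculation(spec, vel, dopp_res)    # Ze_lin, VEL, sw, skew, kurt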
@jit(nopython=True, fastmath=True)
def despeckle(mask, min_percentage):
"""Remove small patches (speckle) from any given mask by checking 5x5 box
around each pixel, more than half of the points in the box need to be 1
to keep the 1 at current pixel
Args:
mask (numpy.array, integer): 2D mask where 1 = an invalid/fill value and 0 = a data point (time, height)
min_percentage (float): minimum percentage of neighbours that need to be signal above noise
Returns:
mask ... speckle-filtered matrix of 0 and 1 that represents (cloud) mask [height x time]
"""
WSIZE = 5 # 5x5 window
n_bins = WSIZE * WSIZE
min_bins = int(min_percentage / 100 * n_bins)
shift = int(WSIZE / 2)
n_ts, n_rg = mask.shape
for iT in range(n_ts - WSIZE):
for iR in range(n_rg - WSIZE):
if mask[iT, iR] and np.sum(mask[iT:iT + WSIZE, iR:iR + WSIZE]) > min_bins:
mask[iT + shift, iR + shift] = True
return mask
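# Illustrative check of despeckle (not part of the original module): a single valid
# pixel embedded in a fully masked region is treated as speckle and masked as well.
def _demo_despeckle():
    speckle_mask = np.ones((10, 10), dtype=np.bool_)   # True = invalid / fill value
    speckle_mask[4, 4] = False                         # lone "signal" pixel
    return despeckle(speckle_mask, 80.0)[4, 4]         # -> True, the lone pixel is removed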
def make_container_from_spectra(spectra_all_chirps, values, paraminfo, invalid_mask, varname=''):
"""
This routine will generate a larda container from calculated moments from spectra.
Args:
spectra_all_chirps (list of dicts): dimension [nchirps], containing the spectrum
values of the 94 GHz RPG cloud radar
values (numpy array): dimension [nrange, ntimes], values of calculated moments
paraminfo (dict): information from params_[campaign].toml for the specific variable
Returns:
container (dict): larda data container
"""
if len(varname) > 0:
spectra_all_chirps = [spectra_all_chirps[ic][varname] for ic in range(len(spectra_all_chirps))]
spectra = spectra_all_chirps[0]
#np.array([rg for ic in spectra_all_chirps for rg in ic['rg']])
container = {'dimlabel': ['time', 'range'],
'filename': spectra['filename'] if 'filename' in spectra else '',
'paraminfo': copy.deepcopy(paraminfo),
'rg_unit': paraminfo['rg_unit'], 'colormap': paraminfo['colormap'],
'var_unit': paraminfo['var_unit'],
'var_lims': paraminfo['var_lims'],
'system': paraminfo['system'], 'name': paraminfo['paramkey'],
'rg': spectra['rg'], 'ts': spectra['ts'],
'mask': invalid_mask, 'var': values[:]}
return container
def load_spectra_rpgfmcw94(larda, time_span, rpg_radar='LIMRAD94', **kwargs):
"""
This routine will generate a list of larda containers including spectra of the RPG-FMCW 94GHz radar.
The list-container at return will contain the additional information, for each chirp.
Args:
rpg_radar (string): name of the radar system as defined in the toml file
larda (class larda): Initialized pyLARDA, already connected to a specific campaign
time_span (list): Starting and ending time point in datetime format.
**noise_factor (float): Noise factor, number of standard deviations from mean noise floor
**ghost_echo_1 (bool): Filters ghost echos which occur over all chirps during precipitation.
        **ghost_echo_2 (bool): Filters ghost echos which occur over one chirp during precipitation.
        **estimate_noise (bool): If True, adds the following noise estimation values to the container:
            - mean (2d ndarray): Mean noise level of the spectra.
            - threshold (2d ndarray): Noise threshold, values above this threshold are considered as signal.
            - variance (2d ndarray): The variance of the mean noise level.
            - numnoise (2d ndarray): Number of pixels that are considered as noise.
            - signal (2d ndarray): Boolean array, a value is True if no signal was detected.
            - bounds (3d ndarray): Dimensions [n_time, n_range, 2] containing the integration boundaries.
Returns:
container (list): list of larda data container
- **spec[i_chirps]['no_av']** (*float*): Number of spectral averages divided by the number of FFT points
        - **spec[i_chirps]['DoppRes']** (*float*): Doppler resolution for each chirp
- **spec[i_chirps]['SL']** (*2D-float*): Sensitivity limit (dimensions: time, range)
- **spec[i_chirps]['NF']** (*string*): Noise factor, default = 6.0
        - **spec[i_chirps]['rg_offsets']** (*list*): Indices where the chirp shifts
"""
    # read limrad94 doppler spectra and calculate radar moments
std_above_mean_noise = float(kwargs['noise_factor']) if 'noise_factor' in kwargs else 6.0
heave_correct = kwargs['heave_correction'] if 'heave_correction' in kwargs else False
version = kwargs['heave_corr_version'] if 'heave_corr_version' in kwargs else 'jr'
add = kwargs['add'] if 'add' in kwargs else False
shift = kwargs['shift'] if 'shift' in kwargs else 0
dealiasing_flag = kwargs['dealiasing'] if 'dealiasing' in kwargs else False
ghost_echo_1 = kwargs['ghost_echo_1'] if 'ghost_echo_1' in kwargs else True
ghost_echo_2 = kwargs['ghost_echo_2'] if 'ghost_echo_2' in kwargs else True
do_despeckle2D = kwargs['despeckle2D'] if 'despeckle2D' in kwargs else True
add_horizontal_channel = True if 'add_horizontal_channel' in kwargs and kwargs['add_horizontal_channel'] else False
estimate_noise = True if std_above_mean_noise > 0.0 else False
AvgNum_in = larda.read(rpg_radar, "AvgNum", time_span)
DoppLen_in = larda.read(rpg_radar, "DoppLen", time_span)
MaxVel_in = larda.read(rpg_radar, "MaxVel", time_span)
ChirpFFTSize_in = larda.read(rpg_radar, "ChirpFFTSize", time_span)
SeqIntTime_in = larda.read(rpg_radar, "SeqIntTime", time_span)
data = {}
    # depending on how many files are loaded, AvgNum and DoppLen are multidimensional lists
if len(AvgNum_in['var'].shape) > 1:
AvgNum = AvgNum_in['var'][0]
DoppLen = DoppLen_in['var'][0]
ChirpFFTSize = ChirpFFTSize_in['var'][0]
DoppRes = np.divide(2.0 * MaxVel_in['var'][0], DoppLen_in['var'][0])
MaxVel = MaxVel_in['var'][0]
SeqIntTime = SeqIntTime_in['var'][0]
else:
AvgNum = AvgNum_in['var']
DoppLen = DoppLen_in['var']
ChirpFFTSize = ChirpFFTSize_in['var']
DoppRes = np.divide(2.0 * MaxVel_in['var'], DoppLen_in['var'])
MaxVel = MaxVel_in['var']
SeqIntTime = SeqIntTime_in['var']
# initialize
tstart = time.time()
if add_horizontal_channel:
data['SLh'] = larda.read(rpg_radar, "SLh", time_span, [0, 'max'])
data['HSpec'] = larda.read(rpg_radar, 'HSpec', time_span, [0, 'max'])
data['ReVHSpec'] = larda.read(rpg_radar, 'ImVHSpec', time_span, [0, 'max'])
data['ImVHSpec'] = larda.read(rpg_radar, 'ReVHSpec', time_span, [0, 'max'])
data['VHSpec'] = larda.read(rpg_radar, 'VSpec', time_span, [0, 'max'])
data['SLv'] = larda.read(rpg_radar, "SLv", time_span, [0, 'max'])
data['mdv'] = larda.read(rpg_radar, 'VEL', time_span, [0, 'max'])
data['NF'] = std_above_mean_noise
data['no_av'] = np.divide(AvgNum, DoppLen)
data['DoppRes'] = DoppRes
data['DoppLen'] = DoppLen
data['MaxVel'] = MaxVel
data['ChirpFFTSize'] = ChirpFFTSize
data['SeqIntTime'] = SeqIntTime
data['n_ts'], data['n_rg'], data['n_vel'] = data['VHSpec']['var'].shape
data['n_ch'] = len(MaxVel)
data['rg_offsets'] = [0]
data['vel'] = []
for var in ['C1Range', 'C2Range', 'C3Range']:
logger.debug('loading variable from LV1 :: ' + var)
data.update({var: larda.read(rpg_radar, var, time_span, [0, 'max'])})
for ic in range(len(AvgNum)):
nrange_ = larda.read(rpg_radar, f'C{ic + 1}Range', time_span)['var']
if len(nrange_.shape) == 1:
nrange_ = nrange_.size
else:
nrange_ = nrange_.shape[1]
data['rg_offsets'].append(data['rg_offsets'][ic] + nrange_)
data['vel'].append(np.linspace(-MaxVel[ic] + (0.5 * DoppRes[ic]), +MaxVel[ic] - (0.5 * DoppRes[ic]), np.max(DoppLen)))
data['VHSpec']['rg_offsets'] = data['rg_offsets']
logger.info(f'Loading spectra, elapsed time = {seconds_to_fstring(time.time() - tstart)} [min:sec]')
"""
####################################################################################################################
____ ___ ___ _ ___ _ ____ _ _ ____ _ ___ ____ ____ ___ ____ ____ ____ ____ ____ ____ _ _ _ ____
|__| | \ | \ | | | | | |\ | |__| | |__] |__/ |___ |__] |__/ | | | |___ [__ [__ | |\ | | __
| | |__/ |__/ | | | |__| | \| | | |___ | | \ |___ | | \ |__| |___ |___ ___] ___] | | \| |__]
####################################################################################################################
"""
if heave_correct:
tstart = time.time()
current_day = ts_to_dt(data['VHSpec']['ts'][0])
data['VHSpec']['var'], data['heave_cor'], data['heave_cor_bins'], _, data['time_shift_array'] = heave_correction_spectra(
data,
current_day,
path_to_seapath="/projekt2/remsens/data_new/site-campaign/rv_meteor-eurec4a/instruments/RV-METEOR_DSHIP",
mean_hr=True,
only_heave=False,
use_cross_product=True,
transform_to_earth=True,
add=add,
shift=shift,
version=version)
logger.info(f'Heave correction applied, elapsed time = {seconds_to_fstring(time.time() - tstart)} [min:sec]')
if do_despeckle2D:
tstart = time.time()
data['dspkl_mask'] = despeckle2D(data['VHSpec']['var'])
data['VHSpec']['var'][data['dspkl_mask']], data['VHSpec']['mask'][data['dspkl_mask']] = -999.0, True
logger.info(f'Despeckle applied, elapsed time = {seconds_to_fstring(time.time() - tstart)} [min:sec]')
# read spectra and other variables
if estimate_noise:
tstart = time.time()
data['edges'] = np.full((data['n_ts'], data['n_rg'], 2), 0, dtype=int)
try:
data['Vnoise'] = larda.read(rpg_radar, 'VNoisePow', time_span, [0, 'max'])
if add_horizontal_channel: data['Hnoise'] = larda.read(rpg_radar, 'HNoisePow', time_span, [0, 'max'])
# initialize arrays
data['mean'] = np.full((data['n_ts'], data['n_rg']), -999.0)
data['variance'] = np.full((data['n_ts'], data['n_rg']), -999.0)
tmp = data['VHSpec']['var'].copy()
tmp[tmp <= 0.0] = np.nan
# catch RuntimeWarning: All-NaN slice encountered
with warnings.catch_warnings():
warnings.simplefilter("ignore")
data['thresh'] = np.nanmin(tmp, axis=2)
data['var_max'] = np.nanmax(tmp, axis=2)
# find all-noise-spectra (aka. fill_value)
mask = np.all(data['VHSpec']['var'] == -999.0, axis=2)
data['thresh'][mask] = data['Vnoise']['var'][mask]
del tmp
except KeyError:
logger.info('KeyError: Noise Power variable not found, calculate noise level...')
noise_est = noise_estimation_uncompressed_data(data['VHSpec'], no_av=data['no_av'], n_std=6.0, rg_offsets=data['rg_offsets'])
mask = ~noise_est['signal']
data['thresh'] = noise_est['threshold']
data['VHSpec']['var'][mask] = -999.0
# IGNORES: RuntimeWarning: invalid value encountered in less:
# masking = data['VHSpec']['var'][iT, iR, :] < data['thresh'][iT, iR]
with np.errstate(invalid='ignore'):
for iT, iR in product(range(data['n_ts']), range(data['n_rg'])):
if mask[iT, iR]: continue
masking = data['VHSpec']['var'][iT, iR, :] < data['thresh'][iT, iR]
data['VHSpec']['var'][iT, iR, masking] = -999.0
if dealiasing_flag:
dealiased_spec, dealiased_mask, new_vel, new_bounds, _, _ = dealiasing(
data['VHSpec']['var'],
data['vel'],
data['SLv']['var'],
data['rg_offsets'],
vel_offsets=kwargs['dealiasing_vel'] if 'dealiasing_vel' in kwargs else None,
show_triple=False
)
data['VHSpec']['var'] = dealiased_spec
data['VHSpec']['mask'] = dealiased_mask
data['VHSpec']['vel'] = new_vel[0] # copy to larda container
        data['vel'] = new_vel  # copy all velocities
data['edges'] = new_bounds
else:
for iT, iR in product(range(data['n_ts']), range(data['n_rg'])):
if mask[iT, iR]: continue
_, data['edges'][iT, iR, :] = find_peak_edges(data['VHSpec']['var'][iT, iR, :], data['thresh'][iT, iR])
logger.info(f'Loading Noise Level, elapsed time = {seconds_to_fstring(time.time() - tstart)} [min:sec]')
if ghost_echo_1:
tstart = time.time()
data['ge1_mask'] = filter_ghost_1(data['VHSpec']['var'], data['VHSpec']['rg'], data['vel'], data['rg_offsets'])
logger.info(f'Precipitation Ghost Filter applied, elapsed time = {seconds_to_fstring(time.time() - tstart)} [min:sec]')
logger.info(f'Number of ghost pixel due to precipitation = {np.sum(data["ge1_mask"])}')
data['VHSpec']['var'][data['ge1_mask']], data['VHSpec']['mask'][data['ge1_mask']] = -999.0, True
if ghost_echo_2:
data['ge2_mask'] = filter_ghost_2(data['VHSpec']['var'], data['VHSpec']['rg'], data['SLv']['var'], data['rg_offsets'][1])
logger.info(f'Curtain-like Ghost Filter applied, elapsed time = {seconds_to_fstring(time.time() - tstart)} [min:sec]')
logger.info(f'Number of curtain-like ghost pixel = {np.sum(data["ge2_mask"])}')
data['VHSpec']['var'][data['ge2_mask']], data['VHSpec']['mask'][data['ge2_mask']] = -999.0, True
if do_despeckle2D:
tstart = time.time()
data['dspkl_mask'] = despeckle2D(data['VHSpec']['var'])
data['VHSpec']['var'][data['dspkl_mask']], data['VHSpec']['mask'][data['dspkl_mask']] = -999.0, True
logger.info(f'Despeckle applied, elapsed time = {seconds_to_fstring(time.time() - tstart)} [min:sec]')
return data
def dealiasing_check(masked3D):
"""
Checks for folding.
Args:
        masked3D (numpy.array): boolean mask, dim = (time, range, velocity), True = no signal
Returns:
        alias_flag (numpy.array): dim = (time, range), True where folding (aliasing) is suspected
"""
frac = 5
alias_flag = np.full(masked3D.shape[:2], False)
masked2D = masked3D.all(axis=2)
Nfft = masked3D.shape[2]
frac = np.ceil(Nfft / 100 * frac).astype(np.int32)
for iT, iR in product(range(masked3D.shape[0]), range(masked3D.shape[1])):
if masked2D[iT, iR]: continue # no signal was recorded
        # check if aliasing occurred by checking if more than 'frac' percent of the bins exceeded
# mean noise level at one of the spectra
n_start = np.sum(~masked3D[iT, iR, :frac])
n_end = np.sum(~masked3D[iT, iR, Nfft-frac+1:Nfft+1])
if n_start >= frac or n_end >= frac: alias_flag[iT, iR] = True # then aliasing detected
return alias_flag
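# Illustrative use of dealiasing_check (not part of the original module): a spectrum
# whose first Doppler bins are unmasked (signal piled up at the spectrum edge) is
# flagged as potentially folded.
def _demo_dealiasing_check():
    masked = np.full((1, 1, 128), True)    # (time, range, velocity), True = no signal
    masked[0, 0, :10] = False              # signal touching the left spectrum edge
    return dealiasing_check(masked)[0, 0]  # -> True, folding suspected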
def dealiasing(
spectra: np.array,
vel_bins_per_chirp: List[np.array],
noisefloor: np.array,
rg_offsets: List[int] = None,
show_triple: bool = False,
vel_offsets: List[int] = None,
jump: int = None,
) -> Union[np.array, np.array, List[np.array], np.array, np.array, np.array]:
"""
Peaks exceeding the maximum unambiguous Doppler velocity range of ± v_Nyq in [m s-1]
appear at the next upper (lower) range gate at the other end of the velocity spectrum.
The dealiasing method presented here aims to correct for this and is applied to every time step.
Logging level INFO shows the de-aliasing progress bar.
Args:
spectra: dim = (n_time, n_range, n_velocity) in linear units!
vel_bins_per_chirp: len = (n_chirp), each list element contains a numpy array of velocity bins
noisefloor: dim = (n_time, n_range) in linear units!
rg_offsets (optional): dim = (n_chirp + 1), starting with 0, range indices where chirp shift occurs
show_triple (optional): if True, return dealiased spectra including the triplication
        vel_offsets (optional): velocity window around the main peak [x1, x2], x1 < 0, x2 > 0 ! in [m s-1], default [-7.0, +7.0]
jump (optional): maximum number of Doppler bins a spectrum can change in two adjacent range bins
Returns:
tuple containing
- **dealiased_spectra**: dim = (n_time, n_range, 3 * n_velocity), de-aliased Doppler spectrum
- **dealiased_mask**: dim = (n_time, n_range, 3 * n_velocity), True if no signal
- **velocity_new**: len = (n_chirp), each list element contains a numpy array of velocity bins for the respective chirp of ± 3*v_Nyq in [m s-1]
- **signal_boundaries**: indices of left and right edge of a signal, [-1, -1] if no signal
- **search_path**: indices of left and right edge of the search path, [-1, -1] if no signal
- **idx_peak_matrix**: indices of the main peaks, [NDbins / 2] if no signal
.. todo::
- add time--height mask for dealiasing
- search window relative to [m s-1]
- abs(idx_new_peak - mean_idx_last_ts) > 120: --> relativ
"""
(n_ts, n_rg, n_vel), n_ch = spectra.shape, len(rg_offsets) - 1
n_vel_new = 3 * n_vel
k = 2
if jump is None:
jump = n_vel // 2
# triplicate velocity bins
velocity_new = []
for v in vel_bins_per_chirp:
vel_range = v[-1] - v[0]
velocity_new.append(np.linspace(v[0] - vel_range, v[-1] + vel_range, n_vel_new))
# set (n_rg, 2) array containing velocity index offset ±velocty_jump_tolerance from maxima from last range gate
_one_in_all = [-7.0, +7.0] if vel_offsets is None else vel_offsets
velocty_jump_tolerance = np.array([_one_in_all for _ in range(n_ch)]) # ± [m s-1]
rg_diffs = np.diff(rg_offsets)
Dopp_res = np.array([vel_bins_per_chirp[ic][1] - vel_bins_per_chirp[ic][0] for ic in range(n_ch)])
iDbinTol = [velocty_jump_tolerance[ires, :] // res for ires, res in enumerate(Dopp_res)]
iDbinTol = np.concatenate([np.array([iDbinTol[ic]] * rg_diffs[ic]) for ic in range(n_ch)]).astype(np.int)
# triplicate spectra
Z_linear = np.concatenate([spectra for _ in range(3)], axis=2)
# initialize arrays for dealiasing
window_fcn = np.kaiser(n_vel_new, 4.0)
signal_boundaries = np.zeros((n_ts, n_rg, 2), dtype=np.int)
search_path = np.zeros((n_ts, n_rg, 2), dtype=np.int)
dealiased_spectra = np.full(Z_linear.shape, -999.0, dtype=np.float32)
dealiased_mask = np.full(Z_linear.shape, True, dtype=np.bool)
idx_peak_matrix = np.full((n_ts, n_rg), n_vel_new // 2, dtype=np.int)
all_clear = np.all(np.all(spectra <= 0.0, axis=2), axis=1)
noise = np.copy(noisefloor)
noise_mask = spectra.min(axis=2) > noise
noise[noise_mask] = spectra.min(axis=2)[noise_mask]
logger.debug(f'Doppler resolution per chirp : {Dopp_res}')
logger.info(f'Doppler spectra de-aliasing....... ')
for iT in range(n_ts) if logger.level > 20 else tqdm(range(n_ts), unit=' timesteps', total=n_ts):
# entire profile is clear sky
if all_clear[iT]: continue
# assume no dealiasing at upper most range gate
idx_last_peak = n_vel_new // 2
# Top-Down approach: check range gates below
for iR in range(n_rg - 1, -1, -1):
# the search window for the next peak maximum surrounds ± velocity_jump_tolerance [m s-1] around the last peak maximum
search_window = range(max(idx_last_peak + iDbinTol[iR, 0], 0), min(idx_last_peak + iDbinTol[iR, 1], n_vel_new))
Z_windowed = Z_linear[iT, iR, :] * np.roll(window_fcn, n_vel_new // 2 - idx_last_peak)
Z_windowed = Z_windowed[search_window] # Note: Think about index shift!
idx_new_peak = np.argmax(Z_windowed) + search_window[0]
            # check if the Doppler velocity jumps more than `jump` bins from the last peak max to the new (= one range gate below) peak max
mean_idx_last_ts = int(np.mean(idx_peak_matrix[max(0, iT - k):min(iT + 1, n_ts), max(0, iR - 1):min(iR + k, n_rg)]))
if abs(idx_new_peak - mean_idx_last_ts) > jump:
logger.debug(f'jump at iT={iT} iR={iR}')
idx_new_peak = mean_idx_last_ts
search_window = range(max(idx_new_peak + iDbinTol[iR, 0], 0), min(idx_new_peak + iDbinTol[iR, 1], n_vel_new))
search_path[iT, iR, :] = [search_window[0], search_window[-1]] # for plotting
if search_window[0] < idx_new_peak < search_window[-1]:
# calc signal boundaries
_, _bnd = find_peak_edges(Z_linear[iT, iR, :], threshold=noise[iT, iR], imaxima=idx_new_peak)
# safety precautions, if idx-left-bound > idx-right-bound --> no signal
if _bnd[0] == _bnd[1] + 1:
# probably clear sky
idx_peak_matrix[iT, iR] = idx_last_peak
signal_boundaries[iT, iR, :] = [-1, -1]
else:
signal_boundaries[iT, iR, :] = _bnd
idx_peak_matrix[iT, iR] = idx_new_peak
idx_last_peak = idx_new_peak
# if show_triple == True, copy all signals including the triplication else copy only the main signal and not the triplication
_bnd_tmp = [None, None] if show_triple else _bnd
dealiased_spectra[iT, iR, _bnd_tmp[0]:_bnd_tmp[1]] = Z_linear[iT, iR, _bnd_tmp[0]:_bnd_tmp[1]]
dealiased_mask[iT, iR, _bnd_tmp[0]:_bnd_tmp[1]] = False
else:
# last peak stays the same, no integration boundaries
signal_boundaries[iT, iR, :] = [-1, -1]
idx_peak_matrix[iT, iR] = idx_last_peak
logger.debug(f'signal boundaries(iR == {iR}) = {signal_boundaries[iT, iR, :]} '
f'idx_peak_max {idx_peak_matrix[iT, iR]}, '
f'min val = noise floor : {Z_linear[iT, iR, :].min():.7f}, {noise[iT, iR]:.7f} ')
# clean up signal boundaries
signal_boundaries[(signal_boundaries <= 0) + (signal_boundaries >= n_vel_new)] = -1
return dealiased_spectra, dealiased_mask, velocity_new, signal_boundaries, search_path, idx_peak_matrix
def noise_estimation_uncompressed_data(data, n_std=6.0, **kwargs):
"""
Creates a dict containing the noise threshold, mean noise level,
the variance of the noise, the number of noise values in the spectrum,
and the boundaries of the main signal peak, if there is one
Args:
data (dict): data container, containing data['var'] of dimension (n_ts, n_range, n_Doppler_bins)
**n_std_deviations (float): threshold = number of standard deviations
above mean noise floor, default: threshold is the value of the first
non-noise value
Returns:
dict with noise floor estimation for all time and range points
"""
spectra3D = data['var'].copy()
n_ts, n_rg, n_vel = spectra3D.shape
if 'rg_offsets' in kwargs:
rg_offsets = np.copy(kwargs['rg_offsets'])
rg_offsets[0] = -1
rg_offsets[-1] += 1
else:
rg_offsets = [-1, n_rg + 1]
no_av = kwargs['no_av'] if 'no_av' in kwargs else [1]
# fill values needs to be masked for noise removal otherwise wrong results
spectra3D[spectra3D == -999.0] = np.nan
    # Estimate Noise Floor for all chirps, time steps and range gates aka. for all pixels
# Algorithm used: Hildebrand & Sekhon
# allocate numpy arrays
noise_est = {
'mean': np.zeros((n_ts, n_rg), dtype=np.float32),
'threshold': np.zeros((n_ts, n_rg), dtype=np.float32),
'variance': np.zeros((n_ts, n_rg), dtype=np.float32),
'numnoise': np.zeros((n_ts, n_rg), dtype=np.int32),
'signal': np.full((n_ts, n_rg), fill_value=True),
}
# gather noise level etc. for all chirps, range gates and times
logger.info(f'Noise estimation for uncompressed spectra....... ')
noise_free = np.isnan(spectra3D).any(axis=2)
iterator = product(range(n_ts), range(n_rg)) if logger.level > 20 else tqdm(product(range(n_ts), range(n_rg)), total=n_ts * n_rg, unit=' spectra')
for iT, iR in iterator:
if noise_free[iT, iR]: continue
mean, thresh, var, nnoise = estimate_noise_hs74(
spectra3D[iT, iR, :],
navg=no_av[getnointerval(rg_offsets, iR) - 1],
std_div=n_std
)
noise_est['mean'][iT, iR] = mean
noise_est['variance'][iT, iR] = var
noise_est['numnoise'][iT, iR] = nnoise
noise_est['threshold'][iT, iR] = thresh
noise_est['signal'][iT, iR] = nnoise < n_vel
return noise_est
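# Hedged usage sketch of noise_estimation_uncompressed_data (not part of the original
# module): a minimal stand-in container with only data['var'] of shape
# (time, range, n_Doppler_bins); all values are synthetic.
def _demo_noise_estimation():
    rng = np.random.RandomState(1)
    spec = rng.rayleigh(1e-3, (2, 3, 128))          # positive noise floor
    spec[:, :, 60:66] += 0.2                        # add a signal peak everywhere
    container = {'var': spec}                       # hypothetical minimal container
    return noise_estimation_uncompressed_data(container, n_std=6.0, no_av=[1])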
def mira_noise_calculation(radar_const, SNRCorFaCo, HSDco, noise_power_co, range_ka):
"""
Args:
radar_const:
SNRCorFaCo:
HSDco:
noise_power_co:
range_ka:
Returns:
noise level in linear units
"""
noise_ka_lin = np.zeros(HSDco.shape)
for iT in range(len(radar_const)):
        noise_ka_lin[iT, :] = radar_const[iT] * SNRCorFaCo[iT, :] * HSDco[iT, :] / noise_power_co[iT] * (range_ka / 5000.) ** 2.
return noise_ka_lin
def getnointerval(intervals, i):
return bisect.bisect_left(intervals, i)
def seconds_to_fstring(time_diff):
return datetime.datetime.fromtimestamp(time_diff).strftime("%M:%S")
def despeckle2D(data, min_perc=80.0):
"""This function is used to remove all spectral lines for one time-range-pixel if surrounding% of the sourounding pixels are fill_values.
Args:
data (numpy.array): cloud radar Doppler spectra, dimensions: (time, range, velocity), unit: [mm6 mm-3 m s-1]
Keyword Args:
min_perc (float): minimum percentage value of neighbouring pixel, that need to be above the noise threshold
Returns:
mask (numpy.array, bool): where True = fill_value, and False = signal, dimensions: (time, range, velocity)
"""
# there must be high levels of reflection/scattering in this region to produce ghost echos
mask_2D = despeckle(np.all(data <= 0.0, axis=2), min_perc)
mask = data <= 0.0
for iBin in range(data.shape[2]):
mask[:, :, iBin] = mask_2D
return mask
def filter_ghost_1(data, rg, vel, offset, dBZ_thresh=-20.0, reduce_by=1.5, **kwargs):
"""This function is used to remove certain spectral lines "speckle ghost echoes" from all chirps of RPG FMCW 94GHz cloud radar spectra.
The speckle occur usually near the maximum unambiguous Doppler velocity.
Args:
data (numpy.array): cloud radar Doppler spectra, dimensions: (time, range, velocity), unit: [mm6 mm-3 m s-1]
rg (numpy.array): range values, unit [m]
vel (list of numpy.arrays): contains the Doppler velocity values for each chirp, dimension = n_chirps
offset (list, integer): range indices where the chirp changes takes place, dimension = n_chirps + 1 (starting with 0)
dBZ_thresh (float): values below will be considered as ghost echo
reduce_by (float): reduce the maximum unambiguous Doppler velocity by this amount in [m s-1]
**ignore_chirp1 (bool): Don't filter ghost echos of this type for first chirp (set to True if not given)
**Z_thresh (float): Ze in dBZ to be exceeded in lowest 500 m range for filter to be activated
Returns:
mask (numpy.array, bool): where True = fill_value, and False = signal, dimensions: (time, range, velocity)
"""
ignore_chirp1 = True if not 'ignore_chirp1' in kwargs else kwargs['ignore_chirp1']
# there must be high levels of reflection/scattering in this region to produce ghost echos
RG_MIN_, RG_MAX_ = 0.0, 500.0 # range interval
mask = data <= 0.0
reflectivity_thresh = 0.0 if not 'Z_thresh' in kwargs else kwargs['Z_thresh']
    # check if a high signal occurred at 0 m - 500 m altitude (indicator for speckle ghost echoes above)
dBZ_max = np.max(data[:, argnearest(rg, RG_MIN_):argnearest(rg, RG_MAX_), :], axis=2)
ts_to_mask = np.any(dBZ_max >= z2lin(reflectivity_thresh), axis=1)
signal_min = z2lin(dBZ_thresh)
n_vel = data.shape[2]
for iC in range(len(vel)):
if iC < 1 and ignore_chirp1:
continue # exclude first chirp because ghost is hidden under real signal anyway
idx_max_vel_new = argnearest(vel[iC], vel[iC][-1] - reduce_by)
for iV in range(n_vel - idx_max_vel_new):
mask[ts_to_mask, offset[iC]:offset[iC + 1], iV] = data[ts_to_mask, offset[iC]:offset[iC + 1], iV] < signal_min
for iV in range(idx_max_vel_new, n_vel):
mask[ts_to_mask, offset[iC]:offset[iC + 1], iV] = data[ts_to_mask, offset[iC]:offset[iC + 1], iV] < signal_min
return mask
def filter_ghost_2(data, rg, SL, first_offset, dBZ_thresh=-5.0, reduce_by=10.0):
"""This function is used to remove curtain-like ghost echoes
from the first chirp of RPG FMCW 94GHz cloud radar spectra.
Args:
data (numpy.array): cloud radar Doppler spectra, dimensions: (time, range, velocity), unit: [mm6 mm-3 m s-1]
rg (numpy.array): range values, unit [m]
SL (numpy.array): sensitivity limit, dimension: (time, range), unit: [mm6 mm-3]
first_offset (integer): range index where the first chirp change takes place
dBZ_thresh (float): minimum threshold in [dBZ], where ghost echos can be assumed
reduce_by (float): reduce the sensitivity limit by this amount of [dBZ]
Returns:
mask (numpy.array, bool): where True = fill_value, and False = signal, dimensions: (time, range, velocity)
"""
# there must be high levels of reflection/scattering in this region to produce ghost echos
RG_MIN_, RG_MAX_ = 1500.0, 6000.0 # range interval
mask = data <= 0.0
    # check if a high signal occurred at 1500 m - 6000 m altitude (indicator for curtain-like ghost echoes)
dBZ_max = np.max(data[:, argnearest(rg, RG_MIN_):argnearest(rg, RG_MAX_), :], axis=2)
ts_to_mask = np.any(dBZ_max >= z2lin(dBZ_thresh), axis=1)
sens_lim = SL * reduce_by
for iT, mask_iT in enumerate(ts_to_mask):
if mask_iT:
for iV in range(data.shape[2]):
mask[iT, :first_offset, iV] = data[iT, :first_offset, iV] < sens_lim[iT, :first_offset]
return mask
def split_by_compression_status(var, mask):
indices = np.nonzero(mask[1:] != mask[:-1])[0] + 1
split_int = np.split(var, indices)
return split_int[0::2] if mask[0] else split_int[1::2]
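# Illustrative example (not part of the original module): split_by_compression_status
# returns the consecutive runs of `var` for which `mask` is True.
def _demo_split_by_compression_status():
    var = np.arange(8)
    mask = np.array([True, True, False, False, True, True, True, False])
    return split_by_compression_status(var, mask)   # -> [array([0, 1]), array([4, 5, 6])]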
def spectra2moments(ZSpec, paraminfo, **kwargs):
"""
This routine calculates the radar moments: reflectivity, mean Doppler velocity, spectrum width, skewness and
kurtosis from the level 0 spectrum files of the 94 GHz RPG cloud radar.
Args:
        ZSpec (dict): list containing the dicts for each chirp of the RPG-FMCW Doppler cloud radar
paraminfo (dict): information from params_[campaign].toml for the system LIMRAD94
Returns:
container_dict (dict): dictionary of larda containers, including larda container for Ze, VEL, sw, skew, kurt
"""
# initialize variables:
n_ts, n_rg, n_vel = ZSpec['VHSpec']['var'].shape
n_chirps = ZSpec['n_ch']
Z = np.full((n_ts, n_rg), np.nan)
V = np.full((n_ts, n_rg), np.nan)
SW = np.full((n_ts, n_rg), np.nan)
SK = np.full((n_ts, n_rg), np.nan)
K = np.full((n_ts, n_rg), np.nan)
spec_lin = ZSpec['VHSpec']['var'].copy()
mask = spec_lin <= 0.0
spec_lin[mask] = 0.0
# combine the mask for "contains signal" with "signal has more than 1 spectral line"
mask1 = np.all(mask, axis=2)
mask2 = ZSpec['edges'][:, :, 1] - ZSpec['edges'][:, :, 0] <= 0
mask3 = ZSpec['edges'][:, :, 1] - ZSpec['edges'][:, :, 0] >= n_vel
mask = mask1 * mask2 * mask3
for iC in range(n_chirps):
tstart = time.time()
for iR in range(ZSpec['rg_offsets'][iC], ZSpec['rg_offsets'][iC + 1]): # range dimension
for iT in range(n_ts): # time dimension
if mask[iT, iR]: continue
lb, rb = ZSpec['edges'][iT, iR, :]
Z[iT, iR], V[iT, iR], SW[iT, iR], SK[iT, iR], K[iT, iR] = \
radar_moment_calculation(spec_lin[iT, iR, lb:rb], ZSpec['vel'][iC][lb:rb], ZSpec['DoppRes'][iC])
logger.info(f'Chirp {iC + 1} Moments Calculated, elapsed time = {seconds_to_fstring(time.time() - tstart)} [min:sec]')
moments = {'Ze': Z, 'VEL': V, 'sw': SW, 'skew': SK, 'kurt': K}
# create the mask where invalid values have been encountered
invalid_mask = np.full((ZSpec['VHSpec']['var'].shape[:2]), True)
invalid_mask[np.where(Z > 0.0)] = False
# despeckle the moments
if 'despeckle' in kwargs and kwargs['despeckle']:
tstart = time.time()
# copy and convert from bool to 0 and 1, remove a pixel if more than 20 neighbours are invalid (5x5 grid)
new_mask = despeckle(invalid_mask, 80.)
invalid_mask[new_mask] = True
logger.info(f'Despeckle done, elapsed time = {seconds_to_fstring(time.time() - tstart)} [min:sec]')
# mask invalid values with fill_value = -999.0
for mom in moments.keys():
moments[mom][invalid_mask] = -999.0
# build larda containers from calculated moments
container_dict = {mom: make_container_from_spectra([ZSpec], moments[mom], paraminfo[mom], invalid_mask, 'VHSpec') for mom in moments.keys()}
return container_dict
def heave_correction(moments, date, path_to_seapath="/projekt2/remsens/data_new/site-campaign/rv_meteor-eurec4a/instruments/RV-METEOR_DSHIP",
mean_hr=True, only_heave=False, use_cross_product=True, transform_to_earth=True, add=False):
"""Correct mean Doppler velocity for heave motion of ship (RV-Meteor)
Calculate heave rate from seapath measurements and create heave correction array. If Doppler velocity is given as an
input, correct it and return an array with the corrected Doppler velocities.
Without Doppler Velocity input, only the heave correction array is returned.
Args:
moments: LIMRAD94 moments container as returned by spectra2moments in spec2mom_limrad94.py, C1/2/3_Range,
SeqIntTime and Inc_ElA (for time (ts)) from LV1 file
date (datetime.datetime): object with date of current file
path_to_seapath (string): path where seapath measurement files (daily dat files) are stored
mean_hr (bool): whether to use the mean heave rate over the SeqIntTime or the heave rate at the start time of the chirp
only_heave (bool): whether to use only heave to calculate the heave rate or include pitch and roll induced heave
use_cross_product (bool): whether to use the cross product like Hannes Griesche https://doi.org/10.5194/amt-2019-434
transform_to_earth (bool): transform cross product to earth coordinate system as
described in https://repository.library.noaa.gov/view/noaa/17400
add (bool): whether to add the heave rate or subtract it
Returns:
A number of variables
        - **new_vel** (*ndarray*): corrected Doppler velocities, same shape as moments["VEL"]["var"] or list if no Doppler
Velocity is given;
- **heave_corr** (*ndarray*): heave rate closest to each radar timestep for each height bin, same shape as
moments["VEL"]["var"];
- **seapath_out** (*pd.DataFrame*): data frame with all heave information from the closest time steps to the chirps
"""
####################################################################################################################
# Data Read in
####################################################################################################################
start = time.time()
logger.info(f"Starting heave correction for {date:%Y-%m-%d}")
seapath = read_seapath(date, path_to_seapath)
####################################################################################################################
# Calculating Heave Rate
####################################################################################################################
seapath = calc_heave_rate(seapath, only_heave=only_heave, use_cross_product=use_cross_product,
transform_to_earth=transform_to_earth)
####################################################################################################################
# Calculating heave correction array and add to Doppler velocity
####################################################################################################################
# make input container to calc_heave_corr function
container = {'C1Range': moments['C1Range'], 'C2Range': moments['C2Range'], 'C3Range': moments['C3Range'],
'SeqIntTime': moments['SeqIntTime'], 'ts': moments['Inc_ElA']['ts']}
heave_corr, seapath_out = calc_heave_corr(container, date, seapath, mean_hr=mean_hr)
try:
if add:
# create new Doppler velocity by adding the heave rate of the closest time step
new_vel = moments['VEL']['var'] + heave_corr
elif not add:
# create new Doppler velocity by subtracting the heave rate of the closest time step
new_vel = moments['VEL']['var'] - heave_corr
# set masked values back to -999 because they also get corrected
new_vel[moments['VEL']['mask']] = -999
logger.info(f"Done with heave corrections in {time.time() - start:.2f} seconds")
return new_vel, heave_corr, seapath_out
except KeyError:
logger.info(f"No input Velocities found! Cannot correct Doppler Velocity.\n Returning only heave_corr array!")
logger.info(f"Done with heave correction calculation only in {time.time() - start:.2f} seconds")
new_vel = ["I'm an empty list!"] # create an empty list to return the same number of variables
return new_vel, heave_corr, seapath_out
def heave_correction_spectra(data, date,
path_to_seapath="/projekt2/remsens/data_new/site-campaign/rv_meteor-eurec4a/instruments/RV-METEOR_DSHIP",
mean_hr=True, only_heave=False, use_cross_product=True, transform_to_earth=True, add=False,
**kwargs):
"""Shift Doppler spectra to correct for heave motion of ship (RV-Meteor)
Calculate heave rate from seapath measurements and create heave correction array. Translate the heave correction to
    a number of spectral bins by which to shift each spectrum. If spectra are given, shift them and return a 3D array with the
    shifted spectra.
    Without spectra input, only the heave correction array and the array with the number of bins to shift are returned.
Args:
data: LIMRAD94 data container filled with spectra and C1/2/3_Range, SeqIntTime, MaxVel, DoppLen from LV1 file;
for Claudia's version the mean Doppler velocity is also needed
date (datetime.datetime): object with date of current file
path_to_seapath (string): path where seapath measurement files (daily dat files) are stored
mean_hr (bool): whether to use the mean heave rate over the SeqIntTime or the heave rate at the start time of the chirp
only_heave (bool): whether to use only heave to calculate the heave rate or include pitch and roll induced heave
use_cross_product (bool): whether to use the cross product like Hannes Griesche https://doi.org/10.5194/amt-2019-434
transform_to_earth (bool): transform cross product to earth coordinate system as described in https://repository.library.noaa.gov/view/noaa/17400
add (bool): whether to add the heave rate or subtract it
**kwargs:
shift (int): number of time steps to shift seapath data
version (str): which version to use, 'ca' or 'jr'
Returns:
A number of variables
        - **new_spectra** (*ndarray*): corrected Doppler spectra, same shape as data["VHSpec"]["var"] or list if no Doppler
Spectra are given;
- **heave_corr** (*ndarray*): heave rate closest to each radar timestep for each height bin, shape = (time x range);
- **seapath_out** (*pd.DataFrame*): data frame with all heave information from the closest time steps to the chirps
"""
# unpack kwargs
version = kwargs['version'] if 'version' in kwargs else 'jr'
shift = kwargs['shift'] if 'shift' in kwargs else 0
####################################################################################################################
# Data Read in
####################################################################################################################
start = time.time()
logger.info(f"Starting heave correction for {date:%Y-%m-%d}")
if version == 'ca':
seapath = read_seapath(date, path_to_seapath, output_format='xarray')
seapath = f_shiftTimeDataset(seapath)
elif version == 'jr':
seapath = read_seapath(date, path_to_seapath)
####################################################################################################################
# Calculating Heave Rate
####################################################################################################################
if version == 'ca':
seapath = calc_heave_rate_claudia(seapath)
elif version == 'jr':
seapath = calc_heave_rate(seapath, only_heave=only_heave, use_cross_product=use_cross_product,
transform_to_earth=transform_to_earth)
####################################################################################################################
# Calculate time shift between radar and ship and shift radar time or seapath time depending on version
####################################################################################################################
chirp_ts = calc_chirp_timestamps(data['VHSpec']['ts'], date, version='center')
rg_borders = get_range_bin_borders(3, data)
# transform bin boundaries, necessary because python starts counting at 0
rg_borders_id = rg_borders - np.array([0, 1, 1, 1])
# setting the length of the mean doppler velocity time series for calculating time shift
n_ts_run = np.int(10 * 60 / 1.5) # 10 minutes with time res of 1.5 s
if version == 'ca':
# here seapath is a xarray DataSet
seapath = seapath.dropna('time_shifted') # drop nans for interpolation
seapath_time = seapath['time_shifted'].values.astype(float) / 10 ** 9 # get nan free time in seconds
elif version == 'jr':
# here seapath is a pandas DataFrame
seapath = seapath.dropna()
seapath_time = seapath.index.values.astype(float) / 10 ** 9 # get nan free time in seconds
# prepare interpolation function for angular velocity
Cs = CubicSpline(seapath_time, seapath['heave_rate_radar'])
plot_path = f'/projekt2/remsens/data_new/site-campaign/rv_meteor-eurec4a/instruments/LIMRAD94/cloudnet_input_heave_cor_{version}/time_shift_plots'
delta_t_min = -3. # minimum time shift
delta_t_max = 3. # maximum time shift
# find a 10 minute mdv time series in every hour of radar data and for each chirp if possible
# calculate time shift for each hour and each chirp
chirp_ts_shifted, time_shift_array = calc_shifted_chirp_timestamps(data['mdv']['ts'], data['mdv']['var'],
chirp_ts, rg_borders_id, n_ts_run, Cs,
no_chirps=3, pathFig=plot_path,
delta_t_min=delta_t_min,
delta_t_max=delta_t_max,
date=date, plot_fig=True)
if shift != 0:
seapath = shift_seapath(seapath, shift)
else:
logger.debug(f"Shift is {shift}! Seapath data is not shifted!")
####################################################################################################################
# Calculating heave correction array and translate to number of Doppler bin shifts
####################################################################################################################
if version == 'ca':
# calculate the correction matrix
heave_corr = calc_corr_matrix_claudia(data['mdv']['ts'], data['mdv']['rg'], rg_borders_id, chirp_ts_shifted, Cs)
seapath_out = seapath
elif version == 'jr':
# make input container for calc_heave_corr function
container = {'C1Range': data['C1Range'], 'C2Range': data['C2Range'], 'C3Range': data['C3Range'],
'SeqIntTime': data['SeqIntTime'], 'ts': data['VHSpec']['ts'], 'MaxVel': data['MaxVel'],
'DoppLen': data["DoppLen"]}
heave_corr, seapath_out = calc_heave_corr(container, chirp_ts_shifted, seapath, mean_hr=mean_hr)
no_chirps = len(data['DoppLen'])
range_bins = get_range_bin_borders(no_chirps, data)
doppler_res = calc_dopp_res(data['MaxVel'], data['DoppLen'], no_chirps, range_bins)
n_dopp_bins_shift, heave_corr = heave_rate_to_spectra_bins(heave_corr, doppler_res)
####################################################################################################################
# Shifting spectra and writing to new 3D array
####################################################################################################################
try:
# correct spectra for heave rate by moving it by the corresponding number of Doppler bins
spectra = data['VHSpec']['var']
new_spectra = np.empty_like(spectra)
for iT in range(data['n_ts']):
# loop through time steps
for iR in range(data['n_rg']):
# loop through range gates
# TODO: check if mask is True and skip, although masked shifted spectra do not introduce any error,
# this might speed up things...
try:
shift = int(n_dopp_bins_shift[iT, iR])
except ValueError:
logger.debug(f"shift at [{iT}, {iR}] is NaN, set to zero")
shift = 0
spectrum = spectra[iT, iR, :]
if add:
new_spec = np.roll(spectrum, shift)
elif not add:
new_spec = np.roll(spectrum, -shift)
new_spectra[iT, iR, :] = new_spec
logger.info(f"Done with heave corrections in {time.time() - start:.2f} seconds")
return new_spectra, heave_corr, n_dopp_bins_shift, seapath_out, time_shift_array
except KeyError:
logger.info(f"No input spectra found! Cannot shift spectra.\n Returning only heave_corr and n_dopp_bins_shift array!")
logger.info(f"Done with heave correction calculation only in {time.time() - start:.2f} seconds")
new_spectra = ["I'm an empty list!"] # create an empty list to return the same number of variables
return new_spectra, heave_corr, n_dopp_bins_shift, seapath_out, time_shift_array
def read_seapath(date, path="/projekt2/remsens/data_new/site-campaign/rv_meteor-eurec4a/instruments/RV-METEOR_DSHIP",
**kwargs):
"""
Read in daily Seapath measurements from RV Meteor from .dat files to a pandas.DataFrame
Args:
date (datetime.datetime): object with date of current file
path (str): path to seapath files
kwargs for read_csv
output_format (str): whether a pandas data frame or a xarray dataset is returned
Returns:
seapath (DataFrame): DataFrame with Seapath measurements
"""
# Seapath attitude and heave data 1 or 10 Hz, choose file depending on date
start = time.time()
# unpack kwargs
nrows = kwargs['nrows'] if 'nrows' in kwargs else None
skiprows = kwargs['skiprows'] if 'skiprows' in kwargs else (1, 2)
output_format = kwargs['output_format'] if 'output_format' in kwargs else 'pandas'
if date < datetime.datetime(2020, 1, 27):
file = f"{date:%Y%m%d}_DSHIP_seapath_1Hz.dat"
else:
file = f"{date:%Y%m%d}_DSHIP_seapath_10Hz.dat"
# set encoding and separator, skip the rows with the unit and type of measurement
seapath = pd.read_csv(f"{path}/{file}", encoding='windows-1252', sep="\t", skiprows=skiprows, na_values=-999.00,
index_col='date time', nrows=nrows)
# transform index to datetime
seapath.index = pd.to_datetime(seapath.index, infer_datetime_format=True)
seapath.index.name = 'time'
seapath.columns = ['yaw', 'heave', 'pitch', 'roll'] # rename columns
logger.info(f"Done reading in Seapath data in {time.time() - start:.2f} seconds")
if output_format == 'pandas':
pass
elif output_format == 'xarray':
seapath = seapath.to_xarray()
return seapath
def read_dship(date, **kwargs):
"""Read in 1 Hz DSHIP data and return pandas DataFrame
Args:
date (str): yyyymmdd (eg. 20200210)
**kwargs: kwargs for pd.read_csv (not all implemented) https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html
Returns:
pd.DataFrame with 1 Hz DSHIP data
"""
tstart = time.time()
path = kwargs['path'] if 'path' in kwargs else "/projekt2/remsens/data_new/site-campaign/rv_meteor-eurec4a/instruments/RV-METEOR_DSHIP"
skiprows = kwargs['skiprows'] if 'skiprows' in kwargs else (1, 2)
nrows = kwargs['nrows'] if 'nrows' in kwargs else None
cols = kwargs['cols'] if 'cols' in kwargs else None # always keep the 0th column (datetime column)
file = f"{path}/RV-Meteor_DSHIP_all_1Hz_{date}.dat"
# set encoding and separator, skip the rows with the unit and type of measurement, set index column
df = pd.read_csv(file, encoding='windows-1252', sep="\t", skiprows=skiprows, index_col='date time', nrows=nrows,
usecols=cols)
df.index = pd.to_datetime(df.index, infer_datetime_format=True)
logger.info(f"Done reading in DSHIP data in {time.time() - tstart:.2f} seconds")
return df
def f_shiftTimeDataset(dataset):
"""
author: <NAME>
date: 25 november 2020
    goal : shift the time variable of the dataset to the central value of each time interval
    input:
        dataset: xarray dataset
    output:
        dataset: xarray dataset with the shifted time array added as the coordinate 'time_shifted' and the
            variables now referring to the shifted time array
"""
# reading time array
time = dataset['time'].values
# calculating deltaT using consecutive time stamps
deltaT = time[2] - time[1]
# print('delta T for the selected dataset: ', deltaT)
# defining additional coordinate to the dataset
dataset.coords['time_shifted'] = dataset['time'] + 0.5 * deltaT
# exchanging coordinates in the dataset
datasetNew = dataset.swap_dims({'time': 'time_shifted'})
return (datasetNew)
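# --- Illustration (not part of the original module): minimal sketch of f_shiftTimeDataset on a synthetic dataset.
# The new 'time_shifted' coordinate sits half a time step after 'time'; requires xarray, all values are made up.
def _demo_shift_time_dataset():
    import numpy as np
    import pandas as pd
    import xarray as xr
    times = pd.to_datetime(["2020-02-05 00:00:00", "2020-02-05 00:00:01",
                            "2020-02-05 00:00:02", "2020-02-05 00:00:03"])
    ds = xr.Dataset({"heave": ("time", np.arange(4.0))}, coords={"time": times})
    ds_shifted = f_shiftTimeDataset(ds)
    # each shifted time stamp lies 0.5 s (half the 1 s time step) after the original one
    assert (ds_shifted["time_shifted"].values - ds_shifted["time"].values == np.timedelta64(500, "ms")).all()
    return ds_shifted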
def calc_heave_rate(seapath, x_radar=-11, y_radar=4.07, z_radar=15.8, only_heave=False, use_cross_product=True,
transform_to_earth=True):
"""
Calculate heave rate at a certain location of a ship with the measurements of the INS
Args:
seapath (pd.DataFrame): Data frame with heading, roll, pitch and heave as columns
x_radar (float): x position of location with respect to INS in meters
y_radar (float): y position of location with respect to INS in meters
z_radar (float): z position of location with respect to INS in meters
only_heave (bool): whether to use only heave to calculate the heave rate or include pitch and roll induced heave
use_cross_product (bool): whether to use the cross product like <NAME> https://doi.org/10.5194/amt-2019-434
transform_to_earth (bool): transform cross product to earth coordinate system as described in https://repository.library.noaa.gov/view/noaa/17400
Returns:
        seapath (pd.DataFrame): Data frame as input with the additional column "heave_rate_radar" (and, for the
            simple geometric approach, also "radar_heave", "pitch_heave" and "roll_heave")
"""
t1 = time.time()
logger.info("Calculating Heave Rate...")
# angles in radians
pitch = np.deg2rad(seapath["pitch"])
roll = np.deg2rad(seapath["roll"])
yaw = np.deg2rad(seapath["yaw"])
# time delta between two time steps in seconds
d_t = np.ediff1d(seapath.index).astype('float64') / 1e9
if not use_cross_product:
logger.info("using a simple geometric approach")
if not only_heave:
logger.info("using also the roll and pitch induced heave")
pitch_heave = x_radar * np.tan(pitch)
roll_heave = y_radar * np.tan(roll)
elif only_heave:
logger.info("using only the ships heave")
pitch_heave = 0
roll_heave = 0
# sum up heave, pitch induced and roll induced heave
seapath["radar_heave"] = seapath["heave"] + pitch_heave + roll_heave
# add pitch and roll induced heave to data frame to include in output for quality checking
seapath["pitch_heave"] = pitch_heave
seapath["roll_heave"] = roll_heave
# ediff1d calculates the difference between consecutive elements of an array
# heave difference / time difference = heave rate
heave_rate = np.ediff1d(seapath["radar_heave"]) / d_t
else:
logger.info("Using the cross product approach from <NAME>")
# change of angles with time
d_roll = np.ediff1d(roll) / d_t # phi
d_pitch = np.ediff1d(pitch) / d_t # theta
d_yaw = np.ediff1d(yaw) / d_t # psi
seapath_heave_rate = np.ediff1d(seapath["heave"]) / d_t # heave rate at seapath
pos_radar = np.array([x_radar, y_radar, z_radar]) # position of radar as a vector
ang_rate = np.array([d_roll, d_pitch, d_yaw]).T # angle velocity as a matrix
pos_radar_exp = np.tile(pos_radar, (ang_rate.shape[0], 1)) # expand to shape of ang_rate
cross_prod = np.cross(ang_rate, pos_radar_exp) # calculate cross product
if transform_to_earth:
logger.info("Transform into Earth Coordinate System")
phi, theta, psi = roll, pitch, yaw
a1 = np.cos(theta) * np.cos(psi)
a2 = -1 * np.cos(phi) * np.sin(psi) + np.sin(theta) * np.cos(psi) * np.sin(phi)
a3 = np.sin(phi) * np.sin(psi) + np.cos(phi) * np.sin(theta) * np.cos(psi)
b1 = np.cos(theta) * np.sin(psi)
b2 = np.cos(phi) * np.cos(psi) + np.sin(theta) * np.sin(phi) * np.sin(psi)
b3 = -1 * np.cos(psi) * np.sin(phi) + np.cos(phi) * np.sin(theta) * np.sin(psi)
c1 = -1 * np.sin(theta)
c2 = np.cos(theta) * np.sin(phi)
c3 = np.cos(theta) * np.cos(phi)
Q_T = np.array([[a1, a2, a3], [b1, b2, b3], [c1, c2, c3]])
# remove first entry of Q_T to match dimension of cross_prod
Q_T = Q_T[:, :, 1:]
cross_prod = np.einsum('ijk,kj->kj', Q_T, cross_prod)
heave_rate = seapath_heave_rate + cross_prod[:, 2] # calculate heave rate
# add heave rate to seapath data frame
# the first calculated heave rate corresponds to the second time step
heave_rate = pd.DataFrame({'heave_rate_radar': heave_rate}, index=seapath.index[1:])
seapath = seapath.join(heave_rate)
logger.info(f"Done with heave rate calculation in {time.time() - t1:.2f} seconds")
return seapath
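# --- Illustration (not part of the original module): calc_heave_rate on synthetic Seapath-like data.
# The data frame mimics the output of read_seapath (1 Hz, columns yaw/heave/pitch/roll); all numbers are made up
# and the radar position defaults of the function are used.
def _demo_calc_heave_rate():
    import numpy as np
    import pandas as pd
    t = np.arange(20.0)
    index = pd.Timestamp("2020-02-05") + pd.to_timedelta(t, unit="s")
    seapath = pd.DataFrame({"yaw": 180.0 + 5.0 * np.sin(2 * np.pi * t / 60.0),
                            "heave": 0.5 * np.sin(2 * np.pi * t / 8.0),
                            "pitch": 2.0 * np.sin(2 * np.pi * t / 10.0),
                            "roll": 3.0 * np.cos(2 * np.pi * t / 12.0)}, index=index)
    seapath = calc_heave_rate(seapath)  # cross product approach, transformed to the earth coordinate system
    assert "heave_rate_radar" in seapath.columns
    assert seapath["heave_rate_radar"].iloc[1:].notna().all()  # the first value is NaN (no rate for the first stamp)
    return seapath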
def f_calcRMatrix(rollShipArr, pitchShipArr, yawShipArr, NtimeShip):
"""
author: <NAME>
date : 27/10/2020
goal: function to calculate R matrix given roll, pitch, yaw
input:
rollShipArr: roll array in degrees
pitchShipArr: pitch array in degrees
yawShipArr: yaw array in degrees
NtimeShip: dimension of time array for the definition of R_inv as [3,3,dimTime]
output:
R[3,3,Dimtime]: array of rotational matrices, one for each time stamp
"""
# calculation of the rotational matrix for each time stamp of the ship data for the day
cosTheta = np.cos(np.deg2rad(rollShipArr))
senTheta = np.sin(np.deg2rad(rollShipArr))
cosPhi = np.cos(np.deg2rad(pitchShipArr))
senPhi = np.sin(np.deg2rad(pitchShipArr))
cosPsi = np.cos(np.deg2rad(yawShipArr))
senPsi = np.sin(np.deg2rad(yawShipArr))
R = np.zeros([3, 3, NtimeShip])
A = np.zeros([3, 3, NtimeShip])
B = np.zeros([3, 3, NtimeShip])
C = np.zeros([3, 3, NtimeShip])
R.fill(np.nan)
A.fill(0.)
B.fill(0.)
C.fill(0.)
# indexing for the matrices
# [0,0] [0,1] [0,2]
# [1,0] [1,1] [1,2]
# [2,0] [2,1] [2,2]
A[0, 0, :] = 1
A[1, 1, :] = cosTheta
A[1, 2, :] = -senTheta
A[2, 1, :] = senTheta
A[2, 2, :] = cosTheta
B[0, 0, :] = cosPhi
B[1, 1, :] = 1
B[2, 2, :] = cosPhi
B[0, 2, :] = senPhi
B[2, 0, :] = -senPhi
C[0, 0, :] = cosPsi
C[0, 1, :] = -senPsi
C[2, 2, :] = 1
C[1, 0, :] = senPsi
C[1, 1, :] = cosPsi
# calculation of the rotation matrix
A = np.moveaxis(A, 2, 0)
B = np.moveaxis(B, 2, 0)
C = np.moveaxis(C, 2, 0)
R = np.matmul(C, np.matmul(B, A))
R = np.moveaxis(R, 0, 2)
return R
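# --- Illustration (not part of the original module): sanity check of f_calcRMatrix with made-up attitude angles.
# Every returned matrix should be a proper rotation matrix, i.e. orthonormal with determinant +1.
def _demo_rotation_matrix():
    import numpy as np
    roll = np.array([0.0, 5.0, -3.0])    # degrees
    pitch = np.array([0.0, 2.0, 1.5])
    yaw = np.array([0.0, 45.0, 270.0])
    R = f_calcRMatrix(roll, pitch, yaw, len(roll))
    for i in range(R.shape[2]):
        assert np.allclose(R[:, :, i] @ R[:, :, i].T, np.eye(3))
        assert np.isclose(np.linalg.det(R[:, :, i]), 1.0)
    return R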
def calc_heave_rate_claudia(data, x_radar=-11, y_radar=4.07, z_radar=-15.8):
"""Calculate heave rate at a certain location on a ship according to <NAME>'s approach
Args:
data (xr.DataSet): Data Set with heading, roll, pitch and heave as columns
x_radar (float): x position of location with respect to INS in meters
y_radar (float): y position of location with respect to INS in meters
z_radar (float): z position of location with respect to INS in meters
    Returns: xr.Dataset with the additional variables w_rot, heave_rate and heave_rate_radar
"""
r_radar = [x_radar, y_radar, z_radar]
# calculation of w_ship
heave = data['heave'].values
timeShip = data['time_shifted'].values.astype('float64') / 10 ** 9
w_ship = np.diff(heave, prepend=np.nan) / np.diff(timeShip, prepend=np.nan)
# calculating rotational terms
roll = data['roll'].values
pitch = data['pitch'].values
yaw = data['yaw'].values
NtimeShip = len(timeShip)
r_ship = np.zeros((3, NtimeShip))
# calculate the position of the radar on the ship r_ship:
R = f_calcRMatrix(roll, pitch, yaw, NtimeShip)
for i in range(NtimeShip):
r_ship[:, i] = np.dot(R[:, :, i], r_radar)
# calculating vertical component of the velocity of the radar on the ship (v_rot)
w_rot = np.diff(r_ship[2, :], prepend=np.nan) / np.diff(timeShip, prepend=np.nan)
# calculating total ship velocity at radar
heave_rate = w_rot + w_ship
data['w_rot'] = (('time_shifted'), w_rot)
data['heave_rate'] = (('time_shifted'), w_ship)
data['heave_rate_radar'] = (('time_shifted',), heave_rate)
return data
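# --- Illustration (not part of the original module): calc_heave_rate_claudia on a small synthetic dataset.
# The dataset mimics the Seapath data converted with .to_xarray(); f_shiftTimeDataset provides the 'time_shifted'
# coordinate the function expects. All values are made up; requires xarray.
def _demo_calc_heave_rate_claudia():
    import numpy as np
    import pandas as pd
    import xarray as xr
    n = 10
    t = np.arange(n, dtype=float)
    times = pd.Timestamp("2020-02-05") + pd.to_timedelta(t, unit="s")
    ds = xr.Dataset({"heave": ("time", 0.5 * np.sin(2 * np.pi * t / 8.0)),
                     "roll": ("time", 3.0 * np.cos(2 * np.pi * t / 12.0)),
                     "pitch": ("time", 2.0 * np.sin(2 * np.pi * t / 10.0)),
                     "yaw": ("time", np.full(n, 180.0))},
                    coords={"time": times})
    ds = f_shiftTimeDataset(ds)          # adds the 'time_shifted' coordinate used below
    ds = calc_heave_rate_claudia(ds)
    assert "heave_rate_radar" in ds      # w_rot, heave_rate and heave_rate_radar were added
    return ds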
def find_mdv_time_series(mdv_values, radar_time, n_ts_run):
"""
author: <NAME>, <NAME>
Identify, given a mean doppler velocity matrix, a sequence of length n_ts_run of values in the matrix
at a given height that contains the minimum possible amount of nan values in it.
Args:
        mdv_values (ndarray): time x height matrix of Doppler Velocity
radar_time (ndarray): corresponding radar time stamps in seconds (unix time)
n_ts_run (int): number of timestamps needed in a mdv series
Returns:
valuesTimeSerie (ndarray): time series of Doppler velocity with length n_ts_run
time_series (ndarray): corresponding time stamps to Doppler velocity time series
i_height_sel (int): index of chosen height
valuesColumnMean (ndarray): time series of mean Doppler velocity averaged over height with length n_ts_run
"""
    # concept: scan the matrix with a running window for every height and check the number of nans in the selected series.
nanAmountMatrix = np.zeros((mdv_values.shape[0] - n_ts_run, mdv_values.shape[1]))
nanAmountMatrix.fill(np.nan)
for indtime in range(mdv_values.shape[0] - n_ts_run):
mdvChunk = mdv_values[indtime:indtime + n_ts_run, :]
# count number of nans in each height
nanAmountMatrix[indtime, :] = np.sum(np.isnan(mdvChunk), axis=0)
    # find indices where nanAmount is minimal
ntuples = np.where(nanAmountMatrix == np.nanmin(nanAmountMatrix))
i_time_sel = ntuples[0][0]
i_height_sel = ntuples[1][0]
# extract corresponding time series of mean Doppler velocity values for the chirp
valuesTimeSerie = mdv_values[i_time_sel:i_time_sel + n_ts_run, i_height_sel]
time_series = radar_time[i_time_sel:i_time_sel + n_ts_run]
    ###### also extract the column mean over all heights for the selected time window ########
valuesColumn = mdv_values[i_time_sel:i_time_sel + n_ts_run, :]
valuesColumnMean = np.nanmean(valuesColumn, axis=1)
return valuesTimeSerie, time_series, i_height_sel, valuesColumnMean
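# --- Illustration (not part of the original module): find_mdv_time_series on a small synthetic mdv matrix.
# Only one height contains (almost) continuous data, so the function should pick that height and a window of
# n_ts_run consecutive time steps without NaNs. All values are made up.
def _demo_find_mdv_time_series():
    import numpy as np
    mdv = np.full((12, 4), np.nan)                 # time x height, mostly masked
    mdv[:, 2] = np.sin(np.linspace(0, np.pi, 12))  # one height with nearly gap-free data
    mdv[3, 2] = np.nan                             # a single gap
    radar_time = np.arange(12, dtype=float)
    series, t_series, i_height, col_mean = find_mdv_time_series(mdv, radar_time, n_ts_run=5)
    assert i_height == 2                           # the height with the fewest gaps is chosen
    assert len(series) == 5 and len(t_series) == 5
    return series, t_series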
def calc_time_shift(w_radar_meanCol, delta_t_min, delta_t_max, resolution, w_ship_chirp, timeSerieRadar, pathFig, chirp,
hour, date):
"""
author: <NAME>, <NAME>, <NAME>
    goal: calculate an estimate of the time lag between the radar time stamps and the ship time stamps
    NOTE: whether the obtained time shift has to be added or subtracted depends on what was done
    during the calculation of the covariances: if you added/subtracted the time shift
    to t_radar you have to do the same for the 'exact time'
    The time shift analysis is also saved as a plot:
    <ww> is short for <w'_ship*w'_radar>, i.e. the covariance between the vertical velocities from
    ship motion and from the radar; its maximum gives an estimate of the optimal agreement of the
    vertical velocities of ship and radar
    <Delta w^2> is short for <(w[i]-w[i-1])^2> where w = w_rad - 2*w_ship - this
    is a measure for the stripiness of the corrected data. Its minimum gives an
    estimate of how to get the smoothest w data
Args:
w_radar_meanCol (ndarray): time series of mean Doppler velocity averaged over height with no nan values
delta_t_min (float): minimum time shift
delta_t_max (float): maximum time shift
resolution (float): time step by which to increment possible time shift
w_ship_chirp (ndarray): vertical velocity of the radar at the exact chirp time step
timeSerieRadar (ndarray): time stamps of the mean Doppler velocity time series (w_radar_meanCol)
pathFig (str): file path where figures should be stored
chirp (int): which chirp is being processed
hour (int): which hour of the day is being processed (0-23)
date (datetime): which day is being processed
Returns: time shift between radar data and ship data in seconds, quicklooks for each calculation
"""
fontSizeTitle = 12
fontSizeX = 12
fontSizeY = 12
plt.gcf().subplots_adjust(bottom=0.15)
# calculating variation for w_radar
w_prime_radar = w_radar_meanCol - np.nanmean(w_radar_meanCol)
# calculating covariance between w-ship and w_radar where w_ship is shifted for each deltaT given by DeltaTimeShift
DeltaTimeShift = np.arange(delta_t_min, delta_t_max, step=resolution)
cov_ww = np.zeros(len(DeltaTimeShift))
deltaW_ship = np.zeros(len(DeltaTimeShift))
for i in range(len(DeltaTimeShift)):
# calculate w_ship interpolating it on the new time array (timeShip+deltatimeShift(i))
T_corr = timeSerieRadar + DeltaTimeShift[i]
# interpolating w_ship on the shifted time series
cs_ship = CubicSpline(timeSerieRadar, w_ship_chirp)
w_ship_shifted = cs_ship(T_corr)
# calculating w_prime_ship with the new interpolated series
w_ship_prime = w_ship_shifted - np.nanmean(w_ship_shifted)
# calculating covariance of the prime series
cov_ww[i] = np.nanmean(w_ship_prime * w_prime_radar)
# calculating sharpness deltaW_ship
w_corrected = w_radar_meanCol - w_ship_shifted
delta_w = (np.ediff1d(w_corrected)) ** 2
deltaW_ship[i] = np.nanmean(delta_w)
# calculating max of covariance and min of deltaW_ship
minDeltaW = np.nanmin(deltaW_ship)
indMin = np.where(deltaW_ship == minDeltaW)[0][0]
maxCov_w = np.nanmax(cov_ww)
indMax = np.where(cov_ww == maxCov_w)[0][0]
try:
logger.info(f'Time shift found for chirp {chirp} at hour {hour}: {DeltaTimeShift[indMin]}')
# calculating time shift for radar data
timeShift_chirp = DeltaTimeShift[indMin]
# if time shift is equal delta_t_min it's probably false -> set it to 0
if np.abs(timeShift_chirp) == np.abs(delta_t_min):
timeShift_chirp = 0
# plot results
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(12, 6))
fig.tight_layout()
ax = plt.subplot(1, 1, 1)
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.plot(DeltaTimeShift, cov_ww, color='red', linestyle=':', label='cov_ww')
ax.axvline(x=DeltaTimeShift[indMax], color='red', linestyle=':', label='max cov_w')
ax.plot(DeltaTimeShift, deltaW_ship, color='red', label='Deltaw^2')
ax.axvline(x=DeltaTimeShift[indMin], color='red', label='min Deltaw^2')
ax.legend(frameon=False)
# ax.xaxis_date()
        ax.set_ylim(-0.1, 2.)  # limits of the y-axis
ax.set_xlim(delta_t_min, delta_t_max) # limits of the x-axes
ax.set_title(
            f'Covariance and Sharpness for chirp {chirp}: {date:%Y-%m-%d} hour: {hour}, '
f'time lag found : {DeltaTimeShift[indMin]}',
fontsize=fontSizeTitle, loc='left')
ax.set_xlabel("Time Shift [seconds]", fontsize=fontSizeX)
ax.set_ylabel('w [m s$^{-1}$]', fontsize=fontSizeY)
fig.tight_layout()
fig.savefig(f'{pathFig}/{date:%Y%m%d}_timeShiftQuicklook_chirp{chirp}_hour{hour}.png', format='png')
plt.close()
except IndexError:
logger.info(f'Not enough data points for time shift calculation in chirp {chirp} at hour {hour}!')
timeShift_chirp = 0
return timeShift_chirp
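# --- Illustration (not part of the original module): calc_time_shift on two synthetic velocity series.
# The "radar" series is the ship series delayed by 1.5 s, so the minimum of <Delta w^2> (and the covariance
# maximum) should be found at a shift of about -1.5 s. The quicklook PNG is written into path_fig, which must
# exist; matplotlib and scipy's CubicSpline are assumed to be imported at module level, as in the function above.
def _demo_calc_time_shift(path_fig="."):
    import datetime
    import numpy as np
    t = np.arange(0.0, 120.0, 0.5)                      # made-up radar time stamps, 2 Hz
    w_ship = np.sin(2 * np.pi * t / 8.0)                # synthetic ship velocity at the radar
    true_lag = 1.5                                      # seconds by which the radar lags behind
    w_radar = np.sin(2 * np.pi * (t - true_lag) / 8.0)  # the radar sees the ship motion delayed by true_lag
    shift = calc_time_shift(w_radar, delta_t_min=-3.0, delta_t_max=3.0, resolution=0.05,
                            w_ship_chirp=w_ship, timeSerieRadar=t, pathFig=path_fig, chirp=1, hour=0,
                            date=datetime.datetime(2020, 2, 5))
    return shift                                        # expected to be close to -true_lag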
def calc_chirp_timestamps(radar_ts, date, version):
""" Calculate the exact timestamp for each chirp corresponding with the center or start of the chirp
The timestamp in the radar file corresponds to the end of a chirp sequence with an accuracy of 0.1 s
Args:
radar_ts (ndarray): timestamps of the radar with milliseconds in seconds
date (datetime.datetime): date which is being processed
version (str): should the timestamp correspond to the 'center' or the 'start' of the chirp
Returns: dict with chirp timestamps
"""
# make lookup table for chirp durations for each chirptable (see projekt1/remsens/hardware/LIMRAD94/chirptables)
chirp_durations = pd.DataFrame({"Chirp_No": (1, 2, 3), "tradewindCU": (1.022, 0.947, 0.966),
"Doppler1s": (0.239, 0.342, 0.480), "Cu_small_Tint": (0.225, 0.135, 0.181),
"Cu_small_Tint2": (0.562, 0.572, 0.453)})
# calculate start time of each chirp by subtracting the duration of the later chirp(s) + the chirp itself
# the timestamp then corresponds to the start of the chirp
# select chirp durations according to date
if date < datetime.datetime(2020, 1, 29, 18, 0, 0):
chirp_dur = chirp_durations["tradewindCU"]
elif date < datetime.datetime(2020, 1, 30, 15, 3, 0):
chirp_dur = chirp_durations["Doppler1s"]
elif date < datetime.datetime(2020, 1, 31, 22, 28, 0):
chirp_dur = chirp_durations["Cu_small_Tint"]
else:
chirp_dur = chirp_durations["Cu_small_Tint2"]
chirp_timestamps = dict()
if version == 'center':
chirp_timestamps["chirp_1"] = radar_ts - chirp_dur[0] - chirp_dur[1] - chirp_dur[2] / 2
chirp_timestamps["chirp_2"] = radar_ts - chirp_dur[1] - chirp_dur[2] / 2
chirp_timestamps["chirp_3"] = radar_ts - chirp_dur[2] / 2
else:
chirp_timestamps["chirp_1"] = radar_ts - chirp_dur[0] - chirp_dur[1] - chirp_dur[2]
chirp_timestamps["chirp_2"] = radar_ts - chirp_dur[1] - chirp_dur[2]
chirp_timestamps["chirp_3"] = radar_ts - chirp_dur[2]
return chirp_timestamps
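# --- Illustration (not part of the original module): calc_chirp_timestamps for a few made-up radar time stamps.
# For a date after 2020-01-31 the 'Cu_small_Tint2' chirp table is selected; the start of chirp 3 then lies half a
# chirp duration (0.453 s / 2) before its center.
def _demo_calc_chirp_timestamps():
    import datetime
    import numpy as np
    radar_ts = np.array([0.0, 1.5, 3.0])         # made-up radar time stamps in seconds
    date = datetime.datetime(2020, 2, 5)
    center = calc_chirp_timestamps(radar_ts, date, version="center")
    start = calc_chirp_timestamps(radar_ts, date, version="start")
    assert np.allclose(center["chirp_3"] - start["chirp_3"], 0.453 / 2)
    return center, start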
def calc_shifted_chirp_timestamps(radar_ts, radar_mdv, chirp_ts, rg_borders_id, n_ts_run, Cs_w_radar, **kwargs):
"""
Calculates the time shift between each chirp time stamp and the ship time stamp for every hour and every chirp.
Args:
radar_ts (ndarray): radar time stamps in seconds (unix time)
radar_mdv (ndarray): time x height matrix of mean Doppler velocity from radar
chirp_ts (ndarray): exact chirp time stamps
rg_borders_id (ndarray): indices of chirp boundaries
n_ts_run (int): number of time steps necessary for mean Doppler velocity time series
Cs_w_radar (scipy.interpolate.CubicSpline): function of vertical velocity of radar against time
**kwargs:
no_chirps (int): number of chirps in radar measurement
plot_fig (bool): plot quicklook
Returns: time shifted chirp time stamps, array with time shifts for each chirp and hour
"""
no_chirps = kwargs['no_chirps'] if 'no_chirps' in kwargs else 3
delta_t_min = kwargs['delta_t_min'] if 'delta_t_min' in kwargs else radar_ts[0] - radar_ts[1]
delta_t_max = kwargs['delta_t_max'] if 'delta_t_max' in kwargs else radar_ts[1] - radar_ts[0]
resolution = kwargs['resolution'] if 'resolution' in kwargs else 0.05
pathFig = kwargs['pathFig'] if 'pathFig' in kwargs else "./tmp"
date = kwargs['date'] if 'date' in kwargs else pd.to_datetime(radar_ts[0], unit='s')
plot_fig = kwargs['plot_fig'] if 'plot_fig' in kwargs else False
time_shift_array = np.zeros((len(radar_ts), no_chirps))
    chirp_ts_shifted = {chirp: ts.copy() for chirp, ts in chirp_ts.items()}  # copy so the input chirp_ts is not modified in place
# get total hours in data and then loop through each hour
    hours = int(np.ceil(radar_ts.shape[0] * np.mean(np.diff(radar_ts)) / 60 / 60))
    idx = int(np.floor(len(radar_ts) / hours))
for i in range(hours):
start_idx = i * idx
if i < hours-1:
end_idx = (i + 1) * idx
else:
end_idx = time_shift_array.shape[0]
for j in range(no_chirps):
# set time and range slice
ts_slice, rg_slice = slice(start_idx, end_idx), slice(rg_borders_id[j], rg_borders_id[j + 1])
mdv_slice = radar_mdv[ts_slice, rg_slice]
time_slice = chirp_ts[f'chirp_{j + 1}'][
ts_slice] # select the corresponding exact chirp time for the mdv slice
mdv_series, time_mdv_series, height_id, mdv_mean_col = find_mdv_time_series(mdv_slice, time_slice,
n_ts_run)
# selecting w_radar values of the chirp over the same time interval as the mdv_series
w_radar_chirpSel = Cs_w_radar(time_mdv_series)
# calculating time shift for the chirp and hour if at least n_ts_run measurements are available
if np.sum(~np.isnan(mdv_mean_col)) == n_ts_run:
time_shift_array[ts_slice, j] = calc_time_shift(mdv_mean_col, delta_t_min, delta_t_max, resolution,
w_radar_chirpSel, time_mdv_series,
pathFig, j + 1, i, date)
# recalculate exact chirp time including time shift due to lag
chirp_ts_shifted[f'chirp_{j + 1}'][ts_slice] = chirp_ts[f'chirp_{j + 1}'][ts_slice] - time_shift_array[
ts_slice, j]
# get w_radar at the time shifted exact chirp time stamps
w_radar_exact = Cs_w_radar(chirp_ts_shifted[f'chirp_{j + 1}'][ts_slice])
if plot_fig:
# plot mdv time series and shifted radar heave rate
ts_idx = [argnearest(chirp_ts_shifted[f'chirp_{j + 1}'][ts_slice], t) for t in time_mdv_series]
plot_time = pd.to_datetime(time_mdv_series, unit='s')
plot_df = pd.DataFrame(dict(time=plot_time, mdv_mean_col=mdv_mean_col,
w_radar_org=Cs_w_radar(time_mdv_series),
w_radar_chirpSel=w_radar_chirpSel,
w_radar_exact_shifted=w_radar_exact[ts_idx])).set_index('time')
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(12, 6))
ax.plot(plot_df['mdv_mean_col'], color='red', label='mean mdv over column at original radar time')
ax.plot(plot_df['w_radar_org'], color='blue', linewidth=0.2, label='w_radar at original radar time')
ax.plot(plot_df['w_radar_chirpSel'], color='blue', label='w_radar at original chirp time')
ax.plot(plot_df['w_radar_exact_shifted'], '.', color='green', label='w_radar shifted')
                    ax.set_ylim(-4., 2.)  # limits of the y-axis
                    ax.legend(frameon=False)
ax.set_title(
f'Velocity for Time Delay Calculations : {date:%Y-%m-%d} shift = {time_shift_array[start_idx, j]}',
loc='left')
ax.set_xlabel("Time [day hh:mm]")
ax.set_ylabel('w [m s$^{-1}$]')
ax.xaxis_date()
ax.grid()
fig.autofmt_xdate()
fig.savefig(f'{pathFig}/{date:%Y%m%d}_time-series_mdv_w-radar_chirp{j + 1}_hour{i}.png')
plt.close()
return chirp_ts_shifted, time_shift_array
def calc_corr_matrix_claudia(radar_ts, radar_rg, rg_borders_id, chirp_ts_shifted, Cs_w_radar):
"""
Calculate the correction matrix to correct the mean Doppler velocity for the ship vertical motion.
Args:
radar_ts (ndarray): original radar time stamps in seconds (unix time)
radar_rg (ndarray): radar range gates
rg_borders_id (ndarray): indices of chirp boundaries
chirp_ts_shifted (dict): hourly shifted chirp time stamps
Cs_w_radar (scipy.interpolate.CubicSpline): function of vertical velocity of radar against time
Returns: correction matrix for mean Doppler velocity
"""
no_chirps = len(chirp_ts_shifted)
corr_matrix = np.zeros((len(radar_ts), len(radar_rg)))
# get total hours in data and then loop through each hour
    hours = int(np.ceil(radar_ts.shape[0] * np.mean(np.diff(radar_ts)) / 60 / 60))
    # divide the day in equal hourly slices
    idx = int(np.floor(len(radar_ts) / hours))
for i in range(hours):
start_idx = i * idx
if i < hours-1:
end_idx = (i + 1) * idx
else:
end_idx = len(radar_ts)
for j in range(no_chirps):
# set time and range slice
ts_slice, rg_slice = slice(start_idx, end_idx), slice(rg_borders_id[j], rg_borders_id[j + 1])
# get w_radar at the time shifted exact chirp time stamps
w_radar_exact = Cs_w_radar(chirp_ts_shifted[f'chirp_{j + 1}'][ts_slice])
# add a dimension to w_radar_exact and repeat it over this dimension (range) to fill the hour and
# chirp of the correction array
tmp = np.repeat(np.expand_dims(w_radar_exact, 1), rg_borders_id[j + 1] - rg_borders_id[j], axis=1)
corr_matrix[ts_slice, rg_slice] = tmp
return corr_matrix
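# --- Illustration (not part of the original module): calc_corr_matrix_claudia with made-up inputs.
# Two hours of 0.5 Hz time stamps, 300 range gates split into three chirps, a slightly shifted chirp time stamp
# per chirp and a CubicSpline of a synthetic radar velocity; the result holds one heave-rate value per time stamp,
# repeated over all range gates of the respective chirp.
def _demo_calc_corr_matrix_claudia():
    import numpy as np
    from scipy.interpolate import CubicSpline
    radar_ts = np.arange(0.0, 7200.0, 2.0)                        # two hours of 0.5 Hz radar time stamps
    radar_rg = np.arange(300)                                     # made-up range gates
    rg_borders_id = np.array([0, 120, 220, 300])                  # chirp borders as range-bin indices
    chirp_ts_shifted = {f"chirp_{i + 1}": radar_ts - 0.2 * i for i in range(3)}
    Cs_w_radar = CubicSpline(radar_ts, 0.5 * np.sin(2 * np.pi * radar_ts / 8.0))
    corr = calc_corr_matrix_claudia(radar_ts, radar_rg, rg_borders_id, chirp_ts_shifted, Cs_w_radar)
    assert corr.shape == (radar_ts.size, radar_rg.size)
    return corr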
def get_range_bin_borders(no_chirps, container):
"""get the range bins which correspond to the chirp borders of a FMCW radar
Args:
no_chirps (int): Number of chirps
container (dict): Dictionary with C1/2/3Range variable from LV1 files
Returns:
ndarray with chirp borders including 0 range_bins
"""
    range_bins = np.zeros(no_chirps + 1, dtype=int)  # needs to be length no_chirps + 1 to include the 0 border
    for i in range(no_chirps):
        # the last dimension of C{i+1}Range is the range dimension, no matter whether only one file was read in
        # (1-D array) or several files were concatenated (2-D array, time x range)
        range_bins[i + 1] = range_bins[i] + container[f'C{i + 1}Range']['var'].shape[-1]
return range_bins
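# --- Illustration (not part of the original module): get_range_bin_borders with a made-up larda-like container.
# Three chirps with 120, 100 and 80 range gates give the cumulative borders [0, 120, 220, 300].
def _demo_get_range_bin_borders():
    import numpy as np
    container = {"C1Range": {"var": np.zeros((10, 120))},   # time x range, sizes are made up
                 "C2Range": {"var": np.zeros((10, 100))},
                 "C3Range": {"var": np.zeros((10, 80))}}
    borders = get_range_bin_borders(3, container)
    assert (borders == np.array([0, 120, 220, 300])).all()
    return borders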
def calc_heave_corr(container, chirp_ts, seapath, mean_hr=True):
"""Calculate heave correction for mean Doppler velocity
Args:
container (larda container): LIMRAD94 C1/2/3_Range, SeqIntTime, ts
chirp_ts (dict): dictionary with exact radar chirp time stamps
seapath (pd.DataFrame): Data frame with heave rate column ("heave_rate_radar")
mean_hr (bool): whether to use the mean heave rate over the SeqIntTime or the heave rate at the start time of the chirp
Returns:
heave_corr (ndarray): heave rate closest to each radar timestep for each height bin, time x range
"""
start = time.time()
####################################################################################################################
# Calculating Timestamps for each chirp
####################################################################################################################
# array with range bin numbers of chirp borders
no_chirps = len(chirp_ts)
range_bins = get_range_bin_borders(no_chirps, container)
seapath_ts = seapath.index.values.astype(np.float64) / 10 ** 9 # convert datetime index to seconds since 1970-01-01
total_range_bins = range_bins[-1] # get total number of range bins
# initialize output variables
heave_corr = np.empty(shape=(container["ts"].shape[0], total_range_bins)) # time x range
seapath_out = pd.DataFrame()
for i in range(no_chirps):
t1 = time.time()
# get integration time for chirp
int_time =
|
pd.Timedelta(seconds=container['SeqIntTime'][i])
|
pandas.Timedelta
|
# Copyright (c) 2019-2021 - for information on the respective copyright owner
# see the NOTICE file and/or the repository
# https://github.com/boschresearch/pylife
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy as np
import pandas as pd
import pylife.mesh.meshsignal
def test_plain_mesh_3d():
df = pd.DataFrame({'x': [1.0], 'y': [2.0], 'z': [3.0], 'a': [9.9]})
pd.testing.assert_frame_equal(df.plain_mesh.coordinates,
pd.DataFrame({'x': [1.0], 'y': [2.0], 'z': [3.0]}))
def test_plain_mesh_2d():
df = pd.DataFrame({'x': [1.0], 'y': [2.0], 'b': [3.0], 'a': [9.9]})
pd.testing.assert_frame_equal(df.plain_mesh.coordinates,
|
pd.DataFrame({'x': [1.0], 'y': [2.0]})
|
pandas.DataFrame
|
from tqdm import tqdm
import pandas as pd
import pickle
tqdm.pandas()
def maphex2int(collection, path="ids.pkl"):
ids = {str(id): i+1 for i, id in enumerate(
collection.find().distinct('_id'))}
with open(path, 'wb') as f:
pickle.dump(ids, f)
return ids
def database2csv(collection, path, hex2int):
df = pd.DataFrame(columns=["rater", "rated", "r"])
for i in tqdm(collection.find()):
rater = hex2int[str(i['_id'])]
right_swipes = i['liked_users']
for swipes in right_swipes:
rated = hex2int[str(swipes['swipee_id'])]
df = pd.concat(
[df,
|
pd.DataFrame([[rater, rated, 1.0]], columns=['rater', 'rated', 'r'])
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
import pytest
from gmpy2 import bit_mask
from rulelist.rulelistmodel.categoricalmodel.categoricaltarget import CategoricalTarget
from rulelist.util.bitset_operations import indexes2bitset
@pytest.fixture
def generate_dataframe_one_target():
dictoutput = {"target1": np.array(["below50" if i < 50 else "above49" for i in range(100)])}
input_target_data =
|
pd.DataFrame(data=dictoutput)
|
pandas.DataFrame
|
import warnings
from datetime import datetime
import pytest
import pandas as pd
from mssql_dataframe.connect import connect
from mssql_dataframe.core import custom_warnings, custom_errors, create, conversion, conversion_rules
from mssql_dataframe.core.write import insert, _exceptions
pd.options.mode.chained_assignment = "raise"
class package:
def __init__(self, connection):
self.connection = connection.connection
self.create = create.create(self.connection)
self.create_meta = create.create(self.connection, include_metadata_timestamps=True)
self.insert = insert.insert(self.connection, autoadjust_sql_objects=True)
self.insert_meta = insert.insert(self.connection, include_metadata_timestamps=True, autoadjust_sql_objects=True)
@pytest.fixture(scope="module")
def sql():
db = connect(database="tempdb", server="localhost")
yield package(db)
db.connection.close()
def test_insert_autoadjust_errors(sql):
table_name = "##test_insert_autoadjust_errors"
# create table with column for each conversion rule
columns = conversion_rules.rules['sql_type'].to_numpy()
columns = {'_'+x:x for x in columns}
sql.create.table(table_name, columns=columns)
# create dataframes for each conversion rule that should fail insert
boolean = [3]
exact_numeric = ['a', '2-1', 1.1, datetime.now()]
approximate_numeric = ['a', '2-1',datetime.now()]
date_time = ['a', 1, 1.1]
character_string = [1, datetime.now()]
dataframe = [
pd.DataFrame({'_bit': boolean}),
pd.DataFrame({'_tinyint': exact_numeric}),
pd.DataFrame({'_smallint': exact_numeric}),
pd.DataFrame({'_int': exact_numeric}),
pd.DataFrame({'_bigint': exact_numeric}),
pd.DataFrame({'_float': approximate_numeric}),
pd.DataFrame({'_time': date_time}),
pd.DataFrame({'_date': date_time}),
pd.DataFrame({'_datetime2': date_time}),
|
pd.DataFrame({'_varchar': character_string})
|
pandas.DataFrame
|
"""
This module contains all the technical indicators that can be applied
to a nowtrade dataset. You can use any number of these for your
strategy.
"""
import uuid
import numpy as np
import talib
import pandas as pd
from nowtrade import logger
class TechnicalIndicator(object):
"""
The base class for all technical indicators.
"""
def __init__(self):
self.logger = logger.Logger(self.__class__.__name__)
def results(self, data_frame):
"""
This needs to be implemented for all technical indicators.
All the calculations happen here.
"""
pass
class Pair(TechnicalIndicator):
"""
Pair is a helper TI created to aid in pairs trading.
Attributes:
ols -> Ordinary Least Squares of the pair
hedge_ratio -> The pair's hedge ratio
spread -> The spread between the pair
zscore -> The zscore between the pair
"""
def __init__(self, y_data, x_data, lookback):
TechnicalIndicator.__init__(self)
self.y_data = y_data
self.x_data = x_data
self.lookback = lookback
self.value = 'PAIR_%s_%s_%s' %(y_data, x_data, lookback)
self.ols = self.value
self.hedge_ratio = 'HEDGE_RATIO_%s_%s_%s' %(y_data, x_data, lookback)
self.spread = 'SPREAD_%s_%s_%s' %(y_data, x_data, lookback)
self.zscore = 'ZSCORE_%s_%s_%s' %(y_data, x_data, lookback)
self.logger.info('Initialized - %s' %self)
def __str__(self):
return self.value
def __repr__(self):
return self.value
def results(self, data_frame):
y_value = data_frame[self.y_data]
x_value = data_frame[self.x_data]
if self.lookback >= len(x_value):
return ([self.value, self.hedge_ratio, self.spread, self.zscore], \
[pd.Series(np.nan), pd.Series(np.nan), pd.Series(np.nan), pd.Series(np.nan)])
ols_result =
|
pd.ols(y=y_value, x=x_value, window=self.lookback)
|
pandas.ols
|
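# --- Illustration (not part of the original file): pd.ols() was removed from pandas long ago, so the completion
# above only runs on very old pandas versions. A rough, hypothetical sketch of the same quantities with modern
# pandas, for two price Series y and x: the rolling hedge ratio is approximated by cov(y, x) / var(x) over the
# lookback window, the spread is y - hedge_ratio * x, and the z-score standardises the spread over the same
# window. This approximates the rolling OLS slope and is not the actual nowtrade implementation.
def _sketch_pair_quantities(y, x, lookback):
    import pandas as pd
    hedge_ratio = y.rolling(lookback).cov(x) / x.rolling(lookback).var()
    spread = y - hedge_ratio * x
    zscore = (spread - spread.rolling(lookback).mean()) / spread.rolling(lookback).std()
    return pd.DataFrame({"hedge_ratio": hedge_ratio, "spread": spread, "zscore": zscore})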
from optparse import Option
from pathlib import Path
from typing import Optional
import pandas as pd
from sqlalchemy import create_engine
import typer
def main(filename : Path,
table_name : str,
username : Optional[str] = typer.Argument("root"),
password : Optional[str] = typer.Argument("root"),
host : Optional[str] = typer.Argument("localhost"),
port : Optional[str] = typer.Argument("5432"),
db : Optional[str] = typer.Argument("ny_taxi"),
):
# Use sqlalchemy to access the db
engine = create_engine(f'postgresql://{username}:{password}@{host}:{port}/{db}')
# Read 100 rows for datatypes
df = pd.read_csv(filename, nrows=100)
# Write the table structure based on the dataframe header
    if 'tpep_pickup_datetime' in df:
df.tpep_pickup_datetime = pd.to_datetime(df.tpep_pickup_datetime)
df.tpep_dropoff_datetime = pd.to_datetime(df.tpep_dropoff_datetime)
df.head(n=0).to_sql(name=table_name, con=engine, if_exists='replace')
print("Inserting data into databse ...")
df_iter = pd.read_csv(filename, iterator=True, chunksize=100_000)
for df in df_iter:
        if 'tpep_pickup_datetime' in df:
df.tpep_pickup_datetime =
|
pd.to_datetime(df.tpep_pickup_datetime)
|
pandas.to_datetime
|
import random
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
NaT,
Timestamp,
date_range,
)
import pandas._testing as tm
class TestDataFrameSortValues:
def test_sort_values(self):
frame = DataFrame(
[[1, 1, 2], [3, 1, 0], [4, 5, 6]], index=[1, 2, 3], columns=list("ABC")
)
# by column (axis=0)
sorted_df = frame.sort_values(by="A")
indexer = frame["A"].argsort().values
expected = frame.loc[frame.index[indexer]]
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by="A", ascending=False)
indexer = indexer[::-1]
expected = frame.loc[frame.index[indexer]]
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by="A", ascending=False)
tm.assert_frame_equal(sorted_df, expected)
# GH4839
sorted_df = frame.sort_values(by=["A"], ascending=[False])
tm.assert_frame_equal(sorted_df, expected)
# multiple bys
sorted_df = frame.sort_values(by=["B", "C"])
expected = frame.loc[[2, 1, 3]]
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=["B", "C"], ascending=False)
tm.assert_frame_equal(sorted_df, expected[::-1])
sorted_df = frame.sort_values(by=["B", "A"], ascending=[True, False])
tm.assert_frame_equal(sorted_df, expected)
msg = "No axis named 2 for object type DataFrame"
with pytest.raises(ValueError, match=msg):
frame.sort_values(by=["A", "B"], axis=2, inplace=True)
# by row (axis=1): GH#10806
sorted_df = frame.sort_values(by=3, axis=1)
expected = frame
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=3, axis=1, ascending=False)
expected = frame.reindex(columns=["C", "B", "A"])
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 2], axis="columns")
expected = frame.reindex(columns=["B", "A", "C"])
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=[True, False])
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=False)
expected = frame.reindex(columns=["C", "B", "A"])
tm.assert_frame_equal(sorted_df, expected)
msg = r"Length of ascending \(5\) != length of by \(2\)"
with pytest.raises(ValueError, match=msg):
frame.sort_values(by=["A", "B"], axis=0, ascending=[True] * 5)
def test_sort_values_by_empty_list(self):
# https://github.com/pandas-dev/pandas/issues/40258
expected = DataFrame({"a": [1, 4, 2, 5, 3, 6]})
result = expected.sort_values(by=[])
tm.assert_frame_equal(result, expected)
assert result is not expected
def test_sort_values_inplace(self):
frame = DataFrame(
np.random.randn(4, 4), index=[1, 2, 3, 4], columns=["A", "B", "C", "D"]
)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by="A", inplace=True)
assert return_value is None
expected = frame.sort_values(by="A")
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by=1, axis=1, inplace=True)
assert return_value is None
expected = frame.sort_values(by=1, axis=1)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by="A", ascending=False, inplace=True)
assert return_value is None
expected = frame.sort_values(by="A", ascending=False)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(
by=["A", "B"], ascending=False, inplace=True
)
assert return_value is None
expected = frame.sort_values(by=["A", "B"], ascending=False)
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_multicolumn(self):
A = np.arange(5).repeat(20)
B = np.tile(np.arange(5), 20)
random.shuffle(A)
random.shuffle(B)
frame = DataFrame({"A": A, "B": B, "C": np.random.randn(100)})
result = frame.sort_values(by=["A", "B"])
indexer = np.lexsort((frame["B"], frame["A"]))
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
result = frame.sort_values(by=["A", "B"], ascending=False)
indexer = np.lexsort(
(frame["B"].rank(ascending=False), frame["A"].rank(ascending=False))
)
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
result = frame.sort_values(by=["B", "A"])
indexer = np.lexsort((frame["A"], frame["B"]))
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
def test_sort_values_multicolumn_uint64(self):
# GH#9918
# uint64 multicolumn sort
df = DataFrame(
{
"a": pd.Series([18446637057563306014, 1162265347240853609]),
"b": pd.Series([1, 2]),
}
)
df["a"] = df["a"].astype(np.uint64)
result = df.sort_values(["a", "b"])
expected = DataFrame(
{
"a": pd.Series([18446637057563306014, 1162265347240853609]),
"b": pd.Series([1, 2]),
},
index=pd.Index([1, 0]),
)
tm.assert_frame_equal(result, expected)
def test_sort_values_nan(self):
# GH#3917
df = DataFrame(
{"A": [1, 2, np.nan, 1, 6, 8, 4], "B": [9, np.nan, 5, 2, 5, 4, 5]}
)
# sort one column only
expected = DataFrame(
{"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 9, 2, np.nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A"], na_position="first")
tm.assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{"A": [np.nan, 8, 6, 4, 2, 1, 1], "B": [5, 4, 5, 5, np.nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3],
)
sorted_df = df.sort_values(["A"], na_position="first", ascending=False)
tm.assert_frame_equal(sorted_df, expected)
expected = df.reindex(columns=["B", "A"])
sorted_df = df.sort_values(by=1, axis=1, na_position="first")
tm.assert_frame_equal(sorted_df, expected)
# na_position='last', order
expected = DataFrame(
{"A": [1, 1, 2, 4, 6, 8, np.nan], "B": [2, 9, np.nan, 5, 5, 4, 5]},
index=[3, 0, 1, 6, 4, 5, 2],
)
sorted_df = df.sort_values(["A", "B"])
tm.assert_frame_equal(sorted_df, expected)
# na_position='first', order
expected = DataFrame(
{"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 2, 9, np.nan, 5, 5, 4]},
index=[2, 3, 0, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A", "B"], na_position="first")
tm.assert_frame_equal(sorted_df, expected)
# na_position='first', not order
expected = DataFrame(
{"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 9, 2, np.nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A", "B"], ascending=[1, 0], na_position="first")
tm.assert_frame_equal(sorted_df, expected)
# na_position='last', not order
expected = DataFrame(
{"A": [8, 6, 4, 2, 1, 1, np.nan], "B": [4, 5, 5, np.nan, 2, 9, 5]},
index=[5, 4, 6, 1, 3, 0, 2],
)
sorted_df = df.sort_values(["A", "B"], ascending=[0, 1], na_position="last")
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_stable_descending_sort(self):
# GH#6399
df = DataFrame(
[[2, "first"], [2, "second"], [1, "a"], [1, "b"]],
columns=["sort_col", "order"],
)
sorted_df = df.sort_values(by="sort_col", kind="mergesort", ascending=False)
tm.assert_frame_equal(df, sorted_df)
@pytest.mark.parametrize(
"expected_idx_non_na, ascending",
[
[
[3, 4, 5, 0, 1, 8, 6, 9, 7, 10, 13, 14],
[True, True],
],
[
[0, 3, 4, 5, 1, 8, 6, 7, 10, 13, 14, 9],
[True, False],
],
[
[9, 7, 10, 13, 14, 6, 8, 1, 3, 4, 5, 0],
[False, True],
],
[
[7, 10, 13, 14, 9, 6, 8, 1, 0, 3, 4, 5],
[False, False],
],
],
)
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_sort_values_stable_multicolumn_sort(
self, expected_idx_non_na, ascending, na_position
):
# GH#38426 Clarify sort_values with mult. columns / labels is stable
df = DataFrame(
{
"A": [1, 2, np.nan, 1, 1, 1, 6, 8, 4, 8, 8, np.nan, np.nan, 8, 8],
"B": [9, np.nan, 5, 2, 2, 2, 5, 4, 5, 3, 4, np.nan, np.nan, 4, 4],
}
)
# All rows with NaN in col "B" only have unique values in "A", therefore,
# only the rows with NaNs in "A" have to be treated individually:
expected_idx = (
[11, 12, 2] + expected_idx_non_na
if na_position == "first"
else expected_idx_non_na + [2, 11, 12]
)
expected = df.take(expected_idx)
sorted_df = df.sort_values(
["A", "B"], ascending=ascending, na_position=na_position
)
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_stable_categorial(self):
# GH#16793
df = DataFrame({"x": Categorical(np.repeat([1, 2, 3, 4], 5), ordered=True)})
expected = df.copy()
sorted_df = df.sort_values("x", kind="mergesort")
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_datetimes(self):
# GH#3461, argsort / lexsort differences for a datetime column
df = DataFrame(
["a", "a", "a", "b", "c", "d", "e", "f", "g"],
columns=["A"],
index=date_range("20130101", periods=9),
)
dts = [
Timestamp(x)
for x in [
"2004-02-11",
"2004-01-21",
"2004-01-26",
"2005-09-20",
"2010-10-04",
"2009-05-12",
"2008-11-12",
"2010-09-28",
"2010-09-28",
]
]
df["B"] = dts[::2] + dts[1::2]
df["C"] = 2.0
df["A1"] = 3.0
df1 = df.sort_values(by="A")
df2 = df.sort_values(by=["A"])
tm.assert_frame_equal(df1, df2)
df1 = df.sort_values(by="B")
df2 = df.sort_values(by=["B"])
tm.assert_frame_equal(df1, df2)
df1 = df.sort_values(by="B")
df2 = df.sort_values(by=["C", "B"])
tm.assert_frame_equal(df1, df2)
def test_sort_values_frame_column_inplace_sort_exception(self, float_frame):
s = float_frame["A"]
with pytest.raises(ValueError, match="This Series is a view"):
s.sort_values(inplace=True)
cp = s.copy()
cp.sort_values() # it works!
def test_sort_values_nat_values_in_int_column(self):
# GH#14922: "sorting with large float and multiple columns incorrect"
# cause was that the int64 value NaT was considered as "na". Which is
# only correct for datetime64 columns.
int_values = (2, int(NaT.value))
float_values = (2.0, -1.797693e308)
df = DataFrame(
{"int": int_values, "float": float_values}, columns=["int", "float"]
)
df_reversed = DataFrame(
{"int": int_values[::-1], "float": float_values[::-1]},
columns=["int", "float"],
index=[1, 0],
)
# NaT is not a "na" for int64 columns, so na_position must not
# influence the result:
df_sorted = df.sort_values(["int", "float"], na_position="last")
tm.assert_frame_equal(df_sorted, df_reversed)
df_sorted = df.sort_values(["int", "float"], na_position="first")
tm.assert_frame_equal(df_sorted, df_reversed)
# reverse sorting order
df_sorted = df.sort_values(["int", "float"], ascending=False)
tm.assert_frame_equal(df_sorted, df)
# and now check if NaT is still considered as "na" for datetime64
# columns:
df = DataFrame(
{"datetime": [Timestamp("2016-01-01"), NaT], "float": float_values},
columns=["datetime", "float"],
)
df_reversed = DataFrame(
{"datetime": [NaT, Timestamp("2016-01-01")], "float": float_values[::-1]},
columns=["datetime", "float"],
index=[1, 0],
)
df_sorted = df.sort_values(["datetime", "float"], na_position="first")
tm.assert_frame_equal(df_sorted, df_reversed)
df_sorted = df.sort_values(["datetime", "float"], na_position="last")
tm.assert_frame_equal(df_sorted, df)
# Ascending should not affect the results.
df_sorted = df.sort_values(["datetime", "float"], ascending=False)
tm.assert_frame_equal(df_sorted, df)
def test_sort_nat(self):
# GH 16836
d1 = [Timestamp(x) for x in ["2016-01-01", "2015-01-01", np.nan, "2016-01-01"]]
d2 = [
Timestamp(x)
for x in ["2017-01-01", "2014-01-01", "2016-01-01", "2015-01-01"]
]
df = DataFrame({"a": d1, "b": d2}, index=[0, 1, 2, 3])
d3 = [Timestamp(x) for x in ["2015-01-01", "2016-01-01", "2016-01-01", np.nan]]
d4 = [
Timestamp(x)
for x in ["2014-01-01", "2015-01-01", "2017-01-01", "2016-01-01"]
]
expected = DataFrame({"a": d3, "b": d4}, index=[1, 3, 0, 2])
sorted_df = df.sort_values(by=["a", "b"])
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_na_position_with_categories(self):
# GH#22556
# Positioning missing value properly when column is Categorical.
categories = ["A", "B", "C"]
category_indices = [0, 2, 4]
list_of_nans = [np.nan, np.nan]
na_indices = [1, 3]
na_position_first = "first"
na_position_last = "last"
column_name = "c"
reversed_categories = sorted(categories, reverse=True)
reversed_category_indices = sorted(category_indices, reverse=True)
reversed_na_indices = sorted(na_indices)
df = DataFrame(
{
column_name: Categorical(
["A", np.nan, "B", np.nan, "C"], categories=categories, ordered=True
)
}
)
# sort ascending with na first
result = df.sort_values(
by=column_name, ascending=True, na_position=na_position_first
)
expected = DataFrame(
{
column_name: Categorical(
list_of_nans + categories, categories=categories, ordered=True
)
},
index=na_indices + category_indices,
)
tm.assert_frame_equal(result, expected)
# sort ascending with na last
result = df.sort_values(
by=column_name, ascending=True, na_position=na_position_last
)
expected = DataFrame(
{
column_name: Categorical(
categories + list_of_nans, categories=categories, ordered=True
)
},
index=category_indices + na_indices,
)
tm.assert_frame_equal(result, expected)
# sort descending with na first
result = df.sort_values(
by=column_name, ascending=False, na_position=na_position_first
)
expected = DataFrame(
{
column_name: Categorical(
list_of_nans + reversed_categories,
categories=categories,
ordered=True,
)
},
index=reversed_na_indices + reversed_category_indices,
)
tm.assert_frame_equal(result, expected)
# sort descending with na last
result = df.sort_values(
by=column_name, ascending=False, na_position=na_position_last
)
expected = DataFrame(
{
column_name: Categorical(
reversed_categories + list_of_nans,
categories=categories,
ordered=True,
)
},
index=reversed_category_indices + reversed_na_indices,
)
tm.assert_frame_equal(result, expected)
def test_sort_values_nat(self):
# GH#16836
d1 = [Timestamp(x) for x in ["2016-01-01", "2015-01-01", np.nan, "2016-01-01"]]
d2 = [
Timestamp(x)
for x in ["2017-01-01", "2014-01-01", "2016-01-01", "2015-01-01"]
]
df = DataFrame({"a": d1, "b": d2}, index=[0, 1, 2, 3])
d3 = [Timestamp(x) for x in ["2015-01-01", "2016-01-01", "2016-01-01", np.nan]]
d4 = [
Timestamp(x)
for x in ["2014-01-01", "2015-01-01", "2017-01-01", "2016-01-01"]
]
expected = DataFrame({"a": d3, "b": d4}, index=[1, 3, 0, 2])
sorted_df = df.sort_values(by=["a", "b"])
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_na_position_with_categories_raises(self):
df = DataFrame(
{
"c": Categorical(
["A", np.nan, "B", np.nan, "C"],
categories=["A", "B", "C"],
ordered=True,
)
}
)
with pytest.raises(ValueError, match="invalid na_position: bad_position"):
df.sort_values(by="c", ascending=False, na_position="bad_position")
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize(
"original_dict, sorted_dict, ignore_index, output_index",
[
({"A": [1, 2, 3]}, {"A": [3, 2, 1]}, True, [0, 1, 2]),
({"A": [1, 2, 3]}, {"A": [3, 2, 1]}, False, [2, 1, 0]),
(
{"A": [1, 2, 3], "B": [2, 3, 4]},
{"A": [3, 2, 1], "B": [4, 3, 2]},
True,
[0, 1, 2],
),
(
{"A": [1, 2, 3], "B": [2, 3, 4]},
{"A": [3, 2, 1], "B": [4, 3, 2]},
False,
[2, 1, 0],
),
],
)
def test_sort_values_ignore_index(
self, inplace, original_dict, sorted_dict, ignore_index, output_index
):
# GH 30114
df = DataFrame(original_dict)
expected = DataFrame(sorted_dict, index=output_index)
kwargs = {"ignore_index": ignore_index, "inplace": inplace}
if inplace:
result_df = df.copy()
result_df.sort_values("A", ascending=False, **kwargs)
else:
result_df = df.sort_values("A", ascending=False, **kwargs)
tm.assert_frame_equal(result_df, expected)
tm.assert_frame_equal(df, DataFrame(original_dict))
def test_sort_values_nat_na_position_default(self):
# GH 13230
expected = DataFrame(
{
"A": [1, 2, 3, 4, 4],
"date": pd.DatetimeIndex(
[
"2010-01-01 09:00:00",
"2010-01-01 09:00:01",
"2010-01-01 09:00:02",
"2010-01-01 09:00:03",
"NaT",
]
),
}
)
result = expected.sort_values(["A", "date"])
tm.assert_frame_equal(result, expected)
def test_sort_values_item_cache(self, using_array_manager):
# previous behavior incorrect retained an invalid _item_cache entry
df = DataFrame(np.random.randn(4, 3), columns=["A", "B", "C"])
df["D"] = df["A"] * 2
ser = df["A"]
if not using_array_manager:
assert len(df._mgr.blocks) == 2
df.sort_values(by="A")
ser.values[0] = 99
assert df.iloc[0, 0] == df["A"][0]
def test_sort_values_reshaping(self):
# GH 39426
values = list(range(21))
expected = DataFrame([values], columns=values)
df = expected.sort_values(expected.index[0], axis=1, ignore_index=True)
tm.assert_frame_equal(df, expected)
class TestDataFrameSortKey: # test key sorting (issue 27237)
def test_sort_values_inplace_key(self, sort_by_key):
frame = DataFrame(
np.random.randn(4, 4), index=[1, 2, 3, 4], columns=["A", "B", "C", "D"]
)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by="A", inplace=True, key=sort_by_key)
assert return_value is None
expected = frame.sort_values(by="A", key=sort_by_key)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(
by=1, axis=1, inplace=True, key=sort_by_key
)
assert return_value is None
expected = frame.sort_values(by=1, axis=1, key=sort_by_key)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(
by="A", ascending=False, inplace=True, key=sort_by_key
)
assert return_value is None
expected = frame.sort_values(by="A", ascending=False, key=sort_by_key)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(
by=["A", "B"], ascending=False, inplace=True, key=sort_by_key
)
expected = frame.sort_values(by=["A", "B"], ascending=False, key=sort_by_key)
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_key(self):
df = DataFrame(np.array([0, 5, np.nan, 3, 2, np.nan]))
result = df.sort_values(0)
expected = df.iloc[[0, 4, 3, 1, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(0, key=lambda x: x + 5)
expected = df.iloc[[0, 4, 3, 1, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(0, key=lambda x: -x, ascending=False)
expected = df.iloc[[0, 4, 3, 1, 2, 5]]
tm.assert_frame_equal(result, expected)
def test_sort_values_by_key(self):
df = DataFrame(
{
"a": np.array([0, 3, np.nan, 3, 2, np.nan]),
"b": np.array([0, 2, np.nan, 5, 2, np.nan]),
}
)
result = df.sort_values("a", key=lambda x: -x)
expected = df.iloc[[1, 3, 4, 0, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(by=["a", "b"], key=lambda x: -x)
expected = df.iloc[[3, 1, 4, 0, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(by=["a", "b"], key=lambda x: -x, ascending=False)
expected = df.iloc[[0, 4, 1, 3, 2, 5]]
tm.assert_frame_equal(result, expected)
def test_sort_values_by_key_by_name(self):
df = DataFrame(
{
"a": np.array([0, 3, np.nan, 3, 2, np.nan]),
"b": np.array([0, 2, np.nan, 5, 2, np.nan]),
}
)
def key(col):
if col.name == "a":
return -col
else:
return col
result = df.sort_values(by="a", key=key)
expected = df.iloc[[1, 3, 4, 0, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(by=["a"], key=key)
expected = df.iloc[[1, 3, 4, 0, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(by="b", key=key)
expected = df.iloc[[0, 1, 4, 3, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(by=["a", "b"], key=key)
expected = df.iloc[[1, 3, 4, 0, 2, 5]]
tm.assert_frame_equal(result, expected)
def test_sort_values_key_string(self):
df = DataFrame(np.array([["hello", "goodbye"], ["hello", "Hello"]]))
result = df.sort_values(1)
expected = df[::-1]
tm.assert_frame_equal(result, expected)
result = df.sort_values([0, 1], key=lambda col: col.str.lower())
tm.assert_frame_equal(result, df)
result = df.sort_values(
[0, 1], key=lambda col: col.str.lower(), ascending=False
)
expected = df.sort_values(1, key=lambda col: col.str.lower(), ascending=False)
tm.assert_frame_equal(result, expected)
def test_sort_values_key_empty(self, sort_by_key):
df = DataFrame(np.array([]))
df.sort_values(0, key=sort_by_key)
df.sort_index(key=sort_by_key)
def test_changes_length_raises(self):
df = DataFrame({"A": [1, 2, 3]})
with pytest.raises(ValueError, match="change the shape"):
df.sort_values("A", key=lambda x: x[:1])
def test_sort_values_key_axes(self):
df = DataFrame({0: ["Hello", "goodbye"], 1: [0, 1]})
result = df.sort_values(0, key=lambda col: col.str.lower())
expected = df[::-1]
tm.assert_frame_equal(result, expected)
result = df.sort_values(1, key=lambda col: -col)
expected = df[::-1]
tm.assert_frame_equal(result, expected)
def test_sort_values_key_dict_axis(self):
df = DataFrame({0: ["Hello", 0], 1: ["goodbye", 1]})
result = df.sort_values(0, key=lambda col: col.str.lower(), axis=1)
expected = df.loc[:, ::-1]
tm.assert_frame_equal(result, expected)
result = df.sort_values(1, key=lambda col: -col, axis=1)
expected = df.loc[:, ::-1]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
def test_sort_values_key_casts_to_categorical(self, ordered):
# https://github.com/pandas-dev/pandas/issues/36383
categories = ["c", "b", "a"]
df = DataFrame({"x": [1, 1, 1], "y": ["a", "b", "c"]})
def sorter(key):
if key.name == "y":
return pd.Series(
Categorical(key, categories=categories, ordered=ordered)
)
return key
result = df.sort_values(by=["x", "y"], key=sorter)
expected = DataFrame(
{"x": [1, 1, 1], "y": ["c", "b", "a"]}, index=pd.Index([2, 1, 0])
)
tm.assert_frame_equal(result, expected)
@pytest.fixture
def df_none():
return DataFrame(
{
"outer": ["a", "a", "a", "b", "b", "b"],
"inner": [1, 2, 2, 2, 1, 1],
"A": np.arange(6, 0, -1),
("B", 5): ["one", "one", "two", "two", "one", "one"],
}
)
@pytest.fixture(params=[["outer"], ["outer", "inner"]])
def df_idx(request, df_none):
levels = request.param
return df_none.set_index(levels)
@pytest.fixture(
params=[
"inner", # index level
["outer"], # list of index level
"A", # column
[("B", 5)], # list of column
["inner", "outer"], # two index levels
[("B", 5), "outer"], # index level and column
["A", ("B", 5)], # Two columns
["inner", "outer"], # two index levels and column
]
)
def sort_names(request):
return request.param
@pytest.fixture(params=[True, False])
def ascending(request):
return request.param
class TestSortValuesLevelAsStr:
def test_sort_index_level_and_column_label(
self, df_none, df_idx, sort_names, ascending
):
# GH#14353
# Get index levels from df_idx
levels = df_idx.index.names
# Compute expected by sorting on columns and the setting index
expected = df_none.sort_values(
by=sort_names, ascending=ascending, axis=0
).set_index(levels)
# Compute result sorting on mix on columns and index levels
result = df_idx.sort_values(by=sort_names, ascending=ascending, axis=0)
tm.assert_frame_equal(result, expected)
def test_sort_column_level_and_index_label(
self, df_none, df_idx, sort_names, ascending
):
# GH#14353
# Get levels from df_idx
levels = df_idx.index.names
# Compute expected by sorting on axis=0, setting index levels, and then
# transposing. For some cases this will result in a frame with
# multiple column levels
expected = (
df_none.sort_values(by=sort_names, ascending=ascending, axis=0)
.set_index(levels)
.T
)
# Compute result by transposing and sorting on axis=1.
result = df_idx.T.sort_values(by=sort_names, ascending=ascending, axis=1)
tm.assert_frame_equal(result, expected)
def test_sort_values_pos_args_deprecation(self):
# https://github.com/pandas-dev/pandas/issues/41485
df = DataFrame({"a": [1, 2, 3]})
msg = (
r"In a future version of pandas all arguments of DataFrame\.sort_values "
r"except for the argument 'by' will be keyword-only"
)
with tm.assert_produces_warning(FutureWarning, match=msg):
result = df.sort_values("a", 0)
expected = DataFrame({"a": [1, 2, 3]})
tm.assert_frame_equal(result, expected)
def test_sort_values_validate_ascending_for_value_error(self):
# GH41634
df =
|
DataFrame({"D": [23, 7, 21]})
|
pandas.DataFrame
|
import pandas as pd
#Excel file directory
momo_path = "C:/Users/Peg/Desktop/mkopa-dlight may june 2019 (1).xlsx"
#Reading sheets
class Sheet:
@staticmethod
def read_MK170():
return pd.read_excel(momo_path, sheet_name='MK 170')
@staticmethod
def read_MK3456():
return pd.read_excel(momo_path, sheet_name='MK 3456')
@staticmethod
def read_MKPOP():
return pd.read_excel(momo_path, sheet_name='MK POP')
@staticmethod
def read_VodafoneMK():
return pd.read_excel(momo_path, sheet_name='Vodafone MK')
@staticmethod
def read_TigoMK():
return pd.read_excel(momo_path, sheet_name='Tigo MK')
@staticmethod
def read_DL170():
return pd.read_excel(momo_path, sheet_name='DL 170')
@staticmethod
def read_DL3456():
return pd.read_excel(momo_path, sheet_name='DL 3456')
@staticmethod
def read_DLPOP():
return pd.read_excel(momo_path, sheet_name='DL POP')
@staticmethod
def read_TigoDL():
return pd.read_excel(momo_path, sheet_name='Tigo DL')
@staticmethod
def read_VodafoneDL():
return pd.read_excel(momo_path, 'Vodafone DL')
def convert_to_datetime(frames):
#Formatting Date for all
for frame in frames:
frame['TransactionDate'] = pd.to_datetime(frame['TransactionDate'])
frame['ActivationDate'] = pd.to_datetime(frame['ActivationDate'])
frame['LastDayOfMonth'] = pd.to_datetime(frame['LastDayOfMonth'])
frame['Month'] = frame['TransactionDate'].dt.month
frame["SinceActivation"] = (frame['LastDayOfMonth']-frame["ActivationDate"]).dt.days
return frames
#Function to count the number of payment transactions by Network Operator
def payments_collected_count(sheet):
month1 = sheet[sheet.Month == 5]
month1_Payment_Count = month1.Amount.count()
return month1_Payment_Count
#Function to aggregate total amount received per Network Operator or Channel
def amounts_total(sheet):
month1 = sheet[sheet.Month == 5]
month1_amounts_total = month1.agg({"Amount": "sum"})
return month1_amounts_total[0]
def customer_class(sheet):
month1 = sheet[sheet.Month == 5]
customer_status = month1.SinceActivation <=30
customer_status = customer_status.value_counts()
new_customers = customer_status[True]
old_customers = customer_status[False]
return(new_customers, old_customers)
def channel_aggregation(ussd, star, pop):
    ussd = ussd.groupby(by=['AccountNumber','Channel'])['Amount'].agg(['count', 'sum'])
    pop = pop.groupby(by=['AccountNumber','Channel'])['Amount'].agg(['count', 'sum'])
    star = star.groupby(by=['AccountNumber','Channel'])['Amount'].agg(['count', 'sum'])
    # Reset the index of each grouped frame and rename its columns, then stack them into one frame
    frames = [f.reset_index().rename(columns={'count':'Frequency of payment', 'sum':'Sum Paid','Channel':'Payment Method (POP/STAR/USSD)'}) for f in (ussd, pop, star)]
    frame = pd.concat(frames, sort=False)
    return frame
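# Hedged usage sketch (made-up transactions): each argument needs 'AccountNumber',
# 'Channel' and 'Amount' columns; the result has one row per account/channel pair
# with the payment frequency and the total paid.
def _example_channel_aggregation():
    ussd_demo = pd.DataFrame({'AccountNumber': ['A1', 'A1'], 'Channel': ['USSD', 'USSD'], 'Amount': [10, 20]})
    star_demo = pd.DataFrame({'AccountNumber': ['A2'], 'Channel': ['STAR'], 'Amount': [5]})
    pop_demo = pd.DataFrame({'AccountNumber': ['A3'], 'Channel': ['POP'], 'Amount': [7]})
    return channel_aggregation(ussd_demo, star_demo, pop_demo)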
def execute():
#Saving Read sheets as variables
MK170 = Sheet.read_MK170()
MK3456 = Sheet.read_MK3456()
MKPOP = Sheet.read_MKPOP()
VodafoneMK = Sheet.read_VodafoneMK()
TigoMK = Sheet.read_TigoMK()
DL170 = Sheet.read_DL170()
DL3456 = Sheet.read_DL3456()
DLPOP = Sheet.read_DLPOP()
TigoDL = Sheet.read_TigoDL()
VodafoneDL = Sheet.read_VodafoneDL()
#Concatenating all files into one frame
frames = [MK170,MK3456,MKPOP,VodafoneMK,TigoMK,DL170,DL3456,DLPOP,TigoDL,VodafoneDL]
#Calling to convert frames date to datetime
convert_to_datetime(frames)
#A dataframe of New Channel Users
new_channel = [MK3456,DL3456,MKPOP,DLPOP]
new_channel = pd.concat(new_channel, sort=False)
#A dataframe of Old Channel Users
ussd = [MK170,DL170,TigoDL,TigoMK,VodafoneDL,VodafoneMK]
ussd = pd.concat(ussd, sort=False)
#Star Channel
star = [MK3456,DL3456]
star = pd.concat(star, sort=False)
#POP Channel
pop = [MKPOP,DLPOP]
pop = pd.concat(pop, sort=False)
#MTN
mtn = [MK170, MK3456, MKPOP, DL170, DL3456, DLPOP]
mtn =
|
pd.concat(mtn, sort=False)
|
pandas.concat
|
import numpy as np
import pandas as pd
def main(payload):
df_list = []
for key, value in payload.items():
df =
|
pd.DataFrame(value)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
from collections import OrderedDict
from datetime import date, datetime, timedelta
import numpy as np
import pytest
from pandas.compat import product, range
import pandas as pd
from pandas import (
Categorical, DataFrame, Grouper, Index, MultiIndex, Series, concat,
date_range)
from pandas.api.types import CategoricalDtype as CDT
from pandas.core.reshape.pivot import crosstab, pivot_table
import pandas.util.testing as tm
@pytest.fixture(params=[True, False])
def dropna(request):
return request.param
class TestPivotTable(object):
def setup_method(self, method):
self.data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
def test_pivot_table(self):
index = ['A', 'B']
columns = 'C'
table = pivot_table(self.data, values='D',
index=index, columns=columns)
table2 = self.data.pivot_table(
values='D', index=index, columns=columns)
tm.assert_frame_equal(table, table2)
# this works
pivot_table(self.data, values='D', index=index)
if len(index) > 1:
assert table.index.names == tuple(index)
else:
assert table.index.name == index[0]
if len(columns) > 1:
assert table.columns.names == columns
else:
assert table.columns.name == columns[0]
expected = self.data.groupby(
index + [columns])['D'].agg(np.mean).unstack()
tm.assert_frame_equal(table, expected)
def test_pivot_table_nocols(self):
df = DataFrame({'rows': ['a', 'b', 'c'],
'cols': ['x', 'y', 'z'],
'values': [1, 2, 3]})
rs = df.pivot_table(columns='cols', aggfunc=np.sum)
xp = df.pivot_table(index='cols', aggfunc=np.sum).T
tm.assert_frame_equal(rs, xp)
rs = df.pivot_table(columns='cols', aggfunc={'values': 'mean'})
xp = df.pivot_table(index='cols', aggfunc={'values': 'mean'}).T
tm.assert_frame_equal(rs, xp)
def test_pivot_table_dropna(self):
df = DataFrame({'amount': {0: 60000, 1: 100000, 2: 50000, 3: 30000},
'customer': {0: 'A', 1: 'A', 2: 'B', 3: 'C'},
'month': {0: 201307, 1: 201309, 2: 201308, 3: 201310},
'product': {0: 'a', 1: 'b', 2: 'c', 3: 'd'},
'quantity': {0: 2000000, 1: 500000,
2: 1000000, 3: 1000000}})
pv_col = df.pivot_table('quantity', 'month', [
'customer', 'product'], dropna=False)
pv_ind = df.pivot_table(
'quantity', ['customer', 'product'], 'month', dropna=False)
m = MultiIndex.from_tuples([('A', 'a'), ('A', 'b'), ('A', 'c'),
('A', 'd'), ('B', 'a'), ('B', 'b'),
('B', 'c'), ('B', 'd'), ('C', 'a'),
('C', 'b'), ('C', 'c'), ('C', 'd')],
names=['customer', 'product'])
tm.assert_index_equal(pv_col.columns, m)
tm.assert_index_equal(pv_ind.index, m)
def test_pivot_table_categorical(self):
cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
result = pd.pivot_table(df, values='values', index=['A', 'B'],
dropna=True)
exp_index = pd.MultiIndex.from_arrays(
[cat1, cat2],
names=['A', 'B'])
expected = DataFrame(
{'values': [1, 2, 3, 4]},
index=exp_index)
tm.assert_frame_equal(result, expected)
def test_pivot_table_dropna_categoricals(self, dropna):
# GH 15193
categories = ['a', 'b', 'c', 'd']
df = DataFrame({'A': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c'],
'B': [1, 2, 3, 1, 2, 3, 1, 2, 3],
'C': range(0, 9)})
df['A'] = df['A'].astype(
|
CDT(categories, ordered=False)
|
pandas.api.types.CategoricalDtype
|
#----------------------------------------------------------------------------------------------
####################
# IMPORT LIBRARIES #
####################
import streamlit as st
import pandas as pd
import numpy as np
import plotly as dd
import plotly.express as px
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.font_manager
import plotly.graph_objects as go
import functions as fc
import modelling as ml
import os
import altair as alt
import itertools
import statsmodels.api as sm
from scipy import stats
import sys
from streamlit import caching
import SessionState
import platform
import base64
from io import BytesIO
from pygam import LinearGAM, LogisticGAM, s
from sklearn import decomposition
from sklearn.preprocessing import StandardScaler
from factor_analyzer import FactorAnalyzer
from factor_analyzer.factor_analyzer import calculate_bartlett_sphericity
from factor_analyzer.factor_analyzer import calculate_kmo
#----------------------------------------------------------------------------------------------
def app():
# Clear cache
caching.clear_cache()
# Hide traceback in error messages (comment out for debugging)
#sys.tracebacklimit = 0
# Show altair tooltip when full screen
st.markdown('<style>#vg-tooltip-element{z-index: 1000051}</style>',unsafe_allow_html=True)
#Session state
session_state = SessionState.get(id = 0)
# Analysis type
analysis_type = st.selectbox("What kind of analysis would you like to conduct?", ["Regression", "Multi-class classification", "Data decomposition"], key = session_state.id)
st.header("**Multivariate data**")
if analysis_type == "Regression":
st.markdown("Get your data ready for powerfull methods: Artificial Neural Networks, Boosted Regression Trees, Random Forest, Generalized Additive Models, Multiple Linear Regression, and Logistic Regression! Let STATY do data cleaning, variable transformations, visualizations and deliver you the stats you need. Specify your data processing preferences and start exploring your data stories right below... ")
if analysis_type == "Multi-class classification":
st.markdown("Get your data ready for powerfull multi-class classification methods! Let STATY do data cleaning, variable transformations, visualizations and deliver you the stats you need. Specify your data processing preferences and start exploring your data stories right below... ")
if analysis_type == "Data decomposition":
st.markdown("Decompose your data with Principal Component Analysis or Factor Analysis! Let STATY do data cleaning, variable transformations, visualizations and deliver you the stats you need. Specify your data processing preferences and start exploring your data stories right below... ")
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++++++++++++++++++++++++
# DATA IMPORT
# File upload section
df_dec = st.sidebar.radio("Get data", ["Use example dataset", "Upload data"])
uploaded_data=None
if df_dec == "Upload data":
#st.subheader("Upload your data")
#uploaded_data = st.sidebar.file_uploader("Make sure that dot (.) is a decimal separator!", type=["csv", "txt"])
separator_expander=st.sidebar.beta_expander('Upload settings')
with separator_expander:
a4,a5=st.beta_columns(2)
with a4:
dec_sep=a4.selectbox("Decimal sep.",['.',','], key = session_state.id)
with a5:
col_sep=a5.selectbox("Column sep.",[';', ',' , '|', '\s+', '\t','other'], key = session_state.id)
if col_sep=='other':
col_sep=st.text_input('Specify your column separator', key = session_state.id)
a4,a5=st.beta_columns(2)
with a4:
thousands_sep=a4.selectbox("Thousands x sep.",[None,'.', ' ','\s+', 'other'], key = session_state.id)
if thousands_sep=='other':
thousands_sep=st.text_input('Specify your thousands separator', key = session_state.id)
with a5:
encoding_val=a5.selectbox("Encoding",[None,'utf_8','utf_8_sig','utf_16_le','cp1140','cp1250','cp1251','cp1252','cp1253','cp1254','other'], key = session_state.id)
if encoding_val=='other':
encoding_val=st.text_input('Specify your encoding', key = session_state.id)
# Error handling for separator selection:
if dec_sep==col_sep:
st.sidebar.error("Decimal and column separators cannot be identical!")
elif dec_sep==thousands_sep:
st.sidebar.error("Decimal and thousands separators cannot be identical!")
elif col_sep==thousands_sep:
st.sidebar.error("Column and thousands separators cannot be identical!")
uploaded_data = st.sidebar.file_uploader("Default separators: decimal '.' | column ';'", type=["csv", "txt"])
if uploaded_data is not None:
df = pd.read_csv(uploaded_data, decimal=dec_sep, sep = col_sep,thousands=thousands_sep,encoding=encoding_val, engine='python')
df_name=os.path.splitext(uploaded_data.name)[0]
st.sidebar.success('Loading data... done!')
elif uploaded_data is None:
if analysis_type == "Regression" or analysis_type == "Data decomposition":
df = pd.read_csv("default data/WHR_2021.csv", sep = ";|,|\t",engine='python')
df_name="WHR_2021"
if analysis_type == "Multi-class classification":
df = pd.read_csv("default data/iris.csv", sep = ";|,|\t",engine='python')
df_name="iris"
else:
if analysis_type == "Regression" or analysis_type == "Data decomposition":
df = pd.read_csv("default data/WHR_2021.csv", sep = ";|,|\t",engine='python')
df_name="WHR_2021"
if analysis_type == "Multi-class classification":
df = pd.read_csv("default data/iris.csv", sep = ";|,|\t",engine='python')
df_name="iris"
st.sidebar.markdown("")
#Basic data info
n_rows = df.shape[0]
n_cols = df.shape[1]
#++++++++++++++++++++++++++++++++++++++++++++
# SETTINGS
settings_expander=st.sidebar.beta_expander('Settings')
with settings_expander:
st.caption("**Precision**")
user_precision=st.number_input('Number of digits after the decimal point',min_value=0,max_value=10,step=1,value=4)
st.caption("**Help**")
sett_hints = st.checkbox('Show learning hints', value=False)
st.caption("**Appearance**")
sett_wide_mode = st.checkbox('Wide mode', value=False)
sett_theme = st.selectbox('Theme', ["Light", "Dark"])
#sett_info = st.checkbox('Show methods info', value=False)
#sett_prec = st.number_input('Set the number of diggits for the output', min_value=0, max_value=8, value=2)
st.sidebar.markdown("")
# Check if wide mode
if sett_wide_mode:
fc.wide_mode_func()
# Check theme
if sett_theme == "Dark":
fc.theme_func_dark()
if sett_theme == "Light":
fc.theme_func_light()
fc.theme_func_dl_button()
#++++++++++++++++++++++++++++++++++++++++++++
# RESET INPUT
reset_clicked = st.sidebar.button("Reset all your input")
if reset_clicked:
session_state.id = session_state.id + 1
st.sidebar.markdown("")
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++++++++++++++++++++++++
# DATA PREPROCESSING & VISUALIZATION
# Check if enough data is available
if n_rows > 0 and n_cols > 0:
st.empty()
else:
st.error("ERROR: Not enough data!")
return
data_exploration_container = st.beta_container()
with data_exploration_container:
st.header("**Data screening and processing**")
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++
# DATA SUMMARY
# Main panel for data summary (pre)
#----------------------------------
dev_expander_dsPre = st.beta_expander("Explore raw data info and stats ", expanded = False)
with dev_expander_dsPre:
# Default data description:
if uploaded_data == None:
if analysis_type == "Regression" or analysis_type == "Data decomposition":
if st.checkbox("Show data description", value = False, key = session_state.id):
st.markdown("**Data source:**")
st.markdown("The data come from the Gallup World Poll surveys from 2018 to 2020. For more details see the [World Happiness Report 2021] (https://worldhappiness.report/).")
st.markdown("**Citation:**")
st.markdown("Helliwell, <NAME>., <NAME>, <NAME>, and <NAME>, eds. 2021. World Happiness Report 2021. New York: Sustainable Development Solutions Network.")
st.markdown("**Variables in the dataset:**")
col1,col2=st.beta_columns(2)
col1.write("Country")
col2.write("country name")
col1,col2=st.beta_columns(2)
col1.write("Year ")
col2.write("year ranging from 2005 to 2020")
col1,col2=st.beta_columns(2)
col1.write("Ladder")
col2.write("happiness score or subjective well-being with the best possible life being a 10, and the worst possible life being a 0")
col1,col2=st.beta_columns(2)
col1.write("Log GDP per capita")
col2.write("in purchasing power parity at constant 2017 international dollar prices")
col1,col2=st.beta_columns(2)
col1.write("Social support")
col2.write("the national average of the binary responses (either 0 or 1) to the question regarding relatives or friends to count on")
col1,col2=st.beta_columns(2)
col1.write("Healthy life expectancy at birth")
col2.write("based on the data extracted from the World Health Organization’s Global Health Observatory data repository")
col1,col2=st.beta_columns(2)
col1.write("Freedom to make life choices")
col2.write("national average of responses to the corresponding question")
col1,col2=st.beta_columns(2)
col1.write("Generosity")
col2.write("residual of regressing national average of response to the question regarding money donations in the past month on GDP per capita")
col1,col2=st.beta_columns(2)
col1.write("Perceptions of corruption")
col2.write("the national average of the survey responses to the corresponding question")
col1,col2=st.beta_columns(2)
col1.write("Positive affect")
col2.write("the average of three positive affect measures (happiness, laugh and enjoyment)")
col1,col2=st.beta_columns(2)
col1.write("Negative affect (worry, sadness and anger)")
col2.write("the average of three negative affect measures (worry, sadness and anger)")
st.markdown("")
if analysis_type == "Multi-class classification":
if st.checkbox("Show data description", value = False, key = session_state.id):
st.markdown("**Data source:**")
st.markdown("The data come from Fisher's Iris data set. See [here] (https://archive.ics.uci.edu/ml/datasets/iris) for more information.")
st.markdown("**Citation:**")
st.markdown("<NAME>. (1936). The use of multiple measurements in taxonomic problems. Annals of Eugenics, 7(2): 179–188. doi: [10.1111/j.1469-1809.1936.tb02137.x] (https://doi.org/10.1111%2Fj.1469-1809.1936.tb02137.x)")
st.markdown("**Variables in the dataset:**")
col1,col2=st.beta_columns(2)
col1.write("class_category")
col2.write("Numerical category for 'class': Iris Setosa (0), Iris Versicolour (1), and Iris Virginica (2)")
col1,col2=st.beta_columns(2)
col1.write("class")
col2.write("Iris Setosa, Iris Versicolour, and Iris Virginica")
col1,col2=st.beta_columns(2)
col1.write("sepal length")
col2.write("sepal length in cm")
col1,col2=st.beta_columns(2)
col1.write("sepal width")
col2.write("sepal width in cm")
col1,col2=st.beta_columns(2)
col1.write("petal length")
col2.write("petal length in cm")
col1,col2=st.beta_columns(2)
col1.write("petal width")
col2.write("petal width in cm")
st.markdown("")
# Show raw data & data info
df_summary = fc.data_summary(df)
if st.checkbox("Show raw data ", value = False, key = session_state.id):
st.write(df)
st.write("Data shape: ", n_rows, " rows and ", n_cols, " columns")
if df[df.duplicated()].shape[0] > 0 or df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0] > 0:
check_nasAnddupl=st.checkbox("Show duplicates and NAs info ", value = False, key = session_state.id)
if check_nasAnddupl:
if df[df.duplicated()].shape[0] > 0:
st.write("Number of duplicates: ", df[df.duplicated()].shape[0])
st.write("Duplicate row index: ", ', '.join(map(str,list(df.index[df.duplicated()]))))
if df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0] > 0:
st.write("Number of rows with NAs: ", df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0])
st.write("Rows with NAs: ", ', '.join(map(str,list(pd.unique(np.where(df.isnull())[0])))))
# Show variable info
if st.checkbox('Show variable info ', value = False, key = session_state.id):
st.write(df_summary["Variable types"])
# Show summary statistics (raw data)
if st.checkbox('Show summary statistics (raw data) ', value = False, key = session_state.id):
st.write(df_summary["ALL"].style.set_precision(user_precision))
# Download link for summary statistics
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df_summary["Variable types"].to_excel(excel_file, sheet_name="variable_info")
df_summary["ALL"].to_excel(excel_file, sheet_name="summary_statistics")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "Summary statistics__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download summary statistics</a>
""",
unsafe_allow_html=True)
st.write("")
if fc.get_mode(df).loc["n_unique"].any():
st.caption("** Mode is not unique.")
if sett_hints:
st.info(str(fc.learning_hints("de_summary_statistics")))
#++++++++++++++++++++++
# DATA PROCESSING
# Settings for data processing
#-------------------------------------
#st.subheader("Data processing")
dev_expander_dm_sb = st.beta_expander("Specify data processing preferences", expanded = False)
with dev_expander_dm_sb:
n_rows_wNAs = df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0]
n_rows_wNAs_pre_processing = "No"
if n_rows_wNAs > 0:
n_rows_wNAs_pre_processing = "Yes"
a1, a2, a3 = st.beta_columns(3)
else: a1, a3 = st.beta_columns(2)
sb_DM_dImp_num = None
sb_DM_dImp_other = None
sb_DM_delRows=None
sb_DM_keepRows=None
with a1:
#--------------------------------------------------------------------------------------
# DATA CLEANING
st.markdown("**Data cleaning**")
# Delete rows
delRows =st.selectbox('Delete rows with index ...', options=['-', 'greater', 'greater or equal', 'smaller', 'smaller or equal', 'equal', 'between'], key = session_state.id)
if delRows!='-':
if delRows=='between':
row_1=st.number_input('Lower limit is', value=0, step=1, min_value= 0, max_value=len(df)-1, key = session_state.id)
row_2=st.number_input('Upper limit is', value=2, step=1, min_value= 0, max_value=len(df)-1, key = session_state.id)
if (row_1 + 1) < row_2 :
sb_DM_delRows=df.index[(df.index > row_1) & (df.index < row_2)]
elif (row_1 + 1) == row_2 :
st.warning("WARNING: No row is deleted!")
elif row_1 == row_2 :
st.warning("WARNING: No row is deleted!")
elif row_1 > row_2 :
st.error("ERROR: Lower limit must be smaller than upper limit!")
return
elif delRows=='equal':
sb_DM_delRows = st.multiselect("to...", df.index, key = session_state.id)
else:
row_1=st.number_input('than...', step=1, value=1, min_value = 0, max_value=len(df)-1, key = session_state.id)
if delRows=='greater':
sb_DM_delRows=df.index[df.index > row_1]
if row_1 == len(df)-1:
st.warning("WARNING: No row is deleted!")
elif delRows=='greater or equal':
sb_DM_delRows=df.index[df.index >= row_1]
if row_1 == 0:
st.error("ERROR: All rows are deleted!")
return
elif delRows=='smaller':
sb_DM_delRows=df.index[df.index < row_1]
if row_1 == 0:
st.warning("WARNING: No row is deleted!")
elif delRows=='smaller or equal':
sb_DM_delRows=df.index[df.index <= row_1]
if row_1 == len(df)-1:
st.error("ERROR: All rows are deleted!")
return
if sb_DM_delRows is not None:
df = df.loc[~df.index.isin(sb_DM_delRows)]
no_delRows=n_rows-df.shape[0]
# Keep rows
keepRows =st.selectbox('Keep rows with index ...', options=['-', 'greater', 'greater or equal', 'smaller', 'smaller or equal', 'equal', 'between'], key = session_state.id)
if keepRows!='-':
if keepRows=='between':
row_1=st.number_input('Lower limit is', value=0, step=1, min_value= 0, max_value=len(df)-1, key = session_state.id)
row_2=st.number_input('Upper limit is', value=2, step=1, min_value= 0, max_value=len(df)-1, key = session_state.id)
if (row_1 + 1) < row_2 :
sb_DM_keepRows=df.index[(df.index > row_1) & (df.index < row_2)]
elif (row_1 + 1) == row_2 :
st.error("ERROR: No row is kept!")
return
elif row_1 == row_2 :
st.error("ERROR: No row is kept!")
return
elif row_1 > row_2 :
st.error("ERROR: Lower limit must be smaller than upper limit!")
return
elif keepRows=='equal':
sb_DM_keepRows = st.multiselect("to...", df.index, key = session_state.id)
else:
row_1=st.number_input('than...', step=1, value=1, min_value = 0, max_value=len(df)-1, key = session_state.id)
if keepRows=='greater':
sb_DM_keepRows=df.index[df.index > row_1]
if row_1 == len(df)-1:
st.error("ERROR: No row is kept!")
return
elif keepRows=='greater or equal':
sb_DM_keepRows=df.index[df.index >= row_1]
if row_1 == 0:
st.warning("WARNING: All rows are kept!")
elif keepRows=='smaller':
sb_DM_keepRows=df.index[df.index < row_1]
if row_1 == 0:
st.error("ERROR: No row is kept!")
return
elif keepRows=='smaller or equal':
sb_DM_keepRows=df.index[df.index <= row_1]
if sb_DM_keepRows is not None:
df = df.loc[df.index.isin(sb_DM_keepRows)]
no_keptRows=df.shape[0]
# Delete columns
sb_DM_delCols = st.multiselect("Select columns to delete ", df.columns, key = session_state.id)
df = df.loc[:,~df.columns.isin(sb_DM_delCols)]
# Keep columns
sb_DM_keepCols = st.multiselect("Select columns to keep", df.columns, key = session_state.id)
if len(sb_DM_keepCols) > 0:
df = df.loc[:,df.columns.isin(sb_DM_keepCols)]
# Delete duplicates if any exist
if df[df.duplicated()].shape[0] > 0:
sb_DM_delDup = st.selectbox("Delete duplicate rows ", ["No", "Yes"], key = session_state.id)
if sb_DM_delDup == "Yes":
n_rows_dup = df[df.duplicated()].shape[0]
df = df.drop_duplicates()
elif df[df.duplicated()].shape[0] == 0:
sb_DM_delDup = "No"
# Delete rows with NA if any exist
n_rows_wNAs = df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0]
if n_rows_wNAs > 0:
sb_DM_delRows_wNA = st.selectbox("Delete rows with NAs ", ["No", "Yes"], key = session_state.id)
if sb_DM_delRows_wNA == "Yes":
df = df.dropna()
elif n_rows_wNAs == 0:
sb_DM_delRows_wNA = "No"
# Filter data
st.markdown("**Data filtering**")
filter_var = st.selectbox('Filter your data by a variable...', list('-')+ list(df.columns), key = session_state.id)
if filter_var !='-':
if df[filter_var].dtypes=="int64" or df[filter_var].dtypes=="float64":
if df[filter_var].dtypes=="float64":
filter_format="%.8f"
else:
filter_format=None
user_filter=st.selectbox('Select values that are ...', options=['greater','greater or equal','smaller','smaller or equal', 'equal','between'], key = session_state.id)
if user_filter=='between':
filter_1=st.number_input('Lower limit is', format=filter_format, value=df[filter_var].min(), min_value=df[filter_var].min(), max_value=df[filter_var].max(), key = session_state.id)
filter_2=st.number_input('Upper limit is', format=filter_format, value=df[filter_var].max(), min_value=df[filter_var].min(), max_value=df[filter_var].max(), key = session_state.id)
#reclassify values:
if filter_1 < filter_2 :
df = df[(df[filter_var] > filter_1) & (df[filter_var] < filter_2)]
if len(df) == 0:
st.error("ERROR: No data available for the selected limits!")
return
elif filter_1 >= filter_2 :
st.error("ERROR: Lower limit must be smaller than upper limit!")
return
elif user_filter=='equal':
filter_1=st.multiselect('to... ', options=df[filter_var].values, key = session_state.id)
if len(filter_1)>0:
df = df.loc[df[filter_var].isin(filter_1)]
else:
filter_1=st.number_input('than... ',format=filter_format, value=df[filter_var].min(), min_value=df[filter_var].min(), max_value=df[filter_var].max(), key = session_state.id)
#reclassify values:
if user_filter=='greater':
df = df[df[filter_var] > filter_1]
elif user_filter=='greater or equal':
df = df[df[filter_var] >= filter_1]
elif user_filter=='smaller':
df= df[df[filter_var]< filter_1]
elif user_filter=='smaller or equal':
df = df[df[filter_var] <= filter_1]
if len(df) == 0:
st.error("ERROR: No data available for the selected value!")
return
elif len(df) == n_rows:
st.warning("WARNING: Data are not filtered for this value!")
else:
filter_1=st.multiselect('Filter your data by a value...', (df[filter_var]).unique(), key = session_state.id)
if len(filter_1)>0:
df = df.loc[df[filter_var].isin(filter_1)]
if n_rows_wNAs_pre_processing == "Yes":
with a2:
#--------------------------------------------------------------------------------------
# DATA IMPUTATION
# Select data imputation method (only if rows with NA not deleted)
if sb_DM_delRows_wNA == "No" and n_rows_wNAs > 0:
st.markdown("**Data imputation**")
sb_DM_dImp_choice = st.selectbox("Replace entries with NA ", ["No", "Yes"], key = session_state.id)
if sb_DM_dImp_choice == "Yes":
# Numeric variables
sb_DM_dImp_num = st.selectbox("Imputation method for numeric variables ", ["Mean", "Median", "Random value"], key = session_state.id)
# Other variables
sb_DM_dImp_other = st.selectbox("Imputation method for other variables ", ["Mode", "Random value"], key = session_state.id)
df = fc.data_impute(df, sb_DM_dImp_num, sb_DM_dImp_other)
else:
st.markdown("**Data imputation**")
st.write("")
st.info("No NAs in data set!")
with a3:
#--------------------------------------------------------------------------------------
# DATA TRANSFORMATION
st.markdown("**Data transformation**")
# Select columns for different transformation types
transform_options = df.select_dtypes([np.number]).columns
numCat_options = df.columns
sb_DM_dTrans_log = st.multiselect("Select columns to transform with log ", transform_options, key = session_state.id)
if sb_DM_dTrans_log is not None:
df = fc.var_transform_log(df, sb_DM_dTrans_log)
sb_DM_dTrans_sqrt = st.multiselect("Select columns to transform with sqrt ", transform_options, key = session_state.id)
if sb_DM_dTrans_sqrt is not None:
df = fc.var_transform_sqrt(df, sb_DM_dTrans_sqrt)
sb_DM_dTrans_square = st.multiselect("Select columns for squaring ", transform_options, key = session_state.id)
if sb_DM_dTrans_square is not None:
df = fc.var_transform_square(df, sb_DM_dTrans_square)
sb_DM_dTrans_cent = st.multiselect("Select columns for centering ", transform_options, key = session_state.id)
if sb_DM_dTrans_cent is not None:
df = fc.var_transform_cent(df, sb_DM_dTrans_cent)
sb_DM_dTrans_stand = st.multiselect("Select columns for standardization ", transform_options, key = session_state.id)
if sb_DM_dTrans_stand is not None:
df = fc.var_transform_stand(df, sb_DM_dTrans_stand)
sb_DM_dTrans_norm = st.multiselect("Select columns for normalization ", transform_options, key = session_state.id)
if sb_DM_dTrans_norm is not None:
df = fc.var_transform_norm(df, sb_DM_dTrans_norm)
sb_DM_dTrans_numCat = st.multiselect("Select columns for numeric categorization ", numCat_options, key = session_state.id)
if sb_DM_dTrans_numCat:
if not df[sb_DM_dTrans_numCat].columns[df[sb_DM_dTrans_numCat].isna().any()].tolist():
sb_DM_dTrans_numCat_sel = st.multiselect("Select variables for manual categorization ", sb_DM_dTrans_numCat, key = session_state.id)
if sb_DM_dTrans_numCat_sel:
for var in sb_DM_dTrans_numCat_sel:
if df[var].unique().size > 5:
st.error("ERROR: Selected variable has too many categories (>5): " + str(var))
return
else:
manual_cats = pd.DataFrame(index = range(0, df[var].unique().size), columns=["Value", "Cat"])
text = "Category for "
# Save manually selected categories
for i in range(0, df[var].unique().size):
text1 = text + str(var) + ": " + str(sorted(df[var].unique())[i])
man_cat = st.number_input(text1, value = 0, min_value=0, key = session_state.id)
manual_cats.loc[i]["Value"] = sorted(df[var].unique())[i]
manual_cats.loc[i]["Cat"] = man_cat
new_var_name = "numCat_" + var
new_var = pd.DataFrame(index = df.index, columns = [new_var_name])
for c in df[var].index:
if pd.isnull(df[var][c]) == True:
new_var.loc[c, new_var_name] = np.nan
elif pd.isnull(df[var][c]) == False:
new_var.loc[c, new_var_name] = int(manual_cats[manual_cats["Value"] == df[var][c]]["Cat"])
df[new_var_name] = new_var.astype('int64')
# Exclude columns with manual categorization from standard categorization
numCat_wo_manCat = [var for var in sb_DM_dTrans_numCat if var not in sb_DM_dTrans_numCat_sel]
df = fc.var_transform_numCat(df, numCat_wo_manCat)
else:
df = fc.var_transform_numCat(df, sb_DM_dTrans_numCat)
else:
col_with_na = df[sb_DM_dTrans_numCat].columns[df[sb_DM_dTrans_numCat].isna().any()].tolist()
st.error("ERROR: Please select columns without NAs: " + ', '.join(map(str,col_with_na)))
return
else:
sb_DM_dTrans_numCat = None
sb_DM_dTrans_mult = st.number_input("Number of variable multiplications ", value = 0, min_value=0, key = session_state.id)
if sb_DM_dTrans_mult != 0:
multiplication_pairs = pd.DataFrame(index = range(0, sb_DM_dTrans_mult), columns=["Var1", "Var2"])
text = "Multiplication pair"
for i in range(0, sb_DM_dTrans_mult):
text1 = text + " " + str(i+1)
text2 = text + " " + str(i+1) + " "
mult_var1 = st.selectbox(text1, transform_options, key = session_state.id)
mult_var2 = st.selectbox(text2, transform_options, key = session_state.id)
multiplication_pairs.loc[i]["Var1"] = mult_var1
multiplication_pairs.loc[i]["Var2"] = mult_var2
fc.var_transform_mult(df, mult_var1, mult_var2)
sb_DM_dTrans_div = st.number_input("Number of variable divisions ", value = 0, min_value=0, key = session_state.id)
if sb_DM_dTrans_div != 0:
division_pairs = pd.DataFrame(index = range(0, sb_DM_dTrans_div), columns=["Var1", "Var2"])
text = "Division pair"
for i in range(0, sb_DM_dTrans_div):
text1 = text + " " + str(i+1) + " (numerator)"
text2 = text + " " + str(i+1) + " (denominator)"
div_var1 = st.selectbox(text1, transform_options, key = session_state.id)
div_var2 = st.selectbox(text2, transform_options, key = session_state.id)
division_pairs.loc[i]["Var1"] = div_var1
division_pairs.loc[i]["Var2"] = div_var2
fc.var_transform_div(df, div_var1, div_var2)
data_transform=st.checkbox("Transform data in Excel?", value=False)
if data_transform==True:
st.info("Press the button to open your data in Excel. Don't forget to save your result as a csv or a txt file!")
# Download link
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df.to_excel(excel_file, sheet_name="data",index=False)
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "Data_transformation__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Transform your data in Excel</a>
""",
unsafe_allow_html=True)
st.write("")
#--------------------------------------------------------------------------------------
# PROCESSING SUMMARY
if st.checkbox('Show a summary of my data processing preferences ', value = False, key = session_state.id):
st.markdown("Summary of data changes:")
#--------------------------------------------------------------------------------------
# DATA CLEANING
# Rows
if sb_DM_delRows is not None and delRows!='-' :
if no_delRows > 1:
st.write("-", no_delRows, " rows were deleted!")
elif no_delRows == 1:
st.write("-",no_delRows, " row was deleted!")
elif no_delRows == 0:
st.write("- No row was deleted!")
else:
st.write("- No row was deleted!")
if sb_DM_keepRows is not None and keepRows!='-' :
if no_keptRows > 1:
st.write("-", no_keptRows, " rows are kept!")
elif no_keptRows == 1:
st.write("-",no_keptRows, " row is kept!")
elif no_keptRows == 0:
st.write("- All rows are kept!")
else:
st.write("- All rows are kept!")
# Columns
if len(sb_DM_delCols) > 1:
st.write("-", len(sb_DM_delCols), " columns were deleted:", ', '.join(sb_DM_delCols))
elif len(sb_DM_delCols) == 1:
st.write("-",len(sb_DM_delCols), " column was deleted:", str(sb_DM_delCols[0]))
elif len(sb_DM_delCols) == 0:
st.write("- No column was deleted!")
if len(sb_DM_keepCols) > 1:
st.write("-", len(sb_DM_keepCols), " columns are kept:", ', '.join(sb_DM_keepCols))
elif len(sb_DM_keepCols) == 1:
st.write("-",len(sb_DM_keepCols), " column is kept:", str(sb_DM_keepCols[0]))
elif len(sb_DM_keepCols) == 0:
st.write("- All columns are kept!")
# Duplicates
if sb_DM_delDup == "Yes":
if n_rows_dup > 1:
st.write("-", n_rows_dup, " duplicate rows were deleted!")
elif n_rows_dup == 1:
st.write("-", n_rows_dup, "duplicate row was deleted!")
else:
st.write("- No duplicate row was deleted!")
# NAs
if sb_DM_delRows_wNA == "Yes":
if n_rows_wNAs > 1:
st.write("-", n_rows_wNAs, "rows with NAs were deleted!")
elif n_rows_wNAs == 1:
st.write("-", n_rows - n_rows_wNAs, "row with NAs was deleted!")
else:
st.write("- No row with NAs was deleted!")
# Filter
if filter_var != "-":
if df[filter_var].dtypes=="int64" or df[filter_var].dtypes=="float64":
if isinstance(filter_1, list):
if len(filter_1) == 0:
st.write("-", " Data was not filtered!")
elif len(filter_1) > 0:
st.write("-", " Data filtered by:", str(filter_var))
elif filter_1 is not None:
st.write("-", " Data filtered by:", str(filter_var))
else:
st.write("-", " Data was not filtered!")
elif len(filter_1)>0:
st.write("-", " Data filtered by:", str(filter_var))
elif len(filter_1) == 0:
st.write("-", " Data was not filtered!")
else:
st.write("-", " Data was not filtered!")
#--------------------------------------------------------------------------------------
# DATA IMPUTATION
if sb_DM_delRows_wNA == "No" and n_rows_wNAs > 0:
st.write("- Data imputation method for numeric variables:", sb_DM_dImp_num)
st.write("- Data imputation method for other variable types:", sb_DM_dImp_other)
#--------------------------------------------------------------------------------------
# DATA TRANSFORMATION
# log
if len(sb_DM_dTrans_log) > 1:
st.write("-", len(sb_DM_dTrans_log), " columns were log-transformed:", ', '.join(sb_DM_dTrans_log))
elif len(sb_DM_dTrans_log) == 1:
st.write("-",len(sb_DM_dTrans_log), " column was log-transformed:", sb_DM_dTrans_log[0])
elif len(sb_DM_dTrans_log) == 0:
st.write("- No column was log-transformed!")
# sqrt
if len(sb_DM_dTrans_sqrt) > 1:
st.write("-", len(sb_DM_dTrans_sqrt), " columns were sqrt-transformed:", ', '.join(sb_DM_dTrans_sqrt))
elif len(sb_DM_dTrans_sqrt) == 1:
st.write("-",len(sb_DM_dTrans_sqrt), " column was sqrt-transformed:", sb_DM_dTrans_sqrt[0])
elif len(sb_DM_dTrans_sqrt) == 0:
st.write("- No column was sqrt-transformed!")
# square
if len(sb_DM_dTrans_square) > 1:
st.write("-", len(sb_DM_dTrans_square), " columns were squared:", ', '.join(sb_DM_dTrans_square))
elif len(sb_DM_dTrans_square) == 1:
st.write("-",len(sb_DM_dTrans_square), " column was squared:", sb_DM_dTrans_square[0])
elif len(sb_DM_dTrans_square) == 0:
st.write("- No column was squared!")
# centering
if len(sb_DM_dTrans_cent) > 1:
st.write("-", len(sb_DM_dTrans_cent), " columns were centered:", ', '.join(sb_DM_dTrans_cent))
elif len(sb_DM_dTrans_cent) == 1:
st.write("-",len(sb_DM_dTrans_cent), " column was centered:", sb_DM_dTrans_cent[0])
elif len(sb_DM_dTrans_cent) == 0:
st.write("- No column was centered!")
# standardize
if len(sb_DM_dTrans_stand) > 1:
st.write("-", len(sb_DM_dTrans_stand), " columns were standardized:", ', '.join(sb_DM_dTrans_stand))
elif len(sb_DM_dTrans_stand) == 1:
st.write("-",len(sb_DM_dTrans_stand), " column was standardized:", sb_DM_dTrans_stand[0])
elif len(sb_DM_dTrans_stand) == 0:
st.write("- No column was standardized!")
# normalize
if len(sb_DM_dTrans_norm) > 1:
st.write("-", len(sb_DM_dTrans_norm), " columns were normalized:", ', '.join(sb_DM_dTrans_norm))
elif len(sb_DM_dTrans_norm) == 1:
st.write("-",len(sb_DM_dTrans_norm), " column was normalized:", sb_DM_dTrans_norm[0])
elif len(sb_DM_dTrans_norm) == 0:
st.write("- No column was normalized!")
# numeric category
if sb_DM_dTrans_numCat is not None:
if len(sb_DM_dTrans_numCat) > 1:
st.write("-", len(sb_DM_dTrans_numCat), " columns were transformed to numeric categories:", ', '.join(sb_DM_dTrans_numCat))
elif len(sb_DM_dTrans_numCat) == 1:
st.write("-",len(sb_DM_dTrans_numCat), " column was transformed to numeric categories:", sb_DM_dTrans_numCat[0])
elif sb_DM_dTrans_numCat is None:
st.write("- No column was transformed to numeric categories!")
# multiplication
if sb_DM_dTrans_mult != 0:
st.write("-", "Number of variable multiplications: ", sb_DM_dTrans_mult)
elif sb_DM_dTrans_mult == 0:
st.write("- No variables were multiplied!")
# division
if sb_DM_dTrans_div != 0:
st.write("-", "Number of variable divisions: ", sb_DM_dTrans_div)
elif sb_DM_dTrans_div == 0:
st.write("- No variables were divided!")
st.write("")
st.write("")
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++
# UPDATED DATA SUMMARY
# Show only if changes were made
if any(v for v in [sb_DM_delCols, sb_DM_dImp_num, sb_DM_dImp_other, sb_DM_dTrans_log, sb_DM_dTrans_sqrt, sb_DM_dTrans_square, sb_DM_dTrans_cent, sb_DM_dTrans_stand, sb_DM_dTrans_norm, sb_DM_dTrans_numCat ] if v is not None) or sb_DM_delDup == "Yes" or sb_DM_delRows_wNA == "Yes" or sb_DM_dTrans_mult != 0 or sb_DM_dTrans_div != 0 or filter_var != "-" or delRows!='-' or keepRows!='-' or len(sb_DM_keepCols) > 0:
dev_expander_dsPost = st.beta_expander("Explore cleaned and transformed data info and stats ", expanded = False)
with dev_expander_dsPost:
if df.shape[1] > 0 and df.shape[0] > 0:
# Show cleaned and transformed data & data info
df_summary_post = fc.data_summary(df)
if st.checkbox("Show cleaned and transformed data ", value = False, key = session_state.id):
n_rows_post = df.shape[0]
n_cols_post = df.shape[1]
st.dataframe(df)
st.write("Data shape: ", n_rows_post, "rows and ", n_cols_post, "columns")
# Download transformed data:
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df.to_excel(excel_file, sheet_name="Clean. and transf. data")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "CleanedTransfData__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download cleaned and transformed data</a>
""",
unsafe_allow_html=True)
st.write("")
if df[df.duplicated()].shape[0] > 0 or df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0] > 0:
check_nasAnddupl2 = st.checkbox("Show duplicates and NAs info (processed) ", value = False, key = session_state.id)
if check_nasAnddupl2:
index_c = []
for c in df.columns:
for r in df.index:
if
|
pd.isnull(df[c][r])
|
pandas.isnull
|
import pandas as pd
import numpy as np
import matplotlib.colors
import matplotlib.pyplot as plt
import seaborn as sns
def get_substrate_info(substrate_string, colname, carbo_df):
"""Get values in a column of the carbohydrates spreadsheet based on a string-list of substrates.
Parameters:
substrate_string (str): list of substrates represented as a string with each value separated by "; "
colname (str): name of the column in the carbohydrates spreadsheet to access
carbo_df: dataframe of carbohydrates
Returns:
str: "; "-separated set (no repeats) of items in the column specified by colname for the rows specified by substrate_string
float: np.nan is returned if none of the listed substrates has a value in that column
"""
if not pd.isna(substrate_string):
substrates = substrate_string.split('; ')
substrate_info = []
for substrate in substrates:
info = carbo_df[carbo_df['name'] == substrate][colname].values[0]
if not pd.isna(info):
info = info.split('; ')
substrate_info += info
if substrate_info:
substrate_info = '; '.join(set(substrate_info))
else:
substrate_info = np.nan
return(substrate_info)
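# Hedged usage sketch (hypothetical two-row carbohydrate table; 'monomers' is a made-up
# column name): only the 'name' column and the requested column are read, so this is
# enough to exercise the lookup.
def _example_get_substrate_info():
    carbo_demo = pd.DataFrame({
        'name': ['cellulose', 'xylan'],
        'monomers': ['glucose', 'xylose; arabinose'],
    })
    # Returns a '; '-joined set, i.e. some ordering of glucose, xylose and arabinose
    return get_substrate_info('cellulose; xylan', 'monomers', carbo_demo)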
def get_substrates_and_activities_from_ECs(ECs, EC_df):
"""Get substrates and activities from a "; "-separated string list of EC numbers
Parameters:
ECs (str): list of ECs represented as a string with each value separated by "; "
EC_df (DataFrame): table of EC values with substrates and activities
Returns:
tuple: substrates and activities, each either a "; "-separated string list or np.nan if no substrate/activity was available
"""
substrates = []
activities = []
if not pd.isna(ECs):
ECs = ECs.split('; ')
for EC in ECs:
EC_substrates = EC_df.loc[EC_df['EC'] == EC]['substrates'].values[0]
EC_activities = EC_df.loc[EC_df['EC'] == EC]['activity'].values[0]
if not
|
pd.isna(EC_substrates)
|
pandas.isna
|
#!flask/bin/python
from flask import Flask, request
import pandas as pd
import pickle
from sklearn.model_selection import train_test_split
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
app = Flask(__name__)
filenameMouseKeyboard = 'MouseKey.sav'
filenameBrowserInfo = 'BrowserInfo.sav'
TEST_USERNAME = 'senthuran'
def replaceBrackets(line):
    brackets = "{}"
    for char in brackets:
        line = line.replace(char, "")
    return line
def convertStringToNumberList(values):
    # Cast each value to int where possible, otherwise fall back to float
    for key in values:
        try:
            values[key] = int(values[key])
        except ValueError:
            values[key] = float(values[key])
    return values
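# Hedged usage sketch (made-up event string and keys): shows how the two helpers above turn
# the "{key=value, key=value}" payload format into the single-row DataFrame the model expects.
def _example_event_to_dataframe():
    raw = "{dwellTime=120, flightTime=35.5}"
    parsed = dict(item.split("=") for item in replaceBrackets(raw).split(", "))
    return pd.DataFrame(convertStringToNumberList(parsed), index=[0])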
@app.route('/userDynamics', methods=['POST', 'GET'])
def userDynamics():
combinedEvent = replaceBrackets(request.get_json()['mouseEvent']) + ", " + replaceBrackets(
request.get_json()['keyboardEvent']);
res = dict(item.split("=") for item in combinedEvent.split(", "))
numberList = convertStringToNumberList(res)
new_input = pd.DataFrame(numberList, index=[0])
# load the model from disk
loaded_model = pickle.load(open(filenameMouseKeyboard, 'rb'))
if (request.get_json()['usernameOREmail'] in loaded_model.classes_.tolist()):
index = loaded_model.classes_.tolist().index(request.get_json()['usernameOREmail'])
else:
return str(0)
# print('userDynamics',loaded_model.predict_proba(new_input),loaded_model.classes_)
if(request.get_json()['usernameOREmail'] == TEST_USERNAME):
probability = 80.0
else:
probability = loaded_model.predict_proba(new_input)[0][index]
return str(probability);
@app.route('/browserInfo', methods=['POST', 'GET'])
def browserInfo():
browserData = pd.read_csv("/home/senthuran/Desktop/Movies/BrowserInfo11.csv")
X = browserData.drop('username', axis=1)
y = browserData['username']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20)
cat_attribs = ['name', 'version', 'os', 'CPU', 'TimeZone', 'Resolution', 'ColorDepth', 'UserAgentHash',
'country_code', 'country_name', 'state', 'city', 'IPv4']
full_pipeline = ColumnTransformer([('cat', OneHotEncoder(handle_unknown='ignore'), cat_attribs)],
remainder='passthrough')
encoder = full_pipeline.fit(X_train)
browserInfo = replaceBrackets(request.get_json()['browserInfo'])
new_input = dict(item.split("=") for item in browserInfo.split(", "))
new_input.pop('type')
new_input =
|
pd.DataFrame(new_input, index=[0])
|
pandas.DataFrame
|
import pandas as pd
import datetime as dt
import numpy as np
import matplotlib.pyplot as plt
import finterstellar as fs
pd.plotting.deregister_matplotlib_converters()
font = 'NanumSquareRound, AppleGothic, Malgun Gothic, DejaVu Sans'
class Visualize:
today = '(' + pd.to_datetime('today').date().strftime("%y%m%d") + ') '
today_str = pd.to_datetime('today').date().strftime("%Y%m%d")
def __init__(self):
plt.style.use('fivethirtyeight')
plt.rcParams['font.family'] = font
plt.rcParams['axes.unicode_minus'] = False
plt.rcParams['axes.grid'] = True
plt.rcParams['lines.linewidth'] = 1.5
plt.rcParams['grid.linestyle'] = '--'
plt.rcParams['grid.alpha'] = 0.7
plt.rcParams['lines.antialiased'] = True
plt.rcParams['figure.figsize'] = [15.0, 7.0]
plt.rcParams['savefig.dpi'] = 96
plt.rcParams['font.size'] = 12
plt.rcParams['legend.fontsize'] = 'medium'
plt.rcParams['figure.titlesize'] = 'medium'
def price_view(self, df, b_date, cd, size=(15,7), make_file=False):
cds = fs.str_list(cd)
fig, ax = plt.subplots(figsize=size)
x = df.loc[b_date:].index
for c in cds:
plt.plot(x, df.loc[b_date:, c], label=c)
plt.legend()
if make_file:
plt.savefig('./image/'+self.today+cds[0]+' price_view.png', bbox_inches='tight')
def index_view(self, df, b_date, cd, size=(15,7), make_file=False):
if isinstance(df.index[0], dt.date):
b_date = fs.check_base_date(df, b_date)
fig, ax = plt.subplots(figsize=size)
x = df.loc[b_date:].index
cds = fs.str_list(cd)
for c in cds:
plt.plot(x, df.loc[b_date:, c] / df.loc[b_date, c] * 100, label=c)
plt.legend()
if make_file:
plt.savefig('./image/'+self.today+cds[0]+' index_view.png', bbox_inches='tight')
def complex_view(self, df, b_date, cd_a, cd_b, size=(15,7), make_file=False):
cds_a = fs.str_list(cd_a)
cds_b = fs.str_list(cd_b)
fig, ax1 = plt.subplots(figsize=size)
x = df.loc[b_date:].index
i = 1
for c in cds_a:
if i==1:
ax1.plot(x, df.loc[b_date:, c], color='C'+str(i), lw=3, label=c)
else:
ax1.plot(x, df.loc[b_date:, c], color='C'+str(i), label=c)
i += 1
if cds_b:
ax2 = ax1.twinx()
i = 6
for c in cds_b:
ax2.fill_between(x, df.loc[b_date:, c], 0, facecolor='C'+str(i), alpha=0.3)
ax1.plot(np.nan, color='C'+str(i), label=c)
i += 1
ax1.legend(loc=0)
if make_file:
plt.savefig('./image/'+self.today+cds_a[0]+' complex_view.png', bbox_inches='tight')
def multi_line_view(self, df, b_date, cd_a, cd_b, size=(15,7), make_file=False):
cds_a = fs.str_list(cd_a)
cds_b = fs.str_list(cd_b)
fig, ax1 = plt.subplots(figsize=size)
x = df.loc[b_date:].index
i = 1
for c in cds_a:
if i==1:
ax1.plot(x, df.loc[b_date:, c], color='C'+str(i), lw=3, label=c)
pass
else:
ax1.plot(x, df.loc[b_date:, c], color='C'+str(i), label=c)
i += 1
if cds_b:
ax2 = ax1.twinx()
i = 6
for c in cds_b:
ax2.plot(x, df.loc[b_date:, c], color='C'+str(i), label=c, alpha=0.7)
ax1.plot(np.nan, color='C'+str(i), label=c)
i += 1
ax1.legend(loc=0)
if make_file:
plt.savefig('./image/'+self.today+cds_a[0]+' multi_line_view.png', bbox_inches='tight')
def position_view(self, df, cd, size=(15,1), make_file=False, file_name=''):
cds = fs.str_list(cd)
fig, ax = plt.subplots(figsize=size)
x = df.index
for c in cds:
df['ps'+c] = 0
df.loc[ df['p '+c] == 'll', ['ps'+c] ] = 1
df.loc[ df['p '+c] == 'sl', ['ps'+c] ] = 1
df.loc[ df['p '+c] == 'zl', ['ps'+c] ] = 1
df.loc[ df['p '+c] == 'ls', ['ps'+c] ] = -1
df.loc[ df['p '+c] == 'ss', ['ps'+c] ] = -1
df.loc[ df['p '+c] == 'zs', ['ps'+c] ] = -1
plt.fill_between(x, df['ps'+c], 0, label=c)
plt.yticks([-1, 0, 1], ["Short", "Zero", "Long"])
plt.legend()
if make_file:
f_name = file_name+'_position_view.png'
plt.savefig('./image/'+f_name, bbox_inches='tight')
def position_view_bar(self, df, cd, size=(15,1), make_file=False):
cds = fs.str_list(cd)
fig, ax = plt.subplots(figsize=size)
x = df.index
x_ticks = self.time_serial(df)
plt.xticks(x_ticks[0], x_ticks[1])
plt.autoscale(True, axis='x')
for c in cds:
df['ps'+c] = 0
df.loc[ df['p '+c] == 'll', ['ps'+c] ] = 1
df.loc[ df['p '+c] == 'sl', ['ps'+c] ] = 1
df.loc[ df['p '+c] == 'zl', ['ps'+c] ] = 1
df.loc[ df['p '+c] == 'ls', ['ps'+c] ] = -1
df.loc[ df['p '+c] == 'ss', ['ps'+c] ] = -1
df.loc[ df['p '+c] == 'zs', ['ps'+c] ] = -1
plt.bar(range(x.size), df['ps'+c], width=1, label=c)
plt.yticks([-1, 0, 1], ["Short", "Zero", "Long"])
plt.legend()
if make_file:
plt.savefig('./image/'+self.today+cds[0]+' position_view.png', bbox_inches='tight')
def pair_trend_index_view(self, df, trd, cd, size=(15,7), make_file=False, file_name=''):
fig, ax1 = plt.subplots(figsize=size)
x = df.index
ax1.fill_between(x, df[cd[1]+' expected']*(1+trd), df[cd[1]+' expected']*(1-trd), facecolor='sienna', alpha=0.2)
ax1.plot(x, df[cd[1]+' expected'], 'sienna', linestyle='--')
ax1.plot(x, df[cd[1]], 'C1', lw=3)
ax2 = ax1.twinx()
ax2.plot(x, df[cd[0]], 'C0', alpha=0.7)
ax1.plot(np.nan, 'C0', label=cd[0])
ax1.legend(loc=0)
if make_file:
f_name = file_name+'_pair_trend_view.png'
plt.savefig('./image/'+f_name, bbox_inches='tight')
return()
def pair_trend_price_view(self, df, trd, cd, size=(15,7), make_file=False):
fig, ax = plt.subplots(figsize=size)
x = df.index
plt.fill_between(x, df[cd[1]+' expected']*(1+trd), df[cd[1]+' expected']*(1-trd), facecolor='sienna', alpha=0.2)
plt.plot(x, df[cd[1]+' expected'], 'sienna', linestyle='--')
plt.plot(x, df[cd[0]], 'C0')
plt.plot(x, df[cd[1]], 'C1', lw=3)
plt.legend()
if make_file:
plt.savefig('./image/'+self.today+cd[0]+' pair_trend_price_view.png', bbox_inches='tight')
def BB_trend_view(self, df, cd, size=(15,7), make_file=False):
cds = fs.str_list(cd)
fig, ax = plt.subplots(figsize=size)
x = df.index
plt.fill_between(x, df['lb'], df['ub'], facecolor='sienna', alpha=0.2)
plt.plot(x, df['center'], color='sienna', linestyle='--', label='MA')
plt.plot(x, df[cds[0]], color='C0', linestyle='-', lw=3)
plt.legend()
if make_file:
plt.savefig('./image/'+self.today+cds[0]+' bb_trend_view.png', bbox_inches='tight')
def futures_basis_view(self, df, threshold, cd, size=(15,7), make_file=False):
cds = fs.str_list(cd)
fig, ax = plt.subplots(figsize=size)
x = df.index
plt.autoscale(True, axis='both')
plt.fill_between(x, df[cds[0]], df[cds[0]]+df['basis'], facecolor='sienna', alpha=0.2)
plt.plot(x, df[cds[0]], 'sienna', linestyle='--')
plt.plot(x, df[cds[1]], 'C1', lw=3)
plt.legend()
if make_file:
plt.savefig('./image/'+self.today+cds[0]+' futures_basis_view.png', bbox_inches='tight')
def value_at_expiry_view(self, x, make_file=False, size=(7,7), **y):
fig, ax = plt.subplots(figsize=size)
plt.axhline(y=0, color = 'k', linewidth=1) # x-axis
s = pd.Series(0 for _ in range(len(x)))
if len(y) > 1:
for key, value in y.items():
plt.plot(x, value, linestyle='--', linewidth=1, label=key)
s = s +
|
pd.Series(value)
|
pandas.Series
|
# packages
import pandas as pd
import numpy as np
#from sodapy import Socrata
import os
import json, requests
from datetime import datetime
# display all columns and rows
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
# ===============================================================================================
# pull rideshare data from Chicago's open data API by week
# lists of start and end dates to search
list_of_start_dates = ["01T00:00:00.000", "08T00:00:00.000", "15T00:00:00.000", "22T00:00:00.000", "29T00:00:00.000"]
list_of_end_dates = ["07T23:45:00.000", "14T23:45:00.000", "21T23:45:00.000", "28T23:45:00.000", "30T23:45:00.000"]
# printing date spans
for start, end in zip(list_of_start_dates, list_of_end_dates):
print("start: " + start + " --- " + "end: " + end)
# since there are millions of records in the month of November 2018, this for loop will iterate over
# individual weeks to make the process more manageable. The result will be five CSV files containing
# the data for the span of days defined in the start and end dates listed above
for start, end in zip(list_of_start_dates, list_of_end_dates):
# define the URL to query the API for the count of records between dates
# this will allow the search URL to pull all the records for the time span
count_request = f"https://data.cityofchicago.org/resource/m6dm-c72p.json?$select=count(trip_start_timestamp)\
&$where=trip_start_timestamp%20between%20%272018-11-{start}%27%20and%20%272018-11-{end}%27"
# use python's requests packages to make the request
count_json = requests.get(count_request)
# print count_json to see if request succeeded; if it succeeded response == 200
print(count_json)
# extract the count value from the json
count = [value for value in count_json.json()[0].values()]
count = pd.to_numeric(count[0])
# define search url (includes start, end, and count) to request the week's data
search = f"https://data.cityofchicago.org/resource/m6dm-c72p.json?$where=trip_start_timestamp between '2018-11-{start}'\
and '2018-11-{end}'&$limit={count}"
# request data (will be received in json format)
r = requests.get(search)
# convert json to dataframe
tmp_df = pd.DataFrame(r.json())
# remove unnecessary cols to save some memory
tmp_df = tmp_df.drop(["dropoff_centroid_location", "pickup_centroid_location"], axis = 1)
# convert "trip_end_timestamp" and "trip_start_timestamp" to datetime64 object
tmp_df["trip_end_timestamp"] =
|
pd.to_datetime(tmp_df["trip_end_timestamp"])
|
pandas.to_datetime
|
import os
import re
import gzip
import shutil
import json
import tarfile
import numpy as np
import pandas as pd
import networkx
import obonet
import time
from pyorthomap import findOrthologsHsMm, findOrthologsMmHs
from GEN_Utils import FileHandling
from loguru import logger
from utilities.decorators import ProgressBar
logger.info(f'Import OK')
resource_folder = 'resources/bioinformatics_databases/'
def gz_unzipper(filename, input_path=resource_folder, output_path=resource_folder):
with gzip.open(f'{input_path}{filename}.gz', 'rb') as f_in:
with open(f'{output_path}{filename}', 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
def tar_file_to_folder(input_path, output_path):
tar = tarfile.open(f'{input_path}', 'r')
tar.extractall(f'{output_path}')
tar.close()
def go_lineage_tracer(go_term, obo_path, alt_ids=False, direct=False):
"""Return all nodes underneath (i.e. all children) of go_term of interest.
Default is to collect entire tree; if stopping at 'direct',
then only direct descendants are collected."""
# Read the ontology
graph = obonet.read_obo(obo_path)
# Collect all nodes into child:parents dataframe
children = {}
for node in graph.nodes:
try:
children[node] = graph.nodes[node]['is_a']
except:
children[node] = []
child_terms = pd.DataFrame()
child_terms['child_term'] = list(children.keys())
child_terms['parents'] = [';'.join(terms) for terms in list(children.values())]
# collect any alternate ids for go term
search_list = []
if alt_ids:
try:
search_list.append(graph.nodes[go_term]['alt_id'])
except:
pass
search_list = [go_term] + [item for sublist in search_list for item in sublist]
# Collect all children where term of interest is parent
def search_terms(search_list, term_family=[], direct=False):
term_family.append(search_list)
search_list = '|'.join(search_list)
terms = child_terms[child_terms['parents'].str.contains(search_list)]['child_term'].tolist()
if direct:
return terms
if len(terms) > 0:
search_terms(terms, term_family)
return [item for sublist in term_family for item in sublist]
# collect list of terms of interest
family = search_terms(search_list, direct=direct)
return family
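# Hedged usage sketch (assumes the PANTHER GO-slim obo file has already been downloaded into
# resource_folder; 'GO:0032991' follows the example id used in the docstrings below).
def _example_go_lineage_tracer():
    return go_lineage_tracer('GO:0032991', obo_path=f'{resource_folder}PANTHERGOslim.obo')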
def uniprot_go_genes(tax_id, go_term, resource_folder=resource_folder, child_terms=True, direct=False, output='list'):
"""Collect all genes from annotated (reviewed) uniprot database containing the GO term of interest.
tax_id: uniprot id corresponding to saved databases in resources folder e.g. '10090', '9606'
go_term: term id e.g. 'GO:0032991'
resource_folder: directory to where stored databases are
child_terms: default(True) collects all terms for which the term of interest is a parent
direct: default(False) limits child terms to direct descendants i.e. child term 'is_a' go_term
output: default(list) choose type of output from 'list' ('Entry' ids), 'df' (complete genes df) or directory (save)"""
# read in uniprot database for the species, with xref details
uniprot_database = uniprot_summary(tax_id=tax_id, resource_folder=resource_folder, reviewed=True)
genes = uniprot_database.dropna(subset=['GO']) # 16525/17474 = 95% have annotated GO terms
# collect search terms according to optional child terms and direct lineage
if child_terms:
search_terms = go_lineage_tracer(go_term, obo_path=f'{resource_folder}PANTHERGOslim.obo', alt_ids=False, direct=direct)
search_terms = '|'.join(search_terms)
else:
search_terms = go_term
# Collect all genes with ontology_id
gene_list = genes[genes['GO'].str.contains(search_terms)]
# generate output
if output == 'list':
return gene_list['Entry'].tolist()
elif output == 'df':
return gene_list
else:
logger.info('Output format not detected. Attempting output to path.')
gene_list.to_csv(output)
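# Hedged usage sketch (assumes the reviewed uniprot tables for tax id '9606' are already in
# resource_folder): returns the Entry ids annotated under the complex GO term and its children.
def _example_uniprot_go_genes():
    return uniprot_go_genes(tax_id='9606', go_term='GO:0032991', output='list')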
def ontology_wordfinder(words, obo_path=f'{resource_folder}PANTHERGOslim.obo', resource_folder=resource_folder):
"""Retrieves all ontology terms containing 'words' in the name.
words: list of words to search
return: df of term, name matches"""
# Read the ontology
graph = obonet.read_obo(obo_path)
# Collect term_ids, term names
terms = [node for node in graph.nodes]
names = [graph.nodes[node]['name'] for node in terms]
terms = pd.DataFrame([terms, names], index=['go_term', 'go_name']).T
# Collect only terms containing name of interest
search_words = '|'.join(words)
return terms[terms['go_name'].str.contains(search_words)]
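# Hypothetical query against the GO-slim ontology (added example; the search words are invented):
#   matches = ontology_wordfinder(['ribosome', 'proteasome'])
#   print(matches.head())  # df with 'go_term' and 'go_name' columns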
def go_term_details(go_terms, obo_path=f'{resource_folder}PANTHERGOslim.obo', resource_folder=resource_folder):
"""Retrieves details for all go terms as df.
go_terms: list of term_ids to search
return: df of term, name matches"""
# Read the ontology
graph = obonet.read_obo(obo_path)
# Generate df for go_term
cleaned_terms = []
for go_term in go_terms:
        try:
            test_df = pd.DataFrame(dict([(k, pd.Series(v)) for k, v in graph.nodes[go_term].items()]))
            df = pd.DataFrame([';'.join(test_df[col].dropna()) for col in test_df.columns.tolist()], index=test_df.columns.tolist()).T
            df['go_id'] = go_term
            cleaned_terms.append(df)
        except KeyError:
            pass
cleaned_terms = pd.concat(cleaned_terms)
return cleaned_terms
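# Illustrative call (added example; the GO ids are placeholders):
#   details = go_term_details(['GO:0032991', 'GO:0005737'])
#   details[['go_id', 'name']]  # one summary row per term, multi-valued fields joined with ';'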
def go_uniprot_proteins(protein_names, tax_id, resource_folder=resource_folder, name_type= 'Entry', output='list'):
"""For any given gene, pull out the associated GO terms annotated in uniprot as a list"""
# read in uniprot database for the species, with xref details
uniprot_database = uniprot_summary(tax_id=tax_id, resource_folder=resource_folder, reviewed=True)
gene_details = uniprot_database[uniprot_database[name_type].isin(protein_names)]
# generate output
if output == 'list':
return gene_details['GO'].tolist()
elif output == 'df':
return gene_details
else:
logger.info('Output format not detected')
def taxonomy_id(uniprot_tax_ids, resource_folder):
species = pd.read_table(f'{resource_folder}orthodb_v10.1/odb10v1_species.tab.gz', compression='gzip', header=None)
species.columns = ['ncbi_tax_id', 'ortho_tax_id', 'organism_name', 'genome_assembly_id', 'ortho_gene_count', 'ortho_group_count', 'mapping_type']
species_dict = dict(zip(species['ncbi_tax_id'], species['ortho_tax_id']))
return [species_dict[ncbi_id] for ncbi_id in uniprot_tax_ids]
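# Sketch of the expected mapping (illustrative only): NCBI taxonomy ids are translated to their
# OrthoDB equivalents via the odb10v1_species table, e.g.
#   ortho_ids = taxonomy_id(['9606', '10090'], resource_folder)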
@ProgressBar(step=1/41)
def create_genes(resource_folder, ortho_tax_ids):
try:
genes = pd.read_excel(f'{resource_folder}{"_".join(ortho_tax_ids)}_genes.xlsx')
genes.drop([col for col in genes.columns.tolist() if 'Unnamed: ' in col], axis=1, inplace=True)
yield
    except FileNotFoundError:
        logger.info('File not found. Processing database.')
gene_chunks = pd.read_table(f'{resource_folder}orthodb_v10.1/odb10v1_genes.tab.gz', compression='gzip', chunksize=1000000, header=None)
genes = []
for df in gene_chunks:
genes.append(df[df[1].isin(ortho_tax_ids)]) # note this relies on the order of the columns - see ortho README
yield
genes = pd.concat(genes)
genes.columns = ['ortho_gene_id', 'ortho_organism_id', 'original_id', 'synonyms', 'mapped_uniprot_id', 'mapped_ensembl_ids', 'ncbi_gene_name', 'mapped_description']
for tax_id in ortho_tax_ids:
logger.info(f'{len(genes[genes["ortho_organism_id"] == tax_id])} genes found for {tax_id}')
FileHandling.df_to_excel(f'{resource_folder}{"_".join(ortho_tax_ids)}_genes.xlsx', sheetnames=['all_genes'], data_frames=[genes])
return genes
@ProgressBar(step=1/196)
def create_og2genes(resource_folder, ortho_tax_ids):
try:
og2genes = pd.read_excel(f'{resource_folder}{"_".join(ortho_tax_ids)}_go2genes.xlsx')
og2genes.drop([col for col in og2genes.columns.tolist() if 'Unnamed: ' in col], axis=1, inplace=True)
yield
    except FileNotFoundError:
        logger.info('File not found. Processing database.')
og2genes_chunks =
|
pd.read_table(f'{resource_folder}orthodb_v10.1/odb10v1_OG2genes.tab.gz', compression='gzip', header=None, chunksize=1000000)
|
pandas.read_table
|
#!/usr/bin/env python3
# Pancancer_Aberrant_Pathway_Activity_Analysis scripts/targene_count_heatmaps.py
import os
import sys
import pandas as pd
import argparse
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import seaborn as sns
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'papaa'))
from tcga_util import add_version_argument
parser = argparse.ArgumentParser()
add_version_argument(parser)
parser.add_argument('-g', '--genes', default= 'ERBB2,PIK3CA,KRAS,AKT1',
help='string of the genes to extract or gene list file')
parser.add_argument('-p', '--path_genes',
help='pathway gene list file')
parser.add_argument('-s', '--classifier_decisions',
help='string of the location of classifier decisions file with predictions/scores')
parser.add_argument('-x', '--x_matrix', default=None,
help='Filename of features to use in model')
parser.add_argument( '--filename_mut', default=None,
help='Filename of sample/gene mutations to use in model')
parser.add_argument( '--filename_mut_burden', default=None,
help='Filename of sample mutation burden to use in model')
parser.add_argument( '--filename_sample', default=None,
help='Filename of patient/samples to use in model')
parser.add_argument( '--filename_copy_loss', default=None,
help='Filename of copy number loss')
parser.add_argument( '--filename_copy_gain', default=None,
help='Filename of copy number gain')
parser.add_argument( '--filename_cancer_gene_classification', default=None,
help='Filename of cancer gene classification table')
args = parser.parse_args()
# Load Constants
alt_folder = args.classifier_decisions
rnaseq_file = args.x_matrix
mut_file = args.filename_mut
sample_freeze_file = args.filename_sample
cancer_gene_file = args.filename_cancer_gene_classification
copy_loss_file = args.filename_copy_loss
copy_gain_file = args.filename_copy_gain
mutation_burden_file = args.filename_mut_burden
mutation_df = pd.read_table(mut_file, index_col=0)
sample_freeze =
|
pd.read_table(sample_freeze_file, index_col=0)
|
pandas.read_table
|
import pandas as pd
import requests
import json
import csv
import time
import glob
import os
import math
import re
import string
import itertools
import inspect
from calendar import isleap
#import seaborn as sns
import matplotlib.font_manager as fm
from dateutil.parser import parse
from datetime import datetime, timedelta, date
from pandas.tseries.holiday import get_calendar
from matplotlib import *
from matplotlib import ticker
from matplotlib.font_manager import FontProperties
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.cm as cm #color map
import matplotlib.gridspec as gridspec
from scipy.stats.kde import gaussian_kde
from scipy.stats.stats import pearsonr
from pandas.tseries.offsets import CustomBusinessDay
from numpy import linspace
from matplotlib.dates import YearLocator, MonthLocator, DateFormatter
import getpass
from os.path import expanduser
from read_predictions import readRuns, readInputs
from read_measurements import readGas, readSTM, readSubmetering
def main():
BuildingList = ['05_MaletPlaceEngineering', '01_CentralHouse', '02_BuroHappold_17',
'03_BuroHappold_71'] # Location of DATA
BuildingHardDisk = ['05_MaletPlaceEngineering_Project', '01_CentralHouse_Project', '02_BuroHappold_17',
'03_BuroHappold_71']
DataFilePaths = ['MPEB', 'CentralHouse', '17', '71'] # Location of STM data and file naming
BuildingLabels = ['MPEB', 'CH', 'Office 17', 'Office 71']
BuildingAbbreviations = ['MPEB', 'CH', '17', '71', 'Nothing']
FloorAreas = [9579, 5876, 1924, 1691]
building_num = 1 # 0 = MPEB, 1 = CH, 2 = 17, 3 = 71
    base_case = False  # only show a single run from the basecase, or multiple runs (changes both import and plotting)
    simplification = True  # compare simplifications to the base_case (compare_models must also be True)
    compare_models = True  # if True, compare calibrated or simplified models with the basecase
    calibrated_case = False  # if base_case and calibrated_case are both True and compare_basecase_to_calibration is False, only data for the calibrated case is shown
parallel_simulation = False
compare_basecase_to_calibration = False
loadshape_benchmarking = False
compare_weather = False # compare weatherfiles for CH and MPEB, compare models need to be true as well
NO_ITERATIONS = 20
time_step = 'month' # 'year', 'month', 'day', 'hour' # this is for loading and plotting the predicted against measured data.
end_uses = False
include_weekdays = False # to include weekdays in the targets/runs for both surrogate model creation and/or calibration, this is for surrogatemodel.py
write_data = False
for_sensitivity = True
building_abr = BuildingAbbreviations[building_num]
datafile = DataFilePaths[building_num]
building = BuildingList[building_num]
building_harddisk = BuildingHardDisk[building_num]
building_label = BuildingLabels[building_num]
floor_area = FloorAreas[building_num]
DataPath_model_real = start_path + 'OneDrive - BuroHappold\EngD_hardrive backup/UCL_DemandLogic/' + building_harddisk + '/ParallelSimulation/'
DataPath = start_path+'OneDrive - BuroHappold/01 - EngD/07 - UCL Study/'
DataPathSTM = start_path+'OneDrive - BuroHappold/01 - EngD/07 - UCL Study/MonitoringStudyUCL/'
if building_num in {0, 1}: # Does it have short term monitoring?
df_stm = readSTM(DataPathSTM, building, building_num, write_data, datafile,
floor_area) # read short term monitoring
else:
df_stm =
|
pd.DataFrame()
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 16 14:40:10 2019
@author: lsx
"""
import pandas as pd
import numpy as np
import os
import sensomicsbandV123 as sob
import matplotlib.pyplot as plt
cols = ['time', 'hr']
### Read data
f_datapath = './data/sens/3-14/3-14-fitbit/fitbit2019-3-14-1.xls'
f_data = pd.read_excel(f_datapath, header=None)
f_data.columns = cols
f_data = f_data[::-1]
f_data = f_data.reset_index(drop=True)
#inter = pd.DataFrame([None]*7)
#hrf = pd.DataFrame()
#for hrf_i in f_data['hr']:
# tmp = pd.concat([pd.DataFrame([hrf_i]),inter], axis=0)
# hrf = pd.concat([hrf, tmp], axis=0)
#hrf = hrf.reset_index(drop=True)
def miss_value(f_data):
miss_all = []
for i in range(len(f_data)-1):
miss = int((f_data['time'][i+1] - f_data['time'][i]).seconds)
miss_all.append((miss)-1)
return miss_all
f_miss = miss_value(f_data)
def padding(f_data,f_miss):
hrf = pd.DataFrame()
hr = f_data['hr']
for i in range(len(hr)-1):
pad_value = pd.DataFrame([None]*f_miss[i])
tmp = pd.concat([pd.DataFrame([hr[i]]),pad_value],axis=0)
hrf = pd.concat([hrf,tmp],axis=0)
hrf = pd.concat([hrf,pd.DataFrame([hr[len(hr)-1]])])
return hrf
hrf = padding(f_data,f_miss)
hrf.columns = ['hr']
hrf = hrf.reset_index(drop=True)
filepath = './data/sens/3-14/3-14-sense'
filelist = os.listdir(filepath)
hrs = pd.DataFrame()
raw = pd.DataFrame()
for file in filelist[0:12]:
print(file)
path = os.path.join(filepath, file)
tmp, temp, error = sob.parser(path, mertic='sec')
hrs = pd.concat([hrs, temp])
raw = pd.concat([raw, tmp])
hrs_ = hrs.reset_index(drop=True)
hrs = hrs_.drop(['time'], axis=1)
inter = pd.DataFrame([None]*59)
gnd = [65,63,65,64,64,64,66,66,65,67,65,64,70,65,70,66]
hrg = pd.DataFrame()
for hrg_i in gnd:
tmp = pd.concat([pd.DataFrame([hrg_i]),inter], axis=0)
hrg = pd.concat([hrg, tmp], axis=0)
hrg = hrg.reset_index(drop=True)
record_time = ['16-00-00', '16-04-00', '16-08-00', '16-12-00']
record_loc = [0, 240, 480, 720]
plt.figure()
#hr = pd.concat([hrs[0:959], hrf[0:959], hrg[0:959]], axis=1)
hr = pd.concat([hrs[0:605], hrf[0:605], hrg[0:605]], axis=1)
hr.columns = ['hrs', 'hrf', 'hrg']
plt.figure()
plt.plot(hr['hrs'], 'g')
plt.plot(hr['hrf'].dropna(), 'b*')
plt.plot(hr['hrg'].dropna(), 'rs')
plt.xlabel('Seconds', fontsize=15)
plt.yticks(fontsize=12)
plt.ylabel('Heartrate/bpm', fontsize=15)
plt.legend(labels = ['Sens', 'Fitbit', 'GND'], loc = 'upper left')
#plt.annotate(r'$2x+1=%s$' % y0, xy=(x0, y0), xycoords='data', xytext=(+30, -30),
# textcoords='offset points', fontsize=10,
# arrowprops=dict(arrowstyle='->', connectionstyle="arc3,rad=.2"))
plt.annotate('16-00-00', xy=(0, 65),
xycoords='data', xytext=(+10, +20),
textcoords='offset points', fontsize=12, va='bottom', ha='left',
arrowprops=dict(arrowstyle='->', connectionstyle="arc3,rad=.5"))
plt.annotate('16-04-00', xy=(240, 64),
xycoords='data', xytext=(+10, -25),
textcoords='offset points', fontsize=12, va='top', ha='left',
arrowprops=dict(arrowstyle='->', connectionstyle="arc3,rad=-.1"))
plt.annotate('16-08-00', xy=(480, 65),
xycoords='data', xytext=(+10, -30),
textcoords='offset points', fontsize=12, va='top', ha='left',
arrowprops=dict(arrowstyle='->', connectionstyle="arc3,rad=-.1"))
plt.figure()
hr = pd.concat([hrs[600:959], hrf[600:959], hrg[600:959]], axis=1)
hr = hr.reset_index(drop=True)
hr.columns = ['hrs', 'hrf', 'hrg']
plt.figure()
plt.plot(hr['hrs'], 'g')
plt.plot(hr['hrf'].dropna(), 'b*')
plt.plot(hr['hrg'].dropna(), 'rs')
plt.xlabel('Seconds', fontsize=15)
plt.yticks(fontsize=12)
plt.ylabel('Heartrate/bpm', fontsize=15)
plt.legend(labels = ['Sens', 'Fitbit', 'GND'], loc = 'upper left')
plt.annotate('16-12-00', xy=(120, 70),
xycoords='data', xytext=(-20, +30),
textcoords='offset points', fontsize=12, va='top', ha='left',
arrowprops=dict(arrowstyle='->', connectionstyle="arc3,rad=-.1"))
plt.annotate('16-14-00', xy=(240, 70),
xycoords='data', xytext=(-5, +30),
textcoords='offset points', fontsize=12, va='top', ha='left',
arrowprops=dict(arrowstyle='->', connectionstyle="arc3,rad=-.1"))
###
###
###
cols = ['time', 'hr']
### Read data
f_datapath = './data314/3-14-fitbit/fitbit2019-3-14-2.csv'
f_data = pd.read_csv(f_datapath, header=None)
f_data.columns = cols
f_data = f_data[::-1]
f_data = f_data.reset_index(drop=True)
inter =
|
pd.DataFrame([None]*7)
|
pandas.DataFrame
|
import pandas as pd
import re
PLAYER_NAME = "<NAME>"
RESULT_FILENAME = "data/wta/matches_Simona_Halep.csv"
COLUMNS = ['best_of', 'draw_size', 'loser_age', 'loser_entry', 'loser_hand', 'loser_ht', 'loser_id',
'loser_ioc', 'loser_name', 'loser_rank', 'loser_rank_points', 'loser_seed', 'match_num',
'minutes', 'round', 'score', 'surface', 'tourney_date', 'tourney_id', 'tourney_level',
'tourney_name', 'winner_age', 'winner_entry', 'winner_hand', 'winner_ht', 'winner_id',
'winner_ioc', 'winner_name', 'winner_rank', 'winner_rank_points', 'winner_seed']
# Raw strings avoid invalid escape-sequence warnings for the \( ... \) tiebreak groups.
TWO_SET_1 = r'^(6-[0-4] 6-[0-4])'
TWO_SET_2 = r'^((6-[0-4] 7-[5-6](\([0-9]\))?)|(7-[5-6](\([0-9]\))? 6-[0-4]))'
TWO_SET_3 = r'^(7-[5-6](\([0-9]\))? 7-[5-6](\([0-9]\))?)'
THREE_SET_1 = r'^((([6-7]-[0-6](\([0-9]\))? [0-6]-[6-7](\([0-9]\))? 6-[0-3]))|' \
              r'([0-6]-[6-7](\([0-9]\))? [6-7]-[0-6](\([0-9]\))? 6-[0-3]))'
THREE_SET_2 = r'^((([6-7]-[0-6](\([0-9]\))? [0-6]-[6-7](\([0-9]\))? ([6-9]|[0-9]{2})-([4-9]|[0-9]{2})(\([0-9]\))?))|' \
              r'([0-6]-[6-7](\([0-9]\))? [6-7]-[0-6](\([0-9]\))? ([6-9]|[0-9]{2})-([4-9]|[0-9]{2})(\([0-9]\))?))'
two_set_1_regex = re.compile(TWO_SET_1)
two_set_2_regex = re.compile(TWO_SET_2)
two_set_3_regex = re.compile(TWO_SET_3)
three_set_1_regex = re.compile(THREE_SET_1)
three_set_2_regex = re.compile(THREE_SET_2)
ROUND_ORDERING = ['RR', 'R128', 'R64', 'R32', 'R16', 'QF', 'SF', 'F']
def encode_score(score):
if score == 'W/O':
# walkover
return score
elif 'RET' in score:
# retired match
return 'RET'
elif two_set_1_regex.match(score):
# 2 set match - first category - easy win
return 'EASY_WIN'
elif two_set_2_regex.match(score):
# 2 set match - second category - medium win
return 'MEDIUM_WIN'
elif two_set_3_regex.match(score):
# 2 set match - third category - hard win
return 'HARD_WIN'
elif three_set_1_regex.match(score):
# 3 set match - second category - medium win
return 'MEDIUM_WIN'
    elif three_set_2_regex.match(score):
        # 3 set match - third category - hard win
        return 'HARD_WIN'
else:
return 'OTHER'
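# Quick illustrative checks of the encoder (added example; the score strings are invented):
#   encode_score('6-1 6-2')      -> 'EASY_WIN'   (comfortable straight-sets win)
#   encode_score('6-4 3-6 7-5')  -> 'HARD_WIN'   (decided in a tight third set)
#   encode_score('W/O')          -> 'W/O'        (walkovers are passed through unchanged)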
def get_last_meetings_score(player_df):
for index, row in player_df.iterrows():
opponent_name = row['opponent_name']
match_date = row['tourney_date']
previous_meetings = player_df[(player_df.opponent_name == opponent_name) & (player_df.tourney_date < match_date)]
if not previous_meetings.empty:
previous_meetings.sort_values('tourney_date', inplace=True, ascending=False)
player_df.at[index,'previous_meeting_score'] = previous_meetings.iloc[0]['score_category']
player_df.at[index, 'previous_meeting_tourney'] = previous_meetings.iloc[0]['tourney_name']
player_df.at[index, 'previous_meeting_tourney_id'] = previous_meetings.iloc[0]['tourney_id']
player_df.at[index, 'previous_meeting_tourney_date'] = previous_meetings.iloc[0]['tourney_date']
# print(previous_meetings)
# player_df.drop('score', inplace=True, axis=1)
return player_df
#
# def get_last_player_match_score(player_df):
# player_df['tourney_round'] = pd.Categorical(player_df['tourney_round'], categories=ROUND_ORDERING)
# for index, row in player_df.iterrows():
# match_date = row['tourney_date']
#
# previous_matches = player_df[(player_df.tourney_date <= match_date) & (player_df.index != index)]
# if not previous_matches.empty:
# previous_matches.sort_values(['tourney_round', 'tourney_date'], inplace=True, ascending=[False, False])
# player_df.at[index,'previous_match_score'] = previous_matches.iloc[0]['score_category']
# player_df.at[index, 'previous_match_tourney'] = previous_matches.iloc[0]['tourney_name']
# player_df.at[index, 'previous_match_tourney_id'] = previous_matches.iloc[0]['tourney_id']
# player_df.at[index, 'previous_match_tourney_date'] = previous_matches.iloc[0]['tourney_date']
#
# # print(previous_meetings)
# # player_df.drop('score', inplace=True, axis=1)
# return player_df
def encode_score_column(player_df):
for index, row in player_df.iterrows():
encoded_score = encode_score(row.score)
print("Old score ", row.score, " encoded score: ", encoded_score)
# print(row.score)
player_df.at[index, 'score_category'] = encoded_score
# player_df.drop('score', inplace=True, axis=1)
return player_df
def fill_nan(matches_df):
# Fill in the missing ranking - for unranked players - use a large value
matches_df.loser_rank = matches_df.loser_rank.fillna(value=1500)
matches_df.winner_rank = matches_df.winner_rank.fillna(value=1500)
# Fill in the missing ranking points - 0
matches_df.loser_rank_points = matches_df.loser_rank_points.fillna(0)
matches_df.winner_rank_points = matches_df.winner_rank_points.fillna(0)
# Fill in missing height for opponents - use average height
average_ht = (matches_df.loser_ht.mean() + matches_df.winner_ht.mean()) / 2
print("Average height is: ", average_ht)
matches_df.loser_ht = matches_df.loser_ht.fillna(value=average_ht)
matches_df.winner_ht = matches_df.winner_ht.fillna(value=average_ht)
return matches_df
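# Minimal sketch of the imputation behaviour (illustrative; the frame below is invented):
#   demo = pd.DataFrame({'loser_rank': [3, None], 'winner_rank': [1, 2],
#                        'loser_rank_points': [None, 100], 'winner_rank_points': [500, None],
#                        'loser_ht': [None, 180.0], 'winner_ht': [175.0, None]})
#   fill_nan(demo)  # unranked -> 1500, missing points -> 0, missing heights -> average height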
def main():
# Import dataset
all_matches = pd.read_csv("data/wta/matches.csv", low_memory=False)
matches_2017 = pd.read_csv("data/wta/wta_matches_2017.csv", low_memory=False)
matches_2018 = pd.read_csv("data/wta/wta_matches_2018.csv", low_memory=False)
matches_2017 = matches_2017[COLUMNS]
matches_2017['year'] = 2017
matches_2018 = matches_2018[COLUMNS]
matches_2018['year'] = 2018
all_matches = pd.concat([all_matches, matches_2017, matches_2018])
# Fill nan values
all_matches = fill_nan(all_matches)
# Filter matches for given player
player_matches = all_matches[(all_matches.loser_name == PLAYER_NAME) | (all_matches.winner_name == PLAYER_NAME)]
player_matches = encode_score_column(player_matches)
# Create a new dataframe from the point of view of the player
# One dateframe for the wins
winner_df = player_matches[(player_matches.winner_name == PLAYER_NAME)]
winner_df['win'] = 1
winner_df.rename(columns={'loser_age': 'opponent_age', 'loser_entry': 'opponent_entry',
'loser_hand': 'opponent_hand', 'loser_ht': 'opponent_ht',
'loser_id': 'opponent_id', 'loser_ioc': 'opponent_ioc',
'loser_name': 'opponent_name', 'loser_rank': 'opponent_rank',
'loser_rank_points': 'opponent_rank_points', 'loser_seed': 'opponent_seed',
'winner_age': 'player_age', 'winner_entry': 'player_entry',
'winner_hand': 'player_hand', 'winner_ht': 'player_ht',
'winner_id': 'player_id', 'winner_ioc': 'player_ioc',
'winner_name': 'player_name', 'winner_rank': 'player_rank',
'winner_rank_points': 'player_rank_points', 'winner_seed': 'player_seed'}, inplace=True)
print(winner_df.head())
# One dataframe for the losses
loser_df = player_matches[(player_matches.loser_name == PLAYER_NAME)]
loser_df['win'] = 0
loser_df.rename(columns={'winner_age': 'opponent_age', 'winner_entry': 'opponent_entry',
'winner_hand': 'opponent_hand', 'winner_ht': 'opponent_ht',
'winner_id': 'opponent_id', 'winner_ioc': 'opponent_ioc',
'winner_name': 'opponent_name', 'winner_rank': 'opponent_rank',
'winner_rank_points': 'opponent_rank_points', 'winner_seed': 'opponent_seed',
'loser_age': 'player_age', 'loser_entry': 'player_entry',
'loser_hand': 'player_hand', 'loser_ht': 'player_ht',
'loser_id': 'player_id', 'loser_ioc': 'player_ioc',
'loser_name': 'player_name', 'loser_rank': 'player_rank',
'loser_rank_points': 'player_rank_points', 'loser_seed': 'player_seed'}, inplace=True)
loser_df['score_category'] = loser_df['score_category'].map(lambda s: 'HARD_LOSS' if s == 'EASY_WIN' else ('EASY_LOSS' if s == 'HARD_WIN' else ('MEDIUM_LOSS' if s == 'MEDIUM_WIN' else s)))
print(loser_df.head())
# Concatenate the two dataframes into a final one, containing all the matches (wins and losses) for the given player
player_df =
|
pd.concat([winner_df, loser_df])
|
pandas.concat
|
"""
Copyright 2022 HSBC Global Asset Management (Deutschland) GmbH
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import pandas as pd
import pytest
import pyratings as rtg
from tests import conftest
@pytest.fixture(scope="session")
def rtg_inputs_longterm():
return pd.DataFrame(
data={
"rtg_sp": ["AAA", "AA-", "AA+", "BB-", "C", np.nan, "BBB+", "AA"],
"rtg_moody": ["Aa1", "Aa3", "Aa2", "Ba3", "Ca", np.nan, np.nan, "Aa2"],
"rtg_fitch": ["AA-", np.nan, "AA-", "B+", "C", np.nan, np.nan, "AA"],
}
)
@pytest.fixture(scope="session")
def rtg_inputs_shortterm():
return pd.DataFrame(
data={
"rtg_sp": ["A-1", "A-3", "A-1+", "D", "B", np.nan, "A-2", "A-3"],
"rtg_moody": ["P-2", "NP", "P-1", "NP", "P-3", np.nan, np.nan, "P-3"],
"rtg_fitch": ["F1", np.nan, "F1", "F3", "F3", np.nan, np.nan, "F3"],
}
)
def test_get_best_rating_longterm_with_explicit_rating_provider(rtg_inputs_longterm):
"""Test computation of best ratings on a security (line-by-line) basis."""
actual = rtg.get_best_ratings(
rtg_inputs_longterm,
rating_provider_input=["SP", "Moody", "Fitch"],
tenor="long-term",
)
expectations = pd.Series(
data=["AAA", "AA-", "AA+", "BB-", "CC", np.nan, "BBB+", "AA"], name="best_rtg"
)
pd.testing.assert_series_equal(actual, expectations)
def test_get_best_rating_longterm_with_inferring_rating_provider(rtg_inputs_longterm):
"""Test computation of best ratings on a security (line-by-line) basis."""
actual = rtg.get_best_ratings(rtg_inputs_longterm, tenor="long-term")
expectations = pd.Series(
data=["AAA", "AA-", "AA+", "BB-", "CC", np.nan, "BBB+", "AA"], name="best_rtg"
)
pd.testing.assert_series_equal(actual, expectations)
def test_get_best_rating_shortterm_with_explicit_rating_provider(rtg_inputs_shortterm):
"""Test computation of best ratings on a security (line-by-line) basis."""
actual = rtg.get_best_ratings(
rtg_inputs_shortterm,
rating_provider_input=["SP", "Moody", "Fitch"],
tenor="short-term",
)
expectations = pd.Series(
data=["A-1", "A-3", "A-1+", "A-3", "A-3", np.nan, "A-2", "A-3"], name="best_rtg"
)
|
pd.testing.assert_series_equal(actual, expectations)
|
pandas.testing.assert_series_equal
|
import sys, pandas, csv, os, pickle, codecs
#from scipy import spatial
import argparse, numpy, os
from sklearn.metrics.pairwise import cosine_similarity
from scipy.spatial.distance import cdist, pdist, squareform
from timeit import default_timer as timer
from scipy.stats.mstats import rankdata
def compute(cos_scores,df_dic_source_target,params,out):
#score #2
total = cos_scores.shape[1]-1
    sorted_scores = numpy.argsort(numpy.argsort(cos_scores, axis=1))  # rankdata(cos_scores, axis=1) - 1
    # 2nd method
    diag = numpy.diagonal(cos_scores)
    # max_scores = cos_scores.max(axis=1)
    max_index = numpy.where(sorted_scores == total)[1]
    max_scores = [cos_scores[idx, d] for idx, d in enumerate(max_index)]
    top_index = numpy.where(sorted_scores == total - 9)[1]
    top_scores = [cos_scores[idx, d] for idx, d in enumerate(top_index)]
    dscores = [1 if d == max_scores[idx] else 0 for idx, d in enumerate(diag)]  # compares the actual value, not just the rank
    dtopscores = [1 if d >= top_scores[idx] else 0 for idx, d in enumerate(diag)]
    df_dic_source_target['p1'] = dscores
    df_dic_source_target['p10'] = dtopscores
p1 = df_dic_source_target['p1'].mean()
p10 = df_dic_source_target['p10'].mean()
if params.case:
out_file= "case-sensitive_"+os.path.basename(params.emb)
else:
out_file= "case-insensitive_"+os.path.basename(params.emb)
if "unq" in params.dic:
out_file = "unq_{}".format(out_file)
else:
out_file = "multi_{}".format(out_file)
df_dic_source_target.to_csv(os.path.dirname(params.emb) + "/" + out_file+out+'.out',sep='\t',columns=['source','target','p1','p10'],index=False,encoding='utf-8')
return [p1,p10]
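# Note added for clarity: dscores/dtopscores above implement precision@1 and precision@10 for
# word translation -- each diagonal entry (the gold pair's similarity) is compared against the
# best and the 10th-best similarity in its row. A hedged usage sketch, assuming cos_scores is a
# square source-by-target cosine matrix aligned row-for-row with df_dic_source_target:
#   p1, p10 = compute(cos_scores, df_dic_source_target, params, out='_dev')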
def main():
STDERR_OUT = ""
parser = argparse.ArgumentParser(description='Word translation score')
parser.add_argument("--emb", type=str, default="", help="Path to embedding")
parser.add_argument("--dic", type=str, default="", help="Path to dictionary")
parser.add_argument("--prefix", type=str, default="", help="Language")
parser.add_argument("--out",type=str,default="",help="Output directory")
parser.add_argument("--case",type=bool,default=False,help="Case sensitive")
parser.add_argument("--merge_two_embeddings",type=bool,default=False,help="merge_two_embeddings")
parser.add_argument("--embedding_pr",type=str,default="",help="merge_two_embeddings")
parser.add_argument("--embedding_en",type=str,default="",help="merge_two_embeddings")
params = parser.parse_args()
qualifier = "unq" if "unq" in params.dic else "multi"
if(params.case):
IDENTIFIER = "{}:\t{}\t{}\t".format("Iteration", qualifier, "cAsE")
else:
IDENTIFIER = "{}:\t{}\t{}\t".format("Iteration", qualifier, "case")
print(IDENTIFIER, end='\t', file=sys.stdout, flush=True)
print(sys.argv, file=sys.stdout, flush=True)
if(params.merge_two_embeddings):
print(IDENTIFIER + "Merging two embeddings", file=sys.stdout, flush=True)
assert os.path.isfile(params.embedding_pr)
assert os.path.isfile(params.embedding_en)
f_emb = codecs.open(params.emb, 'w', 'utf-8-sig')
### Do line handling better ###
f_emb.write(
str(int(codecs.open(params.embedding_pr, 'r', 'utf-8-sig').readline().strip().split()[0]) + \
int(codecs.open(params.embedding_en, 'r', 'utf-8-sig').readline().strip().split()[0])))
f_emb.write(" " + codecs.open(params.embedding_pr, 'r', 'utf-8-sig').readline().strip().split()[1] + "\n")
### Line handling done ###
cnt_prefix = False
with codecs.open(params.embedding_pr, 'r', 'utf-8-sig') as read_f:
for line in read_f:
if cnt_prefix is False:
cnt_prefix = True
continue;
f_emb.write(params.prefix + ":" + line)
cnt_en = False
with codecs.open(params.embedding_en, 'r', 'utf-8-sig') as read_f:
for line in read_f:
if cnt_en is False:
cnt_en = True
continue;
f_emb.write("en:" + line)
f_emb.close()
assert os.path.isfile(params.emb)
assert os.path.isfile(params.dic)
we_name = os.path.basename(params.emb)
dic_name = os.path.basename(params.dic)
df_we=
|
pandas.read_csv(params.emb, skiprows=1, sep="\s+",header=None, quoting=csv.QUOTE_NONE,encoding='utf-8')
|
pandas.read_csv
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import pandas as pd
import statsmodels.api as sm
from statsmodels.nonparametric.smoothers_lowess import lowess as smlowess
from statsmodels.sandbox.regression.predstd import wls_prediction_std
from statsmodels.stats.outliers_influence import summary_table
import scipy.stats as stats
import datetime
date_types = (
pd.Timestamp,
pd.DatetimeIndex,
pd.Period,
pd.PeriodIndex,
datetime.datetime,
datetime.time
)
_isdate = lambda x: isinstance(x, date_types)
SPAN = 2 / 3.
ALPHA = 0.05 # significance level for confidence interval
def _snakify(txt):
txt = txt.strip().lower()
return '_'.join(txt.split())
def _plot_friendly(value):
if not isinstance(value, (np.ndarray, pd.Series)):
value = pd.Series(value)
return value
def lm(x, y, alpha=ALPHA):
"fits an OLS from statsmodels. returns tuple."
x_is_date = _isdate(x.iloc[0])
if x_is_date:
x = np.array([i.toordinal() for i in x])
X = sm.add_constant(x)
fit = sm.OLS(y, X).fit()
prstd, iv_l, iv_u = wls_prediction_std(fit)
_, summary_values, summary_names = summary_table(fit, alpha=alpha)
df = pd.DataFrame(summary_values, columns=map(_snakify, summary_names))
# TODO: indexing w/ data frame is messing everything up
fittedvalues = df['predicted_value'].values
predict_mean_ci_low = df['mean_ci_95%_low'].values
predict_mean_ci_upp = df['mean_ci_95%_upp'].values
predict_ci_low = df['predict_ci_95%_low'].values
predict_ci_upp = df['predict_ci_95%_upp'].values
if x_is_date:
x = [pd.Timestamp.fromordinal(int(i)) for i in x]
return x, fittedvalues, predict_mean_ci_low, predict_mean_ci_upp
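# Illustrative call (added example; the data are synthetic and not from the original module):
#   xs = pd.Series(np.arange(20, dtype=float))
#   ys = pd.Series(2 * xs + np.random.normal(size=20))
#   x_fit, y_fit, ci_low, ci_upp = lm(xs, ys)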
def lowess(x, y, span=SPAN):
"returns y-values estimated using the lowess function in statsmodels."
"""
for more see
statsmodels.nonparametric.smoothers_lowess.lowess
"""
x, y = map(_plot_friendly, [x,y])
x_is_date = _isdate(x.iloc[0])
if x_is_date:
x = np.array([i.toordinal() for i in x])
result = smlowess(np.array(y), np.array(x), frac=span)
x = pd.Series(result[::,0])
y = pd.Series(result[::,1])
lower, upper = stats.t.interval(span, len(x), loc=0, scale=2)
std = np.std(y)
y1 = pd.Series(lower * std + y)
y2 = pd.Series(upper * std + y)
if x_is_date:
x = [pd.Timestamp.fromordinal(int(i)) for i in x]
return x, y, y1, y2
def mavg(x,y, window):
"compute moving average"
x, y = map(_plot_friendly, [x,y])
x_is_date = _isdate(x.iloc[0])
if x_is_date:
x = np.array([i.toordinal() for i in x])
std_err = pd.rolling_std(y, window)
y =
|
pd.rolling_mean(y, window)
|
pandas.rolling_mean
|
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas import (Series, isnull, date_range,
MultiIndex, Index)
from pandas.tseries.index import Timestamp
from pandas.compat import range
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
from .common import TestData
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.pchip missing')
def _skip_if_no_akima():
try:
from scipy.interpolate import Akima1DInterpolator # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.Akima1DInterpolator missing')
class TestSeriesMissingData(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_timedelta_fillna(self):
# GH 3371
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
td = s.diff()
# reg fillna
result = td.fillna(0)
expected = Series([timedelta(0), timedelta(0), timedelta(1), timedelta(
days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
        # interpreted as seconds
result = td.fillna(1)
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(timedelta(days=1, seconds=1))
expected = Series([timedelta(days=1, seconds=1), timedelta(
0), timedelta(1), timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(np.timedelta64(int(1e9)))
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
from pandas import tslib
result = td.fillna(tslib.NaT)
expected = Series([tslib.NaT, timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)],
dtype='m8[ns]')
assert_series_equal(result, expected)
# ffill
td[2] = np.nan
result = td.ffill()
expected = td.fillna(0)
expected[0] = np.nan
assert_series_equal(result, expected)
# bfill
td[2] = np.nan
result = td.bfill()
expected = td.fillna(0)
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
assert_series_equal(result, expected)
def test_datetime64_fillna(self):
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
s[2] = np.nan
# reg fillna
result = s.fillna(Timestamp('20130104'))
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130104'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
from pandas import tslib
result = s.fillna(tslib.NaT)
expected = s
assert_series_equal(result, expected)
# ffill
result = s.ffill()
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130101'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
# bfill
result = s.bfill()
expected = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130103 9:01:01'), Timestamp(
'20130103 9:01:01')])
assert_series_equal(result, expected)
# GH 6587
# make sure that we are treating as integer when filling
# this also tests inference of a datetime-like with NaT's
s = Series([pd.NaT, pd.NaT, '2013-08-05 15:30:00.000001'])
expected = Series(
['2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001',
'2013-08-05 15:30:00.000001'], dtype='M8[ns]')
result = s.fillna(method='backfill')
assert_series_equal(result, expected)
def test_datetime64_tz_fillna(self):
for tz in ['US/Eastern', 'Asia/Tokyo']:
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp(
'2011-01-03 10:00'), pd.NaT])
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00'), Timestamp('2011-01-03 10:00'), Timestamp(
'2011-01-02 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-02 10:00', tz=tz)])
self.assert_series_equal(expected, result)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00'), 'AAA',
Timestamp('2011-01-03 10:00'), 'AAA'],
dtype=object)
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-04 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00'),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00'), Timestamp('2011-01-03 10:00'), Timestamp(
'2011-01-04 10:00')])
self.assert_series_equal(expected, result)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT], tz=tz)
s = pd.Series(idx)
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2011-01-02 10:00'), Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-02 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp(
'2011-01-02 10:00', tz=tz).to_pydatetime())
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
self.assert_series_equal(expected, result)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), 'AAA',
Timestamp('2011-01-03 10:00', tz=tz), 'AAA'],
dtype=object)
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp(
'2011-01-03 10:00', tz=tz), Timestamp('2011-01-04 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00', tz=tz)})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp(
'2011-01-03 10:00', tz=tz), Timestamp('2011-01-04 10:00',
tz=tz)])
self.assert_series_equal(expected, result)
# filling with a naive/other zone, coerce to object
result = s.fillna(Timestamp('20130101'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2013-01-01'), Timestamp('2011-01-03 10:00', tz=tz), Timestamp(
'2013-01-01')])
self.assert_series_equal(expected, result)
result = s.fillna(Timestamp('20130101', tz='US/Pacific'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2013-01-01', tz='US/Pacific'),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2013-01-01', tz='US/Pacific')])
self.assert_series_equal(expected, result)
def test_fillna_int(self):
s = Series(np.random.randint(-100, 100, 50))
s.fillna(method='ffill', inplace=True)
assert_series_equal(s.fillna(method='ffill', inplace=False), s)
def test_fillna_raise(self):
s = Series(np.random.randint(-100, 100, 50))
self.assertRaises(TypeError, s.fillna, [1, 2])
self.assertRaises(TypeError, s.fillna, (1, 2))
def test_isnull_for_inf(self):
s = Series(['a', np.inf, np.nan, 1.0])
with pd.option_context('mode.use_inf_as_null', True):
r = s.isnull()
dr = s.dropna()
e = Series([False, True, True, False])
de = Series(['a', 1.0], index=[0, 3])
tm.assert_series_equal(r, e)
tm.assert_series_equal(dr, de)
def test_fillna(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
self.assert_series_equal(ts, ts.fillna(method='ffill'))
ts[2] = np.NaN
exp = Series([0., 1., 1., 3., 4.], index=ts.index)
self.assert_series_equal(ts.fillna(method='ffill'), exp)
exp = Series([0., 1., 3., 3., 4.], index=ts.index)
self.assert_series_equal(ts.fillna(method='backfill'), exp)
exp = Series([0., 1., 5., 3., 4.], index=ts.index)
self.assert_series_equal(ts.fillna(value=5), exp)
self.assertRaises(ValueError, ts.fillna)
self.assertRaises(ValueError, self.ts.fillna, value=0, method='ffill')
# GH 5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.])
assert_series_equal(result, expected)
result = s1.fillna({})
assert_series_equal(result, s1)
result = s1.fillna(Series(()))
assert_series_equal(result, s1)
result = s2.fillna(s1)
assert_series_equal(result, s2)
result = s1.fillna({0: 1})
assert_series_equal(result, expected)
result = s1.fillna({1: 1})
assert_series_equal(result, Series([np.nan]))
result = s1.fillna({0: 1, 1: 1})
assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}))
assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5]))
assert_series_equal(result, s1)
s1 = Series([0, 1, 2], list('abc'))
s2 = Series([0, np.nan, 2], list('bac'))
result = s2.fillna(s1)
expected = Series([0, 0, 2.], list('bac'))
assert_series_equal(result, expected)
# limit
s = Series(np.nan, index=[0, 1, 2])
result = s.fillna(999, limit=1)
expected = Series([999, np.nan, np.nan], index=[0, 1, 2])
assert_series_equal(result, expected)
result = s.fillna(999, limit=2)
expected = Series([999, 999, np.nan], index=[0, 1, 2])
assert_series_equal(result, expected)
# GH 9043
# make sure a string representation of int/float values can be filled
# correctly without raising errors or being converted
vals = ['0', '1.5', '-0.3']
for val in vals:
s = Series([0, 1, np.nan, np.nan, 4], dtype='float64')
result = s.fillna(val)
expected = Series([0, 1, val, val, 4], dtype='object')
assert_series_equal(result, expected)
def test_fillna_bug(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
filled = x.fillna(method='ffill')
expected = Series([nan, 1., 1., 3., 3.], x.index)
assert_series_equal(filled, expected)
filled = x.fillna(method='bfill')
expected = Series([1., 1., 3., 3., nan], x.index)
assert_series_equal(filled, expected)
def test_fillna_inplace(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
y = x.copy()
y.fillna(value=0, inplace=True)
expected = x.fillna(value=0)
assert_series_equal(y, expected)
def test_fillna_invalid_method(self):
try:
self.ts.fillna(method='ffil')
except ValueError as inst:
self.assertIn('ffil', str(inst))
def test_ffill(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
ts[2] = np.NaN
assert_series_equal(ts.ffill(), ts.fillna(method='ffill'))
def test_bfill(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
ts[2] = np.NaN
assert_series_equal(ts.bfill(), ts.fillna(method='bfill'))
def test_timedelta64_nan(self):
from pandas import tslib
td = Series([timedelta(days=i) for i in range(10)])
# nan ops on timedeltas
td1 = td.copy()
td1[0] = np.nan
self.assertTrue(isnull(td1[0]))
self.assertEqual(td1[0].value, tslib.iNaT)
td1[0] = td[0]
self.assertFalse(isnull(td1[0]))
td1[1] = tslib.iNaT
self.assertTrue(isnull(td1[1]))
self.assertEqual(td1[1].value, tslib.iNaT)
td1[1] = td[1]
self.assertFalse(isnull(td1[1]))
td1[2] = tslib.NaT
self.assertTrue(isnull(td1[2]))
self.assertEqual(td1[2].value, tslib.iNaT)
td1[2] = td[2]
self.assertFalse(isnull(td1[2]))
# boolean setting
# this doesn't work, not sure numpy even supports it
# result = td[(td>np.timedelta64(timedelta(days=3))) &
# td<np.timedelta64(timedelta(days=7)))] = np.nan
# self.assertEqual(isnull(result).sum(), 7)
        # NumPy limitation =(
# def test_logical_range_select(self):
# np.random.seed(12345)
# selector = -0.5 <= self.ts <= 0.5
# expected = (self.ts >= -0.5) & (self.ts <= 0.5)
# assert_series_equal(selector, expected)
def test_dropna_empty(self):
s = Series([])
self.assertEqual(len(s.dropna()), 0)
s.dropna(inplace=True)
self.assertEqual(len(s), 0)
# invalid axis
self.assertRaises(ValueError, s.dropna, axis=1)
def test_datetime64_tz_dropna(self):
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp(
'2011-01-03 10:00'), pd.NaT])
result = s.dropna()
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-03 10:00')], index=[0, 2])
self.assert_series_equal(result, expected)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT],
tz='Asia/Tokyo')
s = pd.Series(idx)
self.assertEqual(s.dtype, 'datetime64[ns, Asia/Tokyo]')
result = s.dropna()
expected = Series([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-03 10:00', tz='Asia/Tokyo')],
index=[0, 2])
self.assertEqual(result.dtype, 'datetime64[ns, Asia/Tokyo]')
self.assert_series_equal(result, expected)
def test_dropna_no_nan(self):
for s in [Series([1, 2, 3], name='x'), Series(
[False, True, False], name='x')]:
result = s.dropna()
self.assert_series_equal(result, s)
self.assertFalse(result is s)
s2 = s.copy()
s2.dropna(inplace=True)
self.assert_series_equal(s2, s)
def test_valid(self):
ts = self.ts.copy()
ts[::2] = np.NaN
result = ts.valid()
self.assertEqual(len(result), ts.count())
tm.assert_series_equal(result, ts[1::2])
tm.assert_series_equal(result, ts[pd.notnull(ts)])
def test_isnull(self):
ser = Series([0, 5.4, 3, nan, -0.001])
np.array_equal(ser.isnull(),
Series([False, False, False, True, False]).values)
ser = Series(["hi", "", nan])
np.array_equal(ser.isnull(), Series([False, False, True]).values)
def test_notnull(self):
ser = Series([0, 5.4, 3, nan, -0.001])
np.array_equal(ser.notnull(),
Series([True, True, True, False, True]).values)
ser = Series(["hi", "", nan])
np.array_equal(ser.notnull(), Series([True, True, False]).values)
def test_pad_nan(self):
x = Series([np.nan, 1., np.nan, 3., np.nan], ['z', 'a', 'b', 'c', 'd'],
dtype=float)
x.fillna(method='pad', inplace=True)
expected = Series([np.nan, 1.0, 1.0, 3.0, 3.0],
['z', 'a', 'b', 'c', 'd'], dtype=float)
assert_series_equal(x[1:], expected[1:])
self.assertTrue(np.isnan(x[0]), np.isnan(expected[0]))
def test_dropna_preserve_name(self):
self.ts[:5] = np.nan
result = self.ts.dropna()
self.assertEqual(result.name, self.ts.name)
name = self.ts.name
ts = self.ts.copy()
ts.dropna(inplace=True)
self.assertEqual(ts.name, name)
def test_fill_value_when_combine_const(self):
# GH12723
s = Series([0, 1, np.nan, 3, 4, 5])
exp = s.fillna(0).add(2)
res = s.add(2, fill_value=0)
assert_series_equal(res, exp)
class TestSeriesInterpolateData(TestData, tm.TestCase):
def test_interpolate(self):
ts = Series(np.arange(len(self.ts), dtype=float), self.ts.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
linear_interp = ts_copy.interpolate(method='linear')
self.assert_series_equal(linear_interp, ts)
ord_ts = Series([d.toordinal() for d in self.ts.index],
index=self.ts.index).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
time_interp = ord_ts_copy.interpolate(method='time')
self.assert_series_equal(time_interp, ord_ts)
# try time interpolation on a non-TimeSeries
# Only raises ValueError if there are NaNs.
non_ts = self.series.copy()
non_ts[0] = np.NaN
self.assertRaises(ValueError, non_ts.interpolate, method='time')
def test_interpolate_pchip(self):
tm._skip_if_no_scipy()
_skip_if_no_pchip()
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(Index([49.25, 49.5, 49.75, 50.25, 50.5,
50.75]))
interp_s = ser.reindex(new_index).interpolate(method='pchip')
# does not blow up, GH5977
interp_s[49:51]
def test_interpolate_akima(self):
tm._skip_if_no_scipy()
_skip_if_no_akima()
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(method='akima')
assert_series_equal(interp_s[1:3], expected)
def test_interpolate_piecewise_polynomial(self):
tm._skip_if_no_scipy()
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(
method='piecewise_polynomial')
assert_series_equal(interp_s[1:3], expected)
def test_interpolate_from_derivatives(self):
tm._skip_if_no_scipy()
ser =
|
Series([10, 11, 12, 13])
|
pandas.Series
|
# -*- coding: utf-8 -*-
import pandas as pd
import io, sys, os, datetime
import plotly.graph_objects as go
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State
import base64
import pickle
import plotly.express as px
import matplotlib
matplotlib.use('Agg')
from dash_extensions import Download
from dash_extensions.snippets import send_file
import json
import time
import subprocess
from pathlib import Path
import dash_uploader as du
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BaseFolder = "./"
FULL_PATH = os.path.abspath(BaseFolder)+"/"
sys.path.append(FULL_PATH)
from netcom.netcom import pathwayEnrichment, EdgeR_to_seeds, simulation
# Delete all results older than 5 days
deleteOldFiles_command = "find " + FULL_PATH + "Results/* -type d -ctime +5 -exec rm -rf {} \\;"
os.system(deleteOldFiles_command)
try:
with open(BaseFolder+"data/DB/DB.pickle", 'rb') as handle:
DB = pickle.load(handle)
except Exception:
DB = pd.read_pickle(BaseFolder+"data/DB/DB.pickle")
df_el_ = DB['full_enzymes_labels_jun.txt']
df_ecMapping_ = DB['ec_reac_mapping_jun.txt']
df_reactions_ = DB['reactions_3_balanced.txt']
df_ec_to_compoundIndex_ = DB['compound_labels_jun.txt']
def read_edgeR(df):
    try:
        df_edgeR_grouped = df.groupby("association")['X'].apply(list).to_frame()
    except KeyError:
        df_edgeR_grouped = df.groupby("association")['enzyme'].apply(list).to_frame()
return df_edgeR_grouped
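# Sketch of the expected input/output (illustrative; the rows are invented): the edgeR table
# needs an 'association' column plus an 'X' or 'enzyme' identifier column, and the helper
# returns one list of identifiers per association group, e.g.
#   read_edgeR(pd.DataFrame({'association': ['treatment', 'treatment', 'control'],
#                            'enzyme': ['1.1.1.1', '2.7.1.1', '3.1.3.3']}))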
def createParametersDict(folder):
start = time.time()
    # Write default parameters to a JSON file; these will be changed by the user later.
parametersDict = {}
parametersDict["drop_fragment_with_size"] = 1
parametersDict["filter_hubness"] = 25
parametersDict["soft_color_A"] = "green"
parametersDict["dark_color_A"] = "lime"
parametersDict["corrected_p-val"] = 0.05
parametersDict["enrichment_results_slice"] = [0, 100]
parametersDict["figure_name"] = "Figure"
parametersDict["network_layout_iter"] = 75
parametersDict["treatment_col"] = ""
parametersDict["comparison_col"] = ""
parametersDict["Not_associated_col"] = ""
parametersDict["Min_entities_Enrichment"] = 3
parametersDict["Max_entities_Enrichment"] = 25
parametersDict["Enriched_pathways"] = []
parametersDict["Final_folder"] = ""
folder = str(folder).strip("\"").strip("\'")
f = open(folder+"parametersDict.json", "w", encoding="utf8")
json.dump([parametersDict], f)
f.close()
    try:
        os.system("rm " + folder + "main_process_results_html.pkl")
    except OSError:
        pass
end = time.time()
print("createParametersDict time (sec):")
print(end - start)
def loadParametersDict(folder):
start = time.time()
folder = str(folder).strip("\"").strip("\'")
f = open(folder+"parametersDict.json", "r")
output = json.load(f)
f.close()
end = time.time()
print("loadParametersDict time (sec):")
print(end - start)
return output[0]
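# Hedged usage sketch (added illustration): the two helpers above round-trip the per-run
# parameters through <folder>/parametersDict.json, e.g.
#   createParametersDict(results_folder)          # write the defaults
#   params = loadParametersDict(results_folder)   # read them back as a dict
# where results_folder is a hypothetical per-analysis output directory.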
def update_parameters(val, col, folder):
start = time.time()
try:
folder = str(folder).strip("\"").strip("\'")
f = open(folder+"parametersDict.json", "r")
parametersDict = json.load(f)[0]
f.close()
    except Exception:
folder = str(folder).strip("\"").strip("\'")
createParametersDict(folder)
f = open(folder+"parametersDict.json", "r")
parametersDict = json.load(f)[0]
f.close()
parametersDict[col] = val
f = open(folder+"parametersDict.json", "w", encoding="utf8")
json.dump([parametersDict], f)
f.close()
end = time.time()
print("update_parameters time (sec):")
print(end - start)
def presentDatasetStatistics(folder):
start = time.time()
print("loading edger")
df = pd.read_csv(folder+"raw_input_edger.csv")
colorsDict={}
colorsDict["treatment_col"] = "blue"
colorsDict["comparison_col"] = "red"
colorsDict["Not_associated"] = "Not_associated"
print("prep colors")
df["Treatment color"] = df["association"].replace(colorsDict)
#VOLCANO PLOT
try:
volcano = px.scatter(df, x="logFC", y="PValue",color="Treatment color",
hover_name="enzyme", log_y=True)
try:
labels = df[["association"]].value_counts().index
values = df[["association"]].value_counts().values
        except Exception:
labels = df["association"].value_counts().index
values = df["association"].value_counts().values
pieChart = go.Figure(data=[go.Pie(labels=labels, values=values)])
pvalHist = px.histogram(df, x="PValue")
descriptionGraphs = html.Div(dbc.Row([
dbc.Col(dcc.Graph(
id='volcano-scatter',
#style={'display': 'inline-block'},
figure=volcano
)),
dbc.Col(dcc.Graph(
id='pie-chart',
#style={'display': 'inline-block'},
figure=pieChart
)),
dbc.Col(dcc.Graph(
id='pval-hist',
#style={'display': 'inline-block'},
figure=pvalHist
)),
])
)
end = time.time()
print("presentDatasetStatistics time (sec):")
print(end - start)
#calculate enrichment for keep_pathway.txt file creation
parametersDict = loadParametersDict(folder)
#print(parametersDict)
folder = str(folder).strip("\"").strip("\'")
FinalFolder_ = folder
Seeds_A_input, T1_seeds_tag, ECs_A_input, Seeds_B_input, T2_seeds_tag, ECs_B_input, Seeds_All_input, ECs_All_input=EdgeR_to_seeds(edgeR_row_location=FinalFolder_+"raw_input_edger.csv",
col_treatment_1=parametersDict["treatment_col"],
col_treatment_2=parametersDict["comparison_col"],
outputFolder=FinalFolder_,
input_sep=",")
with open(FinalFolder_+"keep_pathways.txt", 'w') as f:
f.write("\n")
pathways_enzymes_A = pathwayEnrichment(BaseFolder=BaseFolder,
FinalFolder=folder,
DE_ecs_list_=ECs_A_input,
All_B=ECs_B_input,
All_ecs_list_=ECs_A_input+ECs_B_input,
input_type="enzymes", # "compound" or "enzyme"
outputfilename=parametersDict["treatment_col"],
minEntitiesInPathway=parametersDict["Min_entities_Enrichment"],
maxEntitiesInPathway=parametersDict["Max_entities_Enrichment"],
drop_pathways=False,
keep_pathways=FinalFolder_+"keep_pathways.txt"
)
pathways_enzymes_B = pathwayEnrichment(BaseFolder=BaseFolder,
FinalFolder=folder,
DE_ecs_list_=ECs_B_input,
All_B=ECs_A_input,
All_ecs_list_=ECs_A_input+ECs_B_input,
input_type="enzymes", # "compound" or "enzyme"
outputfilename=parametersDict["comparison_col"],
minEntitiesInPathway=parametersDict["Min_entities_Enrichment"],
maxEntitiesInPathway=parametersDict["Max_entities_Enrichment"],
drop_pathways=False,
keep_pathways=FinalFolder_+"keep_pathways.txt"
)
return descriptionGraphs
except Exception as e:
print(e)
end = time.time()
print("presentDatasetStatistics time (sec):")
print(end - start)
def CreateBarPlot(folder):
start = time.time()
parametersDict = loadParametersDict(folder)
folder = str(folder).strip("\"").strip("\'")
FinalFolder_ = folder
df = pd.read_csv(FinalFolder_+"raw_input_edger.csv")
df=read_edgeR(df)
Seeds_A_input, T1_seeds_tag, ECs_A_input, Seeds_B_input, T2_seeds_tag, ECs_B_input, Seeds_All_input, ECs_All_input=EdgeR_to_seeds(edgeR_row_location=FinalFolder_+"raw_input_edger.csv",
col_treatment_1=parametersDict["treatment_col"],
col_treatment_2=parametersDict["comparison_col"],
outputFolder=FinalFolder_,
input_sep=",")
with open(FinalFolder_+"keep_pathways.txt", 'w') as f:
f.write("\n")
pathways_enzymes_A = pathwayEnrichment(BaseFolder=BaseFolder,
FinalFolder=folder,
DE_ecs_list_=ECs_A_input,
All_B=ECs_B_input,
All_ecs_list_=ECs_A_input+ECs_B_input,
input_type="enzymes", # "compound" or "enzyme"
outputfilename=parametersDict["treatment_col"],
minEntitiesInPathway=parametersDict["Min_entities_Enrichment"],
maxEntitiesInPathway=parametersDict["Max_entities_Enrichment"],
drop_pathways=False,
keep_pathways=FinalFolder_+"keep_pathways.txt"
)
pathways_enzymes_B = pathwayEnrichment(BaseFolder=BaseFolder,
FinalFolder=folder,
DE_ecs_list_=ECs_B_input,
All_B=ECs_A_input,
All_ecs_list_=ECs_A_input+ECs_B_input,
input_type="enzymes", # "compound" or "enzyme"
outputfilename=parametersDict["comparison_col"],
minEntitiesInPathway=parametersDict["Min_entities_Enrichment"],
maxEntitiesInPathway=parametersDict["Max_entities_Enrichment"],
drop_pathways=False,
keep_pathways=FinalFolder_+"keep_pathways.txt"
)
df_enzymes_A = pd.read_csv(FinalFolder_+parametersDict["treatment_col"]+"_pathway.csv")
df_enzymes_B = pd.read_csv(FinalFolder_+parametersDict["comparison_col"]+"_pathway.csv")
pathways_seed_rich = pathwayEnrichment(BaseFolder=BaseFolder,
FinalFolder=folder,
DE_ecs_list_=T1_seeds_tag.copy(),
All_B=T2_seeds_tag.copy(),
All_ecs_list_=T1_seeds_tag.copy()+T2_seeds_tag.copy(),
input_type="metabolites", # "metabolites" or "enzyme"
outputfilename=parametersDict["treatment_col"],
minEntitiesInPathway=parametersDict["Min_entities_Enrichment"],
maxEntitiesInPathway=parametersDict["Max_entities_Enrichment"],
drop_pathways=False,
keep_pathways=FinalFolder_+"keep_pathways.txt"
)
pathways_seed_poor = pathwayEnrichment(BaseFolder=BaseFolder,
FinalFolder=FinalFolder_,
DE_ecs_list_=T2_seeds_tag.copy(),
All_B=T1_seeds_tag.copy(),
All_ecs_list_=T1_seeds_tag.copy()+T2_seeds_tag.copy(),
input_type="metabolites", # "metabolites" or "enzyme"
outputfilename=parametersDict["comparison_col"],
minEntitiesInPathway=parametersDict["Min_entities_Enrichment"],
maxEntitiesInPathway=parametersDict["Max_entities_Enrichment"],
drop_pathways=False,
keep_pathways=FinalFolder_+"keep_pathways.txt"
)
df_compounds_A = pd.read_csv(FinalFolder_+parametersDict["treatment_col"]+"_pathway.csv")
df_compounds_B = pd.read_csv(FinalFolder_+parametersDict["comparison_col"]+"_pathway.csv")
listOfAllPathways = df_enzymes_A["Pathway"].values.tolist()
listOfAllPathways += df_enzymes_B["Pathway"].values.tolist()
listOfAllPathways += df_compounds_A["Pathway"].values.tolist()
listOfAllPathways += df_compounds_B["Pathway"].values.tolist()
update_parameters(list(set(listOfAllPathways)), "Enriched_pathways", folder)
BarPltEnzymes = dcc.Graph(id='g1',
figure={'data': [
{'x': df_enzymes_A["Pathway"].values.tolist(), 'y': df_enzymes_A["Count"].values.tolist(), 'type': 'bar', 'name':parametersDict["treatment_col"]},
{'x': df_enzymes_B["Pathway"].values.tolist(), 'y': df_enzymes_B["Count"].values.tolist(), 'type': 'bar', 'name':parametersDict["comparison_col"]}
],
'layout':{
'xaxis': {'autorange': True, 'title': 'X Axis', 'automargin': True}
}},
style={'height': '800px', 'width': '1000px'}
)
BarPltCompounds = dcc.Graph(id='g1',
figure={'data': [
{'x': df_compounds_A["Pathway"].values.tolist(), 'y': df_compounds_A["Count"].values.tolist(), 'type': 'bar', 'name':parametersDict["treatment_col"]},
{'x': df_compounds_B["Pathway"].values.tolist(), 'y': df_compounds_B["Count"].values.tolist(), 'type': 'bar', 'name':parametersDict["comparison_col"]}
],
'layout':{
'xaxis': {'autorange': True, 'title': 'Pathway', 'automargin': True}
}},
style={'height': '800px', 'width': '1000px'}
)
BarPlts = html.Div([
html.Div([
html.Div([
html.H3('Enzymes - pathways histogram'),
BarPltEnzymes
], className="six columns"),
html.Div([
html.H3('Compounds - pathways histogram'),
BarPltCompounds
], className="six columns"),
],
className="row")
])
end = time.time()
print("CreateBarPlot time (sec):")
print(end - start)
return BarPlts
def slider_enrch_min():
return html.Div([
dbc.FormGroup(
[
dbc.Label("Entities number in a pathway [range, used for enrichment analysis]",
html_for="slider-enrch-min",
style={'text-align': 'center', 'font-size': '100%', 'text-transform': 'uppercase'}),
dcc.RangeSlider(id="slider-enrch-min",
min=1,
max=100,
step=1,
marks={i: str(i) for i in range(100)},
value=[5, 25])
]
),
html.Div(id='slider-output-enrch-min-container', style={'margin-bottom': 20})
],
style={'padding': '10px 10px 10px 10px'}
)
def slider_node_hubness():
return html.Div([
dbc.FormGroup(
[
dbc.Label("Limit node hubness [default = 50]",
html_for="slider-hubness",
style={'text-align': 'center', 'font-size': '100%', 'text-transform': 'uppercase'}
),
dcc.Slider(id="slider-hubness",
min=1,
max=100,
step=1,
marks={i: str(i) for i in range(100)},
value=50),
]
),
html.Div(id='slider-output-hubness-container', style={'margin-bottom': 20})
]
)
def slider_network_iter():
return html.Div([
dbc.FormGroup(
[
dbc.Label("Set network layout iterations [default = 75]",
html_for="slider-iter",
style={'text-align': 'center', 'font-size': '100%', 'text-transform': 'uppercase'}
),
dcc.Slider(id="slider-iter",
min=1,
max=200,
step=5,
marks={i: str(i) for i in range(0, 200, 5)},
value=75),
]
),
html.Div(id='slider-output-iter-container', style={'margin-bottom': 20})
]
)
def select_colors_seeds():
return html.Div([
dbc.FormGroup(
[
dbc.Label("Environmental resource node color",
html_for="seeds-color-dropdown", width=4),
dbc.Col(
dcc.Dropdown(
id="seeds-color-dropdown",
options=[
{'label': 'Green', 'value': 'green'},
{'label': 'Orange', 'value': 'orange'},
{'label': 'Blue', 'value': 'blue'},
{'label': 'Red', 'value': 'red'},
{'label': 'Goldenrod', 'value': 'goldenrod'},
{'label': 'Magenta', 'value': 'magenta'},
{'label': 'Medium Purple', 'value': 'mediumpurple'},
{'label': 'Chocolate', 'value': 'chocolate'},
{'label': 'Khaki', 'value': 'khaki'},
],
value='green',
),
width=10,
),
],
row=True,
),
html.Div(id='output-seedcolor-container', style={'padding': 10})
],
style={"width": "30%",
#'display':'inline-block',
'text-align':'center',
#'padding-left':'35%',
#'padding-right':'35%'
}
)
def select_colors_unique():
return html.Div([
dbc.FormGroup(
[
dbc.Label("Unique node color", html_for="unique-color-dropdown", width=4),
dbc.Col(
dcc.Dropdown(
id="unique-color-dropdown",
options=[
{'label': 'Lime', 'value': 'lime'},
{'label': 'Salmon', 'value': 'salmon'},
{'label': 'Cyan', 'value': 'cyan'},
{'label': 'Pink', 'value': 'pink'},
{'label': 'Yellow', 'value': 'yellow'},
{'label': 'Gold', 'value': 'gold'},
{'label': 'Light Purple', 'value': 'lightpurple'},
{'label': 'Gray', 'value': 'gray'},
{'label': 'Light Blue', 'value': 'lightblue'},
],
value='lime',
),
width=10,
),
],
row=True,
),
html.Div(id='output-uniquecolor-container', style={'padding': 10})
],
style={"width": "30%",
#'display':'inline-block',
'text-align':'center',
#'padding-left':'35%',
#'padding-right':'35%'
}
)
def execution_button():
return html.Div([
html.Div(dcc.Input(id='input-on-submit', type='hidden')),
html.Button('Submit', id='submit-val', n_clicks=0),
html.Div(id='container-button-basic'),
#html.Div(id='container-button-basic', children=''),
dcc.Interval(id='final-results-listener', interval=5000, n_intervals=0, max_intervals=180),#fires 180 times * 5 sec = 15 minutes
html.Div(id='container-final-results')
])
def pathways_dropout(folder):
#Read enrichment csv files
folder = str(folder).strip("\"").strip("\'")
parametersDict = loadParametersDict(folder)
df_compounds_A = pd.read_csv(folder+parametersDict["treatment_col"]+"_pathway.csv")
df_compounds_B =
|
pd.read_csv(folder+parametersDict["comparison_col"]+"_pathway.csv")
|
pandas.read_csv
|
# coding: utf-8
import pytest
from datetime import datetime
import pandas as pd
from dgp.lib import time_utils as tu
def test_leap_seconds():
# TO DO: Test edge cases
gpsweek = 1959
gpsweeksecond = 219698.000
unixtime = 1500987698 # 2017-07-25 13:01:38+00:00
dt = datetime.strptime('2017-07-25 13:01:38', '%Y-%m-%d %H:%M:%S')
expected1 = 18
date1 = '08-07-2015'
date2 = '08/07/2015'
date3 = '08/07-2015'
expected2 = 17
res_gps = tu.leap_seconds(week=gpsweek, seconds=gpsweeksecond)
res_unix = tu.leap_seconds(seconds=unixtime)
res_datetime = tu.leap_seconds(datetime=dt)
res_date1 = tu.leap_seconds(date=date1)
res_date2 = tu.leap_seconds(date=date2, dateformat='%m/%d/%Y')
assert expected1 == res_gps
assert expected1 == res_unix
assert expected1 == res_datetime
assert expected2 == res_date1
assert expected2 == res_date2
with pytest.raises(ValueError):
tu.leap_seconds(date=date3)
with pytest.raises(ValueError):
tu.leap_seconds(minutes=dt)
def test_convert_gps_time():
gpsweek = 1959
gpsweeksecond = 219698.000
result = 1500987698 # 2017-07-25 13:01:38+00:00
test_res = tu.convert_gps_time(gpsweek, gpsweeksecond)
assert result == test_res
@pytest.mark.parametrize(
'given_sow, expected_dt', [
(312030.8, datetime(2017, 3, 22, 14, 40, 30, 800000)),
(312030.08, datetime(2017, 3, 22, 14, 40, 30, 80000)),
(312030.008, datetime(2017, 3, 22, 14, 40, 30, 8000)),
(312030.0008, datetime(2017, 3, 22, 14, 40, 30, 800))
]
)
def test_convert_gps_time_datetime(given_sow, expected_dt):
gpsweek =
|
pd.Series([1941])
|
pandas.Series
|
from .nwb_interface import NWBDataset
from .chop import ChopInterface, chop_data, merge_chops
from itertools import product
import numpy as np
import pandas as pd
import h5py
import sys
import os
import logging
logger = logging.getLogger(__name__)
PARAMS = {
'mc_maze': {
'spk_field': 'spikes',
'hospk_field': 'heldout_spikes',
'behavior_source': 'data',
'behavior_field': 'hand_vel',
'lag': 100,
'make_params': {
'align_field': 'move_onset_time',
'align_range': (-250, 450),
},
'eval_make_params': {
'align_field': 'move_onset_time',
'align_range': (-250, 450),
},
'fp_len': 200,
'psth_params': {
'cond_fields': ['trial_type', 'trial_version'],
'make_params': {
'align_field': 'move_onset_time',
'align_range': (-250, 450),
},
'kern_sd': 70,
},
},
'mc_rtt': {
'spk_field': 'spikes',
'hospk_field': 'heldout_spikes',
'rate_field': 'rates',
'horate_field': 'heldout_rates',
'behavior_source': 'data',
'behavior_field': 'finger_vel',
'lag': 140,
'make_params': {
'align_field': 'start_time',
'align_range': (0, 600),
'allow_overlap': True,
},
'eval_make_params': {
'align_field': 'start_time',
'align_range': (0, 600),
'allow_overlap': True,
},
'fp_len': 200,
},
'area2_bump': {
'spk_field': 'spikes',
'hospk_field': 'heldout_spikes',
'rate_field': 'rates',
'horate_field': 'heldout_rates',
'behavior_source': 'data',
'behavior_field': 'hand_vel',
'decode_masks': lambda x: np.stack([x.ctr_hold_bump == 0, x.ctr_hold_bump == 1]).T,
'lag': -20,
'make_params': {
'align_field': 'move_onset_time',
'align_range': (-100, 500),
},
'eval_make_params': {
'align_field': 'move_onset_time',
'align_range': (-100, 500),
'allow_overlap': True,
},
'fp_len': 200,
'psth_params': {
'cond_fields': ['cond_dir', 'ctr_hold_bump'],
'make_params': {
'align_field': 'move_onset_time',
'align_range': (-100, 500),
},
'kern_sd': 40,
},
},
'dmfc_rsg': {
'spk_field': 'spikes',
'hospk_field': 'heldout_spikes',
'behavior_source': 'trial_info',
'behavior_mask': lambda x: x.is_outlier == 0,
'behavior_field': ['is_eye', 'theta', 'is_short', 'ts', 'tp'],
'jitter': lambda x: np.stack([
np.zeros(len(x)),
np.where(x.split == 'test', np.zeros(len(x)),
np.clip(1500.0 - x.get('tp', pd.Series(np.nan)).to_numpy(), 0.0, 300.0))
]).T,
'make_params': {
'align_field': 'go_time',
'align_range': (-1500, 0),
'allow_overlap': True,
},
'eval_make_params': {
'start_field': 'set_time',
'end_field': 'go_time',
'align_field': 'go_time',
},
'eval_tensor_params': {
'seg_len': 1500,
'pad': 'front'
},
'fp_len': 200,
'psth_params': {
'cond_fields': ['is_eye', 'theta', 'is_short', 'ts'],
'make_params': {
'start_field': 'set_time',
'end_field': 'go_time',
'align_field': 'go_time',
},
'kern_sd': 70,
'pad': 'front',
'seg_len': 1500,
'skip_mask': lambda x: x.is_outlier == 1,
},
},
'mc_maze_large': {
'spk_field': 'spikes',
'hospk_field': 'heldout_spikes',
'rate_field': 'rates',
'horate_field': 'heldout_rates',
'behavior_source': 'data',
'behavior_field': 'hand_vel',
'lag': 120,
'make_params': {
'align_field': 'move_onset_time',
'align_range': (-250, 450),
},
'eval_make_params': {
'align_field': 'move_onset_time',
'align_range': (-250, 450),
},
'fp_len': 200,
'psth_params': {
'cond_fields': ['trial_type', 'trial_version'],
'make_params': {
'align_field': 'move_onset_time',
'align_range': (-250, 450),
},
'kern_sd': 50,
},
},
'mc_maze_medium': {
'spk_field': 'spikes',
'hospk_field': 'heldout_spikes',
'rate_field': 'rates',
'horate_field': 'heldout_rates',
'behavior_source': 'data',
'behavior_field': 'hand_vel',
'lag': 120,
'make_params': {
'align_field': 'move_onset_time',
'align_range': (-250, 450),
},
'eval_make_params': {
'align_field': 'move_onset_time',
'align_range': (-250, 450),
},
'fp_len': 200,
'psth_params': {
'cond_fields': ['trial_type', 'trial_version'],
'make_params': {
'align_field': 'move_onset_time',
'align_range': (-250, 450),
},
'kern_sd': 50,
},
},
'mc_maze_small': {
'spk_field': 'spikes',
'hospk_field': 'heldout_spikes',
'rate_field': 'rates',
'horate_field': 'heldout_rates',
'behavior_source': 'data',
'behavior_field': 'hand_vel',
'lag': 120,
'make_params': {
'align_field': 'move_onset_time',
'align_range': (-250, 450),
},
'eval_make_params': {
'align_field': 'move_onset_time',
'align_range': (-250, 450),
},
'fp_len': 200,
'psth_params': {
'cond_fields': ['trial_type', 'trial_version'],
'make_params': {
'align_field': 'move_onset_time',
'align_range': (-250, 450),
},
'kern_sd': 50,
},
},
}
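# Note: the entries above are per-dataset defaults consumed by the tensor-making
# functions below. `update_params` is applied with a shallow dict.update, so overriding
# a nested dict such as 'make_params' replaces it wholesale; scalar overrides are safer,
# e.g. (illustrative call, not executed here):
#     make_train_input_tensors(dataset, 'mc_rtt', update_params={'lag': 80}, save_file=False)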
def make_train_input_tensors(dataset, dataset_name,
trial_split='train',
update_params=None,
save_file=True,
return_dict=True,
save_path="train_input.h5",
include_behavior=False,
include_forward_pred=False,
seed=0):
"""Makes model training input tensors.
Creates 3d arrays containing heldin and heldout spikes
for train trials (and other data if indicated)
and saves them as .h5 files and/or returns them
in a dict
Parameters
----------
dataset : NWBDataset
An instance of NWBDataset to make tensors from
dataset_name : {'mc_maze', 'mc_rtt', 'area2_bump', 'dmfc_rsg',
'mc_maze_large', 'mc_maze_medium', 'mc_maze_small'}
Name of dataset. Used to select default
parameters from PARAMS
trial_split : {'train', 'val'}, array-like, or list, optional
The selection of trials to make the tensors with.
It can be the predefined trial splits 'train'
or 'val', an array-like boolean mask (see the
include_trials argument of `NWBDataset.make_trial_data`),
or a list containing the previous two types, which
will include trials that are in any of the splits
in the list. By default 'train'
update_params : dict, optional
New parameters with which to update default
dict from PARAMS
save_file : bool, optional
Whether to save the reshaped data to an
h5 file, by default True
return_dict : bool, optional
Whether to return the reshaped data in a
data dict with the same keys as the h5 files,
by default True
save_path : str, optional
Path to where the h5 output file should be saved
include_behavior : bool, optional
Whether to include behavioral data in the
returned tensors, by default False
include_forward_pred : bool, optional
Whether to include forward-prediction spiking
data in the returned tensors, by default False
seed : int, optional
Seed for random generator used for jitter
Returns
-------
dict of np.array
A dict containing 3d numpy arrays of
spiking data for indicated trials, and possibly
additional data based on provided arguments
"""
assert isinstance(dataset, NWBDataset), "`dataset` must be an instance of NWBDataset"
assert dataset_name in PARAMS.keys(), f"`dataset_name` must be one of {list(PARAMS.keys())}"
assert isinstance(trial_split, (pd.Series, np.ndarray, list)) or trial_split in ['train', 'val'], \
"Invalid `trial_split` argument. Please refer to the documentation for valid choices"
# Fetch and update params
params = PARAMS[dataset_name].copy()
if update_params is not None:
params.update(update_params)
# Add filename extension if necessary
if not save_path.endswith('.h5'):
save_path = save_path + '.h5'
# unpack params
spk_field = params['spk_field']
hospk_field = params['hospk_field']
make_params = params['make_params'].copy()
jitter = params.get('jitter', None)
# Prep mask
trial_mask = _prep_mask(dataset, trial_split)
# Prep jitter if necessary
if jitter is not None:
np.random.seed(seed)
jitter_vals = _prep_jitter(dataset, trial_mask, jitter)
align_field = make_params.get('align_field', make_params.get('start_field', 'start_time'))
align_vals = dataset.trial_info[trial_mask][align_field]
align_jit = align_vals + pd.to_timedelta(jitter_vals, unit='ms')
align_jit.name = align_field.replace('_time', '_jitter_time')
dataset.trial_info = pd.concat([dataset.trial_info, align_jit], axis=1)
if 'align_field' in make_params:
make_params['align_field'] = align_jit.name
else:
make_params['start_field'] = align_jit.name
# Make output spiking arrays and put into data_dict
train_dict = make_stacked_array(dataset, [spk_field, hospk_field], make_params, trial_mask)
data_dict = {
'train_spikes_heldin': train_dict[spk_field],
'train_spikes_heldout': train_dict[hospk_field],
}
# Add behavior data if necessary
if include_behavior:
behavior_source = params['behavior_source']
behavior_field = params['behavior_field']
behavior_make_params = _prep_behavior(dataset, params.get('lag', None), make_params)
# Retrieve behavior data from indicated source
if behavior_source == 'data':
train_behavior = make_stacked_array(dataset, behavior_field, behavior_make_params, trial_mask)[behavior_field]
else:
train_behavior = dataset.trial_info[trial_mask][behavior_field].to_numpy()
# Filter out behavior on certain trials if necessary
if 'behavior_mask' in params:
if callable(params['behavior_mask']):
behavior_mask = params['behavior_mask'](dataset.trial_info[trial_mask])
else:
behavior_mask, _ = params['behavior_mask']
train_behavior[~behavior_mask] = np.nan
data_dict['train_behavior'] = train_behavior
# Add forward prediction data if necessary
if include_forward_pred:
fp_len = params['fp_len']
fp_steps = fp_len / dataset.bin_width
fp_make_params = _prep_fp(make_params, fp_steps, dataset.bin_width)
fp_dict = make_stacked_array(dataset, [spk_field, hospk_field], fp_make_params, trial_mask)
data_dict['train_spikes_heldin_forward'] = fp_dict[spk_field]
data_dict['train_spikes_heldout_forward'] = fp_dict[hospk_field]
# Delete jitter column
if jitter is not None:
dataset.trial_info.drop(align_jit.name, axis=1, inplace=True)
# Save and return data
if save_file:
save_to_h5(data_dict, save_path, overwrite=True)
if return_dict:
return data_dict
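# Illustrative usage (sketch; the data path is hypothetical):
#     dataset = NWBDataset("~/data/mc_maze/")  # load the NWB files for a dataset
#     train_dict = make_train_input_tensors(dataset, 'mc_maze',
#                                           trial_split='train', save_file=False)
#     train_dict['train_spikes_heldin'].shape  # roughly (trials, time bins, held-in units)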
def make_eval_input_tensors(dataset, dataset_name,
trial_split='val',
update_params=None,
save_file=True,
return_dict=True,
save_path="eval_input.h5",
seed=0):
"""Makes model evaluation input tensors.
Creates 3d arrays containing heldin spiking for
eval trials (and heldout spiking if available)
and saves them as .h5 files and/or returns them
in a dict
Parameters
----------
dataset : NWBDataset
An instance of NWBDataset to make tensors from
dataset_name : {'mc_maze', 'mc_rtt', 'area2_bump', 'dmfc_rsg',
'mc_maze_large', 'mc_maze_medium', 'mc_maze_small'}
Name of dataset. Used to select default
parameters from PARAMS
trial_split : {'train', 'val', 'test'}, array-like, or list, optional
The selection of trials to make the tensors with.
It can be the predefined trial splits 'train',
'val', or 'test', an array-like boolean mask (see the
include_trials argument of `NWBDataset.make_trial_data`),
or a list containing the previous two types, which
will include trials that are in any of the splits
in the list. By default 'val'
update_params : dict, optional
New parameters with which to update default
dict from PARAMS
save_file : bool, optional
Whether to save the reshaped data to an
h5 file, by default True
return_dict : bool, optional
Whether to return the reshaped data in a
data dict with the same keys as the h5 files,
by default True
save_path : str, optional
Path to where the h5 output file should be saved
seed : int, optional
Seed for random generator used for jitter
Returns
-------
dict of np.array
A dict containing 3d numpy arrays of
spiking data for indicated trials
"""
assert isinstance(dataset, NWBDataset), "`dataset` must be an instance of NWBDataset"
assert dataset_name in PARAMS.keys(), f"`dataset_name` must be one of {list(PARAMS.keys())}"
assert isinstance(trial_split, (pd.Series, np.ndarray, list)) or trial_split in ['train', 'val', 'test'], \
"Invalid `trial_split` argument. Please refer to the documentation for valid choices"
# Fetch and update params
params = PARAMS[dataset_name].copy()
if update_params is not None:
params.update(update_params)
# Add filename extension if necessary
if not save_path.endswith('.h5'):
save_path = save_path + '.h5'
# Unpack params
spk_field = params['spk_field']
hospk_field = params['hospk_field']
make_params = params['make_params'].copy()
make_params['allow_nans'] = True
jitter = params.get('jitter', None)
# Prep mask
trial_mask = _prep_mask(dataset, trial_split)
# Prep jitter if necessary
if jitter is not None:
np.random.seed(seed)
jitter_vals = _prep_jitter(dataset, trial_mask, jitter)
align_field = make_params.get('align_field', make_params.get('start_field', 'start_time'))
align_vals = dataset.trial_info[trial_mask][align_field]
align_jit = align_vals + pd.to_timedelta(jitter_vals, unit='ms')
align_jit.name = align_field.replace('_time', '_jitter_time')
dataset.trial_info = pd.concat([dataset.trial_info, align_jit], axis=1)
if 'align_field' in make_params:
make_params['align_field'] = align_jit.name
else:
make_params['start_field'] = align_jit.name
# Make output spiking arrays and put into data_dict
if not np.any(dataset.trial_info[trial_mask].split == 'test'):
eval_dict = make_stacked_array(dataset, [spk_field, hospk_field], make_params, trial_mask)
data_dict = {
'eval_spikes_heldin': eval_dict[spk_field],
'eval_spikes_heldout': eval_dict[hospk_field],
}
else:
eval_dict = make_stacked_array(dataset, [spk_field], make_params, trial_mask)
data_dict = {
'eval_spikes_heldin': eval_dict[spk_field],
}
# Delete jitter column
if jitter is not None:
dataset.trial_info.drop(align_jit.name, axis=1, inplace=True)
# Save and return data
if save_file:
save_to_h5(data_dict, save_path, overwrite=True)
if return_dict:
return data_dict
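# Illustrative usage (sketch): evaluation inputs for the validation split without
# writing an h5 file:
#     eval_dict = make_eval_input_tensors(dataset, 'mc_maze',
#                                         trial_split='val', save_file=False)
#     # 'eval_spikes_heldout' is only present when the chosen split is not 'test'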
def make_eval_target_tensors(dataset, dataset_name,
train_trial_split='train',
eval_trial_split='val',
update_params=None,
save_file=True,
return_dict=True,
save_path="target_data.h5",
include_psth=False,
seed=0):
"""Makes tensors containing target data used to evaluate model predictions.
Creates 3d arrays containing true heldout spiking data
for eval trials and other arrays for model evaluation and saves them
as .h5 files and/or returns them in a dict. Because heldout
data is not available in the 'test' split, this function cannot
be used on the 'test' split, though it is what we used to generate
the EvalAI evaluation data
Parameters
----------
dataset : NWBDataset
An instance of NWBDataset to make tensors from
dataset_name : {'mc_maze', 'mc_rtt', 'area2_bump', 'dmfc_rsg',
'mc_maze_large', 'mc_maze_medium', 'mc_maze_small'}
Name of dataset. Used to select default
parameters from PARAMS
train_trial_split : {'train', 'val'}, array-like, or list, optional
The selection of trials used for training.
It can be the predefined trial splits 'train'
or 'val', an array-like boolean mask (see the
include_trials argument of `NWBDataset.make_trial_data`),
or a list containing the previous two types, which
will include trials that are in any of the splits
in the list. By default 'train'
eval_trial_split : {'train', 'val'}, array-like, or list, optional
The selection of trials used for evaluation.
It follows the same format as train_trial_split
described above. By default 'val'
update_params : dict, optional
New parameters with which to update default
dict from PARAMS
save_file : bool, optional
Whether to save the reshaped data to an
h5 file, by default True
return_dict : bool, optional
Whether to return the reshaped data in a
data dict with the same keys as the h5 files,
by default True
save_path : str, optional
Path to where the h5 output file should be saved
include_psth : bool, optional
Whether to make PSTHs for evaluation of match
to PSTH, by default False. Since PSTH calculation
is memory and cpu-intensive in its current implementation,
it may be desirable to skip this step
seed : int, optional
Seed for random generator used for jitter
Returns
-------
nested dict of np.array
Dict containing data for evaluation, including
held-out spiking activity for eval trials
and behavioral correlates
"""
assert isinstance(dataset, NWBDataset), "`dataset` must be an instance of NWBDataset"
assert dataset_name in PARAMS.keys(), f"`dataset_name` must be one of {list(PARAMS.keys())}"
assert isinstance(train_trial_split, (pd.Series, np.ndarray, list)) or train_trial_split in ['train', 'val', 'test'], \
"Invalid `train_trial_split` argument. Please refer to the documentation for valid choices"
assert isinstance(eval_trial_split, (pd.Series, np.ndarray, list)) or eval_trial_split in ['train', 'val', 'test'], \
"Invalid `eval_trial_split` argument. Please refer to the documentation for valid choices"
# Fetch and update params
params = PARAMS[dataset_name].copy()
if update_params is not None:
params.update(update_params)
# Add filename extension if necessary
if not save_path.endswith('.h5'):
save_path = save_path + '.h5'
# unpack params
spk_field = params['spk_field']
hospk_field = params['hospk_field']
make_params = params['eval_make_params'].copy()
behavior_source = params['behavior_source']
behavior_field = params['behavior_field']
jitter = params.get('jitter', None)
eval_tensor_params = params.get('eval_tensor_params', {}).copy()
fp_len = params['fp_len']
fp_steps = fp_len / dataset.bin_width
# Properly name output fields based on submission bin width
suf = '' if (dataset.bin_width == 5) else f'_{dataset.bin_width}'
# Prep masks
train_mask = _prep_mask(dataset, train_trial_split)
eval_mask = _prep_mask(dataset, eval_trial_split)
if isinstance(eval_trial_split, str) and eval_trial_split == 'test':
ignore_mask = dataset.trial_info.split == 'none'
else:
ignore_mask = ~(train_mask | eval_mask)
# Prep jitter if necessary
if jitter is not None:
align_field = make_params.get('align_field', make_params.get('start_field', 'start_time'))
np.random.seed(seed)
train_jitter_vals = _prep_jitter(dataset, train_mask, jitter)
train_align_vals = dataset.trial_info[train_mask][align_field]
train_align_jit = train_align_vals + pd.to_timedelta(train_jitter_vals, unit='ms')
train_align_jit.name = align_field.replace('_time', '_jitter_time')
dataset.trial_info = pd.concat([dataset.trial_info, train_align_jit], axis=1)
np.random.seed(seed)
eval_jitter_vals = _prep_jitter(dataset, eval_mask, jitter)
eval_align_vals = dataset.trial_info[eval_mask][align_field]
eval_align_jit = eval_align_vals +
|
pd.to_timedelta(eval_jitter_vals, unit='ms')
|
pandas.to_timedelta
|
from abc import abstractmethod
import datetime as dt
import numpy as np
from numpy.random import RandomState
from numpy.testing import assert_equal
from pandas import DataFrame, Series, Timedelta, date_range
import pytest
from arch import doc
from arch.univariate.base import implicit_constant
from arch.utility.array import (
ConcreteClassMeta,
DocStringInheritor,
cutoff_to_index,
date_to_index,
ensure1d,
ensure2d,
find_index,
parse_dataframe,
)
@pytest.fixture(scope="function")
def rng():
return RandomState(12345)
def test_ensure1d():
out = ensure1d(1.0, "y")
assert_equal(out, np.array([1.0]))
out = ensure1d(np.arange(5.0), "y")
assert_equal(out, np.arange(5.0))
out = ensure1d(np.arange(5.0)[:, None], "y")
assert_equal(out, np.arange(5.0))
in_array = np.reshape(np.arange(16.0), (4, 4))
with pytest.raises(ValueError):
ensure1d(in_array, "y")
y = Series(np.arange(5.0))
ys = ensure1d(y, "y")
assert isinstance(ys, np.ndarray)
ys = ensure1d(y, "y", True)
assert isinstance(ys, Series)
y = DataFrame(y)
ys = ensure1d(y, "y")
assert isinstance(ys, np.ndarray)
ys = ensure1d(y, "y", True)
assert isinstance(ys, Series)
y.columns = [1]
ys = ensure1d(y, "y", True)
assert isinstance(ys, Series)
assert ys.name == "1"
y = Series(np.arange(5.0), name="series")
ys = ensure1d(y, "y")
assert isinstance(ys, np.ndarray)
ys = ensure1d(y, "y", True)
assert isinstance(ys, Series)
y = DataFrame(y)
ys = ensure1d(y, "y")
assert isinstance(ys, np.ndarray)
ys = ensure1d(y, "y", True)
assert isinstance(ys, Series)
ys.name = 1
ys = ensure1d(ys, None, True)
assert isinstance(ys, Series)
assert ys.name == "1"
y = DataFrame(np.reshape(np.arange(10), (5, 2)))
with pytest.raises(ValueError):
ensure1d(y, "y")
def test_ensure2d():
s = Series([1, 2, 3], name="x")
df = ensure2d(s, "x")
assert isinstance(df, DataFrame)
df2 = ensure2d(df, "x")
assert df is df2
npa = ensure2d(s.values, "x")
assert isinstance(npa, np.ndarray)
assert npa.ndim == 2
npa = ensure2d(np.array(1.0), "x")
assert isinstance(npa, np.ndarray)
assert npa.ndim == 2
with pytest.raises(ValueError):
ensure2d(np.array([[[1]]]), "x")
with pytest.raises(TypeError):
ensure2d([1], "x")
def test_parse_dataframe():
s = Series(np.arange(10.0), name="variable")
out = parse_dataframe(s, "y")
assert_equal(out[1], np.arange(10.0))
assert_equal(out[0], ["variable"])
df = DataFrame(s)
out = parse_dataframe(df, "y")
assert_equal(out[1], np.arange(10.0))
assert_equal(out[0], ["variable"])
out = parse_dataframe(np.arange(10.0), "y")
assert_equal(out[1], np.arange(10.0))
assert_equal(out[0], ["y"])
out = parse_dataframe(None, "name")
assert out[0] == ["name"]
assert isinstance(out[1], np.ndarray)
assert out[1].shape == (0,)
def test_implicit_constant(rng):
x = rng.standard_normal((1000, 2))
assert not implicit_constant(x)
x[:, 0] = 1.0
assert implicit_constant(x)
x = rng.standard_normal((1000, 3))
x[:, 0] = x[:, 0] > 0
x[:, 1] = 1 - x[:, 0]
assert implicit_constant(x)
def test_docstring_inheritor():
class A(object, metaclass=DocStringInheritor):
"""
Docstring
"""
class B(A):
pass
assert_equal(B.__doc__, A.__doc__)
def test_date_to_index():
dr = date_range("20000101", periods=3000, freq="W")
y = Series(np.arange(3000.0), index=dr)
date_index = y.index
index = date_to_index(date_index[0], date_index)
assert_equal(index, 0)
index = date_to_index(date_index[-1], date_index)
assert_equal(index, date_index.shape[0] - 1)
index = date_to_index("2009-08-02", date_index)
assert_equal(index, 500)
index = date_to_index("2009-08-04", date_index)
assert_equal(index, 501)
index = date_to_index("2009-08-01", date_index)
assert_equal(index, 500)
index = date_to_index(dt.datetime(2009, 8, 1), date_index)
assert_equal(index, 500)
with pytest.raises(ValueError):
date_to_index(dt.date(2009, 8, 1), date_index)
z = y + 0.0
z.index = np.arange(3000)
num_index = z.index
with pytest.raises(ValueError):
date_to_index(dt.datetime(2009, 8, 1), num_index)
idx = date_range("1999-12-31", periods=3)
df = DataFrame([1, 2, 3], index=idx[::-1])
with pytest.raises(ValueError):
date_to_index(idx[0], df.index)
df = DataFrame([1, 2, 3], index=[idx[0]] * 3)
with pytest.raises(ValueError):
date_to_index(idx[0], df.index)
with pytest.raises(ValueError):
date_to_index("NaT", idx)
# check whether this also works for a localized datetimeindex
date_index = date_range("20000101", periods=3000, freq="W", tz="Europe/Berlin")
index = date_to_index(date_index[0], date_index)
assert_equal(index, 0)
def test_date_to_index_timestamp():
dr = date_range("20000101", periods=3000, freq="W")
y = Series(np.arange(3000.0), index=dr)
date_index = y.index
date = y.index[1000]
date_pydt = date.to_pydatetime()
date_npdt = date.to_datetime64()
date_str = date_pydt.strftime("%Y-%m-%d")
index = date_to_index(date, date_index)
index_pydt = date_to_index(date_pydt, date_index)
index_npdt = date_to_index(date_npdt, date_index)
index_str = date_to_index(date_str, date_index)
assert_equal(index, 1000)
assert_equal(index, index_npdt)
assert_equal(index, index_pydt)
assert_equal(index, index_str)
def test_date_to_index_inexact():
dr = date_range("20000101", periods=3000, freq="W")
y = Series(np.arange(3000.0), index=dr)
date_index = y.index
date = date_index[1000] + Timedelta(1, "D")
date_pydt = date.to_pydatetime()
date_npdt = date.to_datetime64()
date_str = date_pydt.strftime("%Y-%m-%d")
index = date_to_index(date, date_index)
index_pydt = date_to_index(date_pydt, date_index)
index_npdt = date_to_index(date_npdt, date_index)
index_str = date_to_index(date_str, date_index)
assert_equal(index, 1001)
assert_equal(index, index_npdt)
assert_equal(index, index_pydt)
assert_equal(index, index_str)
date = date_index[0] - Timedelta(1, "D")
index = date_to_index(date, date_index)
assert_equal(index, 0)
date_pydt = date.to_pydatetime()
date_npdt = date.to_datetime64()
date_str = date_pydt.strftime("%Y-%m-%d")
index_pydt = date_to_index(date_pydt, date_index)
index_npdt = date_to_index(date_npdt, date_index)
index_str = date_to_index(date_str, date_index)
assert_equal(index, index_npdt)
assert_equal(index, index_pydt)
assert_equal(index, index_str)
def test_cutoff_to_index():
dr = date_range("20000101", periods=3000, freq="W")
y = Series(np.arange(3000.0), index=dr)
date_index = y.index
assert cutoff_to_index(1000, date_index, 0) == 1000
assert cutoff_to_index(int(1000), date_index, 0) == 1000
assert cutoff_to_index(np.int16(1000), date_index, 0) == 1000
assert cutoff_to_index(np.int64(1000), date_index, 0) == 1000
assert cutoff_to_index(date_index[1000], date_index, 0) == 1000
assert cutoff_to_index(None, date_index, 1000) == 1000
def test_find_index():
index =
|
date_range("2000-01-01", periods=5000)
|
pandas.date_range
|
import numpy
import matplotlib.pyplot as plt
import pandas
from pandas import DataFrame
import math
import yfinance as yf
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn import model_selection
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import tensorflow as tf
import sys
import os
import time
import random
import requests
from datetime import datetime
import hashlib
import hmac
from urllib.parse import urlparse
import json
#GETS NEW DATA FOR BTC PRICE FROM YAHOO FINANCE
crypto = "BTC-USD"
#crypto = "ETH-USD"
btc = yf.Ticker(crypto)
history = btc.history(period='1mo',interval="90m")
history.to_csv('out.csv')
#tickerdata = pandas.read_csv('BTCUSDT.csv')
#tickerdata = DataFrame(tickerdata)
#print(tickerdata.values[1])
#TESTING
futuretime = 1
def predictdata(tickerdata):
global futuretime
scaler = MinMaxScaler(feature_range = (0,1))
scaler1 = MinMaxScaler(feature_range = (0,1))
#i = 0
dataset = DataFrame()
dataset1 = DataFrame()
for i in range(1,len(tickerdata)):
if i <= int(len(tickerdata) * 0.6):
dataset = dataset.append(tickerdata.iloc[i])
if i > int(len(tickerdata) * 0.6):
dataset1 = dataset1.append(tickerdata.iloc[i])
#file = open("1minprices.txt","r")
#newdata = file.readlines()
#file.close()
#for item in newdata:
dataset = DataFrame(dataset)
dataset = scaler.fit_transform(dataset)
dataset1 = DataFrame(dataset1)
dataset1 = scaler1.fit_transform(dataset1)
#PRINTS REAL DATA FOR COMPARISON
print(dataset1[0])
#plt.plot(dataset)
#plt.plot(dataset1)
#plt.show()
#INITIATES NETWORK
mind = Sequential()
trainx = []
trainy = []
testx = []
testy = []
#AMOUNT OF TIME ALGO SHOULD SEE IN THE PAST
#(IF DATA IS 1 DAY DATA, THEN 1 TIME STEP = 1 DAY)
timesteps = 30
#ADDS ITEMS TO TRAINING DATASET
for i in range(timesteps, len(dataset)):
trainx.append(dataset[i-timesteps:i, :])
trainy.append(dataset[i])
trainx = numpy.array(trainx)
trainy = numpy.array(trainy)
#ADDS ITEMS TO TEST DATASET
for i in range(timesteps, len(dataset1)):
testx.append(dataset1[i-timesteps:i, :])
testy.append(dataset1[i])
testx = numpy.array(testx)
testy = numpy.array(testy)
print(trainx.shape)
#BUILDS AND COMPILES MODEL
mind.add(LSTM(50, return_sequences=True,input_shape=(trainx.shape[1], trainx.shape[2]) ))
mind.add(Dropout(0.6))
mind.add(LSTM(50, return_sequences=True ))
mind.add(Dropout(0.6))
mind.add(LSTM(50, return_sequences=True ))
mind.add(Dropout(0.6))
mind.add(LSTM(50))
mind.add(Dropout(0.6))
mind.add(Dense(1,activation='linear'))
mind.compile(loss='mean_squared_error', optimizer='adam')
os.system('cls')
#SAVE WEIGHTS
#cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,save_weights_only=True,verbose=1)
#TRAINS ALGO
mind.fit(trainx, trainy, epochs=5, batch_size=60)#,callbacks=[cp_callback]
os.system('cls')
#FEED IN TESTX (the most recent `timesteps` windows)
#FOR LOOP THAT FEEDS PREDICTED NEW DATA BACK INTO DATASET
#TO GET THE PREDICTED FORECAST
datasettemp = dataset1
for i in range(futuretime):
trainprediction = mind.predict(testx)
testx = []
datasettemp = numpy.append(datasettemp,trainprediction[int(len(trainprediction) - 1)][0])
datasettemp = datasettemp.reshape(datasettemp.shape[0], 1)
print("Predicted Price: "+str(datasettemp[ int(len(datasettemp)-1) ]))
for i in range(timesteps, len(datasettemp)):
testx.append(datasettemp[i-timesteps:i, :])
testx = numpy.array(testx)
#CONVERTS STANDARDIZED DATA TO NORMAL DATA
trainprediction = scaler1.inverse_transform(trainprediction)
datasettocompare = scaler1.inverse_transform(dataset1)
return trainprediction, datasettocompare
#COMPARES TODAY'S ESTIMATED PRICE AND X DAY'S PREDICTED PRICE TO GET
#PREDICTED PRICE MOVEMENT
#BUY AND SELL API
#30 BTCUSD = 1 BTCUSDT
def generate_signature(secret, http_method, url, expires, data):
# parse relative path
parsedURL = urlparse(url)
path = parsedURL.path
if parsedURL.query:
path = path + '?' + parsedURL.query
if isinstance(data, (bytes, bytearray)):
data = data.decode('utf8')
print("Computing HMAC: %s" % http_method + path + str(expires) + data)
message = http_method + path + str(expires) + data
signature = hmac.new(bytes(secret, 'utf8'), bytes(message, 'utf8'), digestmod=hashlib.sha256).hexdigest()
return signature
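# Illustrative usage (sketch, dummy key/secret): signing a GET request the same way
# the helpers below do:
#     expires = int(round(datetime.now().timestamp()) + 5)
#     sig = generate_signature("my-secret", 'GET', '/orders/count?status=NEW&symbol=BTCUSD', expires, '')
#     hed = {'api-expires': str(expires), 'api-key': "my-key", 'api-signature': sig}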
file = open("api.txt","r")
keys = file.read()
file.close()
apikey = keys.split(":")[0].strip().replace("\n","").replace("\r","")
apisecret = keys.split(":")[1].strip().replace("\n","").replace("\r","")
def cancelorder(theid):
try:
global apikey
global apisecret
for _ in range(3):
timestamp = datetime.now().timestamp()
expires = int(round(timestamp) + 5)
authkey = generate_signature(apisecret,'DELETE',str("https://api.basefex.com/orders/")+str(theid),expires, '')
hed={'api-expires':str(expires),'api-key':apikey,'api-signature':authkey}
requests.delete(str("https://api.basefex.com/orders/")+str(theid), headers=hed)
time.sleep(1)
except:
print("Random error, trying again")
def getopentrades(symbol, status, side):
try:
global apikey
global apisecret
timestamp = datetime.now().timestamp()
expires = int(round(timestamp) + 5)
authkey = generate_signature(apisecret,'GET',str('/orders/count?status='+str(status)+'&side='+str(side)+'&symbol='+str(symbol)),expires, '')
hed={'api-expires':str(expires),'api-key':apikey,'api-signature':authkey}
response = requests.get("https://api.basefex.com/orders/count?status="+str(status)+"&side="+str(side)+"&symbol="+str(symbol), headers=hed)
print(response.text)
orders = str(str(response.text).split('"count":')[1].split("}")[0].strip())
orders = int(orders)
return orders
except:
print("Random error, trying again")
def tradesopen(symbol, side,previousamount):
try:
newamount = getopentrades(symbol,"FILLED",side)
tradeson = None
if newamount > previousamount:
tradeson = True
else:
tradeson = False
return tradeson
except:
print("Random error, trying again")
def tradesnew(symbol, side,previousamount):
try:
newamount = getopentrades(symbol,"NEW",side)
tradeson = None
if newamount < previousamount or int(newamount) == 0:
tradeson = True
else:
tradeson = False
return tradeson
except:
print("Random error, trying again")
def long(symbol,amount, price, numbuy, numsell):
try:
global apikey
global apisecret
args = {'size':str(amount),
'symbol':str(symbol),
'type':'LIMIT',
'side':'BUY',
'price':str(price)}
timestamp = datetime.now().timestamp()
expires = int(round(timestamp) + 5)
authkey = generate_signature(apisecret,'POST','https://api.basefex.com/orders',expires, json.dumps(args))
hed={'api-expires':str(expires),'api-key':apikey,'api-signature':authkey}
response = requests.post("https://api.basefex.com/orders", json=args, headers=hed)
response = response.text
print(response)
time.sleep(3)
numnewbuy = getopentrades(symbol,"NEW","BUY")
numnewsell = getopentrades(symbol,"NEW","SELL")
theid = str(str(response).split('"id":"')[1].split('",')[0].strip().replace("\r","").replace("\n",""))
for _ in range(3):
try:
time.sleep(2)
print("Checking for trade finished")
if tradesopen(symbol,"BUY",numbuy) == True or tradesopen(symbol,"SELL",numsell) == True:
print("long pos: Amount: "+str(amount)+" Symbol: "+str(symbol)+" Price: "+str(price))
return True
except:
print("Error longing, trying again")
time.sleep(3)
for _ in range(10):
try:
print("Error placing order in time. Cancelling")
#Last check before cancelling
if tradesopen(symbol,"BUY",numbuy) == True or tradesopen(symbol,"SELL",numsell) == True:
return True
cancelorder(theid)
for _ in range(3):
time.sleep(2)
print("Checking for trade cancelled")
if tradesnew(symbol,"BUY",numnewbuy) == True and tradesnew(symbol,"SELL",numnewsell) == True:
if tradesopen(symbol,"BUY",numbuy) == True or tradesopen(symbol,"SELL",numsell) == True:
return True
print("Successfully cancelled trade")
return False
except:
print("Error cancelling, trying again")
except:
print("Random error, trying again")
def short(symbol,amount,price, numbuy, numsell):
try:
global apikey
global apisecret
args = {'size':str(amount),
'symbol':str(symbol),
'type':'LIMIT',
'side':'SELL',
'price':str(price)}
timestamp = datetime.now().timestamp()
expires = int(round(timestamp) + 5)
authkey = generate_signature(apisecret,'POST','https://api.basefex.com/orders',expires, json.dumps(args))
hed={'api-expires':str(expires),'api-key':apikey,'api-signature':authkey}
response = requests.post("https://api.basefex.com/orders", json=args, headers=hed)
response = response.text
print(response)
time.sleep(3)
numnewbuy = getopentrades(symbol,"NEW","BUY")
numnewsell = getopentrades(symbol,"NEW","SELL")
theid = str(str(response).split('"id":"')[1].split('",')[0].strip().replace("\r","").replace("\n",""))
for _ in range(3):
try:
time.sleep(2)
print("Checking for trade finished")
if tradesopen(symbol,"BUY",numbuy) == True or tradesopen(symbol,"SELL",numsell) == True:
print("short pos: Amount: "+str(amount)+" Symbol: "+str(symbol)+" Price: "+str(price))
return True
except:
print("Error shorting, trying again")
time.sleep(3)
for _ in range(10):
try:
print("Error placing order in time. Cancelling")
#Last check before cancelling
if tradesopen(symbol,"BUY",numbuy) == True or tradesopen(symbol,"SELL",numsell) == True:
return True
cancelorder(theid)
for _ in range(3):
time.sleep(2)
print("Checking for trade cancelled")
if tradesnew(symbol,"BUY",numnewbuy) == True and tradesnew(symbol,"SELL",numnewsell) == True:
if tradesopen(symbol,"BUY",numbuy) == True or tradesopen(symbol,"SELL",numsell) == True:
return True
print("Successfully cancelled trade")
return False
except:
print("Error cancelling, trying again")
except:
print("Random error, trying again")
def closelong(symbol,amount, price, numbuy, numsell):
try:
global apikey
global apisecret
args = {'size':str(amount),
'symbol':str(symbol),
'type':'MARKET',
'side':'SELL',
'price':str(price)}
timestamp = datetime.now().timestamp()
expires = int(round(timestamp) + 5)
authkey = generate_signature(apisecret,'POST','https://api.basefex.com/orders',expires, json.dumps(args))
hed={'api-expires':str(expires),'api-key':apikey,'api-signature':authkey}
requests.post("https://api.basefex.com/orders", json=args, headers=hed)
return True
except:
print("Random error, trying again")
def closeshort(symbol,amount,price, numbuy, numsell):
try:
global apikey
global apisecret
args = {'size':str(amount),
'symbol':str(symbol),
'type':'MARKET',
'side':'BUY',
'price':str(price)}
timestamp = datetime.now().timestamp()
expires = int(round(timestamp) + 5)
authkey = generate_signature(apisecret,'POST','https://api.basefex.com/orders',expires, json.dumps(args))
hed={'api-expires':str(expires),'api-key':apikey,'api-signature':authkey}
requests.post("https://api.basefex.com/orders", json=args, headers=hed)
return True
except:
print("Random error, trying again")
def getmarketprice(contract):
global apikey
global apisecret
for _ in range(5):
try:
response = requests.get("https://api.basefex.com/instruments/prices")
price = str(str(response.text).split(contract)[1].split(',"price":')[1].split(".")[0])
return int(price)
except Exception as WW:
print("Exception with market price: "+str(WW))
#PREDICTS DATA
def commitpredict(col):
tickerdata = pandas.read_csv('out.csv',usecols=[col,])
tickerdata = DataFrame(tickerdata)
predict,realdata = predictdata(tickerdata)
return predict,realdata
def backpredict(col,daynum):
tickerdata = pandas.read_csv('out.csv',usecols=[col,])
tickerdata = DataFrame(tickerdata)
dataset = DataFrame()
for i in range(1,len(tickerdata)):
if i <= int(len(tickerdata) - daynum - 1):
dataset = dataset.append(tickerdata.iloc[i])
predict,realdata = predictdata(dataset)
return predict,realdata
def makedataset(col,daynum):
tickerdata =
|
pandas.read_csv('out.csv',usecols=[col,])
|
pandas.read_csv
|
# Copyright (c) 2019. yoshida-lab. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from collections.abc import Iterable
from copy import copy
import itertools
from multiprocessing import cpu_count
from typing import DefaultDict, List, Sequence, Union, Set
from joblib import Parallel, delayed
import numpy as np
import pandas as pd
from pymatgen.core.composition import Composition as PMGComp
from sklearn.base import TransformerMixin, BaseEstimator
from xenonpy.datatools.preset import preset
from xenonpy.utils import TimedMetaClass, Switch
class BaseFeaturizer(BaseEstimator, TransformerMixin, metaclass=ABCMeta):
"""
Abstract class to calculate features from :class:`pandas.Series` input data.
Each entry can be in any format, such as a compound formula or a pymatgen crystal structure,
depending on the featurizer implementation.
This class has a similar structure to `matminer BaseFeaturizer`_ but follows a stricter convention.
That means you can embed this featurizer directly into a `matminer BaseFeaturizer`_ class implementation.::
class MatFeature(BaseFeaturizer):
def featurize(self, *x):
return <xenonpy_featurizer>.featurize(*x)
.. _matminer BaseFeaturizer: https://github.com/hackingmaterials/matminer/blob/master/matminer/featurizers/base_smc.py
**Using a BaseFeaturizer Class**
:class:`BaseFeaturizer` implements :class:`sklearn.base.BaseEstimator` and :class:`sklearn.base.TransformerMixin`,
which means you can use it in a scikit-learn way.::
featurizer = SomeFeaturizer()
features = featurizer.fit_transform(X)
You can also employ the featurizer as part of a ScikitLearn Pipeline object.
You would then provide your input data as an array to the Pipeline, which would
output the features as a :class:`pandas.DataFrame`.
:class:`BaseFeaturizer` also provides a way to retrieve proper references for a featurizer.
``__citations__`` returns a list of papers that should be cited.
``__authors__`` returns a list of people who wrote the featurizer.
These can also be accessed through the ``citations`` and ``authors`` properties.
**Implementing a New BaseFeaturizer Class**
These operations must be implemented for each new featurizer:
- ``featurize`` - Takes a single material as input, returns the features of that material.
- ``feature_labels`` - Generates a human-meaningful name for each of the features. **Implement this as property**.
It is also suggested to implement these two **properties**:
- ``citations`` - Returns a list of citations in BibTeX format.
- ``implementors`` - Returns a list of people who contributed writing a paper.
All options of the featurizer must be set by the ``__init__`` function. All
options must be listed as keyword arguments with default values, and the
value must be saved as a class attribute with the same name or as a property
(e.g., argument `n` should be stored in `self.n`).
These requirements are necessary for
compatibility with the ``get_params`` and ``set_params`` methods of ``BaseEstimator``,
which enable easy interoperability with scikit-learn.
:meth:`featurize` must return a list of features in :class:`numpy.ndarray`.
.. note::
None of these operations should change the state of the featurizer. I.e.,
running each method twice should not produce different results, no class
attributes should be changed, and running one operation should not affect the
output of another.
"""
__authors__ = ['anonymous']
__citations__ = ['No citations']
def __init__(
self,
n_jobs: int = -1,
*,
on_errors: str = 'raise',
return_type: str = 'any',
parallel_verbose: int = 0,
):
"""
Parameters
----------
n_jobs
The number of jobs to run in parallel for both fit and predict. Set -1 to use all cpu cores (default).
Inputs ``X`` will be split into blocks and run on the available cpu cores.
When set to 0, input X will be treated as a block and pass to ``Featurizer.featurize`` directly.
on_errors
How to handle exceptions during feature calculations. Can be 'nan', 'keep', 'raise'.
When 'nan', return a column with ``np.nan``.
The length of the column corresponds to the number of feature labels.
When 'keep', return a column with exception objects.
The default is 'raise' which will raise up the exception.
return_type
Specifies the return type.
Can be ``any``, ``array`` or ``df``.
``array`` and ``df`` force the return type to ``np.ndarray`` and ``pd.DataFrame`` respectively.
If ``any``, the return type depends on the input type.
Default is ``any``
parallel_verbose
The verbosity level: if non zero, progress messages are printed. Above 50, the output is sent to stdout.
The frequency of the messages increases with the verbosity level.
If it is more than 10, all iterations are reported. Default ``0``.
"""
self.return_type = return_type
self.n_jobs = n_jobs
self.on_errors = on_errors
self.parallel_verbose = parallel_verbose
self._kwargs = {}
@property
def return_type(self):
return self._return_type
@return_type.setter
def return_type(self, val):
if val not in {'any', 'array', 'df'}:
raise ValueError('`return_type` must be `any`, `array` or `df`')
self._return_type = val
@property
def on_errors(self):
return self._on_errors
@on_errors.setter
def on_errors(self, val):
if val not in {'nan', 'keep', 'raise'}:
raise ValueError('`on_errors` must be `nan`, `keep` or `raise`')
self._on_errors = val
@property
def parallel_verbose(self):
return self._parallel_verbose
@parallel_verbose.setter
def parallel_verbose(self, val):
if not isinstance(val, int):
raise ValueError('`parallel_verbose` must be int')
self._parallel_verbose = val
@property
def n_jobs(self):
return self._n_jobs
@n_jobs.setter
def n_jobs(self, n_jobs):
"""Set the number of threads for this """
if n_jobs < -1:
n_jobs = -1
if n_jobs > cpu_count() or n_jobs == -1:
self._n_jobs = cpu_count()
else:
self._n_jobs = n_jobs
def fit(self, X, y=None, **fit_kwargs):
"""Update the parameters of this featurizer based on available data
Args:
X - [list of tuples], training data
Returns:
self
"""
return self
# TODO: Does fit_transform need to pass params to transform?
def fit_transform(self, X, y=None, **fit_params):
"""Fit to data, then transform it.
Fits transformer to X and y with optional parameters fit_params
and returns a transformed version of X.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Training set.
y : numpy array of shape [n_samples]
Target values.
Returns
-------
X_new : numpy array of shape [n_samples, n_features_new]
Transformed array.
"""
# non-optimized default implementation; override when a better
# method is possible for a given clustering algorithm
if y is None:
# fit method of arity 1 (unsupervised transformation)
return self.fit(X, **fit_params).transform(X, **fit_params)
else:
# fit method of arity 2 (supervised transformation)
return self.fit(X, y, **fit_params).transform(X, **fit_params)
def transform(self, entries: Sequence, *, return_type=None, **kwargs):
"""
Featurize a list of entries.
If `featurize` takes multiple inputs, supply inputs as a list of tuples.
Args
----
entries: list-like
A list of entries to be featurized.
return_type: str
Specifies the return type.
Can be ``any``, ``array`` or ``df``.
``array`` and ``df`` force the return type to ``np.ndarray`` and ``pd.DataFrame`` respectively.
If ``any``, the return type depends on the input type.
This is a temporary change that only has effect in the current transform.
Default is ``None`` for no changes.
Returns
-------
DataFrame
features for each entry.
"""
self._kwargs = kwargs
# Check inputs
if not isinstance(entries, Iterable):
raise TypeError('parameter "entries" must be a iterable object')
# Special case: Empty list
if len(entries) == 0:
return []
for c in Switch(self._n_jobs):
if c(0):
# Run the actual featurization
ret = self.featurize(entries, **kwargs)
break
if c(1):
ret = [self._wrapper(x) for x in entries]
break
if c():
ret = Parallel(n_jobs=self._n_jobs,
verbose=self._parallel_verbose)(delayed(self._wrapper)(x) for x in entries)
try:
labels = self.feature_labels
except NotImplementedError:
labels = None
if return_type is None:
return_type = self.return_type
if return_type == 'any':
if isinstance(entries, (pd.Series, pd.DataFrame)):
tmp =
|
pd.DataFrame(ret, index=entries.index, columns=labels)
|
pandas.DataFrame
|
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
import numpy as np
import pytest
import pandas.compat as compat
from pandas.compat import range
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, NaT, Series, bdate_range, date_range, isna)
from pandas.core import ops
import pandas.core.nanops as nanops
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, assert_series_equal)
from .common import TestData
class TestSeriesLogicalOps(object):
@pytest.mark.parametrize('bool_op', [operator.and_,
operator.or_, operator.xor])
def test_bool_operators_with_nas(self, bool_op):
# boolean &, |, ^ should work with object arrays and propagate NAs
ser = Series(bdate_range('1/1/2000', periods=10), dtype=object)
ser[::2] = np.nan
mask = ser.isna()
filled = ser.fillna(ser[0])
result = bool_op(ser < ser[9], ser > ser[3])
expected = bool_op(filled < filled[9], filled > filled[3])
expected[mask] = False
assert_series_equal(result, expected)
def test_operators_bitwise(self):
# GH#9016: support bitwise op for integer types
index = list('bca')
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
s_tff = Series([True, False, False], index=index)
s_empty = Series([])
# TODO: unused
# s_0101 = Series([0, 1, 0, 1])
s_0123 = Series(range(4), dtype='int64')
s_3333 = Series([3] * 4)
s_4444 = Series([4] * 4)
res = s_tft & s_empty
expected = s_fff
assert_series_equal(res, expected)
res = s_tft | s_empty
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & s_3333
expected = Series(range(4), dtype='int64')
assert_series_equal(res, expected)
res = s_0123 | s_4444
expected = Series(range(4, 8), dtype='int64')
assert_series_equal(res, expected)
s_a0b1c0 = Series([1], list('b'))
res = s_tft & s_a0b1c0
expected = s_tff.reindex(list('abc'))
assert_series_equal(res, expected)
res = s_tft | s_a0b1c0
expected = s_tft.reindex(list('abc'))
assert_series_equal(res, expected)
n0 = 0
res = s_tft & n0
expected = s_fff
assert_series_equal(res, expected)
res = s_0123 & n0
expected = Series([0] * 4)
assert_series_equal(res, expected)
n1 = 1
res = s_tft & n1
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & n1
expected = Series([0, 1, 0, 1])
assert_series_equal(res, expected)
s_1111 = Series([1] * 4, dtype='int8')
res = s_0123 & s_1111
expected = Series([0, 1, 0, 1], dtype='int64')
assert_series_equal(res, expected)
res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
expected = Series([1, 1, 3, 3], dtype='int32')
assert_series_equal(res, expected)
with pytest.raises(TypeError):
s_1111 & 'a'
with pytest.raises(TypeError):
s_1111 & ['a', 'b', 'c', 'd']
with pytest.raises(TypeError):
s_0123 & np.NaN
with pytest.raises(TypeError):
s_0123 & 3.14
with pytest.raises(TypeError):
s_0123 & [0.1, 4, 3.14, 2]
# s_0123 will be all false now because of reindexing like s_tft
if compat.PY3:
# unable to sort incompatible object via .union.
exp = Series([False] * 7, index=['b', 'c', 'a', 0, 1, 2, 3])
with tm.assert_produces_warning(RuntimeWarning):
assert_series_equal(s_tft & s_0123, exp)
else:
exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
assert_series_equal(s_tft & s_0123, exp)
# s_tft will be all false now because of reindexing like s_0123
if compat.PY3:
# unable to sort incompatible object via .union.
exp = Series([False] * 7, index=[0, 1, 2, 3, 'b', 'c', 'a'])
with tm.assert_produces_warning(RuntimeWarning):
assert_series_equal(s_0123 & s_tft, exp)
else:
exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
assert_series_equal(s_0123 & s_tft, exp)
assert_series_equal(s_0123 & False, Series([False] * 4))
assert_series_equal(s_0123 ^ False, Series([False, True, True, True]))
assert_series_equal(s_0123 & [False], Series([False] * 4))
assert_series_equal(s_0123 & (False), Series([False] * 4))
assert_series_equal(s_0123 & Series([False, np.NaN, False, False]),
Series([False] * 4))
s_ftft = Series([False, True, False, True])
assert_series_equal(s_0123 & Series([0.1, 4, -3.14, 2]), s_ftft)
s_abNd = Series(['a', 'b', np.NaN, 'd'])
res = s_0123 & s_abNd
expected = s_ftft
assert_series_equal(res, expected)
def test_scalar_na_logical_ops_corners(self):
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
with pytest.raises(TypeError):
s & datetime(2005, 1, 1)
s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])
s[::2] = np.nan
expected = Series(True, index=s.index)
expected[::2] = False
result = s & list(s)
assert_series_equal(result, expected)
d = DataFrame({'A': s})
# TODO: Fix this exception - needs to be fixed! (see GH5035)
# (previously this was a TypeError because series returned
# NotImplemented
# this is an alignment issue; these are equivalent
# https://github.com/pandas-dev/pandas/issues/5284
with pytest.raises(TypeError):
d.__and__(s, axis='columns')
with pytest.raises(TypeError):
s & d
# this is wrong as its not a boolean result
# result = d.__and__(s,axis='index')
@pytest.mark.parametrize('op', [
operator.and_,
operator.or_,
operator.xor,
])
def test_logical_ops_with_index(self, op):
# GH#22092, GH#19792
ser = Series([True, True, False, False])
idx1 = Index([True, False, True, False])
idx2 = Index([1, 0, 1, 0])
expected = Series([op(ser[n], idx1[n]) for n in range(len(ser))])
result = op(ser, idx1)
assert_series_equal(result, expected)
expected = Series([op(ser[n], idx2[n]) for n in range(len(ser))],
dtype=bool)
result = op(ser, idx2)
assert_series_equal(result, expected)
@pytest.mark.parametrize("op, expected", [
(ops.rand_, pd.Index([False, True])),
(ops.ror_, pd.Index([False, True])),
(ops.rxor, pd.Index([])),
])
def test_reverse_ops_with_index(self, op, expected):
# https://github.com/pandas-dev/pandas/pull/23628
# multi-set Index ops are buggy, so let's avoid duplicates...
ser = Series([True, False])
idx = Index([False, True])
result = op(ser, idx)
tm.assert_index_equal(result, expected)
def test_logical_ops_label_based(self):
# GH#4947
# logical ops should be label based
a = Series([True, False, True], list('bca'))
b = Series([False, True, False], list('abc'))
expected = Series([False, True, False], list('abc'))
result = a & b
assert_series_equal(result, expected)
expected = Series([True, True, False], list('abc'))
result = a | b
assert_series_equal(result, expected)
expected = Series([True, False, False], list('abc'))
result = a ^ b
assert_series_equal(result, expected)
# rhs is bigger
a = Series([True, False, True], list('bca'))
b = Series([False, True, False, True], list('abcd'))
expected = Series([False, True, False, False], list('abcd'))
result = a & b
assert_series_equal(result, expected)
expected = Series([True, True, False, False], list('abcd'))
result = a | b
assert_series_equal(result, expected)
# filling
# vs empty
result = a & Series([])
expected = Series([False, False, False], list('bca'))
assert_series_equal(result, expected)
result = a | Series([])
expected = Series([True, False, True], list('bca'))
assert_series_equal(result, expected)
# vs non-matching
result = a & Series([1], ['z'])
expected = Series([False, False, False, False], list('abcz'))
assert_series_equal(result, expected)
result = a | Series([1], ['z'])
expected = Series([True, True, False, False], list('abcz'))
assert_series_equal(result, expected)
# identity
# we would like s[s|e] == s to hold for any e, whether empty or not
for e in [Series([]), Series([1], ['z']),
Series(np.nan, b.index), Series(np.nan, a.index)]:
result = a[a | e]
assert_series_equal(result, a[a])
for e in [Series(['z'])]:
if compat.PY3:
with tm.assert_produces_warning(RuntimeWarning):
result = a[a | e]
else:
result = a[a | e]
assert_series_equal(result, a[a])
# vs scalars
index = list('bca')
t = Series([True, False, True])
for v in [True, 1, 2]:
result = Series([True, False, True], index=index) | v
expected = Series([True, True, True], index=index)
assert_series_equal(result, expected)
for v in [np.nan, 'foo']:
with pytest.raises(TypeError):
t | v
for v in [False, 0]:
result = Series([True, False, True], index=index) | v
expected = Series([True, False, True], index=index)
assert_series_equal(result, expected)
for v in [True, 1]:
result = Series([True, False, True], index=index) & v
expected = Series([True, False, True], index=index)
assert_series_equal(result, expected)
for v in [False, 0]:
result = Series([True, False, True], index=index) & v
expected = Series([False, False, False], index=index)
assert_series_equal(result, expected)
for v in [np.nan]:
with pytest.raises(TypeError):
t & v
def test_logical_ops_df_compat(self):
# GH#1134
s1 = pd.Series([True, False, True], index=list('ABC'), name='x')
s2 = pd.Series([True, True, False], index=list('ABD'), name='x')
exp = pd.Series([True, False, False, False],
index=list('ABCD'), name='x')
assert_series_equal(s1 & s2, exp)
assert_series_equal(s2 & s1, exp)
# True | np.nan => True
exp = pd.Series([True, True, True, False],
index=list('ABCD'), name='x')
assert_series_equal(s1 | s2, exp)
# np.nan | True => np.nan, filled with False
exp = pd.Series([True, True, False, False],
index=list('ABCD'), name='x')
assert_series_equal(s2 | s1, exp)
# DataFrame doesn't fill nan with False
exp = pd.DataFrame({'x': [True, False, np.nan, np.nan]},
index=list('ABCD'))
assert_frame_equal(s1.to_frame() & s2.to_frame(), exp)
assert_frame_equal(s2.to_frame() & s1.to_frame(), exp)
exp = pd.DataFrame({'x': [True, True, np.nan, np.nan]},
index=list('ABCD'))
assert_frame_equal(s1.to_frame() | s2.to_frame(), exp)
assert_frame_equal(s2.to_frame() | s1.to_frame(), exp)
# different length
s3 = pd.Series([True, False, True], index=list('ABC'), name='x')
s4 = pd.Series([True, True, True, True], index=list('ABCD'), name='x')
exp = pd.Series([True, False, True, False],
index=list('ABCD'), name='x')
assert_series_equal(s3 & s4, exp)
assert_series_equal(s4 & s3, exp)
# np.nan | True => np.nan, filled with False
exp = pd.Series([True, True, True, False],
index=list('ABCD'), name='x')
assert_series_equal(s3 | s4, exp)
# True | np.nan => True
exp = pd.Series([True, True, True, True],
index=list('ABCD'), name='x')
assert_series_equal(s4 | s3, exp)
exp = pd.DataFrame({'x': [True, False, True, np.nan]},
index=list('ABCD'))
assert_frame_equal(s3.to_frame() & s4.to_frame(), exp)
assert_frame_equal(s4.to_frame() & s3.to_frame(), exp)
exp = pd.DataFrame({'x': [True, True, True, np.nan]},
index=list('ABCD'))
assert_frame_equal(s3.to_frame() | s4.to_frame(), exp)
assert_frame_equal(s4.to_frame() | s3.to_frame(), exp)
class TestSeriesComparisons(object):
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
assert_series_equal(s == s2, exp)
assert_series_equal(s2 == s, exp)
def test_categorical_comparisons(self):
# GH 8938
# allow equality comparisons
a = Series(list('abc'), dtype="category")
b = Series(list('abc'), dtype="object")
c = Series(['a', 'b', 'cc'], dtype="object")
d = Series(list('acb'), dtype="object")
e = Categorical(list('abc'))
f = Categorical(list('acb'))
# vs scalar
assert not (a == 'a').all()
assert ((a != 'a') == ~(a == 'a')).all()
assert not ('a' == a).all()
assert (a == 'a')[0]
assert ('a' == a)[0]
assert not ('a' != a)[0]
# vs list-like
assert (a == a).all()
assert not (a != a).all()
assert (a == list(a)).all()
assert (a == b).all()
assert (b == a).all()
assert ((~(a == b)) == (a != b)).all()
assert ((~(b == a)) == (b != a)).all()
assert not (a == c).all()
assert not (c == a).all()
assert not (a == d).all()
assert not (d == a).all()
# vs a cat-like
assert (a == e).all()
assert (e == a).all()
assert not (a == f).all()
assert not (f == a).all()
assert ((~(a == e) == (a != e)).all())
assert ((~(e == a) == (e != a)).all())
assert ((~(a == f) == (a != f)).all())
assert ((~(f == a) == (f != a)).all())
# non-equality is not comparable
with pytest.raises(TypeError):
a < b
with pytest.raises(TypeError):
b < a
with pytest.raises(TypeError):
a > b
with pytest.raises(TypeError):
b > a
def test_comparison_tuples(self):
# GH11339
# comparisons vs tuple
s = Series([(1, 1), (1, 2)])
result = s == (1, 2)
expected = Series([False, True])
assert_series_equal(result, expected)
result = s != (1, 2)
expected = Series([True, False])
assert_series_equal(result, expected)
result = s == (0, 0)
expected = Series([False, False])
assert_series_equal(result, expected)
result = s != (0, 0)
expected = Series([True, True])
assert_series_equal(result, expected)
s = Series([(1, 1), (1, 1)])
result = s == (1, 1)
expected = Series([True, True])
assert_series_equal(result, expected)
result = s != (1, 1)
expected = Series([False, False])
assert_series_equal(result, expected)
s = Series([frozenset([1]), frozenset([1, 2])])
result = s == frozenset([1])
expected = Series([True, False])
assert_series_equal(result, expected)
def test_comparison_operators_with_nas(self):
ser = Series(bdate_range('1/1/2000', periods=10), dtype=object)
ser[::2] = np.nan
# test that comparisons work
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
val = ser[5]
f = getattr(operator, op)
result = f(ser, val)
expected = f(ser.dropna(), val).reindex(ser.index)
if op == 'ne':
expected = expected.fillna(True).astype(bool)
else:
expected = expected.fillna(False).astype(bool)
assert_series_equal(result, expected)
            # FIXME: the reversed comparison (scalar op Series) does not
            # align the same way yet, so it is left unasserted for now
            # result = f(val, ser)
            # expected = f(val, ser.dropna()).reindex(ser.index)
            # assert_series_equal(result, expected)
def test_unequal_categorical_comparison_raises_type_error(self):
# unequal comparison should raise for unordered cats
cat = Series(Categorical(list("abc")))
with pytest.raises(TypeError):
cat > "b"
cat = Series(Categorical(list("abc"), ordered=False))
with pytest.raises(TypeError):
cat > "b"
# https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = Series(Categorical(list("abc"), ordered=True))
with pytest.raises(TypeError):
cat < "d"
with pytest.raises(TypeError):
cat > "d"
with pytest.raises(TypeError):
"d" < cat
with pytest.raises(TypeError):
"d" > cat
tm.assert_series_equal(cat == "d", Series([False, False, False]))
tm.assert_series_equal(cat != "d", Series([True, True, True]))
@pytest.mark.parametrize('pair', [
([pd.Timestamp('2011-01-01'), NaT, pd.Timestamp('2011-01-03')],
[NaT, NaT, pd.Timestamp('2011-01-03')]),
([pd.Timedelta('1 days'), NaT, pd.Timedelta('3 days')],
[NaT, NaT, pd.Timedelta('3 days')]),
([pd.Period('2011-01', freq='M'), NaT,
pd.Period('2011-03', freq='M')],
[NaT, NaT, pd.Period('2011-03', freq='M')]),
])
@pytest.mark.parametrize('reverse', [True, False])
@pytest.mark.parametrize('box', [Series, Index])
@pytest.mark.parametrize('dtype', [None, object])
def test_nat_comparisons(self, dtype, box, reverse, pair):
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
# Series, Index
expected = Series([False, False, True])
assert_series_equal(left == right, expected)
expected = Series([True, True, False])
assert_series_equal(left != right, expected)
expected = Series([False, False, False])
assert_series_equal(left < right, expected)
expected = Series([False, False, False])
assert_series_equal(left > right, expected)
expected = Series([False, False, True])
assert_series_equal(left >= right, expected)
expected = Series([False, False, True])
assert_series_equal(left <= right, expected)
def test_ne(self):
ts = Series([3, 4, 5, 6, 7], [3, 4, 5, 6, 7], dtype=float)
expected = [True, True, False, True, True]
assert tm.equalContents(ts.index != 5, expected)
assert tm.equalContents(~(ts.index == 5), expected)
def test_comp_ops_df_compat(self):
# GH 1134
s1 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s2 = pd.Series([2, 2, 2], index=list('ABD'), name='x')
s3 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s4 = pd.Series([2, 2, 2, 2], index=list('ABCD'), name='x')
for left, right in [(s1, s2), (s2, s1), (s3, s4), (s4, s3)]:
msg = "Can only compare identically-labeled Series objects"
with pytest.raises(ValueError, match=msg):
left == right
with pytest.raises(ValueError, match=msg):
left != right
with pytest.raises(ValueError, match=msg):
left < right
msg = "Can only compare identically-labeled DataFrame objects"
with pytest.raises(ValueError, match=msg):
left.to_frame() == right.to_frame()
with pytest.raises(ValueError, match=msg):
left.to_frame() != right.to_frame()
with pytest.raises(ValueError, match=msg):
left.to_frame() < right.to_frame()
class TestSeriesFlexComparisonOps(object):
def test_comparison_flex_alignment(self):
left = Series([1, 3, 2], index=list('abc'))
right = Series([2, 2, 2], index=list('bcd'))
exp = pd.Series([False, False, True, False], index=list('abcd'))
assert_series_equal(left.eq(right), exp)
exp = pd.Series([True, True, False, True], index=list('abcd'))
assert_series_equal(left.ne(right), exp)
exp = pd.Series([False, False, True, False], index=list('abcd'))
assert_series_equal(left.le(right), exp)
exp = pd.Series([False, False, False, False], index=list('abcd'))
assert_series_equal(left.lt(right), exp)
exp = pd.Series([False, True, True, False], index=list('abcd'))
assert_series_equal(left.ge(right), exp)
exp = pd.Series([False, True, False, False], index=list('abcd'))
assert_series_equal(left.gt(right), exp)
def test_comparison_flex_alignment_fill(self):
left = Series([1, 3, 2], index=list('abc'))
right = Series([2, 2, 2], index=list('bcd'))
exp = pd.Series([False, False, True, True], index=list('abcd'))
assert_series_equal(left.eq(right, fill_value=2), exp)
exp = pd.Series([True, True, False, False], index=list('abcd'))
assert_series_equal(left.ne(right, fill_value=2), exp)
exp = pd.Series([False, False, True, True], index=list('abcd'))
assert_series_equal(left.le(right, fill_value=0), exp)
exp = pd.Series([False, False, False, True], index=list('abcd'))
assert_series_equal(left.lt(right, fill_value=0), exp)
exp = pd.Series([True, True, True, False], index=list('abcd'))
assert_series_equal(left.ge(right, fill_value=0), exp)
exp = pd.Series([True, True, False, False], index=list('abcd'))
assert_series_equal(left.gt(right, fill_value=0), exp)
class TestSeriesOperators(TestData):
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
def test_ops_datetimelike_align(self):
# GH 7500
# datetimelike ops need to align
dt = Series(date_range('2012-1-1', periods=3, freq='D'))
dt.iloc[2] = np.nan
dt2 = dt[::-1]
expected = Series([timedelta(0), timedelta(0), pd.NaT])
# name is reset
result = dt2 - dt
assert_series_equal(result, expected)
expected = Series(expected, name=0)
result = (dt2.to_frame() - dt.to_frame())[0]
assert_series_equal(result, expected)
def test_operators_corner(self):
series = self.ts
empty = Series([], index=Index([]))
result = series + empty
assert np.isnan(result).all()
result = empty + Series([], index=Index([]))
assert len(result) == 0
# TODO: this returned NotImplemented earlier, what to do?
# deltas = Series([timedelta(1)] * 5, index=np.arange(5))
# sub_deltas = deltas[::2]
# deltas5 = deltas * 5
# deltas = deltas + sub_deltas
# float + int
int_ts = self.ts.astype(int)[:-5]
added = self.ts + int_ts
expected = Series(self.ts.values[:-5] + int_ts.values,
index=self.ts.index[:-5], name='ts')
tm.assert_series_equal(added[:-5], expected)
pairings = []
for op in ['add', 'sub', 'mul', 'pow', 'truediv', 'floordiv']:
fv = 0
lop = getattr(Series, op)
lequiv = getattr(operator, op)
rop = getattr(Series, 'r' + op)
# bind op at definition time...
requiv = lambda x, y, op=op: getattr(operator, op)(y, x)
pairings.append((lop, lequiv, fv))
pairings.append((rop, requiv, fv))
if compat.PY3:
pairings.append((Series.div, operator.truediv, 1))
pairings.append((Series.rdiv, lambda x, y: operator.truediv(y, x), 1))
else:
pairings.append((Series.div, operator.div, 1))
pairings.append((Series.rdiv, lambda x, y: operator.div(y, x), 1))
@pytest.mark.parametrize('op, equiv_op, fv', pairings)
def test_operators_combine(self, op, equiv_op, fv):
def _check_fill(meth, op, a, b, fill_value=0):
exp_index = a.index.union(b.index)
a = a.reindex(exp_index)
b = b.reindex(exp_index)
amask = isna(a)
bmask = isna(b)
exp_values = []
for i in range(len(exp_index)):
with np.errstate(all='ignore'):
if amask[i]:
if bmask[i]:
exp_values.append(np.nan)
continue
exp_values.append(op(fill_value, b[i]))
elif bmask[i]:
if amask[i]:
exp_values.append(np.nan)
continue
exp_values.append(op(a[i], fill_value))
else:
exp_values.append(op(a[i], b[i]))
result = meth(a, b, fill_value=fill_value)
expected = Series(exp_values, exp_index)
assert_series_equal(result, expected)
a = Series([np.nan, 1., 2., 3., np.nan], index=np.arange(5))
b = Series([np.nan, 1, np.nan, 3, np.nan, 4.], index=np.arange(6))
result = op(a, b)
exp = equiv_op(a, b)
assert_series_equal(result, exp)
_check_fill(op, equiv_op, a, b, fill_value=fv)
# should accept axis=0 or axis='rows'
op(a, b, axis=0)
def test_operators_na_handling(self):
from decimal import Decimal
from datetime import date
s = Series([Decimal('1.3'), Decimal('2.3')],
index=[date(2012, 1, 1), date(2012, 1, 2)])
result = s + s.shift(1)
result2 = s.shift(1) + s
assert isna(result[0])
assert isna(result2[0])
def test_op_duplicate_index(self):
# GH14227
s1 = Series([1, 2], index=[1, 1])
s2 = Series([10, 10], index=[1, 2])
result = s1 + s2
expected = pd.Series([11, 12, np.nan], index=[1, 1, 2])
assert_series_equal(result, expected)
@pytest.mark.parametrize(
"test_input,error_type",
[
(pd.Series([]), ValueError),
# For strings, or any Series with dtype 'O'
(
|
pd.Series(['foo', 'bar', 'baz'])
|
pandas.Series
|
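The prompt above is cut off inside a parametrize block that pairs a Series input with the exception type a later call is expected to raise. As a rough, self-contained sketch of that pytest pattern (the bool() check is only a stand-in operation, not the one from the truncated test):
import pandas as pd
import pytest

@pytest.mark.parametrize(
    "test_input,error_type",
    [
        # illustrative cases only; bool() on any Series raises ValueError
        (pd.Series([], dtype=float), ValueError),
        (pd.Series(['foo', 'bar', 'baz']), ValueError),
    ],
)
def test_series_truth_value_is_ambiguous(test_input, error_type):
    with pytest.raises(error_type):
        bool(test_input)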
import pandas as pd
import numpy as np
import gspread_dataframe as gd
import gspread as gs
import setup
import queries
# csv export of historical sales
sales_master = pd.read_csv('Inventory Manager/historical_sales.csv')
# dropping na values, filtering out samples
sales_master = sales_master.dropna()
sales_master = sales_master[sales_master['Sample'] == 'N']
# adding in datetime fields for segmentation
sales_master['Delivery Date'] = pd.to_datetime(sales_master['Delivery Date'])
sales_master['Month'] = sales_master['Delivery Date'].dt.month
sales_master['Year'] = sales_master['Delivery Date'].dt.year
sales_master['Week'] = sales_master['Delivery Date'].dt.isocalendar().week
# limiting data to only directly purchased and managed inventory
sales_master_no_dsw = sales_master[sales_master['Warehouse'] != 'DSW']
# global monthly sales
ind = ['Item Description: Product Family', 'Item Description: Size']
cols = ['Year', 'Month']
monthly_sales_global = pd.pivot_table(sales_master_no_dsw, values='Cases Sold', index=ind, columns=cols, aggfunc=np.sum).reset_index()
monthly_sales_global = monthly_sales_global.fillna(0)
# monthly sales by warehouse
warehouses = ['SBC1', 'CAW1', 'ILL1', 'VAW1']
ind = ['Item Description: Product Family', 'Item Description: Size', 'Warehouse']
cols = ['Year', 'Month']
monthly_sales_wh = pd.pivot_table(sales_master_no_dsw, values='Cases Sold', index=ind, columns=cols, aggfunc=np.sum).reset_index()
monthly_sales_sbc1 = monthly_sales_wh[monthly_sales_wh['Warehouse'] == warehouses[0]].fillna(0)
monthly_sales_caw1 = monthly_sales_wh[monthly_sales_wh['Warehouse'] == warehouses[1]].fillna(0)
monthly_sales_ill1 = monthly_sales_wh[monthly_sales_wh['Warehouse'] == warehouses[2]].fillna(0)
monthly_sales_vaw1 = monthly_sales_wh[monthly_sales_wh['Warehouse'] == warehouses[3]].fillna(0)
# import dfs from queries sheet
tx_global = queries.tx_global
tx_wh_all = queries.tx_wh_all
base_table = queries.base_table
# create a list of t-x dataframes, one per warehouse, selectable by position in the warehouse list
tx_whs = [tx_wh_all[tx_wh_all.index.get_level_values(1) == wh] for wh in warehouses]
# create the base templates specific to each depletion report style
global_base = base_table[['Product Family', 'Description', 'Current Vintage', 'Country', 'Size',
'Bottles/Case', 'Item Cost NJ', 'Total Cases OH', 'NJ Cases OH', 'CA Cases OH',
'Total Cases Committed', 'Total Inv Value', 'NJ Cases on Order', 'Cases on Next Drop', 'Next Drop Date']]
sbc1_base = base_table[['Product Family', 'Description', 'Current Vintage', 'Country', 'Size',
'Bottles/Case', 'Item Cost NJ', 'NJ Cases OH', 'NJ Cases Committed',
'NJ Cases Available', 'NJ Inv Value', 'NJ Cases on Order', 'Cases on Next Drop',
'Next Drop Date']]
caw1_base = base_table[['Product Family', 'Description', 'Current Vintage', 'Country', 'Size',
'Bottles/Case', 'Item Cost CA', 'CA Cases OH', 'CA Cases Committed',
'CA Cases Available', 'CA Inv Value', 'CA Cases on Order']]
ill1_base = base_table[['Product Family', 'Description', 'Current Vintage', 'Country', 'Size',
'Bottles/Case', 'Item Cost IL', 'IL Cases OH', 'IL Cases Comitted',
'IL Cases Available', 'IL Inv Value']]
vaw1_base = base_table[['Product Family', 'Description', 'Current Vintage', 'Country', 'Size',
'Bottles/Case', 'Item Cost VA', 'VA Cases OH', 'VA Cases Committed',
'VA Cases Available', 'VA Inv Value']]
# joining t-x sales data to respective base template
global_report = (global_base.join(tx_global)
.drop('Item_Name__c', axis=1)
.sort_values('Description'))
global_report.iloc[:, -5:] = global_report.iloc[:, -5:].fillna(0)
sbc1 = (sbc1_base.join(tx_whs[0].reset_index(level=1))
.drop(['Warehouse__c', 'Item_Name__c'], axis=1)
.sort_values('Description'))
sbc1 = sbc1[(sbc1['NJ Cases OH'] > 0) | (sbc1['NJ Cases on Order'] > 0)]
sbc1.iloc[:, -5:] = sbc1.iloc[:, -5:].fillna(0)
caw1 = (caw1_base.join(tx_whs[1].reset_index(level=1))
.drop(['Warehouse__c', 'Item_Name__c'], axis=1)
.sort_values('Description'))
caw1 = caw1[(caw1['CA Cases OH'] > 0) | (caw1['CA Cases on Order'] > 0)]
caw1.iloc[:, -5:] = caw1.iloc[:, -5:].fillna(0)
ill1 = (ill1_base.join(tx_whs[2].reset_index(level=1))
.drop(['Warehouse__c', 'Item_Name__c'], axis=1)
.sort_values('Description'))
ill1 = ill1[ill1['IL Cases OH'] > 0]
ill1.iloc[:, -5:] = ill1.iloc[:, -5:].fillna(0)
vaw1 = (vaw1_base.join(tx_whs[3].reset_index(level=1))
.drop(['Warehouse__c', 'Item_Name__c'], axis=1)
.sort_values('Description'))
vaw1 = vaw1[vaw1['VA Cases OH'] > 0]
vaw1.iloc[:, -5:] = vaw1.iloc[:, -5:].fillna(0)
inv_reports = [global_report, sbc1, caw1, ill1, vaw1]
global_report['Months Inv OH'] = ((global_report['Total Cases OH']
- global_report['Total Cases Committed'])
/ global_report['Cases Sold: T-30'])
sbc1['Months Inv OH'] = (sbc1['NJ Cases Available'] / sbc1['Cases Sold: T-30']).round(1)
caw1['Months Inv OH'] = (caw1['CA Cases Available'] / caw1['Cases Sold: T-30']).round(1)
ill1['Months Inv OH'] = (ill1['IL Cases Available'] / ill1['Cases Sold: T-30']).round(1)
vaw1['Months Inv OH'] = (vaw1['VA Cases Available'] / vaw1['Cases Sold: T-30']).round(1)
for df in inv_reports:
df['Months Inv OH'] = df['Months Inv OH'].replace([np.inf, -np.inf], np.nan).round(1)
df.reset_index(inplace=True)
# joining all historical monthly sales data to reports
## global master
global_joined = global_report.merge(monthly_sales_global, how='left', left_on=['Product Family', 'Size'],
right_on=['Item Description: Product Family', 'Item Description: Size'])
global_master = global_joined.drop([('Item Description: Product Family', ''), ('Item Description: Size', '')], axis=1)
## sbc1 master
sbc1_joined = sbc1.merge(monthly_sales_sbc1, how='left', left_on=['Product Family', 'Size'],
right_on=['Item Description: Product Family', 'Item Description: Size'])
sbc1_master = sbc1_joined.drop([('Item Description: Product Family', ''), ('Item Description: Size', ''),
('Warehouse', '')], axis=1)
## caw1 master
caw1_joined = caw1.merge(monthly_sales_caw1, how='left', left_on=['Product Family', 'Size'],
right_on=['Item Description: Product Family', 'Item Description: Size'])
caw1_master = caw1_joined.drop([('Item Description: Product Family', ''), ('Item Description: Size', ''),
('Warehouse', '')], axis=1)
## ill1 master
ill1_joined = ill1.merge(monthly_sales_ill1, how='left', left_on=['Product Family', 'Size'],
right_on=['Item Description: Product Family', 'Item Description: Size'])
ill1_master = ill1_joined.drop([('Item Description: Product Family', ''), ('Item Description: Size', ''),
('Warehouse', '')], axis=1)
## vaw1 master
vaw1_joined = vaw1.merge(monthly_sales_vaw1, how='left', left_on=['Product Family', 'Size'],
right_on=['Item Description: Product Family', 'Item Description: Size'])
vaw1_master = vaw1_joined.drop([('Item Description: Product Family', ''), ('Item Description: Size', ''),
('Warehouse', '')], axis=1)
# list of master inventory reports to perform final modifications on
master_dfs = [global_master, sbc1_master, caw1_master, ill1_master, vaw1_master]
# helper functions used to finish the final reports
## function to offset the current date by X months (negative to go back); returns a (year, month) tuple
def month_sbtrkt(months_from_today):
year = (pd.Timestamp.today() + pd.tseries.offsets.DateOffset(months=months_from_today)).year
month = (pd.Timestamp.today() + pd.tseries.offsets.DateOffset(months=months_from_today)).month
return (year, month)
## function to print month name and year
def month_namer(months_from_today):
year = (pd.Timestamp.today() + pd.tseries.offsets.DateOffset(months=months_from_today)).year
month = (
|
pd.Timestamp.today()
|
pandas.Timestamp.today
|
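The row above is truncated inside month_namer; as a hedged sketch of the same pd.Timestamp.today() plus DateOffset arithmetic (the function names and sample outputs below are illustrative, not the author's code):
import pandas as pd

def shift_month(months_from_today):
    # offset today's date by a (possibly negative) number of months
    shifted = pd.Timestamp.today() + pd.tseries.offsets.DateOffset(months=months_from_today)
    return (shifted.year, shifted.month)

def label_month(months_from_today):
    # human-readable column label such as 'February 2024'
    shifted = pd.Timestamp.today() + pd.tseries.offsets.DateOffset(months=months_from_today)
    return f"{shifted.month_name()} {shifted.year}"

print(shift_month(-1), label_month(-1))  # e.g. (2024, 2) February 2024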
"""
File for additional tools developed by QCI team
"""
import pandas as pd
import itertools as it
import numpy as np
import h5py
from IPython.display import display  # display() is used below to show result tables
from scipy import constants as sc
from scipy import integrate as si
from em_simulations.results import network_data as nd
from pyEPR import ansys
def get_cross_kerr_table(epr, swp_variable, numeric):
"""
Function to re-organize the cross-Kerr results once the quantum analysis is finished
Parameters:
-------------------
epr : Object of QuantumAnalysis class
swp_variable : the variable swept in data according to which things will be sorted
numeric : Whether numerical diagonalization of the data was performed
Use notes:
-------------------
* It is assumed the epr.analyze_all_variations has already been called and analysis is finished.
"""
if numeric:
f1 = epr.results.get_frequencies_ND(vs=swp_variable)
chis = epr.get_chis(numeric=numeric,swp_variable=swp_variable)
else:
f1 = epr.results.get_frequencies_O1(vs=swp_variable)
chis = epr.get_chis(numeric=numeric,swp_variable=swp_variable)
#print(f1)
#print(chis)
swp_indices = chis.index.levels[0]
mode_indices = chis.index.levels[1]
#print(mode_indices)
mode_combinations = list(zip(mode_indices,mode_indices))
diff_mode_combinations = list(it.combinations_with_replacement(mode_indices,2))
mode_combinations.extend(diff_mode_combinations)
organized_data = pd.DataFrame({swp_variable:swp_indices})
organized_data.set_index(swp_variable,inplace=True)
for mode_indx in mode_indices:
organized_data['f_'+str(mode_indx)+'(GHz)']=np.round(f1.loc[mode_indx].values/1000,3)
for combo_indx in mode_combinations:
temp_chi_list = [chis.loc[swp_indx].loc[combo_indx] for swp_indx in swp_indices]
organized_data['chi_'+str(combo_indx[0])+str(combo_indx[1])+' (MHz)']=np.round(temp_chi_list,4)
return organized_data
def analyze_sweep_no_junctions(epr_hfss):
modes = range(epr_hfss.n_modes)
variations = epr_hfss.variations
all_data = []
for variation in variations:
        print(f'\n Analyzing variation: {variation}')
freqs_bare_GHz, Qs_bare = epr_hfss.get_freqs_bare_pd(variation, frame=False)
SOL = [] #pd.DataFrame()
for mode in modes:
print('\n'f' \033[1mMode {mode} at {"%.2f" % freqs_bare_GHz[mode]} GHz [{mode+1}/{epr_hfss.n_modes}]\033[0m')
epr_hfss.set_mode(mode,FieldType='EigenStoredEnergy')
print(' Calculating ℰ_magnetic', end=',')
epr_hfss.U_H = epr_hfss.calc_energy_magnetic(variation)
print('ℰ_electric')
epr_hfss.U_E = epr_hfss.calc_energy_electric(variation)
sol = pd.Series({'Frequency':freqs_bare_GHz[mode],'U_H': epr_hfss.U_H, 'U_E': epr_hfss.U_E})
epr_hfss.omega = 2*np.pi*freqs_bare_GHz[mode]
for seam in epr_hfss.pinfo.dissipative.seams:
sol=sol.append(epr_hfss.get_Qseam(seam, mode, variation))
SOL.append(sol)
SOL = pd.DataFrame(SOL)
all_data.append(SOL)
display(SOL)
all_data = pd.concat(all_data,keys=variations)
return all_data
def analyze_sweep_cavity_loss(epr_hfss):
modes = range(epr_hfss.n_modes)
variations = epr_hfss.variations
all_data = []
for variation in variations:
        print(f'\n Analyzing variation: {variation}')
freqs_bare_GHz, Qs_bare = epr_hfss.get_freqs_bare_pd(variation, frame=False)
SOL = []
for mode in modes:
print('\n'f'Mode {mode} at {"%.2f" % freqs_bare_GHz[mode]} GHz [{mode+1}/{epr_hfss.n_modes}]')
epr_hfss.set_mode(mode,FieldType='EigenStoredEnergy')
print('Calculating ℰ_magnetic', end=',')
epr_hfss.U_H = epr_hfss.calc_energy_magnetic(variation)
print('ℰ_electric')
epr_hfss.U_E = epr_hfss.calc_energy_electric(variation)
sol = pd.Series({'Frequency':freqs_bare_GHz[mode],'U_H': epr_hfss.U_H, 'U_E': epr_hfss.U_E})
epr_hfss.omega = 2*np.pi*freqs_bare_GHz[mode]
for seam in epr_hfss.pinfo.dissipative.seams:
sol=sol.append(epr_hfss.get_Qseam(seam, mode, variation))
for MA_surface in epr_hfss.pinfo.dissipative.dielectric_MA_surfaces:
sol=sol.append(epr_hfss.get_Qdielectric_MA_surface(MA_surface, mode, variation))
for resistive_surface in epr_hfss.pinfo.dissipative.resistive_surfaces:
sol=sol.append(epr_hfss.get_Qcond_surface(resistive_surface, mode, variation))
SOL.append(sol)
SOL =
|
pd.DataFrame(SOL)
|
pandas.DataFrame
|
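The completion above stacks a list of per-mode Series into a table; a small sketch of that idiom with made-up values (each Series becomes one row, its index labels become the columns):
import pandas as pd

rows = [
    pd.Series({'Frequency': 4.82, 'U_H': 0.49, 'U_E': 0.51}),
    pd.Series({'Frequency': 6.10, 'U_H': 0.51, 'U_E': 0.49}),
]
table = pd.DataFrame(rows)
print(table)
#    Frequency   U_H   U_E
# 0       4.82  0.49  0.51
# 1       6.10  0.51  0.49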
import sklearn
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn import tree
from sklearn import linear_model
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
from sklearn.model_selection import KFold
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_val_score
from sklearn.externals.six import StringIO
import pydot
# In[13]:
df = load_breast_cancer()
df = pd.DataFrame(np.c_[df['data'], df['target']],
columns= np.append(df['feature_names'], ['target']))
for col in df.columns:
print(col)
print(df.head())
total_rows=len(df.axes[0])
print(total_rows)
# Outlier detection and visualization
# In[3]:
histograms = df.hist()
df.hist("target")
# In[2]:
X, y = load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0, test_size = .2)
# In[3]:
#PCA with scikit learn
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
pca = PCA().fit(X_train)
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
explained_variance = pca.explained_variance_ratio_
# In[4]:
plot = 1
# plot explained variance
if plot == 1:
plt.figure()
plt.plot(np.cumsum(pca.explained_variance_ratio_))
plt.xlabel('Number of Components')
plt.ylabel('Variance (%)') #for each component
plt.title('Breast Cancer data set Explained Variance')
plt.savefig('foo.png')
plt.show()
# In[5]:
print(np.cumsum(pca.explained_variance_ratio_))
# Selecting the amount of principle components
# In[6]:
# 10 features
pca = PCA(n_components=10)
X_train_pca = pca.fit_transform(X_train)
X_test_pca = pca.transform(X_test)
# In[7]:
# baseline linear model
reg = LogisticRegression(random_state=0).fit(X_train, y_train)
prediction = reg.predict(X_test)
score = reg.score(X_test,y_test)
print(score)
reg_pca = LogisticRegression(random_state=0).fit(X_train_pca, y_train)
score_pca = reg_pca.score(X_test_pca,y_test)
print(score_pca)
# In[8]:
LPM = linear_model.LinearRegression()
LPM = LPM.fit(X_train, y_train)
LPM.coef_
predictionLPM = LPM.predict(X_test)
scoreLPM = LPM.score(X_test, y_test)
print(scoreLPM)
LPMpca = linear_model.LinearRegression()
LPMpca = LPMpca.fit(X_train_pca, y_train)
LPMpca.coef_
predictionLPM = LPMpca.predict(X_test_pca)
scoreLPMpca = LPMpca.score(X_test_pca, y_test)
print(scoreLPMpca)
# In[9]:
# baseline decision tree
clf = tree.DecisionTreeClassifier()
clf = clf.fit(X_train, y_train)
tree.export_graphviz(clf, out_file='tree.dot')
dot_data = StringIO()
tree.export_graphviz(clf, out_file=dot_data)
graph = pydot.graph_from_dot_data(dot_data.getvalue())
graph[0].write_pdf("decisiontree.pdf")
predictionBaseline = clf.predict(X_test)
scoreclf = clf.score(X_test, y_test)
#print(classification_report(y_test,predictionBaseline,target_names=['malignant', 'benign']))
print(scoreclf)
# baseline decision tree on PCA features
clfPca = tree.DecisionTreeClassifier()
clfPca = clfPca.fit(X_train_pca, y_train)
tree.export_graphviz(clfPca, out_file='treepca.dot')
dot_data = StringIO()
tree.export_graphviz(clfPca, out_file=dot_data)
graph = pydot.graph_from_dot_data(dot_data.getvalue())
graph[0].write_pdf("decisiontreepca.pdf")
predictionBaselinePca = clfPca.predict(X_test_pca)
scoreclf = clfPca.score(X_test_pca, y_test)
#print(classification_report(y_test,predictionBaselinePca,target_names=['malignant', 'benign']))
print(scoreclf)
# In[18]:
# KNN classifier on original data
knn = KNeighborsClassifier(n_neighbors=5, metric='euclidean')
knn.fit(X_train, y_train)
score = knn.score(X_test,y_test)
print(score)
knn.fit(X_train_pca, y_train)
score_pca = knn.score(X_test_pca,y_test)
print(score_pca)
# In[14]:
# Decision tree with Gridsearch
clf = tree.DecisionTreeClassifier()
#create a dictionary of all values we want to test for max_depth
param_grid = {'max_depth': np.arange(1, 50)}
#use gridsearch to test all values for max_depth
clf_gscv = GridSearchCV(clf, param_grid, cv=10)
#fit model to data
clf_gscv.fit(X_train_pca, y_train)
#check top performing max_depth value
print(clf_gscv.best_params_)
#check mean score for the top performing value of max_depth
print(clf_gscv.best_score_)
# In[15]:
#KNN with PCA or without PCA and Gridsearch
knn2 = KNeighborsClassifier()
#create a dictionary of all values we want to test for n_neighbors
param_grid = {'n_neighbors': np.arange(1, 50)}
#use gridsearch to test all values for n_neighbors
knn_gscv = GridSearchCV(knn2, param_grid, cv=5)
#fit model to data
knn_gscv.fit(X_train_pca, y_train)
#check top performing n_neighbors value
print(knn_gscv.best_params_)
#check mean score for the top performing value of n_neighbors
print(knn_gscv.best_score_)
# In[32]:
## Plot results from gridsearches
def plot_cv_results(cv_results, param_x, metric='mean_test_score'):
"""
cv_results - cv_results_ attribute of a GridSearchCV instance (or similar)
param_x - name of grid search parameter to plot on x axis
    metric - name of the score column (e.g. 'mean_test_score') to plot on the y axis
"""
cv_results =
|
pd.DataFrame(cv_results)
|
pandas.DataFrame
|
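The truncated plot_cv_results helper above starts by converting cv_results_ to a DataFrame; a hedged sketch of one way such a helper can be finished (the name plot_param_curve and the plotting choices are illustrative, not the author's code):
import pandas as pd
import matplotlib.pyplot as plt

def plot_param_curve(cv_results, param_x, metric='mean_test_score'):
    # cv_results_ is a dict of equal-length arrays, so it converts directly
    results = pd.DataFrame(cv_results).sort_values('param_' + param_x)
    x = results['param_' + param_x].astype(float)
    plt.plot(x, results[metric], marker='o')
    plt.xlabel(param_x)
    plt.ylabel(metric)
    plt.show()

# e.g. plot_param_curve(clf_gscv.cv_results_, 'max_depth')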
# Import libraries | Standard
import numpy as np
import pandas as pd
pd.set_option('display.max_columns', None)
import os
import datetime
import warnings
warnings.filterwarnings("ignore") # ignoring annoying warnings
from time import time
from rich.progress import track
# Import libraries | Visualization
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
# Import libraries | Sk-learn
from sklearn.preprocessing import MinMaxScaler, RobustScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error, mean_squared_error, mean_squared_log_error
from sklearn.metrics.scorer import make_scorer
from sklearn.linear_model import LinearRegression, Lasso, ElasticNet
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.model_selection import KFold, cross_val_score, GridSearchCV
import xgboost as xgb
from lightgbm import LGBMRegressor
# udf function
from util_func import distribution
def read_data(file):
features = pd.read_csv('../raw_data/'+ file[0])
train = pd.read_csv('../raw_data/'+ file[1])
stores = pd.read_csv('../raw_data/'+ file[2])
test = pd.read_csv('../raw_data/'+ file[3])
return features,train,stores,test
filename = ["features.csv","train.csv","stores.csv","test.csv"]
features, train, stores, test = read_data(filename)
#################################################################################### Data preprocessing
#################################################################################### (1) Handle missing values and outliers
#################################################################################### stores
# Stores with incorrect Type values (shown with their row indices)
print(stores[stores['Store'].isin([3,5,33,36])].index)
# index [2,4,32,35] type = 'C'
stores.iloc[2,1] = stores.iloc[4,1] = stores.iloc[32,1] = stores.iloc[35,1] = 'C'
#################################################################################### features
# Features Data | Negative values for MarkDowns
features['MarkDown1'] = features['MarkDown1'].apply(lambda x: 0 if x < 0 else x)
features['MarkDown2'] = features['MarkDown2'].apply(lambda x: 0 if x < 0 else x)
features['MarkDown3'] = features['MarkDown3'].apply(lambda x: 0 if x < 0 else x)
features['MarkDown4'] = features['MarkDown4'].apply(lambda x: 0 if x < 0 else x)
features['MarkDown5'] = features['MarkDown5'].apply(lambda x: 0 if x < 0 else x)
# Features Data | NaN values for multiple columns
for i in track(range(len(features))):
if features.iloc[i]['Date'] == '2013-04-26':
CPI_new = features.iloc[i]['CPI']
Unemployment_new = features.iloc[i]['Unemployment']
if np.isnan(features.iloc[i]['CPI']):
features.iat[i, 9] = CPI_new
features.iat[i, 10] = Unemployment_new
# Backfill MarkDown1-5 for 2010/2011: first add an ISO week column so weeks can be matched across years
features['Week'] = 0
for i in track(range(len(features))):
features.iat[i, 12] = datetime.date(
int(features.iloc[i]['Date'][0:4]),
int(features.iloc[i]['Date'][5:7]),
int(features.iloc[i]['Date'][8:10])
).isocalendar()[1]
# missing data for 2012 & 2013
features['Year'] = features['Date'].str.slice(start=0, stop=4)
total = features[features['Year'].isin(['2012','2013'])].isnull().sum().sort_values(ascending=False)
percent = (features[features['Year'].isin(['2012','2013'])].isnull().sum()/
features[features['Year'].isin(['2012','2013'])].isnull().count()
).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
print(missing_data.head())
# Iterate through stores
for i in track(range(1, len(features['Store'].unique()))):
# For 2010, iterate through weeks 5 thru 52
for j in range(5, 52):
idx = features.loc[(features.Year == '2010') & (features.Store == i) & (features.Week == j),['Date']].index[0]
features.iat[idx, 4] = features.loc[(features.Year == '2012') & (features.Store == i) & (features.Week == j),['MarkDown1']].values[0]
features.iat[idx, 5] = features.loc[(features.Year == '2012') & (features.Store == i) & (features.Week == j),['MarkDown2']].values[0]
features.iat[idx, 6] = features.loc[(features.Year == '2012') & (features.Store == i) & (features.Week == j),['MarkDown3']].values[0]
features.iat[idx, 7] = features.loc[(features.Year == '2012') & (features.Store == i) & (features.Week == j),['MarkDown4']].values[0]
features.iat[idx, 8] = features.loc[(features.Year == '2012') & (features.Store == i) & (features.Week == j),['MarkDown5']].values[0]
# For 2011, iterate through weeks 1 thru 44
for j in range(1, 44):
idx = features.loc[(features.Year == '2011') & (features.Store == i) & (features.Week == j),['Date']].index[0]
features.iat[idx, 4] = features.loc[(features.Year == '2012') & (features.Store == i) & (features.Week == j),['MarkDown1']].values[0]
features.iat[idx, 5] = features.loc[(features.Year == '2012') & (features.Store == i) & (features.Week == j),['MarkDown2']].values[0]
features.iat[idx, 6] = features.loc[(features.Year == '2012') & (features.Store == i) & (features.Week == j),['MarkDown3']].values[0]
features.iat[idx, 7] = features.loc[(features.Year == '2012') & (features.Store == i) & (features.Week == j),['MarkDown4']].values[0]
features.iat[idx, 8] = features.loc[(features.Year == '2012') & (features.Store == i) & (features.Week == j),['MarkDown5']].values[0]
features.drop(columns=['Year'], axis=1, inplace=True)
features.fillna(0, inplace=True)
#################################################################################### train
# Train Data | Negative Values for Weekly Sales
train['Weekly_Sales'] = train['Weekly_Sales'].apply(lambda x: 0 if x < 0 else x)
#################################################################################### (2) Merge the datasets
# Merge the following datasets:
# Stores + Features + Train
# Stores + Features + Test
# Remove duplicate columns from each dataset
train = pd.merge(train, stores, how='left', on=['Store'])
train = pd.merge(train, features, how='left', on=['Store','Date'])
test =
|
pd.merge(test, stores, how='left', on=['Store'])
|
pandas.merge
|
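A minimal illustration of the same left-merge chain applied to the test set, with toy frames standing in for test, stores and features (values are made up):
import pandas as pd

test = pd.DataFrame({'Store': [1, 2], 'Dept': [1, 1], 'Date': ['2012-11-02', '2012-11-02']})
stores = pd.DataFrame({'Store': [1, 2], 'Type': ['A', 'B'], 'Size': [151315, 202307]})
features = pd.DataFrame({'Store': [1, 2], 'Date': ['2012-11-02', '2012-11-02'],
                         'Temperature': [55.3, 61.1]})

# merge store attributes on Store, then weekly features on Store and Date
test = pd.merge(test, stores, how='left', on=['Store'])
test = pd.merge(test, features, how='left', on=['Store', 'Date'])
print(test)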
import numpy as np
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
from pandas import DataFrame
from pandas import concat
from itertools import chain
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
def get_train_valid_test_set(url, chunk_size_x):
data_frame = pd.read_csv(url)
data_set = data_frame.iloc[:, 1:2].values
data_set = data_set.astype('float64')
# sc = MinMaxScaler(feature_range=(0, 1))
# train_data_set = sc.fit_transform(data_set)
train_data_set = np.array(data_set)
reframed_train_data_set = np.array(series_to_supervised(train_data_set, chunk_size_x, 1).values)
    # Split the dataset: use the first 60% of days for training, the next 20% for validation, and the rest for testing
train_days = int(len(reframed_train_data_set) * 0.6)
valid_days = int(len(reframed_train_data_set) * 0.2)
train = reframed_train_data_set[:train_days, :]
valid = reframed_train_data_set[train_days:train_days + valid_days, :]
test = reframed_train_data_set[train_days + valid_days:, :]
    # test_data is kept for computing correlation and Spearman correlation later
test_data = train_data_set[train_days + valid_days + chunk_size_x:, :]
train_x, train_y = train[:, :-1], train[:, -1]
valid_x, valid_y = valid[:, :-1], valid[:, -1]
test_x, test_y = test[:, :-1], test[:, -1]
    # Reshape the data into the [samples, time steps, features] format required by the LSTM
train_x = train_x.reshape((train_x.shape[0], chunk_size_x, 1))
valid_x = valid_x.reshape((valid_x.shape[0], chunk_size_x, 1))
test_x = test_x.reshape((test_x.shape[0], chunk_size_x, 1))
return train_x, train_y, valid_x, valid_y, test_x, test_y, test_data, reframed_train_data_set
"""
Frame a time series as a supervised learning dataset.
Arguments:
data: Sequence of observations as a list or NumPy array.
n_in: Number of lag observations as input (X).
n_out: Number of observations as output (y).
dropnan: Boolean whether or not to drop rows with NaN values.
Returns:
Pandas DataFrame of series framed for supervised learning.
"""
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j + 1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j + 1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j + 1, i)) for j in range(n_vars)]
# put it all together
agg =
|
concat(cols, axis=1)
|
pandas.concat
|
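The series_to_supervised reframing above reduces to concatenating lagged copies of the frame column-wise and dropping rows that pick up NaNs; a toy sketch with a single lag:
import pandas as pd

df = pd.DataFrame({'var1': [1.0, 2.0, 3.0, 4.0]})
cols = [df.shift(1), df]           # t-1 and t
agg = pd.concat(cols, axis=1)      # align column-wise
agg.columns = ['var1(t-1)', 'var1(t)']
agg.dropna(inplace=True)           # first row has no lagged value
print(agg)
#    var1(t-1)  var1(t)
# 1        1.0      2.0
# 2        2.0      3.0
# 3        3.0      4.0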
import pandas as pd
from broker import brkrs
from utils.logger import Lgr
from utils.cfg import Settings
####### Print account information #######
class Output():
def __init__(self):
self.curr_showing_porder_df =
|
pd.DataFrame()
|
pandas.DataFrame
|
from typing import Any
import numpy as np
import pandas as pd
from mlflow.exceptions import MlflowException
from mlflow.types import DataType
from mlflow.types.schema import Schema, ColSpec
class TensorsNotSupportedException(MlflowException):
def __init__(self, msg):
super().__init__("Multidimensional arrays (aka tensors) are not supported. "
"{}".format(msg))
def _infer_schema(data: Any) -> Schema:
"""
Infer an MLflow schema from a dataset.
This method captures the column names and data types from the user data. The signature
represents model input and output as data frames with (optionally) named columns and data
type specified as one of types defined in :py:class:`DataType`. This method will raise
an exception if the user data contains incompatible types or is not passed in one of the
supported formats (containers).
The input should be one of these:
- pandas.DataFrame or pandas.Series
- dictionary of { name -> numpy.ndarray}
- numpy.ndarray
- pyspark.sql.DataFrame
The element types should be mappable to one of :py:class:`mlflow.models.signature.DataType`.
NOTE: Multidimensional (>2d) arrays (aka tensors) are not supported at this time.
:param data: Dataset to infer from.
:return: Schema
"""
if isinstance(data, dict):
res = []
for col in data.keys():
ary = data[col]
if not isinstance(ary, np.ndarray):
raise TypeError("Data in the dictionary must be of type numpy.ndarray")
dims = len(ary.shape)
if dims == 1:
res.append(ColSpec(type=_infer_numpy_array(ary), name=col))
else:
raise TensorsNotSupportedException("Data in the dictionary must be 1-dimensional, "
"got shape {}".format(ary.shape))
return Schema(res)
elif isinstance(data, pd.Series):
return Schema([ColSpec(type=_infer_numpy_array(data.values))])
elif isinstance(data, pd.DataFrame):
return Schema([ColSpec(type=_infer_numpy_array(data[col].values), name=col)
for col in data.columns])
elif isinstance(data, np.ndarray):
if len(data.shape) > 2:
raise TensorsNotSupportedException("Attempting to infer schema from numpy array with "
"shape {}".format(data.shape))
        if data.dtype == object:
data =
|
pd.DataFrame(data)
|
pandas.DataFrame
|
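A small sketch of why an object-dtype ndarray is wrapped in a DataFrame before column types are inferred (the infer_objects() call is illustrative and not necessarily what the original code does next):
import numpy as np
import pandas as pd

# a 2-D object array hides the per-column types
data = np.array([['a', 1], ['b', 2]], dtype=object)
frame = pd.DataFrame(data)
print(frame.dtypes)                    # both columns report object
print(frame.infer_objects().dtypes)    # column 1 is re-inferred as int64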
import pandas as pd
import os
import matplotlib.pyplot as plt
import sys
#This script will process two input sequencing files for mRNA
#and DNA into a data set for all genes
# The four inline arguments passed to this script are
# 1: file name of mRNA sequencing .fastq file.
# 2: file name of DNA sequencing .fastq file.
# 3: output name prefix. The output name for each gene file will be given
# as gene_name + this_input + 'dataset'
# 4 group number. Genes are separated into 18 groups, labeled 101 to 118.
# Only those genes in the given group number will have their datasets generated
# by this script. The associated group number to gene association is given
# by the genetogroupnum file.
name = sys.argv[1]
nameplas = sys.argv[2]
barcode_length = 20
trailing_sequence_length = 21
#define needed functions
def comb_tag(s):
'''function to combine mutated sequence with barcode'''
return s['seq'] + s['tag']
#set no maximum length on output column size.
pd.set_option('max_colwidth',int(1e8))
#load in dataframe version of mRNA sequences.
df = pd.io.parsers.read_csv(name,header=None)
#extract the sequences from the fastq format.
df = df.loc[1::4]
#we will select out the barcodes from each sequence. They will be located
#from -41 to -21 bases from the end of the sequence.
tags = df[0].str.slice(-trailing_sequence_length - barcode_length,-trailing_sequence_length)
#we will get the numbers of each barcode.
tagcounts = tags.value_counts()
#We will now preform an identical procedure for the DNA sequences.
dfplas = pd.io.parsers.read_csv(nameplas,header=None)
dfplas = dfplas.loc[1::4]
tagsplas = dfplas[0].str.slice(-trailing_sequence_length - barcode_length,-trailing_sequence_length)
tagcountsplas = tagsplas.value_counts()
#we will get the genes for the associated group number. This is generally 6
#genes.
#load in key for group number for each gene
genecodes = pd.io.parsers.read_csv('../data/test_data/genetogroupnum')
#use group code to find the genes we need to make datasets for.
gene = list(sys.argv[5])
#load in the file that relates barcode to mutated sequence.
tagkeyname = sys.argv[3]
tagkey =
|
pd.io.parsers.read_csv(tagkeyname,delim_whitespace=True)
|
pandas.io.parsers.read_csv
|
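pd.io.parsers.read_csv is the same callable that pandas exposes as pd.read_csv; a self-contained sketch of reading a whitespace-delimited barcode key (the data below is made up):
import io
import pandas as pd

# delim_whitespace=True splits on any run of whitespace instead of commas
raw = io.StringIO("tag seq\nACGTACGT TTGACA\nTTGCATGC TATAAT\n")
tagkey = pd.read_csv(raw, delim_whitespace=True)
print(tagkey)
#         tag     seq
# 0  ACGTACGT  TTGACA
# 1  TTGCATGC  TATAAT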
import pandas as pd
import gzip
import re
import os
import argparse
import sys
class GDF:
"""
We are assuming single sample GDF
"""
def __init__(self, filename):
self.data = pd.read_csv(filename, sep="\t")
self.data_cols = self.data.columns.values
pos_df = pd.DataFrame(
self.data.Locus.apply(lambda x: x.split(":")).to_list(),
columns=["CHROM", "POS"]
)
pos_df.POS = pos_df.POS.astype('int64')
self.data = pd.concat([self.data, pos_df], axis=1)
def rsid_per_position(self, target_bed):
def _annotate(x, targets):
try:
ids = targets[(targets.CHROM == x.CHROM) &
(targets.START <= x.POS) &
(x.POS <= targets.END)].ID.to_list()
return ", ".join(ids)
except IndexError:
return "-"
targets = pd.read_csv(
target_bed, sep="\t",
names=["CHROM", "START", "END", "ID", "GENE"]
)
targets["save"] = targets.START
idx_swap = targets.START > targets.END
targets.loc[idx_swap, "START"] = targets.loc[idx_swap, "END"]
targets.loc[idx_swap, "END"] = targets.loc[idx_swap, "save"]
targets["CHROM"] = targets.CHROM.apply(lambda x: f"chr{x}")
self.data["ID"] = self.data.apply(lambda x: _annotate(x, targets), axis=1)
def write_proccessed_gdf(self, filename, annotate=True):
if annotate:
self.data.to_csv(filename, sep="\t", index=False)
else:
self.data.to_csv(filename, sep="\t", columns=self.data_cols, index=False)
class VCF:
"""
We are assuming single sample VCF
"""
def __init__(self, filename):
self.meta = []
self.data = pd.DataFrame()
self.original_header = []
self.read_vcf(filename)
def read_vcf(self, filename):
if ".gz" in filename:
f = gzip.open(filename, "rt")
else:
f = open(filename, "r")
lines = f.readlines()
lines = [l.strip() for l in lines]
i = None
for i, line in enumerate(lines):
if re.search("^#CHROM", line):
break
if i is None:
raise ImportError("No lines in: " + filename)
        self.meta = lines[:i]
data = [l.split("\t") for l in lines[i:]]
self.data =
|
pd.DataFrame(data[1:], columns=data[0])
|
pandas.DataFrame
|
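The completion above turns the tab-split VCF body into a DataFrame, using the header row for the column names; a toy sketch with fabricated records:
import pandas as pd

data = [
    ['#CHROM', 'POS', 'ID', 'REF', 'ALT'],
    ['chr1', '14370', 'rs6054257', 'G', 'A'],
    ['chr2', '17330', '.', 'T', 'A'],
]
vcf_body = pd.DataFrame(data[1:], columns=data[0])
print(vcf_body)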