| prompt (string, lengths 19 to 1.03M) | completion (string, lengths 4 to 2.12k) | api (string, lengths 8 to 90) |
|---|---|---|
# CacheIntervals: Memoization with interval parameters
#
# Copyright (C) <NAME>
#
# This file is part of CacheIntervals.
#
# @author = '<NAME>'
# @email = '<EMAIL>'
import logging
from functools import reduce
import loguru
import numpy as np
import pandas as pd
import pendulum as pdl
import sqlite3
import time
import klepto
from datetime import date, datetime
from CacheIntervals import MemoizationWithIntervals
from CacheIntervals.utils.Timer import Timer
name_db_file_test1 = "../test/test1.sqlite"
delay = 2
def get_records(conn, name_table, period = pd.Interval(pd.Timestamp(2021, 1,1), pd.Timestamp(2021, 1, 31))):
time.sleep(delay)
query = f"Select * From {name_table} Where date(date) between date('{period.left.date()}') and date('{period.right.date()}')"
#query = f'Select * From {name_table} '
loguru.logger.debug(query)
df = pd.read_sql(query, conn)
return df
cache_itvls = MemoizationWithIntervals(
[], ['period'],
aggregation=pd.concat,
debug=True,
memoization=klepto.lru_cache(
maxsize=500,
cache=klepto.archives.dict_archive(),
keymap=klepto.keymaps.stringmap(typed=False, flat=False)))
get_records_cached = cache_itvls(get_records)
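# Added note (an inference from the decorator configuration above, not taken from the
# CacheIntervals documentation): with aggregation=pd.concat, a later call covering a larger
# interval than a cached one would presumably hit the database only for the uncached
# sub-interval and concatenate that result with the cached DataFrame.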
cache_itvls_concat_with_tolerance = MemoizationWithIntervals(
[], ['period'],
aggregation=pd.concat,
debug=False,
memoization=klepto.lru_cache(
maxsize=500,
cache=klepto.archives.dict_archive(),
keymap=klepto.keymaps.stringmap(typed=False, flat=False)),
rounding = pdl.today()-pdl.yesterday()
)
get_records_cached_with_tolerance_1day = cache_itvls_concat_with_tolerance(get_records)
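# Added note (inferred from the rounding argument and the test below, not from the
# CacheIntervals documentation): pdl.today() - pdl.yesterday() is a one-day duration, so
# interval endpoints within a day of a cached interval appear to be treated as equal,
# which is why the query up to Feb 1 in caching_with_tolerance can be served from the
# cached January result.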
def caching_with_tolerance():
with Timer() as timer_no_cache:
df_jan = get_records(cnx_file, "test1", pd.Interval(pd.Timestamp(2021, 1, 1), pd.Timestamp(2021, 1, 31)))
# activate caching
get_records_cached_with_tolerance_1day(cnx_file, "test1", pd.Interval(pd.Timestamp(2021, 1, 1),
pd.Timestamp(2021, 1, 31)))
df_jan_cached = None
with Timer() as timer_cache:
df_jan_cached = get_records_cached_with_tolerance_1day(cnx_file, "test1",
pd.Interval(pd.Timestamp(2021, 1, 1), pd.Timestamp(2021, 2, 1)))
loguru.logger.debug(f'\n{df_jan_cached.sort_values(by="date")}')
assert timer_cache.interval < timer_no_cache.interval
def accesss_cached_function():
get_records_cached(cnx_file, "test1", pd.Interval(pd.Timestamp(2021, 2, 1), pd.Timestamp(2021, 3, 31)))
'''
Pulls data from xml and creates an array for each user consisting of PMID,
type, and annotation. Uses NLTK scoring metrics tools to determine
precision, recall, and f-score. By including PMID in the hash, this version
allows for examining user to user comparisons across multiple documents in the
group. Averages by User in one shot, instead of an average of averages.
Uses userid instead of user_name. Treats one of the users as the test set, the
other user as the gold standard for each pairing.
'''
from django.contrib.auth.models import User
from django.conf import settings
from ..common.formatter import clean_df
from ..common.models import Group
from ..document.models import Document
from .models import Report
from . import synonyms_dict
from nltk.metrics import scores as nltk_scoring
import pandas as pd
import networkx as nx
import itertools
def hashed_er_annotations_df(group_pk, compare_type=True):
"""Generate an Entity Recognition DataFrame with an additional hash column
"""
group = Group.objects.get(pk=group_pk)
org_er_df = Document.objects.ner_df(document_pks=group.get_document_pks(), include_pubtator=False)
er_df = clean_df(org_er_df)
if compare_type:
er_df['hash'] = er_df.document_pk.apply(str) + '_' + er_df.ann_type_idx.apply(str) + '_' + er_df.section_offset.apply(str) + '_' + er_df.length.apply(str)
else:
er_df['hash'] = er_df.document_pk.apply(str) + '_' + er_df.section_offset.apply(str) + '_' + er_df.length.apply(str)
return er_df
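# Added worked example of the hash built above (values are illustrative): with
# compare_type=True, a row with document_pk=12, ann_type_idx=3, section_offset=45 and
# length=7 hashes to '12_3_45_7'; with compare_type=False the same row hashes to '12_45_7'.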
def compute_pairwise(hashed_er_anns_df):
"""
Returns pairwise comparisons between users (user_a & user_b)
that have completed the same documents
"""
# Unique set of user ids present in the annotations
userset = set(hashed_er_anns_df.user_id)
inter_annotator_arr = []
# For each unique user pairing, compute comparison metrics
for user_a, user_b in itertools.combinations(userset, 2):
# The list of document_pks that each user had completed
user_a_set = set(hashed_er_anns_df[hashed_er_anns_df['user_id'] == user_a].document_pk)
user_b_set = set(hashed_er_anns_df[hashed_er_anns_df['user_id'] == user_b].document_pk)
# Only compare documents both users have completed
pmid_set = user_a_set.intersection(user_b_set)
# If user_a and user_b have completed a shared PMID, compute comparisons
if len(pmid_set) != 0:
pmid_df = hashed_er_anns_df[hashed_er_anns_df['document_pk'].isin(pmid_set)]
ref_set = set(pmid_df[pmid_df['user_id'] == user_a].hash)
test_set = set(pmid_df[pmid_df['user_id'] == user_b].hash)
# Compute the precision, recall and F-measure based on
# the unique hashes
inter_annotator_arr.append((
user_a,
user_b,
len(pmid_set),
nltk_scoring.precision(ref_set, test_set),
nltk_scoring.recall(ref_set, test_set),
nltk_scoring.f_measure(ref_set, test_set)
))
return pd.DataFrame(inter_annotator_arr, columns=('user_a', 'user_b', 'docs_compared', 'precision', 'recall', 'f-score'))
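# A minimal, self-contained sketch (added for illustration, not part of the original module)
# of how the set-based NLTK metrics used in compute_pairwise behave; the hashes below are
# made up and follow the '<document_pk>_<ann_type_idx>_<offset>_<length>' pattern.
def _demo_pairwise_scores():
    from nltk.metrics import scores as demo_scoring
    ref_set = {"1_0_10_4", "1_0_25_6", "1_1_40_3"}   # user_a hashes, treated as the gold standard
    test_set = {"1_0_10_4", "1_1_40_3", "1_2_55_5"}  # user_b hashes, treated as the test set
    # precision = |ref & test| / |test|, recall = |ref & test| / |ref|,
    # f_measure = their harmonic mean; all three equal 2/3 for these toy sets
    return (
        demo_scoring.precision(ref_set, test_set),
        demo_scoring.recall(ref_set, test_set),
        demo_scoring.f_measure(ref_set, test_set),
    )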
# views.py
from flask import Flask, render_template, Response
from flask_basicauth import BasicAuth
from flask import redirect, url_for, request, flash
import pandas as pd
from elasticsearch import Elasticsearch, RequestsHttpConnection
from requests_aws4auth import AWS4Auth
import boto3
import numbers
import string
import nltk
import json
import joblib
import io
import os
import random
from matplotlib import pyplot as plt
import numpy as np
from app import app
app.config['BASIC_AUTH_USERNAME'] = os.environ['SHAKEUSER']
app.config['BASIC_AUTH_PASSWORD'] = os.environ['SHAKEPASS']
app.config['BASIC_AUTH_FORCE'] = True
basic_auth = BasicAuth(app)
@app.route('/secret')
@basic_auth.required
def secret_view():
return render_template('secret.html')
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
pipeline = joblib.load('pipeline.joblib')
NOS = pd.read_excel('NOS_database_table_of_contents.xls', sheet_name='phase1_table_of_contents')
def es_ngram_no_of_hits_query(es, ngram, collocation):
que = { "from" : 0, "size" : 1,
"_source" : {
"includes" : [""]
},
"query":
{
"multi_match" :
{ "query" : ngram,
"fields" : ["Text", "Title"],
"type" : "phrase",
"slop" : collocation
}
}
}
res = es.search(index='test_index', body = que, request_timeout=1000)
return res
def es_ngram_query(es, ngram, collocation, size=100):
que = { "from" : 0, "size" : size,
"_source" : {
"includes" : ["TCP_ID"]
},
"query":
{
"multi_match" :
{ "query" : ngram,
"fields" : ["Text", "Title"],
"type" : "phrase",
"slop" : collocation
}
},
"highlight":
{
"fields":
{
"Text": {},
"Title": {}
}
}
}
res = es.search(index='test_index', body = que, request_timeout=600)
return res
def es_query_author(es, low_year, high_year, author_arr, ngram, collocation, size, include_year_unknown, tcpid="None", timeout=600):
low = -10000
high = 10000
quer = "should"
if (not include_year_unknown):
low = 10000
high = -10000
quer = "must"
if len(author_arr) < 15:
length = len(author_arr)
for i in range(15-length):
author_arr.append("")
que = {"from" : 0, "size" : size,
"_source" : {
"includes" : ["TCP_ID"]
},
"query" : {
"bool": {
"must": [{
"bool" : {
"must_not" :
{ "match" : {"TCP_ID": tcpid} }
}}, {
"bool": {
quer : [
{"range" :{"Year": {"gte": low_year, "lte": high_year}}},
{"bool" : {"must_not":[{"range" : {"Year":{"gte":low, "lte":high}}}] }}
]
}}, {
"bool": {
"must" :
{
"bool": {
"should" :[
{"match": {"Author" : author_arr[0]}},
{"match": {"Author" : author_arr[1]}},
{"match": {"Author" : author_arr[2]}},
{"match": {"Author" : author_arr[3]}},
{"match": {"Author" : author_arr[4]}},
{"match": {"Author" : author_arr[5]}},
{"match": {"Author" : author_arr[6]}},
{"match": {"Author" : author_arr[7]}},
{"match": {"Author" : author_arr[8]}},
{"match": {"Author" : author_arr[9]}},
{"match": {"Author" : author_arr[10]}},
{"match": {"Author" : author_arr[11]}},
{"match": {"Author" : author_arr[12]}},
{"match": {"Author" : author_arr[13]}},
{"match": {"Author" : author_arr[14]}}
]
}
}
}
}],
"filter":
{
"multi_match":
{
"query": ngram,
"fields" : ["Text", "Title"],
"type" : "phrase",
"slop" : collocation
}
}
}
},
"highlight":
{
"fields":
{
"Text": {},
"Title": {}
}
}
}
res = es.search(index='test_index', body = que, request_timeout=timeout)
return res
def es_query_no_author(es, low_year, high_year, ngram, collocation, size, include_year_unknown, tcpid="None", timeout=600):
low = -10000
high = 10000
quer = "should"
if (not include_year_unknown):
low = 10000
high = -10000
quer = "must"
que = {"from" : 0, "size" : size,
"_source" : {
"includes" : ["TCP_ID"]
},
"query" : {
"bool": {
"must": [{
"bool" : {
"must_not" :
{ "match" : {"TCP_ID": tcpid} }
}}, {
"bool": {
quer : [
{"range" :{"Year": {"gte": low_year, "lte": high_year}}},
{"bool" : {"must_not":[{"range" : {"Year":{"gte":low, "lte":high}}}] }}
]
}
}],
"filter":
{
"multi_match":
{
"query": ngram,
"fields" : ["Text", "Title"],
"type" : "phrase",
"slop" : collocation
}
}
}
},
"highlight":
{
"fields":
{
"Text": {},
"Title": {}
}
}
}
res = es.search(index='test_index', body = que, request_timeout=timeout)
return res
def ngrammer(paragraph, n):
"""Extracts all ngrams of length n words from given text"""
paragraph = paragraph.translate(str.maketrans('', '', string.punctuation))
ngram = nltk.ngrams(paragraph.split(), n)
strs = []
for grams in ngram:
stri = ' '.join(grams)
strs.append(stri)
return list(set(strs))
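# Added usage example: punctuation is stripped and duplicates are removed via set(),
# so the order of the returned ngrams is not guaranteed.
#   ngrammer("To be, or not to be", 2)
#   -> ['To be', 'be or', 'or not', 'not to', 'to be'] (in arbitrary order)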
def check_status(endpoint):
service = 'es'
region = 'us-west-2'
credentials = boto3.Session().get_credentials()
awsauth = AWS4Auth(credentials.access_key, credentials.secret_key, region, service, session_token=credentials.token)
es = Elasticsearch( hosts=[endpoint], http_auth=awsauth, use_ssl=True, verify_certs=True, connection_class=RequestsHttpConnection)
if(es.ping()):
return es
else:
return False
@app.route('/')
def home():
return render_template('titlepage.html')
@app.route('/attribution', methods=['POST', 'GET'])
def attribution():
return render_template('attribution.html')
@app.route('/attributionresult', methods=['POST', 'GET'])
def attributionresult():
if request.method == 'POST':
text = request.form['paragraph']
result = pipeline.predict([text])[0]
probability = '%.2f'%(max(pipeline.predict_proba([text])[0])*100)
prob_array = pipeline.predict_proba([text])[0]
author_list = ['<NAME>','<NAME>','<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>',
'<NAME>', '<NAME>', 'Shakespeare', '<NAME>']
y_pos = np.arange(len(author_list))
fig = plt.figure(figsize=(20,10))
ax = fig.add_subplot(1,1,1)
ax.bar(y_pos, prob_array, align='center', alpha=0.5)
ax.set_xticks(np.arange(len(author_list)))
ax.tick_params(labelsize=15)
ax.set_xticklabels(author_list, rotation=60, fontsize=15)
ax.set_xlabel('Authors', fontsize=20)
ax.set_ylabel('Probability', fontsize=20)
plt.tight_layout()
plt.savefig('./app/static/attrplot.png')
return render_template("attributionresult.html",result = result, prob = probability, para=text)
else:
return render_template("attribution.html")
@app.route('/ngramsearch', methods=['POST', 'GET'])
def ngramsearch():
return render_template('ngramsearch.html')
@app.route('/ngramsearchresult', methods=['POST', 'GET'])
def ngramsearchresult():
if (request.method == 'POST'):
# get all values from form
paragraph = request.form['paragraph']
if (len(paragraph) == 0):
return render_template('ngramsearch.html')
exclude_TCP = request.form['tcpid']
ngram_low = int(request.form['ngram1'])
ngram_high = int(request.form['ngram2'])
year_low = int(request.form['year1'])
year_high = int(request.form['year2'])
hits_low = int(request.form['hit1'])
hits_high = int(request.form['hit2'])
collocation = int(request.form['collocationdist'])
unknown_year = request.form['yearunknown']
if (unknown_year == 'yes'):
unknown_year = True
else:
unknown_year = False
max_results = int(request.form['maxres'])
author_list = []
author_flag = True
for i in range(15):
auth = 'author' + str(i+1)
author = request.form[auth]
if (len(author) > 0):
author_list.append(author)
if len(author_list) == 0:
author_flag = False
print(exclude_TCP)
# Create ngrams
ngram_master_list = []
for i in range(ngram_low, ngram_high+1):
ngram_master_list.append(ngrammer(paragraph, i))
#check if connected to Elasticsearch
es_inst = check_status(os.environ['ES_ENDPOINT'])
if (es_inst == False):
flash('Not connected to elasticsearch!')
return redirect(request.url)
result_df = pd.DataFrame(columns=['ngram', 'total_hits', 'TCP_ID', 'year', 'author', 'title', 'highlight'])
import unittest
import numpy as np
import pandas as pd
import mhkit.utils as utils
from pandas.testing import assert_frame_equal
import json
from os.path import abspath, dirname, join, isfile
testdir = dirname(abspath(__file__))
datadir = join(testdir, 'data')
class TestGenUtils(unittest.TestCase):
@classmethod
def setUpClass(self):
loads_data_file = join(datadir, "loads_data_dict.json")
with open(loads_data_file, 'r') as fp:
data_dict = json.load(fp)
# convert dictionaries into dataframes
data = {
key: pd.DataFrame(data_dict[key])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Train, test, evaluate, and use a gene symbol classifier to assign gene symbols
to protein sequences.
Evaluate a trained network
A trained network, specified with the `--checkpoint` argument with its path,
is evaluated by assigning symbols to the canonical translations of protein sequences
of annotations in the latest Ensembl release and comparing them to the existing
symbol assignments.
Get statistics for existing symbol assignments
Gene symbol assignments from a classifier can be compared against the existing
assignments in the Ensembl database, by specifying the path to the assignments CSV file
with `--assignments_csv` and the Ensembl database name with `--ensembl_database`.
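Example invocations (sketch; the script name is assumed and only the arguments
mentioned above are shown):
    python gene_symbol_classifier.py --checkpoint <checkpoint_path>
    python gene_symbol_classifier.py --assignments_csv <assignments_csv_path> --ensembl_database <ensembl_core_db_name>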
"""
# standard library imports
import argparse
import csv
import datetime as dt
import json
import math
import pathlib
import pprint
import random
import sys
import time
# third party imports
import numpy as np
import pandas as pd
import torch
import torchmetrics
import yaml
from loguru import logger
from torch import nn
from torch.utils.data import DataLoader, random_split
from torch.utils.tensorboard import SummaryWriter
# project imports
from utils import (
GeneSymbolClassifier,
SequenceDataset,
data_directory,
get_assemblies_metadata,
get_species_taxonomy_id,
get_taxonomy_id_clade,
get_xref_canonical_translations,
load_checkpoint,
logging_format,
read_fasta_in_chunks,
sequences_directory,
)
selected_genome_assemblies = {
"GCA_002007445.2": ("Ailuropoda melanoleuca", "Giant panda"),
"GCA_900496995.2": ("Aquila chrysaetos chrysaetos", "Golden eagle"),
"GCA_009873245.2": ("Balaenoptera musculus", "Blue whale"),
"GCA_002263795.2": ("Bos taurus", "Cow"),
"GCA_000002285.2": ("Canis lupus familiaris", "Dog"),
"GCA_000951615.2": ("Cyprinus carpio", "Common carp"),
"GCA_000002035.4": ("<NAME>", "Zebrafish"),
"GCA_000001215.4": ("Drosophila melanogaster", "Drosophila melanogaster"),
"GCA_000181335.4": ("Felis catus", "Cat"),
"GCA_000002315.5": ("Gallus gallus", "Chicken"),
"GCA_000001405.28": ("Homo sapiens", "Human"),
"GCA_000001905.1": ("Loxodonta africana", "Elephant"),
"GCA_000001635.9": ("Mus musculus", "Mouse"),
"GCA_000003625.1": ("Oryctolagus cuniculus", "Rabbit"),
"GCA_002742125.1": ("Ovis aries", "Sheep"),
"GCA_000001515.5": ("Pan troglodytes", "Chimpanzee"),
"GCA_008795835.1": ("Panthera leo", "Lion"),
"GCA_000146045.2": ("Saccharomyces cerevisiae", "Saccharomyces cerevisiae"),
"GCA_000003025.6": ("Sus scrofa", "Pig"),
}
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class EarlyStopping:
"""
Stop training if validation loss doesn't improve during a specified patience period.
"""
def __init__(self, patience=7, loss_delta=0):
"""
Args:
checkpoint_path (path-like object): Path to save the checkpoint.
patience (int): Number of calls to continue training if validation loss is not improving. Defaults to 7.
loss_delta (float): Minimum change in the monitored quantity to qualify as an improvement. Defaults to 0.
"""
self.patience = patience
self.loss_delta = loss_delta
self.no_progress = 0
self.min_validation_loss = np.Inf
def __call__(
self,
network,
optimizer,
experiment,
symbols_metadata,
validation_loss,
checkpoint_path,
):
if self.min_validation_loss == np.Inf:
self.min_validation_loss = validation_loss
logger.info("saving initial network checkpoint...")
checkpoint = {
"experiment": experiment,
"network_state_dict": network.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
"symbols_metadata": symbols_metadata,
}
torch.save(checkpoint, checkpoint_path)
elif validation_loss <= self.min_validation_loss - self.loss_delta:
validation_loss_decrease = self.min_validation_loss - validation_loss
assert (
validation_loss_decrease > 0
), f"{validation_loss_decrease=}, should be a positive number"
logger.info(
f"validation loss decreased by {validation_loss_decrease:.4f}, saving network checkpoint..."
)
self.min_validation_loss = validation_loss
self.no_progress = 0
checkpoint = {
"experiment": experiment,
"network_state_dict": network.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
"symbols_metadata": symbols_metadata,
}
torch.save(checkpoint, checkpoint_path)
else:
self.no_progress += 1
if self.no_progress == self.patience:
logger.info(
f"{self.no_progress} epochs with no validation loss improvement, stopping training"
)
return True
return False
class Experiment:
"""
Object containing settings values and status of an experiment.
"""
def __init__(self, experiment_settings, datetime):
for attribute, value in experiment_settings.items():
setattr(self, attribute, value)
# experiment parameters
self.datetime = datetime
# set a seed for the PyTorch random number generator if not present
if not hasattr(self, "random_seed"):
self.random_seed = random.randint(1, 100)
if self.included_genera is not None and self.excluded_genera is not None:
raise ValueError(
'"included_genera" and "excluded_genera" are mutually exclusive experiment settings parameters, specify values to at most one of them'
)
# early stopping
loss_delta = 0.001
self.stop_early = EarlyStopping(self.patience, loss_delta)
# loss function
self.criterion = nn.NLLLoss()
self.num_complete_epochs = 0
self.filename = f"{self.filename_prefix}_ns{self.num_symbols}_{self.datetime}"
# self.padding_side = "left"
self.padding_side = "right"
def __str__(self):
return pprint.pformat(self.__dict__, sort_dicts=False)
def generate_dataloaders(experiment):
"""
Generate training, validation, and test dataloaders from the dataset files.
Args:
experiment (Experiment): Experiment object containing metadata
Returns:
tuple containing the training, validation, and test dataloaders
"""
dataset = SequenceDataset(
num_symbols=experiment.num_symbols,
sequence_length=experiment.sequence_length,
padding_side=experiment.padding_side,
included_genera=experiment.included_genera,
excluded_genera=experiment.excluded_genera,
)
experiment.symbol_mapper = dataset.symbol_mapper
experiment.protein_sequence_mapper = dataset.protein_sequence_mapper
experiment.clade_mapper = dataset.clade_mapper
experiment.num_protein_letters = len(
experiment.protein_sequence_mapper.protein_letters
)
experiment.num_clades = len(experiment.clade_mapper.categories)
pandas_symbols_categories = experiment.symbol_mapper.categorical_datatype.categories
logger.info(
"gene symbols:\n{}".format(
pandas_symbols_categories.to_series(
index=range(len(pandas_symbols_categories)), name="gene symbols"
)
)
)
# calculate the training, validation, and test set size
dataset_size = len(dataset)
experiment.validation_size = int(experiment.validation_ratio * dataset_size)
experiment.test_size = int(experiment.test_ratio * dataset_size)
experiment.training_size = (
dataset_size - experiment.validation_size - experiment.test_size
)
# split dataset into training, validation, and test datasets
training_dataset, validation_dataset, test_dataset = random_split(
dataset,
lengths=(
experiment.training_size,
experiment.validation_size,
experiment.test_size,
),
)
logger.info(
f"dataset split to training ({experiment.training_size}), validation ({experiment.validation_size}), and test ({experiment.test_size}) datasets"
)
# set the batch size equal to the size of the smallest dataset if larger than that
experiment.batch_size = min(
experiment.batch_size,
experiment.training_size,
experiment.validation_size,
experiment.test_size,
)
training_loader = DataLoader(
training_dataset,
batch_size=experiment.batch_size,
shuffle=True,
num_workers=experiment.num_workers,
)
validation_loader = DataLoader(
validation_dataset,
batch_size=experiment.batch_size,
shuffle=True,
num_workers=experiment.num_workers,
)
test_loader = DataLoader(
test_dataset,
batch_size=experiment.batch_size,
shuffle=True,
num_workers=experiment.num_workers,
)
return (training_loader, validation_loader, test_loader)
def train_network(
network,
optimizer,
experiment,
symbols_metadata,
training_loader,
validation_loader,
):
tensorboard_log_dir = f"runs/{experiment.num_symbols}/{experiment.datetime}"
summary_writer = SummaryWriter(log_dir=tensorboard_log_dir)
max_epochs = experiment.max_epochs
criterion = experiment.criterion
checkpoint_path = f"{experiment.experiment_directory}/{experiment.filename}.pth"
logger.info(f"start training, experiment checkpoints saved at {checkpoint_path}")
max_epochs_length = len(str(max_epochs))
num_train_batches = math.ceil(experiment.training_size / experiment.batch_size)
num_batches_length = len(str(num_train_batches))
if not hasattr(experiment, "average_training_losses"):
experiment.average_training_losses = []
if not hasattr(experiment, "average_validation_losses"):
experiment.average_validation_losses = []
experiment.epoch = experiment.num_complete_epochs + 1
epoch_times = []
for epoch in range(experiment.epoch, max_epochs + 1):
epoch_start_time = time.time()
experiment.epoch = epoch
# training
########################################################################
training_losses = []
# https://torchmetrics.readthedocs.io/en/latest/pages/overview.html#metrics-and-devices
train_accuracy = torchmetrics.Accuracy().to(DEVICE)
# set the network in training mode
network.train()
batch_execution_times = []
batch_loading_times = []
pre_batch_loading_time = time.time()
for batch_number, (inputs, labels) in enumerate(training_loader, start=1):
batch_start_time = time.time()
batch_loading_time = batch_start_time - pre_batch_loading_time
if batch_number < num_train_batches:
batch_loading_times.append(batch_loading_time)
inputs, labels = inputs.to(DEVICE), labels.to(DEVICE)
# zero accumulated gradients
network.zero_grad()
# forward pass
output = network(inputs)
# get predicted labels from output
predictions = network.get_predictions(output)
with torch.no_grad():
# get class indexes from the one-hot encoded labels
labels = torch.argmax(labels, dim=1)
# compute training loss
training_loss = criterion(output, labels)
training_losses.append(training_loss.item())
summary_writer.add_scalar("loss/training", training_loss, epoch)
# perform back propagation
training_loss.backward()
# prevent the exploding gradient problem
nn.utils.clip_grad_norm_(network.parameters(), experiment.clip_max_norm)
# perform an optimization step
optimizer.step()
batch_train_accuracy = train_accuracy(predictions, labels)
average_training_loss = np.average(training_losses)
batch_finish_time = time.time()
pre_batch_loading_time = batch_finish_time
batch_execution_time = batch_finish_time - batch_start_time
if batch_number < num_train_batches:
batch_execution_times.append(batch_execution_time)
train_progress = f"epoch {epoch:{max_epochs_length}} batch {batch_number:{num_batches_length}} of {num_train_batches} | average loss: {average_training_loss:.4f} | accuracy: {batch_train_accuracy:.4f} | execution: {batch_execution_time:.2f}s | loading: {batch_loading_time:.2f}s"
logger.info(train_progress)
experiment.num_complete_epochs += 1
average_training_loss = np.average(training_losses)
experiment.average_training_losses.append(average_training_loss)
# validation
########################################################################
num_validation_batches = math.ceil(
experiment.validation_size / experiment.batch_size
)
num_batches_length = len(str(num_validation_batches))
validation_losses = []
validation_accuracy = torchmetrics.Accuracy().to(DEVICE)
# disable gradient calculation
with torch.no_grad():
# set the network in evaluation mode
network.eval()
for batch_number, (inputs, labels) in enumerate(validation_loader, start=1):
inputs, labels = inputs.to(DEVICE), labels.to(DEVICE)
# forward pass
output = network(inputs)
# get predicted labels from output
predictions = network.get_predictions(output)
# get class indexes from the one-hot encoded labels
labels = torch.argmax(labels, dim=1)
# compute validation loss
validation_loss = criterion(output, labels)
validation_losses.append(validation_loss.item())
summary_writer.add_scalar("loss/validation", validation_loss, epoch)
batch_validation_accuracy = validation_accuracy(predictions, labels)
average_validation_loss = np.average(validation_losses)
validation_progress = f"epoch {epoch:{max_epochs_length}} validation batch {batch_number:{num_batches_length}} of {num_validation_batches} | average loss: {average_validation_loss:.4f} | accuracy: {batch_validation_accuracy:.4f}"
logger.info(validation_progress)
average_validation_loss = np.average(validation_losses)
experiment.average_validation_losses.append(average_validation_loss)
total_validation_accuracy = validation_accuracy.compute()
average_batch_execution_time = sum(batch_execution_times) / len(
batch_execution_times
)
average_batch_loading_time = sum(batch_loading_times) / len(batch_loading_times)
epoch_finish_time = time.time()
epoch_time = epoch_finish_time - epoch_start_time
epoch_times.append(epoch_time)
train_progress = f"epoch {epoch:{max_epochs_length}} complete | validation loss: {average_validation_loss:.4f} | validation accuracy: {total_validation_accuracy:.4f} | time: {epoch_time:.2f}s"
logger.info(train_progress)
logger.info(
f"training batch average execution time: {average_batch_execution_time:.2f}s | average loading time: {average_batch_loading_time:.2f}s ({num_train_batches - 1} complete batches)"
)
if experiment.stop_early(
network,
optimizer,
experiment,
symbols_metadata,
average_validation_loss,
checkpoint_path,
):
summary_writer.flush()
summary_writer.close()
break
training_time = sum(epoch_times)
average_epoch_time = training_time / len(epoch_times)
logger.info(
f"total training time: {training_time:.2f}s | epoch average training time: {average_epoch_time:.2f}s ({epoch} epochs)"
)
return checkpoint_path
def test_network(checkpoint_path, print_sample_assignments=False):
"""
Calculate test loss and generate metrics.
"""
experiment, network, _optimizer, _symbols_metadata = load_checkpoint(checkpoint_path)
logger.info("start testing classifier")
logger.info(f"experiment:\n{experiment}")
logger.info(f"network:\n{network}")
# get test dataloader
_, _, test_loader = generate_dataloaders(experiment)
criterion = experiment.criterion
num_test_batches = math.ceil(experiment.test_size / experiment.batch_size)
num_batches_length = len(str(num_test_batches))
test_losses = []
test_accuracy = torchmetrics.Accuracy().to(DEVICE)
test_precision = torchmetrics.Precision(
num_classes=experiment.num_symbols, average="macro"
).to(DEVICE)
test_recall = torchmetrics.Recall(
num_classes=experiment.num_symbols, average="macro"
).to(DEVICE)
with torch.no_grad():
network.eval()
for batch_number, (inputs, labels) in enumerate(test_loader, start=1):
inputs, labels = inputs.to(DEVICE), labels.to(DEVICE)
# forward pass
output = network(inputs)
# get predicted labels from output
predictions = network.get_predictions(output)
# get class indexes from the one-hot encoded labels
labels = torch.argmax(labels, dim=1)
# calculate test loss
test_loss = criterion(output, labels)
test_losses.append(test_loss.item())
batch_accuracy = test_accuracy(predictions, labels)
test_precision(predictions, labels)
test_recall(predictions, labels)
logger.info(
f"test batch {batch_number:{num_batches_length}} of {num_test_batches} | accuracy: {batch_accuracy:.4f}"
)
# log statistics
average_test_loss = np.mean(test_losses)
total_test_accuracy = test_accuracy.compute()
precision = test_precision.compute()
recall = test_recall.compute()
logger.info(
f"testing complete | average loss: {average_test_loss:.4f} | accuracy: {total_test_accuracy:.4f}"
)
logger.info(f"precision: {precision:.4f} | recall: {recall:.4f}")
if print_sample_assignments:
num_sample_assignments = 10
# num_sample_assignments = 20
# num_sample_assignments = 100
with torch.no_grad():
network.eval()
inputs, labels = next(iter(test_loader))
# inputs, labels = inputs.to(DEVICE), labels.to(DEVICE)
inputs = inputs.to(DEVICE)
with torch.random.fork_rng():
torch.manual_seed(time.time() * 1000)
permutation = torch.randperm(len(inputs))
inputs = inputs[permutation[0:num_sample_assignments]]
labels = labels[permutation[0:num_sample_assignments]]
# forward pass
output = network(inputs)
# get predicted labels from output
predictions = network.get_predictions(output)
# get class indexes from the one-hot encoded labels
labels = torch.argmax(labels, dim=1)
# reset logger, add raw messages format
logger.remove()
logger.add(sys.stderr, format="{message}")
log_file_path = pathlib.Path(checkpoint_path).with_suffix(".log")
logger.add(log_file_path, format="{message}")
assignments = network.symbol_mapper.one_hot_to_label(predictions.cpu())
labels = network.symbol_mapper.one_hot_to_label(labels)
logger.info("\nsample assignments")
logger.info("assignment | true label")
logger.info("-----------------------")
for assignment, label in zip(assignments, labels):
if assignment == label:
logger.info(f"{assignment:>10} | {label:>10}")
else:
logger.info(f"{assignment:>10} | {label:>10} !!!")
def assign_symbols(
network,
symbols_metadata,
sequences_fasta,
scientific_name=None,
taxonomy_id=None,
output_directory=None,
):
"""
Use the trained network to assign symbols to the sequences in the FASTA file.
"""
sequences_fasta_path = pathlib.Path(sequences_fasta)
if scientific_name is not None:
taxonomy_id = get_species_taxonomy_id(scientific_name)
clade = get_taxonomy_id_clade(taxonomy_id)
# logger.info(f"got clade {clade} for {scientific_name}")
if output_directory is None:
output_directory = sequences_fasta_path.parent
assignments_csv_path = pathlib.Path(
f"{output_directory}/{sequences_fasta_path.stem}_symbols.csv"
)
# read the FASTA file in chunks and assign symbols
with open(assignments_csv_path, "w+", newline="") as csv_file:
# generate a csv writer, create the CSV file with a header
field_names = ["stable_id", "symbol", "probability", "description", "source"]
csv_writer = csv.writer(csv_file, delimiter="\t", lineterminator="\n")
csv_writer.writerow(field_names)
for fasta_entries in read_fasta_in_chunks(sequences_fasta_path):
if fasta_entries[-1] is None:
fasta_entries = [
fasta_entry
for fasta_entry in fasta_entries
if fasta_entry is not None
]
identifiers = [fasta_entry[0].split(" ")[0] for fasta_entry in fasta_entries]
sequences = [fasta_entry[1] for fasta_entry in fasta_entries]
clades = [clade for _ in range(len(fasta_entries))]
assignments_probabilities = network.predict_probabilities(sequences, clades)
# save assignments and probabilities to the CSV file
for identifier, (assignment, probability) in zip(
identifiers, assignments_probabilities
):
symbol_description = symbols_metadata[assignment]["description"]
symbol_source = symbols_metadata[assignment]["source"]
csv_writer.writerow(
[
identifier,
assignment,
probability,
symbol_description,
symbol_source,
]
)
logger.info(f"symbol assignments saved at {assignments_csv_path}")
def save_network_from_checkpoint(checkpoint_path):
"""
Save the network in a checkpoint file as a separate file.
"""
_experiment, network, _optimizer, _symbols_metadata = load_checkpoint(checkpoint_path)
path = checkpoint_path
network_path = pathlib.Path(f"{path.parent}/{path.stem}_network.pth")
torch.save(network, network_path)
return network_path
def log_pytorch_cuda_info():
"""
Log PyTorch and CUDA info and device to be used.
"""
logger.debug(f"{torch.__version__=}")
logger.debug(f"{DEVICE=}")
logger.debug(f"{torch.version.cuda=}")
logger.debug(f"{torch.backends.cudnn.enabled=}")
logger.debug(f"{torch.cuda.is_available()=}")
if torch.cuda.is_available():
logger.debug(f"{torch.cuda.device_count()=}")
logger.debug(f"{torch.cuda.get_device_properties(DEVICE)}")
def evaluate_network(checkpoint_path, complete=False):
"""
Evaluate a trained network by assigning gene symbols to the protein sequences
of genome assemblies in the latest Ensembl release, and comparing them to the existing
Xref assignments.
Args:
checkpoint_path (Path): path to the experiment checkpoint
complete (bool): Whether or not to run the evaluation for all genome assemblies.
Defaults to False, which runs the evaluation only for a selection of
the most important species genome assemblies.
"""
experiment, network, _optimizer, symbols_metadata = load_checkpoint(checkpoint_path)
symbols_set = set(symbol.lower() for symbol in experiment.symbol_mapper.categories)
assemblies = get_assemblies_metadata()
comparison_statistics_list = []
for assembly in assemblies:
if not complete and assembly.assembly_accession not in selected_genome_assemblies:
continue
canonical_fasta_filename = assembly.fasta_filename.replace(
"pep.all.fa", "pep.all_canonical.fa"
)
canonical_fasta_path = sequences_directory / canonical_fasta_filename
# assign symbols
assignments_csv_path = pathlib.Path(
f"{checkpoint_path.parent}/{canonical_fasta_path.stem}_symbols.csv"
)
if not assignments_csv_path.exists():
logger.info(f"assigning gene symbols to {canonical_fasta_path}")
assign_symbols(
network,
symbols_metadata,
canonical_fasta_path,
scientific_name=assembly.scientific_name,
output_directory=checkpoint_path.parent,
)
comparisons_csv_path = pathlib.Path(
f"{checkpoint_path.parent}/{assignments_csv_path.stem}_compare.csv"
)
if not comparisons_csv_path.exists():
comparison_successful = compare_with_database(
assignments_csv_path,
assembly.core_db,
assembly.scientific_name,
symbols_set,
)
if not comparison_successful:
continue
comparison_statistics = get_comparison_statistics(comparisons_csv_path)
comparison_statistics["scientific_name"] = assembly.scientific_name
comparison_statistics["taxonomy_id"] = assembly.taxonomy_id
comparison_statistics["clade"] = assembly.clade
comparison_statistics_list.append(comparison_statistics)
message = "{}: {} assignments, {} exact matches ({:.2f}%), {} fuzzy matches ({:.2f}%), {} total matches ({:.2f}%)".format(
comparison_statistics["scientific_name"],
comparison_statistics["num_assignments"],
comparison_statistics["num_exact_matches"],
comparison_statistics["matching_percentage"],
comparison_statistics["num_fuzzy_matches"],
comparison_statistics["fuzzy_percentage"],
comparison_statistics["num_total_matches"],
comparison_statistics["total_matches_percentage"],
)
logger.info(message)
dataframe_columns = [
"clade",
"scientific_name",
"num_assignments",
"num_exact_matches",
"matching_percentage",
"num_fuzzy_matches",
"fuzzy_percentage",
"num_total_matches",
"total_matches_percentage",
]
comparison_statistics = pd.DataFrame(
comparison_statistics_list,
columns=dataframe_columns,
)
clade_groups = comparison_statistics.groupby(["clade"])
clade_groups_statistics = []
for clade, group in clade_groups:
with pd.option_context("display.float_format", "{:.2f}".format):
group_string = group.to_string(index=False)
num_assignments_sum = group["num_assignments"].sum()
num_exact_matches_sum = group["num_exact_matches"].sum()
num_fuzzy_matches_sum = group["num_fuzzy_matches"].sum()
num_total_matches_sum = num_exact_matches_sum + num_fuzzy_matches_sum
matching_percentage_weighted_average = (
num_exact_matches_sum / num_assignments_sum
) * 100
fuzzy_percentage_weighted_average = (
num_fuzzy_matches_sum / num_assignments_sum
) * 100
total_percentage_weighted_average = (
num_total_matches_sum / num_assignments_sum
) * 100
averages_message = "{} weighted averages: {:.2f}% exact matches, {:.2f}% fuzzy matches, {:.2f}% total matches".format(
clade,
matching_percentage_weighted_average,
fuzzy_percentage_weighted_average,
total_percentage_weighted_average,
)
clade_statistics = f"{group_string}\n{averages_message}"
clade_groups_statistics.append(clade_statistics)
comparison_statistics_string = "comparison statistics:\n"
comparison_statistics_string += "\n\n".join(
clade_statistics for clade_statistics in clade_groups_statistics
)
logger.info(comparison_statistics_string)
def is_exact_match(symbol_a, symbol_b):
symbol_a = symbol_a.lower()
symbol_b = symbol_b.lower()
if symbol_a == symbol_b:
return "exact_match"
else:
return "no_exact_match"
def is_fuzzy_match(symbol_a, symbol_b):
symbol_a = symbol_a.lower()
symbol_b = symbol_b.lower()
if symbol_a == symbol_b:
return "no_fuzzy_match"
if (symbol_a in symbol_b) or (symbol_b in symbol_a):
return "fuzzy_match"
else:
return "no_fuzzy_match"
def is_known_symbol(symbol, symbols_set):
symbol = symbol.lower()
if symbol in symbols_set:
return "known"
else:
return "unknown"
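# Added worked examples for the helpers above (symbols are illustrative):
#   is_exact_match("BRCA2", "brca2")  -> "exact_match"     (case-insensitive equality)
#   is_fuzzy_match("BRCA2", "brca2")  -> "no_fuzzy_match"  (identical symbols are not fuzzy matches)
#   is_fuzzy_match("BRCA2", "BRCA2A") -> "fuzzy_match"     (one symbol contains the other)
#   is_known_symbol("brca2", {"brca2", "tp53"}) -> "known"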
def compare_with_database(
assignments_csv,
ensembl_database,
scientific_name=None,
symbols_set=None,
EntrezGene=False,
Uniprot_gn=False,
):
"""
Compare classifier assignments with the gene symbols in the genome assembly's
core database (ensembl_database) on the public Ensembl MySQL server.
"""
assignments_csv_path = pathlib.Path(assignments_csv)
canonical_translations = get_xref_canonical_translations(
ensembl_database, EntrezGene=EntrezGene, Uniprot_gn=Uniprot_gn
)
if len(canonical_translations) == 0:
if scientific_name is None:
logger.info("0 canonical translations retrieved, nothing to compare")
else:
logger.info(
f"{scientific_name}: 0 canonical translations retrieved, nothing to compare"
)
return False
comparisons = []
with open(assignments_csv_path, "r", newline="") as assignments_file:
csv_reader = csv.reader(assignments_file, delimiter="\t")
_csv_field_names = next(csv_reader)
for csv_row in csv_reader:
csv_stable_id = csv_row[0]
classifier_symbol = csv_row[1]
probability = csv_row[2]
translation_stable_id = csv_stable_id.split(".")[0]
if (
translation_stable_id
in canonical_translations["translation.stable_id"].values
):
xref_symbol = canonical_translations.loc[
canonical_translations["translation.stable_id"]
== translation_stable_id,
"Xref_symbol",
].values[0]
comparisons.append(
(csv_stable_id, xref_symbol, classifier_symbol, probability)
)
dataframe_columns = [
"csv_stable_id",
"xref_symbol",
"classifier_symbol",
"probability",
]
compare_df = pd.DataFrame(comparisons, columns=dataframe_columns)
compare_df["exact_match"] = compare_df.apply(
lambda x: is_exact_match(x["classifier_symbol"], x["xref_symbol"]),
axis=1,
result_type="reduce",
)
compare_df["fuzzy_match"] = compare_df.apply(
lambda x: is_fuzzy_match(x["classifier_symbol"], x["xref_symbol"]),
axis=1,
result_type="reduce",
)
if symbols_set:
compare_df["known_symbol"] = compare_df.apply(
lambda x: is_known_symbol(x["xref_symbol"], symbols_set),
axis=1,
result_type="reduce",
)
comparisons_csv_path = pathlib.Path(
f"{assignments_csv_path.parent}/{assignments_csv_path.stem}_compare.csv"
)
compare_df.to_csv(comparisons_csv_path, sep="\t", index=False)
return True
def get_comparison_statistics(comparisons_csv_path):
compare_df = pd.read_csv(comparisons_csv_path, sep="\t", index_col=False)
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
# mixed floating and integer coexist in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values('a', axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float('nan')])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([('a', value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = (DataFrame(data)
.sort_values((11, 21))
.sort_values(('a', value), axis=1))
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(3, 1\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(2, 2\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
msg = ("If using all scalar "
"values, you must pass "
"an index")
with pytest.raises(ValueError, match=msg):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame({col: dict(compat.iteritems(val))
for col, val in compat.iteritems(data)})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame['B'].dtype == np.float64
assert frame['A'].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame['B'].dtype == np.object_
assert frame['A'].dtype == np.float64
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame['A'].dtype == np.object_
assert frame['B'].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
assert isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(result, expected, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
# list of periods
df = pd.DataFrame({'a': a.astype(object).tolist(),
'b': b.astype(object).tolist()})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
def test_nested_dict_frame_constructor(self):
rng = pd.period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
# mat: 2d matrix with shape (3, 2) to input. empty - makes sized
# objects
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
assert len(frame.index) == 3
assert len(frame.columns) == 1
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, index=[1, 2])
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
assert len(frame.index) == 0
frame = DataFrame(empty((3, 0)))
assert len(frame.columns) == 0
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert 1.0 == frame['A'][1]
assert 2.0 == frame['C'][2]
# what is this even checking??
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
assert frame.values.dtype == np.float64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'][1]
assert 2 == frame['C'][2]
# masked np.datetime64 stays (use NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert isna(frame).values.all()
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'].view('i8')[1]
assert 2 == frame['C'].view('i8')[2]
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
assert frame.values.dtype == object
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert frame['A'][1] is True
assert frame['C'][2] is False
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill the comb
comb = {k: (v.filled() if hasattr(v, 'filled') else v)
for k, v in comb}
expected = DataFrame(comb, columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result, expected)
# specify columns
expected = DataFrame(comb, columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
expected = DataFrame(comb, columns=names, index=[1, 2])
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
@pytest.mark.parametrize("data, index, columns, dtype, expected", [
(None, lrange(10), ['a', 'b'], object, np.object_),
(None, None, ['a', 'b'], 'int64', np.dtype('int64')),
(None, lrange(10), ['a', 'b'], int, np.dtype('float64')),
({}, None, ['foo', 'bar'], None, np.object_),
({'b': 1}, lrange(10), list('abc'), int, np.dtype('float64'))
])
def test_constructor_dtype(self, data, index, columns, dtype, expected):
df = DataFrame(data, index, columns, dtype)
assert df.values.dtype == expected
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
assert df['int'].dtype == np.int64
assert df['bool'].dtype == np.bool_
assert df['float'].dtype == np.float64
assert df['complex'].dtype == np.complex128
assert df['object'].dtype == np.object_
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
tm.assert_frame_equal(df, exp)
with pytest.raises(ValueError, match='must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
tm.assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
assert dm.values.ndim == 2
arr = randn(0)
dm = DataFrame(arr)
assert dm.values.ndim == 2
assert dm.values.ndim == 2
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
assert dm.values.shape == (10, 2)
dm = DataFrame(columns=['A', 'B'])
assert dm.values.shape == (0, 2)
dm = DataFrame(index=np.arange(10))
assert dm.values.shape == (10, 0)
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with pytest.raises(ValueError, match='cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
'B': np.ones(10, dtype=np.float64)},
index=np.arange(10))
assert len(dm.columns) == 2
assert dm.values.dtype == np.float64
def test_constructor_empty_list(self):
df = DataFrame([], index=[])
expected = DataFrame(index=[])
tm.assert_frame_equal(df, expected)
# GH 9939
df = DataFrame([], columns=['A', 'B'])
expected = DataFrame({}, columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
# Empty generator: list(empty_gen()) == []
def empty_gen():
return
yield
df = DataFrame(empty_gen(), columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
df = DataFrame(data=[[1, 'a'], [2, 'b']], columns=["num", "str"])
assert is_integer_dtype(df['num'])
assert df['str'].dtype == np.object_
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({0: np.arange(10)})
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
        # collections.Sequence like
class DummyContainer(compat.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
            def __len__(self):
return self._lst.__len__()
lst_containers = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]
columns = ["num", "str"]
result = DataFrame(lst_containers, columns=columns)
expected = DataFrame([[1, 'a'], [2, 'b']], columns=columns)
tm.assert_frame_equal(result, expected, check_dtype=False)
# GH 4297
# support Array
import array
result = DataFrame({'A': array.array('i', range(10))})
expected = DataFrame({'A': list(range(10))})
tm.assert_frame_equal(result, expected, check_dtype=False)
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([array.array('i', range(10)),
array.array('i', range(10))])
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_iterable(self):
# GH 21987
class Iter():
def __iter__(self):
for i in range(10):
yield [1, 2, 3]
expected = DataFrame([[1, 2, 3]] * 10)
result = DataFrame(Iter())
tm.assert_frame_equal(result, expected)
def test_constructor_iterator(self):
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([range(10), range(10)])
tm.assert_frame_equal(result, expected)
def test_constructor_generator(self):
# related #2305
gen1 = (i for i in range(10))
gen2 = (i for i in range(10))
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([gen1, gen2])
tm.assert_frame_equal(result, expected)
gen = ([i, 'a'] for i in range(10))
result = DataFrame(gen)
expected = DataFrame({0: range(10), 1: 'a'})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_list_of_dicts(self):
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
result = DataFrame(data)
expected = DataFrame.from_dict(dict(zip(range(len(data)), data)),
orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result = DataFrame([{}])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_preserve_order(self):
# see gh-13304
expected = DataFrame([[2, 1]], columns=['b', 'a'])
data = OrderedDict()
data['b'] = [2]
data['a'] = [1]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
data = OrderedDict()
data['b'] = 2
data['a'] = 1
result = DataFrame([data])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_conflicting_orders(self):
# the first dict element sets the ordering for the DataFrame,
# even if there are conflicting orders from subsequent ones
row_one = OrderedDict()
row_one['b'] = 2
row_one['a'] = 1
row_two = OrderedDict()
row_two['a'] = 1
row_two['b'] = 2
row_three = {'b': 2, 'a': 1}
expected = DataFrame([[2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two])
tm.assert_frame_equal(result, expected)
expected = DataFrame([[2, 1], [2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two, row_three])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series(self):
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(['x', 'y'], data))
idx = Index(['a', 'b', 'c'])
# all named
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx, name='y')]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
# some unnamed
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
sdict = OrderedDict(zip(['x', 'Unnamed 0'], data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result.sort_index(), expected)
# none named
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
data = [Series(d) for d in data]
result = DataFrame(data)
sdict = OrderedDict(zip(range(len(data)), data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result2 = DataFrame(data, index=np.arange(6))
tm.assert_frame_equal(result, result2)
result = DataFrame([Series({})])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(range(len(data)), data))
idx = Index(['a', 'b', 'c'])
data2 = [Series([1.5, 3, 4], idx, dtype='O'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series_aligned_index(self):
series = [pd.Series(i, index=['b', 'a', 'c'], name=str(i))
for i in range(3)]
result = pd.DataFrame(series)
expected = pd.DataFrame({'b': [0, 1, 2],
'a': [0, 1, 2],
'c': [0, 1, 2]},
columns=['b', 'a', 'c'],
index=['0', '1', '2'])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_derived_dicts(self):
class CustomDict(dict):
pass
d = {'a': 1.5, 'b': 3}
data_custom = [CustomDict(d)]
data = [d]
result_custom = DataFrame(data_custom)
result = DataFrame(data)
tm.assert_frame_equal(result, result_custom)
def test_constructor_ragged(self):
data = {'A': randn(10),
'B': randn(8)}
with pytest.raises(ValueError, match='arrays must all be same length'):
DataFrame(data)
def test_constructor_scalar(self):
idx = Index(lrange(3))
df = DataFrame({"a": 0}, index=idx)
expected = DataFrame({"a": [0, 0, 0]}, index=idx)
tm.assert_frame_equal(df, expected, check_dtype=False)
def test_constructor_Series_copy_bug(self):
df = DataFrame(self.frame['A'], index=self.frame.index, columns=['A'])
df.copy()
def test_constructor_mixed_dict_and_Series(self):
data = {}
data['A'] = {'foo': 1, 'bar': 2, 'baz': 3}
data['B'] = Series([4, 3, 2, 1], index=['bar', 'qux', 'baz', 'foo'])
result = DataFrame(data)
assert result.index.is_monotonic
# ordering ambiguous, raise exception
with pytest.raises(ValueError, match='ambiguous ordering'):
DataFrame({'A': ['a', 'b'], 'B': {'a': 'a', 'b': 'b'}})
# this is OK though
result = DataFrame({'A': ['a', 'b'],
'B': Series(['a', 'b'], index=['a', 'b'])})
expected = DataFrame({'A': ['a', 'b'], 'B': ['a', 'b']},
index=['a', 'b'])
tm.assert_frame_equal(result, expected)
def test_constructor_tuples(self):
result = DataFrame({'A': [(1, 2), (3, 4)]})
expected = DataFrame({'A': Series([(1, 2), (3, 4)])})
tm.assert_frame_equal(result, expected)
def test_constructor_namedtuples(self):
# GH11181
from collections import namedtuple
named_tuple = namedtuple("Pandas", list('ab'))
tuples = [named_tuple(1, 3), named_tuple(2, 4)]
expected = DataFrame({'a': [1, 2], 'b': [3, 4]})
result = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
# with columns
expected = DataFrame({'y': [1, 2], 'z': [3, 4]})
result = DataFrame(tuples, columns=['y', 'z'])
tm.assert_frame_equal(result, expected)
def test_constructor_orient(self):
data_dict = self.mixed_frame.T._series
recons = DataFrame.from_dict(data_dict, orient='index')
expected = self.mixed_frame.sort_index()
tm.assert_frame_equal(recons, expected)
# dict of sequence
a = {'hi': [32, 3, 3],
'there': [3, 5, 3]}
rs = DataFrame.from_dict(a, orient='index')
xp = DataFrame.from_dict(a).T.reindex(list(a.keys()))
tm.assert_frame_equal(rs, xp)
def test_from_dict_columns_parameter(self):
# GH 18529
# Test new columns parameter for from_dict that was added to make
# from_items(..., orient='index', columns=[...]) easier to replicate
result = DataFrame.from_dict(OrderedDict([('A', [1, 2]),
('B', [4, 5])]),
orient='index', columns=['one', 'two'])
expected = DataFrame([[1, 2], [4, 5]], index=['A', 'B'],
columns=['one', 'two'])
tm.assert_frame_equal(result, expected)
msg = "cannot use columns parameter with orient='columns'"
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
orient='columns', columns=['one', 'two'])
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
columns=['one', 'two'])
def test_constructor_Series_named(self):
a = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
df = DataFrame(a)
assert df.columns[0] == 'x'
tm.assert_index_equal(df.index, a.index)
# ndarray like
arr = np.random.randn(10)
s = Series(arr, name='x')
df = DataFrame(s)
expected = DataFrame(dict(x=s))
tm.assert_frame_equal(df, expected)
s = Series(arr, index=range(3, 13))
df = DataFrame(s)
expected = DataFrame({0: s})
tm.assert_frame_equal(df, expected)
pytest.raises(ValueError, DataFrame, s, columns=[1, 2])
# #2234
a = Series([], name='x')
df = DataFrame(a)
assert df.columns[0] == 'x'
# series with name and w/o
s1 = Series(arr, name='x')
df = DataFrame([s1, arr]).T
expected = DataFrame({'x': s1, 'Unnamed 0': arr},
columns=['x', 'Unnamed 0'])
tm.assert_frame_equal(df, expected)
# this is a bit non-intuitive here; the series collapse down to arrays
df = DataFrame([arr, s1]).T
expected = DataFrame({1: s1, 0: arr}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
def test_constructor_Series_named_and_columns(self):
# GH 9232 validation
s0 = Series(range(5), name=0)
s1 = Series(range(5), name=1)
# matching name and column gives standard frame
tm.assert_frame_equal(pd.DataFrame(s0, columns=[0]),
s0.to_frame())
tm.assert_frame_equal(pd.DataFrame(s1, columns=[1]),
s1.to_frame())
# non-matching produces empty frame
assert pd.DataFrame(s0, columns=[1]).empty
assert pd.DataFrame(s1, columns=[0]).empty
def test_constructor_Series_differently_indexed(self):
# name
s1 = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
# no name
s2 = Series([1, 2, 3], index=['a', 'b', 'c'])
other_index = Index(['a', 'b'])
df1 = DataFrame(s1, index=other_index)
exp1 = DataFrame(s1.reindex(other_index))
assert df1.columns[0] == 'x'
tm.assert_frame_equal(df1, exp1)
df2 = DataFrame(s2, index=other_index)
exp2 = DataFrame(s2.reindex(other_index))
assert df2.columns[0] == 0
tm.assert_index_equal(df2.index, other_index)
tm.assert_frame_equal(df2, exp2)
def test_constructor_manager_resize(self):
index = list(self.frame.index[:5])
columns = list(self.frame.columns[:3])
result = DataFrame(self.frame._data, index=index,
columns=columns)
tm.assert_index_equal(result.index, Index(index))
tm.assert_index_equal(result.columns, Index(columns))
def test_constructor_from_items(self):
items = [(c, self.frame[c]) for c in self.frame.columns]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(items)
tm.assert_frame_equal(recons, self.frame)
# pass some columns
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(items, columns=['C', 'B', 'A'])
tm.assert_frame_equal(recons, self.frame.loc[:, ['C', 'B', 'A']])
# orient='index'
row_items = [(idx, self.mixed_frame.xs(idx))
for idx in self.mixed_frame.index]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
tm.assert_frame_equal(recons, self.mixed_frame)
assert recons['A'].dtype == np.float64
msg = "Must pass columns with orient='index'"
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items(row_items, orient='index')
# orient='index', but thar be tuples
arr = construct_1d_object_array_from_listlike(
[('bar', 'baz')] * len(self.mixed_frame))
self.mixed_frame['foo'] = arr
row_items = [(idx, list(self.mixed_frame.xs(idx)))
for idx in self.mixed_frame.index]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
tm.assert_frame_equal(recons, self.mixed_frame)
assert isinstance(recons['foo'][0], tuple)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
rs = DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
orient='index',
columns=['one', 'two', 'three'])
xp = DataFrame([[1, 2, 3], [4, 5, 6]], index=['A', 'B'],
columns=['one', 'two', 'three'])
tm.assert_frame_equal(rs, xp)
def test_constructor_from_items_scalars(self):
# GH 17312
msg = (r'The value in each \(key, value\) '
'pair must be an array, Series, or dict')
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', 1), ('B', 4)])
msg = (r'The value in each \(key, value\) '
'pair must be an array, Series, or dict')
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', 1), ('B', 2)], columns=['col1'],
orient='index')
def test_from_items_deprecation(self):
# GH 17320
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
columns=['col1', 'col2', 'col3'],
orient='index')
def test_constructor_mix_series_nonseries(self):
df = DataFrame({'A': self.frame['A'],
'B': list(self.frame['B'])}, columns=['A', 'B'])
tm.assert_frame_equal(df, self.frame.loc[:, ['A', 'B']])
msg = 'does not match index length'
with pytest.raises(ValueError, match=msg):
DataFrame({'A': self.frame['A'], 'B': list(self.frame['B'])[:-2]})
def test_constructor_miscast_na_int_dtype(self):
df = DataFrame([[np.nan, 1], [1, 0]], dtype=np.int64)
expected = DataFrame([[np.nan, 1], [1, 0]])
tm.assert_frame_equal(df, expected)
def test_constructor_column_duplicates(self):
# it works! #2079
df = DataFrame([[8, 5]], columns=['a', 'a'])
edf = DataFrame([[8, 5]])
edf.columns = ['a', 'a']
tm.assert_frame_equal(df, edf)
idf = DataFrame.from_records([(8, 5)],
columns=['a', 'a'])
tm.assert_frame_equal(idf, edf)
pytest.raises(ValueError, DataFrame.from_dict,
OrderedDict([('b', 8), ('a', 5), ('a', 6)]))
def test_constructor_empty_with_string_dtype(self):
# GH 9428
expected = DataFrame(index=[0, 1], columns=[0, 1], dtype=object)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=str)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.str_)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.unicode_)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype='U5')
tm.assert_frame_equal(df, expected)
def test_constructor_single_value(self):
# expecting single value upcasting here
df = DataFrame(0., index=[1, 2, 3], columns=['a', 'b', 'c'])
tm.assert_frame_equal(df,
DataFrame(np.zeros(df.shape).astype('float64'),
df.index, df.columns))
df = DataFrame(0, index=[1, 2, 3], columns=['a', 'b', 'c'])
tm.assert_frame_equal(df, DataFrame(np.zeros(df.shape).astype('int64'),
df.index, df.columns))
df = DataFrame('a', index=[1, 2], columns=['a', 'c'])
tm.assert_frame_equal(df, DataFrame(np.array([['a', 'a'], ['a', 'a']],
dtype=object),
index=[1, 2], columns=['a', 'c']))
pytest.raises(ValueError, DataFrame, 'a', [1, 2])
pytest.raises(ValueError, DataFrame, 'a', columns=['a', 'c'])
msg = 'incompatible data and dtype'
with pytest.raises(TypeError, match=msg):
DataFrame('a', [1, 2], ['a', 'c'], float)
def test_constructor_with_datetimes(self):
intname = np.dtype(np.int_).name
floatname = np.dtype(np.float_).name
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
# single item
df = DataFrame({'A': 1, 'B': 'foo', 'C': 'bar',
'D': Timestamp("20010101"),
'E': datetime(2001, 1, 2, 0, 0)},
index=np.arange(10))
result = df.get_dtype_counts()
expected = Series({'int64': 1, datetime64name: 2, objectname: 2})
result.sort_index()
expected.sort_index()
tm.assert_series_equal(result, expected)
# check with ndarray construction ndim==0 (e.g. we are passing a ndim 0
# ndarray with a dtype specified)
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
floatname: np.array(1., dtype=floatname),
intname: np.array(1, dtype=intname)},
index=np.arange(10))
result = df.get_dtype_counts()
expected = {objectname: 1}
if intname == 'int64':
expected['int64'] = 2
else:
expected['int64'] = 1
expected[intname] = 1
if floatname == 'float64':
expected['float64'] = 2
else:
expected['float64'] = 1
expected[floatname] = 1
result = result.sort_index()
expected = Series(expected).sort_index()
tm.assert_series_equal(result, expected)
# check with ndarray construction ndim>0
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
floatname: np.array([1.] * 10, dtype=floatname),
intname: np.array([1] * 10, dtype=intname)},
index=np.arange(10))
result = df.get_dtype_counts()
result = result.sort_index()
tm.assert_series_equal(result, expected)
# GH 2809
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
datetime_s = Series(datetimes)
assert datetime_s.dtype == 'M8[ns]'
df = DataFrame({'datetime_s': datetime_s})
result = df.get_dtype_counts()
expected = Series({datetime64name: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
# GH 2810
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
dates = [ts.date() for ts in ind]
df = DataFrame({'datetimes': datetimes, 'dates': dates})
result = df.get_dtype_counts()
expected = Series({datetime64name: 1, objectname: 1})
result = result.sort_index()
expected = expected.sort_index()
| tm.assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
"""
Calculate the intermolecular native contacts of each 2KOD monomer residue.
Eventually add options to output per-residue time series and a PDB file with the b-factor or occupancy column replaced.
TODO:
Using MDAnalysis, calculate native/non-native contacts.
Map residue pairs involved onto a scatter or heatmap.
"""
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import MDAnalysis as mda
from MDAnalysis.analysis import contacts
def traj_loader(parm, crd, step=1000):
"""
    Load and return a trajectory as an MDAnalysis Universe.
    Input parameter file, coordinate file, and loading step interval (default=1000).
"""
traj = mda.Universe(parm, crd, in_memory=True, in_memory_step=step, verbose=True)
return traj
def calc_single_residue_contact_residues(universe, selection_1, selection_2, radius=4.5):
"""
Find the residues between selection_1 and selection_2 within radius.
TODO: maybe I can use my previous functions and adapt instead?
Parameters
----------
universe : MDA universe object
Simulation trajectory.
selection_1 : str
Select a single residue.
selection_2 : str
Select the entire region to compare to.
radius : float (optional, default=4.5)
Contacts within this cutoff (Angstrom) are considered.
Returns
-------
total, native, non-native : list?
List of residues within radius.
"""
def contacts_within_cutoff(universe, selection_1, selection_2, radius=4.5, plot=False):
"""
Input universe object, calculate inter-monomer total contacts, plot time series (optional).
"""
# selections for distance matrix calculation
mono1 = universe.select_atoms(selection_1)
mono2 = universe.select_atoms(selection_2)
timeseries = []
for ts in universe.trajectory:
# calculate distances between group_a and group_b
dist = contacts.distance_array(mono1.positions, mono2.positions)
# determine which distances <= radius
n_contacts = contacts.contact_matrix(dist, radius).sum()
timeseries.append([ts.frame, n_contacts])
df = pd.DataFrame(timeseries, columns=['Frame', '# Contacts'])
if plot is True:
average_contacts = np.mean(df.iloc[:, 1])
# plot time series contacts(t)
fig, ax = plt.subplots()
ax.plot(df.iloc[:, 0], df.iloc[:, 1])
ax.set(xlabel='frame', ylabel='number of contacts',
title='Number of Contacts, average = {:.2f}'.format(average_contacts))
return df
def fraction_native_contacts(universe, selection_1, selection_2, plot=False, method="soft_cut", ref=None):
"""
Input universe object, calculate inter-monomer native contacts, plot time series (optional).
"""
# reference groups: option to use separate pdb file
if ref is not None:
mono1 = traj_loader(ref, ref, step=1).select_atoms(selection_1)
mono2 = traj_loader(ref, ref, step=1).select_atoms(selection_2)
# reference groups: option to use first frame of trajectory
elif ref is None:
mono1 = universe.select_atoms(selection_1)
mono2 = universe.select_atoms(selection_2)
# set up analysis of native contacts
nc = contacts.Contacts(universe, selection=(selection_1, selection_2), refgroup=(mono1, mono2),
method=method, radius=4.5).run()
# save as 2 column pd df
nc_df = pd.DataFrame(nc.timeseries, columns=['Frame', 'Fraction Native Contacts'])
#print(nc.contact_matrix)
if plot is True:
average_contacts = np.mean(nc.timeseries[:, 1])
# plot time series q(t)
fig, ax = plt.subplots()
ax.plot(nc.timeseries[:, 0], nc.timeseries[:, 1])
ax.set(xlabel='frame', ylabel='fraction of native contacts', ylim=(0,1),
title='Native Contacts, average = {:.2f}'.format(average_contacts))
plt.show()
return nc_df
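# Usage sketch (file names and selection strings below are placeholders, not from
# the original script):
#   u = traj_loader("dimer.prmtop", "dimer.nc", step=100)
#   nc_df = fraction_native_contacts(u, "resnum 6:75 and not name H*",
#                                    "resnum 94:163 and not name H*", plot=True)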
def per_residue_contacts_time_series(universe, selection_1, selection_2, datatype, method="soft_cut", ref=None):
"""
    Parameters: universe object, selection terms 1 and 2. Datatype must be 'fraction' (NC) or 'total' (contacts).
Returns: df of per residue `datatype` contacts time series.
"""
hxb2_seq = "PIVQNIQGQMVHQAISPRTLNAWVKVVEEKAFSPEVIPMFSALSEGATPQDLNTMLNTVGGHQAAMQMLKETINEEAAEWDRVHPVHAGPIAPGQMREPRGSDIAGTTSTLQEQIGWMTNNPPIPVGEIYKRWIILGLNKIVRMYSPTSILDIRQGPKEPFRDYVDRFYKTLRAEQASQEVKNWMTETLLVQNANPDCKTILKALGPAATLEEMMTACQGVGGPGHKARVL"
seq_dict = dict(zip(range(1, 232), [char for char in hxb2_seq]))
# empty df to be filled in for heatmap
mono1_df = pd.DataFrame()
mono2_df = pd.DataFrame()
# calculate fraction native contacts per residue (monomer 1 - last 20?)
for res in range(6, 76):
if datatype == "fraction":
res_df = fraction_native_contacts(universe, f"resnum {res} and not name H*", selection_2,
method=method, ref=ref)
elif datatype == "total":
res_df = contacts_within_cutoff(universe, f"resnum {res} and not name H*", selection_2)
# canonical numbering correction of + 143 (canonical CTD - 2KOD starts at 144: res 6 = S149)
mono1_df[seq_dict[res + 143] + str(res + 143)] = res_df.iloc[:, 1] # all rows, second col
# fill NaN values with 0: no inter monomer native contacts along those residues
mono1_df = mono1_df.fillna(0).transpose()
# calculate fraction native contacts per residue (monomer 2 - last 20?)
for res in range(94, 164):
if datatype == "fraction":
res_df = fraction_native_contacts(universe, selection_1, f"resnum {res} and not name H*",
method=method, ref=ref)
elif datatype == "total":
res_df = contacts_within_cutoff(universe, selection_1, f"resnum {res} and not name H*")
# canonical numbering correction: + 143 (canonical CTD) - 88 (monomer 2 numbering)
mono2_df[seq_dict[res + 143 - 88] + str(res + 143 - 88)] = res_df.iloc[:, 1] # all rows, second col
# fill NaN values with 0: no inter monomer contacts along those residues
mono2_df = mono2_df.fillna(0).transpose()
# returns heatmap ready dataframes for monomer 1 and monomer 2 residues
return mono1_df, mono2_df
def contacts_heatmap(monomer_df_1, monomer_df_2, datatype):
"""
Plots a heatmap of both monomer 1 and 2.
Datatype must be "total" or "fraction" contacts data.
"""
# format and plot heatmap
fig, ax = plt.subplots(1, 3, sharex=False, sharey=False, figsize=(12,8),
gridspec_kw={'width_ratios' : [20, 20, 1.5]})
if datatype == "fraction":
cbar_label = "Fraction of Native Contacts"
elif datatype == "total":
cbar_label = "Amount of Total Contacts"
cbar_color = "viridis"
a = sns.heatmap(monomer_df_1, ax=ax[0],
cmap=cbar_color, cbar=False,
xticklabels=25, yticklabels=1)
a.set_yticklabels(a.get_ymajorticklabels(), size=8)
ax[0].set(xlabel="Frame", ylabel="Residue Index", title="CTD Monomer 1")
b = sns.heatmap(monomer_df_2, ax=ax[1],
cmap=cbar_color, cbar_kws={'label': cbar_label}, cbar_ax=ax[2],
xticklabels=25, yticklabels=1)
b.set_yticklabels(b.get_ymajorticklabels(), size=8)
ax[1].set(xlabel="Frame", title="CTD Monomer 2")
plt.tight_layout()
plt.show()
#plt.savefig(f"figures/per_res_{datatype}_contacts.png", dpi=300)
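# End-to-end sketch tying the two steps together (u as in the usage sketch above,
# sel_mono1/sel_mono2 the two monomer selections; not part of the original script):
#   m1, m2 = per_residue_contacts_time_series(u, sel_mono1, sel_mono2,
#                                             datatype="fraction")
#   contacts_heatmap(m1, m2, datatype="fraction")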
def timeseries_to_average_contacts(dataframe):
"""
Input: dataframe - rows = residue index, column = time series data.
    Returns: 1-column dataframe - rows = residue index, column = average contacts data.
    TODO: probably don't need this as a separate function.
"""
df = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[11]:
import pandas as pd
import numpy as np
import glob,os
from glob import iglob
#import scanpy as sc
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import RocCurveDisplay
from sklearn.datasets import load_wine
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
import os
import matplotlib as mpl
#os.environ["KMP_DUPLICATE_LIB_OK"] = "FALSE"
import time
import random
mpl.rcParams['pdf.fonttype']=42
mpl.rcParams['ps.fonttype']=42
# # single cell sle part
# In[2]:
features=pd.read_csv('./combined_gene_for_machine_learning.csv',index_col=1)
features=np.append(features.index.values,'patient')
features=np.delete(features,[3,7,16,17,18,76,78,79])
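# 'features' now holds the gene identifiers used as model inputs plus a trailing
# 'patient' label entry; a handful of unwanted entries were dropped by position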
# In[3]:
path = '../GSE135779_SLE/test/aHD/'
file = glob.glob(os.path.join(path, "*.csv.gz"))
hd = []
for f in file:
hd.append(pd.read_csv(f,index_col=0).loc[features[1:80],:].T)
for i in range(len(hd)):
hd[i]['patient']=0
# In[4]:
path = '../GSE135779_SLE/test/cHD/'
file = glob.glob(os.path.join(path, "*.csv.gz"))
chd = []
for f in file:
chd.append(pd.read_csv(f,index_col=0).loc[features[1:80],:].T)
for i in range(len(chd)):
chd[i]['patient']=0
# In[5]:
hd_m=hd[0]
for i in range(1,len(hd)):
hd_m=pd.concat([hd_m,hd[i]],axis=0)
# In[6]:
chd_m=chd[0]
for i in range(1,len(chd)):
chd_m=pd.concat([chd_m,chd[i]],axis=0)
# In[7]:
path = '../GSE135779_SLE/test/aSLE/'
file = glob.glob(os.path.join(path, "*.csv.gz"))
asle = []
for f in file:
asle.append(pd.read_csv(f,index_col=0).loc[features[1:80],:].T)
for i in range(len(asle)):
asle[i]['patient']=1
asle_m=asle[0]
for i in range(1,len(asle)):
asle_m=pd.concat([asle_m,asle[i]],axis=0)
# In[8]:
path = '../GSE135779_SLE/test/cSLE/'
file = glob.glob(os.path.join(path, "*.csv.gz"))
csle = []
for f in file:
csle.append( | pd.read_csv(f,index_col=0) | pandas.read_csv |
'''
Model sets
=================
'''
import pyomo.environ as po
import pyomo.core.base.sets as poset
import pandas as pd
import numpy as np
from grimsel.auxiliary.aux_general import silence_pd_warning
from grimsel.auxiliary.aux_m_func import cols2tuplelist
from grimsel import _get_logger
logger = _get_logger(__name__)
DICT_SETS_DOC = {r'sy': r'model time slots : df_tm_soy : t',
r'ppall': r'all power plant types : df_def_plant : p',
r'pp': r'dispatchable power plants with fuels : df_def_plant : p',
r'st': r'storage plants : df_def_plant : p',
r'pr': r'variable renewables with fixed profiles : df_def_plant : p',
r'ror': r'run-of-river plants : df_def_plant : p',
r'lin': r'dispatchable plants with linear supply curve : df_def_plant : p',
r'hyrs': r'hydro reservoirs : df_def_plant : p',
'chp': r'plants with co-generation : df_def_plant : p',
r'add': r'plants with capacity additions : df_def_plant : p',
r'rem': r'plants with capacity retirements : df_def_plant : p',
r'curt': r'dedicated curtailment technology : df_def_plant : p',
r'sll': r'plants selling produced energy carriers : df_def_plant : p',
r'rp': r'dispatchable plants with ramping costs : df_def_plant : p',
r'ppall_nd': r'combined :math:`\mathrm{ppall\times nd}` set; equivalent for all subsets of :math:`\mathrm{ppall}` : df_def_plant : (p,n)',
r'ppall_ndca': r'combined :math:`\mathrm{ppall\times nd\times ca}` set; equivalent for all subsets of :math:`\mathrm{ppall}` : merge(df_def_plant, df_plant_encar) : (p,n,c)',
r'ppall_ndcafl': r'combined :math:`\mathrm{ppall\times nd\times ca\times fl}` set; equivalent for all subsets of :math:`\mathrm{ppall}` : merge(df_def_plant, df_plant_encar) : (p,n,c,f)',
r'pp_ndcafl_sll': r'"fuels" sold by power plants :math:`\mathrm{pp}` consuming energy carrier :math:`\mathrm{ca}` : merge(df_def_plant, df_plant_encar) : (p,n,c,f)',
r'sy_hydbc\subset sy': r'Time slots with exogenously defined storage level boundary conditions. : df_plant_month : t',
r'mt': r'months : df_plant_month : m',
r'wk': r'weeks : df_plant_week : w',
r'ndcnn': r'combined node sets :math:`\mathrm{nd\times nd\times ca}` for inter-nodal transmission : df_node_connect : (n,n_2,c)',
r'symin_ndcnn': r'combined node sets :math:`\mathrm{sy\times nd\times nd\times ca}` for inter-nodal transmission : merge(df_tm_soy, df_node_connect) : (t,n,n_2,c)',
r'fl_erg': r'fuels with energy production constraints : df_def_fuel : f',
r'tm': r'time maps : df_tm_soy : \tau',
r'tmsy': r'combination of time maps and slots : df_tm_soy : (\tau,t)',
r'tmsy_mt': r'all relevant combinations :math:`\mathrm{tm\times sy\times mt}` : df_tm_soy : (\tau,t,m)',
r'sy_ppall_ca': r'combined :math:`\mathrm{sy\times ppall\times nd}` set; equivalent for all subsets of :math:`\mathrm{ppall}` : merge(df_plant_encar, df_tm_soy) : (t,p,c)',
r'nd': r'Nodes : df_def_node : n',
r'ca': r'Output energy carriers : df_def_encar : c',
r'fl': r'Fuels : df_def_fuel : f',
r'ndcafl': r'Relevant combinations of nodes, produced energy carriers, and fuels : df_node_fuel_encar : (n,c,f)',
r'pf': r'Profiles (demand, supply, price, etc) : df_def_profile : \phi',
r'sy_ndca': r'Combined :math:`\mathrm{sy\times nd\times ca}` set : merge(df_node_encar, df_tm_soy) : (t,n,c)',
r'pp_ndcaca': r'combined :math:`\mathrm{pp\times nd\times ca\times ca}` set describing plants which convert one produced energy carrier into another : merge(df_def_encar, df_def_plant, df_plant_encar) : (p,n,c_{out},c)'
}
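# Each entry above packs three ' : '-separated fields (description, source input
# table(s), index symbol(s)); presumably _get_set_docs below splits these to
# build the documentation table.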
class Sets:
'''
Mixin class for set definition.
'''
# base power plant subsets
slct_sets = ['ppall', 'pp', 'st', 'pr', 'ror', 'lin',
'hyrs', 'chp', 'add', 'rem',
'curt', 'sll', 'rp']
def define_sets(self):
r'''
Add all required sets to the model.
Adds sets as defined by
* the ``setlst`` dictionary initialized in the :func:`get_setlst`
method
* the DataFrame attributes of the :class:`ModelBase` class for more
complex derived sets
%s
'''
self.nd = po.Set(initialize=self.setlst['nd'])
self.ca = po.Set(initialize=self.setlst['ca'])
self.fl = po.Set(initialize=self.setlst['fl'])
self.pf = po.Set(initialize=self.setlst['pf'])
df_ndca = self.df_def_plant[['pp_id', 'nd_id']].set_index('pp_id')
df_ndca = self.df_plant_encar[['pp_id', 'ca_id']].join(df_ndca,
on='pp_id')
df_ndca = df_ndca[['pp_id', 'nd_id', 'ca_id']]
slct_cols = ['pp_id', 'ca_id']
for iset in self.slct_sets:
logger.info('Defining basic sets for {}'.format(iset))
''' SUB SETS PP'''
setattr(self, iset,
po.Set(within=(None if iset == 'ppall' else self.ppall),
initialize=self.setlst[iset])
if iset in self.setlst.keys()
else po.Set(within=self.ppall, initialize=[]))
''' SETS PP x ENCAR '''
_df = self.df_plant_encar.copy()
_df = _df.loc[_df['pp_id'].isin(getattr(self, iset))]
setattr(self, iset + '_ca',
po.Set(within=getattr(self, iset) * self.ca,
initialize=cols2tuplelist(_df[slct_cols])))
''' SETS PP x ND x ENCAR '''
_df = df_ndca.copy()
_df = _df.loc[df_ndca['pp_id'].isin(self.setlst[iset]
if iset in self.setlst.keys() else [])]
setattr(self, iset + '_ndca',
po.Set(within=getattr(self, iset) * self.nd * self.ca,
initialize=cols2tuplelist(_df)))
# no scf fuels in the _cafl and _ndcafl
# These are used to calculate fuel costs, that's why we don't want
# the generated fuels in there.
df_0 = self.df_def_plant[['pp_id', 'nd_id', 'fl_id']]
df_0 = df_0.set_index('pp_id')
df_0 = self.df_plant_encar[['pp_id', 'ca_id']].join(df_0, on='pp_id')
df_0 = df_0.loc[df_0.fl_id.isin(self.setlst['fl'])]
list_sets = ['ppall', 'hyrs', 'pp', 'chp', 'ror', 'st', 'lin']
# list_sets = [st for st in list_sets if st in self.setlst.keys()]
for iset in list_sets:
if iset in self.setlst:
cols_ppcafl = ['pp_id', 'ca_id', 'fl_id']
df = df_0.loc[df_0['pp_id'].isin(self.setlst[iset]),
cols_ppcafl]
new_set = po.Set(within=getattr(self, iset) * self.ca * self.fl,
initialize=cols2tuplelist(df))
setattr(self, iset + '_cafl', new_set)
slct_cols_ppndcafl = ['pp_id', 'nd_id', 'ca_id', 'fl_id']
df = df_0.loc[df_0.pp_id.isin(self.setlst[iset]),
slct_cols_ppndcafl]
setattr(self, iset + '_ndcafl',
po.Set(within=(getattr(self, iset) * self.nd
* self.ca * self.fl),
initialize=cols2tuplelist(df)))
else:
new_set_cafl = po.Set(within=getattr(self, iset) * self.ca * self.fl,
initialize=[])
setattr(self, iset + '_cafl', new_set_cafl)
new_set_ndcafl = po.Set(within=getattr(self, iset) * self.nd * self.ca * self.fl,
initialize=[])
setattr(self, iset + '_ndcafl', new_set_ndcafl)
# plants selling fuels ... only ppall, therefore outside the loop
lst = cols2tuplelist(df.loc[df.pp_id.isin(self.setlst['sll']
if 'sll' in self.setlst
else [])])
setattr(self, 'pp_ndcafl_sll',
po.Set(within=self.pp_ndcafl, initialize=lst))
# temporal
self.sy = po.Set(initialize=list(self.df_tm_soy.sy.unique()),
ordered=True)
self.sy_hydbc = (po.Set(within=self.sy,
initialize=set(self.df_plant_month.sy))
if not self.df_plant_month is None else None)
self.mt = (po.Set(initialize=list(self.df_def_month['mt_id']))
if not self.df_def_month is None else None)
self.wk = (po.Set(initialize=list(self.df_def_week['wk_id']))
if not self.df_def_week is None else None)
# pp_cacafcl; used to account for conversions of ca in the supply rule
if 'fl_id' in self.df_def_encar:
df_cafl = self.df_def_encar.set_index('fl_id')['ca_id']
df_cafl = df_cafl.rename('ca_fl_id')
df_ppca = self.df_plant_encar.set_index('pp_id')['ca_id']
df = (self.df_def_plant.join(df_ppca, on='pp_id')
.join(df_cafl, on='fl_id'))
df = df.loc[-df.ca_fl_id.isnull()
& -df.ca_id.isnull(), ['pp_id', 'nd_id', 'ca_id',
'ca_fl_id']]
self.pp_ndcaca = po.Set(within=self.pp_ndca * self.ca,
initialize=cols2tuplelist(df))
else:
self.pp_ndcaca = None
# inter-node connections
if not self.df_node_connect is None and not self.df_node_connect.empty:
df = self.df_node_connect[['nd_id', 'nd_2_id', 'ca_id']]
self.ndcnn = po.Set(within=self.nd * self.nd * self.ca,
initialize=cols2tuplelist(df), ordered=True)
df = self.df_symin_ndcnn[['symin', 'nd_id', 'nd_2_id', 'ca_id']]
self.symin_ndcnn = po.Set(within=self.sy * self.nd
* self.nd * self.ca,
initialize=cols2tuplelist(df),
ordered=True)
else:
self.ndcnn = po.Set(within=self.nd * self.nd * self.ca)
self.symin_ndcnn = po.Set(within=self.sy * self.nd
* self.nd * self.ca)
# ndca for electricity only; mainly used for flexible demand;
# then again: why would only EL have flexible demand?
df = pd.concat([pd.Series(self.slct_node_id, name='nd_id'),
pd.Series(np.ones(len(self.slct_node_id))
* self.mps.dict_ca_id['EL'],
name='ca_id')], axis=1)
self.ndca_EL = po.Set(within=self.nd * self.ca,
initialize=cols2tuplelist(df), ordered=True)
# general ndca
df = self.df_node_encar[['nd_id', 'ca_id']].drop_duplicates()
self.ndca = po.Set(within=self.nd * self.ca,
initialize=cols2tuplelist(df), ordered=True)
# general ndcafl
if not self.df_fuel_node_encar is None:
df = self.df_fuel_node_encar[['nd_id', 'ca_id', 'fl_id']]
self.ndcafl = po.Set(within=self.nd * self.ca * self.fl,
initialize=cols2tuplelist(df), ordered=True)
else:
self.ndcafl = None
# fuels with energy constraints
if 'is_constrained' in self.df_def_fuel:
lst = self.df_def_fuel.loc[self.df_def_fuel.is_constrained==1,
'fl_id'].tolist()
self.fl_erg = po.Set(within=self.fl, initialize=lst, ordered=True)
else:
self.fl_erg = po.Set(within=self.fl, initialize=[])
# set pf_id for profiles
for pf_set in ['dmnd_pf', 'supply_pf', 'pricesll_pf', 'pricebuy_pf']:
setattr(self, pf_set,
po.Set(within=self.pf, initialize=self.setlst[pf_set],
ordered=True))
self._init_tmsy_sets()
def _init_tmsy_sets(self):
'''
The plant ids and the time slots are connected
through the node-specific time resolution.
'''
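        # in other words: each node maps to one time map via dict_nd_tm_id, and a
        # plant inherits the time slots of its node; that is what the sy_ndca and
        # sy_*_ca sets built below encode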
self.tm = po.Set(initialize=self.df_tm_soy.tm_id.unique(),
ordered=True)
list_tmsy = cols2tuplelist(self.df_tm_soy[['tm_id', 'sy']])
self.tmsy = po.Set(within=self.tm*self.sy, initialize=list_tmsy,
ordered=True)
# only constructed if self.mt exists
self.tmsy_mt = (po.Set(within=self.tmsy * self.mt,
initialize=cols2tuplelist(
self.df_tm_soy[['tm_id', 'sy', 'mt_id']]))
if not self.mt is None else None)
df = pd.merge(self.df_def_node, self.df_node_encar,
on='nd_id', how='outer')[['nd_id', 'ca_id']]
df = df.loc[~df.ca_id.isna()].drop_duplicates()
df['tm_id'] = df.nd_id.replace(self.dict_nd_tm_id)
cols = ['sy', 'nd_id', 'ca_id']
list_syndca = pd.merge(self.df_tm_soy[['tm_id', 'sy']],
df, on='tm_id', how='outer')[cols]
self.sy_ndca = po.Set(within=self.sy*self.ndca, ordered=True,
initialize=cols2tuplelist(list_syndca))
mask_pp = self.df_plant_encar.pp_id.isin(self.setlst['ppall'])
df = self.df_plant_encar.loc[mask_pp, ['pp_id', 'ca_id']].copy()
df['tm_id'] = (df.pp_id
.replace(self.mps.dict_plant_2_node_id)
.replace(self.dict_nd_tm_id))
cols = ['sy', 'pp_id', 'ca_id']
list_syppca = pd.merge(self.df_tm_soy[['sy', 'tm_id']],
df, on='tm_id', how='outer')[cols]
list_syppca = list_syppca.loc[~(list_syppca.pp_id.isna()
| list_syppca.ca_id.isna())]
list_syppca = cols2tuplelist(list_syppca)
for slct_set in ['ppall', 'rp', 'st', 'hyrs', 'pr',
'pp', 'chp', 'ror', 'lin']:
set_name = 'sy_%s_ca'%slct_set
within = self.sy * getattr(self, slct_set) * self.ca
if slct_set in self.setlst:
logger.info('Defining set ' + set_name)
set_pp = set(self.setlst[slct_set])
setattr(self, set_name,
po.Set(within=within, ordered=True,
initialize=[row for row in list_syppca
if row[1] in set_pp]))
else:
setattr(self, set_name, po.Set(within=within, initialize=[]))
def get_setlst(self):
'''
Lists of indices for all model components are extracted from the
input tables and stored in a dictionary ``ModelBase.setlst``.
For the most part power plant subset definitions are based on the
binary columns *set_def_..* in the ``df_def_plant`` input table.
'''
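        # Illustrative example (made-up numbers, not model input): a df_def_plant
        # with pp_id = [0, 1, 2] and set_def_st = [0, 1, 1] makes the loop below
        # produce setlst['st'] == [1, 2]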
# define lists for set initialization
self.setlst = {st: [] for st in self.slct_sets}
df = self.df_def_plant
qry = ' & '.join(['{} == 0'.format(sd)
for sd in ('set_def_tr', 'set_def_dmd')
if sd in df.columns])
self.setlst['ppall'] = (df.query(qry).pp_id.tolist())
for ippset in df.columns[df.columns.str.contains('set_def')]:
# Note: index starting at 8 removes prefix set_def_ from col name
self.setlst[ippset[8:]] = df.loc[df[ippset] == 1, 'pp_id'].tolist()
mask_node = self.df_def_node['nd_id'].isin(self.slct_node_id)
self.setlst['nd'] = self.df_def_node.loc[mask_node]['nd_id'].tolist()
self.setlst['ca'] = self.df_def_encar.ca_id.tolist()
# fuels are bought fuels only, not generated encars used as input
df = self.df_def_fuel.copy()
self.setlst['fl'] = df.fl_id.tolist()
for col, df in [('supply_pf_id', self.df_plant_encar),
('pricesll_pf_id', self.df_fuel_node_encar),
('pricebuy_pf_id', self.df_fuel_node_encar),
('dmnd_pf_id', self.df_node_encar)]:
if col in df.columns:
df_ = df.copy()
df_ = df_.loc[-df_[col].isna(), col]
self.setlst[col.replace('_id', '')] = df_.unique().tolist()
else:
self.setlst[col.replace('_id', '')] = []
self.setlst['pf'] = (self.setlst['dmnd_pf']
+ self.setlst['pricesll_pf']
+ self.setlst['pricebuy_pf']
+ self.setlst['supply_pf'])
self.setlst['rp'] = ((self.setlst['pp'] if 'pp' in self.setlst else [])
+ (self.setlst['ror'] if 'ror' in self.setlst else [])
+ (self.setlst['hyrs'] if 'hyrs' in self.setlst else [])
+ (self.setlst['st'] if 'st' in self.setlst else []))
@silence_pd_warning
@staticmethod
def _get_set_docs():
'''
Convenience method to extract all set docs from a :class:`ModelBase`
instance.
'''
import tabulate
to_math = lambda x: ':math:`\mathrm{%s}`'%x
comb_sets = ['ndcnn', 'tmsy']
cols = ['Set', 'Members', 'Description', 'Source table']
df_doc = | pd.Series(DICT_SETS_DOC) | pandas.Series |
import unittest
import qteasy as qt
import pandas as pd
from pandas import Timestamp
import numpy as np
import math
from numpy import int64
import itertools
import datetime
from qteasy.utilfuncs import list_to_str_format, regulate_date_format, time_str_format, str_to_list
from qteasy.utilfuncs import maybe_trade_day, is_market_trade_day, prev_trade_day, next_trade_day
from qteasy.utilfuncs import next_market_trade_day, unify, mask_to_signal, list_or_slice, labels_to_dict
from qteasy.utilfuncs import weekday_name, prev_market_trade_day, is_number_like, list_truncate, input_to_list
from qteasy.space import Space, Axis, space_around_centre, ResultPool
from qteasy.core import apply_loop
from qteasy.built_in import SelectingFinanceIndicator, TimingDMA, TimingMACD, TimingCDL, TimingTRIX
from qteasy.tsfuncs import income, indicators, name_change, get_bar
from qteasy.tsfuncs import stock_basic, trade_calendar, new_share, get_index
from qteasy.tsfuncs import balance, cashflow, top_list, index_indicators, composite
from qteasy.tsfuncs import future_basic, future_daily, options_basic, options_daily
from qteasy.tsfuncs import fund_basic, fund_net_value, index_basic, stock_company
from qteasy.evaluate import eval_alpha, eval_benchmark, eval_beta, eval_fv
from qteasy.evaluate import eval_info_ratio, eval_max_drawdown, eval_sharp
from qteasy.evaluate import eval_volatility
from qteasy.tafuncs import bbands, dema, ema, ht, kama, ma, mama, mavp, mid_point
from qteasy.tafuncs import mid_price, sar, sarext, sma, t3, tema, trima, wma, adx, adxr
from qteasy.tafuncs import apo, bop, cci, cmo, dx, macd, macdext, aroon, aroonosc
from qteasy.tafuncs import macdfix, mfi, minus_di, minus_dm, mom, plus_di, plus_dm
from qteasy.tafuncs import ppo, roc, rocp, rocr, rocr100, rsi, stoch, stochf, stochrsi
from qteasy.tafuncs import trix, ultosc, willr, ad, adosc, obv, atr, natr, trange
from qteasy.tafuncs import avgprice, medprice, typprice, wclprice, ht_dcperiod
from qteasy.tafuncs import ht_dcphase, ht_phasor, ht_sine, ht_trendmode, cdl2crows
from qteasy.tafuncs import cdl3blackcrows, cdl3inside, cdl3linestrike, cdl3outside
from qteasy.tafuncs import cdl3starsinsouth, cdl3whitesoldiers, cdlabandonedbaby
from qteasy.tafuncs import cdladvanceblock, cdlbelthold, cdlbreakaway, cdlclosingmarubozu
from qteasy.tafuncs import cdlconcealbabyswall, cdlcounterattack, cdldarkcloudcover
from qteasy.tafuncs import cdldoji, cdldojistar, cdldragonflydoji, cdlengulfing
from qteasy.tafuncs import cdleveningdojistar, cdleveningstar, cdlgapsidesidewhite
from qteasy.tafuncs import cdlgravestonedoji, cdlhammer, cdlhangingman, cdlharami
from qteasy.tafuncs import cdlharamicross, cdlhighwave, cdlhikkake, cdlhikkakemod
from qteasy.tafuncs import cdlhomingpigeon, cdlidentical3crows, cdlinneck
from qteasy.tafuncs import cdlinvertedhammer, cdlkicking, cdlkickingbylength
from qteasy.tafuncs import cdlladderbottom, cdllongleggeddoji, cdllongline, cdlmarubozu
from qteasy.tafuncs import cdlmatchinglow, cdlmathold, cdlmorningdojistar, cdlmorningstar
from qteasy.tafuncs import cdlonneck, cdlpiercing, cdlrickshawman, cdlrisefall3methods
from qteasy.tafuncs import cdlseparatinglines, cdlshootingstar, cdlshortline, cdlspinningtop
from qteasy.tafuncs import cdlstalledpattern, cdlsticksandwich, cdltakuri, cdltasukigap
from qteasy.tafuncs import cdlthrusting, cdltristar, cdlunique3river, cdlupsidegap2crows
from qteasy.tafuncs import cdlxsidegap3methods, beta, correl, linearreg, linearreg_angle
from qteasy.tafuncs import linearreg_intercept, linearreg_slope, stddev, tsf, var, acos
from qteasy.tafuncs import asin, atan, ceil, cos, cosh, exp, floor, ln, log10, sin, sinh
from qteasy.tafuncs import sqrt, tan, tanh, add, div, max, maxindex, min, minindex, minmax
from qteasy.tafuncs import minmaxindex, mult, sub, sum
from qteasy.history import get_financial_report_type_raw_data, get_price_type_raw_data
from qteasy.history import stack_dataframes, dataframe_to_hp, HistoryPanel
from qteasy.database import DataSource
from qteasy.strategy import Strategy, SimpleTiming, RollingTiming, SimpleSelecting, FactoralSelecting
from qteasy._arg_validators import _parse_string_kwargs, _valid_qt_kwargs
from qteasy.blender import _exp_to_token, blender_parser, signal_blend
class TestCost(unittest.TestCase):
def setUp(self):
self.amounts = np.array([10000., 20000., 10000.])
self.op = np.array([0., 1., -0.33333333])
self.amounts_to_sell = np.array([0., 0., -3333.3333])
self.cash_to_spend = np.array([0., 20000., 0.])
self.prices = np.array([10., 20., 10.])
self.r = qt.Cost(0.0)
def test_rate_creation(self):
"""测试对象生成"""
print('testing rates objects\n')
self.assertIsInstance(self.r, qt.Cost, 'Type should be Rate')
self.assertEqual(self.r.buy_fix, 0)
self.assertEqual(self.r.sell_fix, 0)
def test_rate_operations(self):
"""测试交易费率对象"""
self.assertEqual(self.r['buy_fix'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['sell_fix'], 0.0, 'Item got is wrong')
self.assertEqual(self.r['buy_rate'], 0.003, 'Item got is incorrect')
self.assertEqual(self.r['sell_rate'], 0.001, 'Item got is incorrect')
self.assertEqual(self.r['buy_min'], 5., 'Item got is incorrect')
self.assertEqual(self.r['sell_min'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['slipage'], 0.0, 'Item got is incorrect')
self.assertEqual(np.allclose(self.r.calculate(self.amounts),
[0.003, 0.003, 0.003]),
True,
'fee calculation wrong')
def test_rate_fee(self):
"""测试买卖交易费率"""
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 0.
self.r.sell_min = 0.
self.r.slipage = 0.
print('\nSell result with fixed rate = 0.001 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33299.999667, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.333332999999996, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1.))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33296.67, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.33, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 32967.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997.00897308, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82053838484547, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 1:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 1))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -19999.82, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -18054., msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 54.0, msg='result incorrect')
def test_min_fee(self):
"""测试最低交易费用"""
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 300
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 985, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 10))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 10)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33033.333)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33030)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 32700)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
def test_rate_with_min(self):
"""测试最低交易费用对其他交易费率参数的影响"""
self.r.buy_rate = 0.0153
self.r.sell_rate = 0.01
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 333
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 984.9305624, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 301.3887520929774, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 10))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 10)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32999.99967)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.33333)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32996.7)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.3)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32667.0)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.0)
def test_fixed_fee(self):
"""测试固定交易费用"""
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 200
self.r.sell_fix = 150
self.r.buy_min = 0
self.r.sell_min = 0
self.r.slipage = 0
print('\nselling result of fixed cost with fixed fee = 150 and moq=0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 0))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], 33183.333, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150.0, msg='result incorrect')
print('\nselling result of fixed cost with fixed fee = 150 and moq=100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3300.]), True,
                      f'result incorrect, {test_fixed_fee_result[0]} does not equal [0, 0, -3300]')
self.assertAlmostEqual(test_fixed_fee_result[1], 32850., msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150., msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 990., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18200.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
def test_slipage(self):
"""测试交易滑点"""
self.r.buy_fix = 0
self.r.sell_fix = 0
self.r.buy_min = 0
self.r.sell_min = 0
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.slipage = 1E-9
        print('\npurchase result of fixed rate = 0.003 and slipage = 1E-9 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
        print('\npurchase result of fixed rate = 0.003 and slipage = 1E-9 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
        print('\nselling result with fixed rate = 0.001 and slipage = 1E-9:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True,
                      f'{test_fixed_fee_result[0]} does not equal [0, 0, -3333.3333]')
self.assertAlmostEqual(test_fixed_fee_result[1], 33298.88855591,
                               msg=f'{test_fixed_fee_result[1]} does not equal 33298.88855591.')
self.assertAlmostEqual(test_fixed_fee_result[2], 34.44444409,
                               msg=f'{test_fixed_fee_result[2]} does not equal 34.44444409.')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 996.98909294, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 60.21814121353513, msg='result incorrect')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18054.36, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 54.36, msg='result incorrect')
class TestSpace(unittest.TestCase):
def test_creation(self):
"""
test if creation of space object is fine
"""
# first group of inputs, output Space with two discr axis from [0,10]
print('testing space objects\n')
# pars_list = [[(0, 10), (0, 10)],
# [[0, 10], [0, 10]]]
#
# types_list = ['discr',
# ['discr', 'discr']]
#
# input_pars = itertools.product(pars_list, types_list)
# for p in input_pars:
# # print(p)
# s = qt.Space(*p)
# b = s.boes
# t = s.types
# # print(s, t)
# self.assertIsInstance(s, qt.Space)
# self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
# self.assertEqual(t, ['discr', 'discr'], 'types incorrect')
#
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = ['foo, bar',
['foo', 'bar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['enum', 'enum'], 'types incorrect')
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = [['discr', 'foobar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['discr', 'enum'], 'types incorrect')
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types='conti, enum')
self.assertEqual(s.types, ['conti', 'enum'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 2))
self.assertEqual(s.shape, (np.inf, 2))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(1, 2), (2, 3), (3, 4)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['discr', 'discr', 'discr'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (2, 2, 2))
self.assertEqual(s.shape, (2, 2, 2))
self.assertEqual(s.count, 8)
self.assertEqual(s.boes, [(1, 2), (2, 3), (3, 4)])
pars_list = [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
pars_list = [((1, 2, 3), (2, 3, 4), (3, 4, 5))]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum'])
self.assertEqual(s.dim, 1)
self.assertEqual(s.size, (3,))
self.assertEqual(s.shape, (3,))
self.assertEqual(s.count, 3)
pars_list = ((1, 2, 3), (2, 3, 4), (3, 4, 5))
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
def test_extract(self):
"""
:return:
"""
pars_list = [(0, 10), (0, 10)]
types_list = ['discr', 'discr']
s = Space(pars=pars_list, par_types=types_list)
extracted_int, count = s.extract(3, 'interval')
extracted_int_list = list(extracted_int)
print('extracted int\n', extracted_int_list)
self.assertEqual(count, 16, 'extraction count wrong!')
self.assertEqual(extracted_int_list, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
extracted_rand, count = s.extract(10, 'rand')
extracted_rand_list = list(extracted_rand)
self.assertEqual(count, 10, 'extraction count wrong!')
print('extracted rand\n', extracted_rand_list)
for point in list(extracted_rand_list):
self.assertEqual(len(point), 2)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
extracted_int2, count = s.extract(3, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list2 = list(extracted_int2)
self.assertEqual(extracted_int_list2, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
print('extracted int list 2\n', extracted_int_list2)
self.assertIsInstance(extracted_int_list2[0][0], float)
self.assertIsInstance(extracted_int_list2[0][1], (int, int64))
extracted_rand2, count = s.extract(10, 'rand')
self.assertEqual(count, 10, 'extraction count wrong!')
extracted_rand_list2 = list(extracted_rand2)
print('extracted rand list 2:\n', extracted_rand_list2)
for point in extracted_rand_list2:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], float)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], (int, int64))
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), ('a', 'b')]
s = Space(pars=pars_list, par_types='enum, enum')
extracted_int3, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list3 = list(extracted_int3)
self.assertEqual(extracted_int_list3, [(0., 'a'), (0., 'b'), (10, 'a'), (10, 'b')],
'space extraction wrong!')
print('extracted int list 3\n', extracted_int_list3)
self.assertIsInstance(extracted_int_list3[0][0], float)
self.assertIsInstance(extracted_int_list3[0][1], str)
extracted_rand3, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list3 = list(extracted_rand3)
print('extracted rand list 3:\n', extracted_rand_list3)
for point in extracted_rand_list3:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (float, int))
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], str)
self.assertIn(point[1], ['a', 'b'])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14))]
s = Space(pars=pars_list, par_types='enum')
extracted_int4, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list4 = list(extracted_int4)
it = zip(extracted_int_list4, [(0, 10), (1, 'c'), (0, 'b'), (1, 14)])
for item, item2 in it:
print(item, item2)
self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it]))
print('extracted int list 4\n', extracted_int_list4)
self.assertIsInstance(extracted_int_list4[0], tuple)
extracted_rand4, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list4 = list(extracted_rand4)
print('extracted rand list 4:\n', extracted_rand_list4)
for point in extracted_rand_list4:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (int, str))
self.assertIn(point[0], [0, 1, 'a'])
self.assertIsInstance(point[1], (int, str))
self.assertIn(point[1], [10, 14, 'b', 'c'])
self.assertIn(point, [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14)), (1, 4)]
s = Space(pars=pars_list, par_types='enum, discr')
extracted_int5, count = s.extract(1, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list5 = list(extracted_int5)
for item, item2 in extracted_int_list5:
print(item, item2)
self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it]))
print('extracted int list 5\n', extracted_int_list5)
self.assertIsInstance(extracted_int_list5[0], tuple)
extracted_rand5, count = s.extract(5, 'rand')
self.assertEqual(count, 5, 'extraction count wrong!')
extracted_rand_list5 = list(extracted_rand5)
print('extracted rand list 5:\n', extracted_rand_list5)
for point in extracted_rand_list5:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], tuple)
print(f'type of point[1] is {type(point[1])}')
self.assertIsInstance(point[1], (int, np.int64))
self.assertIn(point[0], [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
print(f'test incremental extraction')
pars_list = [(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)]
s = Space(pars_list)
ext, count = s.extract(64, 'interval')
self.assertEqual(count, 4096)
points = list(ext)
        # All points have been extracted; build ten subspaces around ten of them
        # Check each subspace is a Space and lies within s, then extract points with 32 and verify the count
for point in points[1000:1010]:
subspace = s.from_point(point, 64)
self.assertIsInstance(subspace, Space)
self.assertTrue(subspace in s)
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
ext, count = subspace.extract(32)
points = list(ext)
self.assertGreaterEqual(count, 512)
self.assertLessEqual(count, 4096)
print(f'\n---------------------------------'
f'\nthe space created around point <{point}> is'
f'\n{subspace.boes}'
f'\nand extracted {count} points, the first 5 are:'
f'\n{points[:5]}')
def test_axis_extract(self):
# test axis object with conti type
axis = Axis((0., 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'conti')
self.assertEqual(axis.axis_boe, (0., 5.))
self.assertEqual(axis.count, np.inf)
self.assertEqual(axis.size, 5.0)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [0., 1., 2., 3., 4.]))
self.assertTrue(np.allclose(axis.extract(0.5, 'int'), [0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5]))
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(0 <= item <= 5) for item in extracted]))
# test axis object with discrete type
axis = Axis((1, 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'discr')
self.assertEqual(axis.axis_boe, (1, 5))
self.assertEqual(axis.count, 5)
self.assertEqual(axis.size, 5)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [1, 2, 3, 4, 5]))
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 2, 3, 4, 5]) for item in extracted]))
# test axis object with enumerate type
axis = Axis((1, 5, 7, 10, 'A', 'F'))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'enum')
self.assertEqual(axis.axis_boe, (1, 5, 7, 10, 'A', 'F'))
self.assertEqual(axis.count, 6)
self.assertEqual(axis.size, 6)
self.assertEqual(axis.extract(1, 'int'), [1, 5, 7, 10, 'A', 'F'])
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 5, 7, 10, 'A', 'F']) for item in extracted]))
def test_from_point(self):
"""测试从一个点生成一个space"""
        # Create a space, pick a point in it and a distance, and build a sub-space around that point
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10., 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
print('create subspace from a point in space')
p = (3, 3)
distance = 2
subspace = s.from_point(p, distance)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'discr'])
self.assertEqual(subspace.dim, 2)
self.assertEqual(subspace.size, (4.0, 5))
self.assertEqual(subspace.shape, (np.inf, 5))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(1, 5), (1, 5)])
print('create subspace from a 6 dimensional discrete space')
s = Space(pars=[(10, 250), (10, 250), (10, 250), (10, 250), (10, 250), (10, 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['discr', 'discr', 'discr', 'discr', 'discr', 'discr'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 65345616)
self.assertEqual(subspace.size, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.shape, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.count, 65345616)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace from a 6 dimensional continuous space')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 48000000)
self.assertEqual(subspace.size, (15.0, 20.0, 20.0, 20.0, 20.0, 20.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace with different distances on each dimension')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = [10, 5, 5, 10, 10, 5]
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 6000000)
self.assertEqual(subspace.size, (15.0, 10.0, 10.0, 20.0, 20.0, 10.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (195, 205), (145, 155), (140, 160), (140, 160), (145, 155)])
class TestCashPlan(unittest.TestCase):
def setUp(self):
self.cp1 = qt.CashPlan(['2012-01-01', '2010-01-01'], [10000, 20000], 0.1)
self.cp1.info()
self.cp2 = qt.CashPlan(['20100501'], 10000)
self.cp2.info()
self.cp3 = qt.CashPlan(pd.date_range(start='2019-01-01',
freq='Y',
periods=12),
[i * 1000 + 10000 for i in range(12)],
0.035)
self.cp3.info()
def test_creation(self):
self.assertIsInstance(self.cp1, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp2, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp3, qt.CashPlan, 'CashPlan object creation wrong')
# test __repr__()
print(self.cp1)
print(self.cp2)
print(self.cp3)
# test __str__()
self.cp1.info()
self.cp2.info()
self.cp3.info()
        # test assertion errors
self.assertRaises(AssertionError, qt.CashPlan, '2016-01-01', [10000, 10000])
self.assertRaises(KeyError, qt.CashPlan, '2020-20-20', 10000)
def test_properties(self):
self.assertEqual(self.cp1.amounts, [20000, 10000], 'property wrong')
self.assertEqual(self.cp1.first_day, Timestamp('2010-01-01'))
self.assertEqual(self.cp1.last_day, Timestamp('2012-01-01'))
self.assertEqual(self.cp1.investment_count, 2)
self.assertEqual(self.cp1.period, 730)
self.assertEqual(self.cp1.dates, [Timestamp('2010-01-01'), Timestamp('2012-01-01')])
self.assertEqual(self.cp1.ir, 0.1)
self.assertAlmostEqual(self.cp1.closing_value, 34200)
self.assertAlmostEqual(self.cp2.closing_value, 10000)
self.assertAlmostEqual(self.cp3.closing_value, 220385.3483685)
self.assertIsInstance(self.cp1.plan, pd.DataFrame)
self.assertIsInstance(self.cp2.plan, pd.DataFrame)
self.assertIsInstance(self.cp3.plan, pd.DataFrame)
def test_operation(self):
cp_self_add = self.cp1 + self.cp1
cp_add = self.cp1 + self.cp2
cp_add_int = self.cp1 + 10000
cp_mul_int = self.cp1 * 2
cp_mul_float = self.cp2 * 1.5
cp_mul_time = 3 * self.cp2
cp_mul_time2 = 2 * self.cp1
cp_mul_time3 = 2 * self.cp3
cp_mul_float2 = 2. * self.cp3
self.assertIsInstance(cp_self_add, qt.CashPlan)
self.assertEqual(cp_self_add.amounts, [40000, 20000])
self.assertEqual(cp_add.amounts, [20000, 10000, 10000])
self.assertEqual(cp_add_int.amounts, [30000, 20000])
self.assertEqual(cp_mul_int.amounts, [40000, 20000])
self.assertEqual(cp_mul_float.amounts, [15000])
self.assertEqual(cp_mul_float.dates, [Timestamp('2010-05-01')])
self.assertEqual(cp_mul_time.amounts, [10000, 10000, 10000])
self.assertEqual(cp_mul_time.dates, [Timestamp('2010-05-01'),
Timestamp('2011-05-01'),
Timestamp('2012-04-30')])
self.assertEqual(cp_mul_time2.amounts, [20000, 10000, 20000, 10000])
self.assertEqual(cp_mul_time2.dates, [Timestamp('2010-01-01'),
Timestamp('2012-01-01'),
Timestamp('2014-01-01'),
Timestamp('2016-01-01')])
self.assertEqual(cp_mul_time3.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31'),
| Timestamp('2031-12-29') | pandas.Timestamp |
import os
import gzip
import json
import math
import random
import pickle
import pprint
import argparse
from preprocess import convert_unique_idx, split_train_test, create_pair
from util_origin import load_model, get_args, get_device, set_env
import numpy as np
import pandas as pd
import train
import torch
from race_dataset_origin import Dataset
USE_CUDA = torch.cuda.is_available()
DEVICE = torch.device("cuda" if USE_CUDA else "cpu")
class DatasetLoader(object):
def load(self):
"""Minimum condition for dataset:
* All users must have at least one item record.
* All items must have at least one user record.
"""
raise NotImplementedError
class RaceDataset(DatasetLoader):
def __init__(self, data_path):
self.file_path = data_path
def load(self):
# Load data
names = ['user', 'sequence']
train_df = pd.read_csv(self.file_path, delimiter=':', names=names)
return train_df
def get_prepared_data(data_path, test_size=0.2):
dataset = RaceDataset(data_path)
df = dataset.load()
df = df.sort_values(by=['user'], ascending=True).reset_index(drop=True)
# users = df['user'].unique()
record_array = []
for index, row in df.iterrows():
item_list = row['sequence'].split(',')
unique_item_list = np.unique(item_list)
for item in unique_item_list:
# record_df = record_df.append(pd.DataFrame([[row['user'], item]], columns=['user', 'item']))
record_array.append([row['user'], item])
record_df = | pd.DataFrame(record_array, columns=['user', 'item'], dtype=int) | pandas.DataFrame |
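    # Equivalent vectorized sketch of the loop above (illustrative only; assumes
    # `df` as loaded above, with `sequence` holding comma-separated numeric item ids):
    # record_df_alt = (
    #     df.assign(item=df['sequence'].str.split(','))
    #       .explode('item')
    #       .drop_duplicates(subset=['user', 'item'])[['user', 'item']]
    #       .astype(int)
    #       .reset_index(drop=True)
    # )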
#import dependencies
import pandas
import numpy
import hvplot.pandas
import matplotlib.pyplot as plt
#machine learning dependencies
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from connect_sql_db import build_engine
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.layers import Dense
from sklearn.preprocessing import StandardScaler, OneHotEncoder
#predictor
def nueral_network_classifier(dictionary):
engine = build_engine(database_name="database1",host="192.168.3.11")
cleaned_df = | pandas.read_sql("select * from cleaned_table", con=engine) | pandas.read_sql |
import logging
import numpy
import pandas as pd
from covidactnow.datapublic.common_fields import CommonFields
from libs.datasets import data_source
from libs.datasets import dataset_utils
from libs.us_state_abbrev import US_STATE_ABBREV
from libs.datasets.common_fields import CommonIndexFields
_logger = logging.getLogger(__name__)
def fill_missing_county_with_city(row):
"""Fills in missing county data with city if available.
"""
if pd.isnull(row.county) and not | pd.isnull(row.city) | pandas.isnull |
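# Illustrative usage sketch (assumptions: the helper returns the filled-in
# county value and the frame carries both `county` and `city` columns):
# df["county"] = df.apply(fill_missing_county_with_city, axis=1)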
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login, logout, update_session_auth_hash
from django.contrib.auth.forms import UserCreationForm, UserChangeForm, PasswordChangeForm
from django.contrib import messages
from .forms import SignUpForm, EditPofileForm, ChangePasswordForm
from django.http import HttpResponse
from django.http import JsonResponse
from django.template import loader
from ticker.backend import merge_dicts, tickerIndices, getGL, getHomeNews, eqlist, equitydataAPI, equitywiki, equitydataRefresh, equitydataNews, equitydataPeer, equitydataCharts, equitydataChartsdef
from ticker.predictor import predictorModel
import requests
from nsetools import Nse
from pprint import pprint
nse = Nse()
#################################################
urlmain = 'http://127.0.0.1:8000/quoteAPI/v1/{}'#
#################################################
def home(request):
context_all = merge_dicts(tickerIndices(), getGL(), getHomeNews())
return render(request, 'home.html',context_all)
# About
def team(request):
return render(request, 'about/team.html', {})
def careers(request):
return render(request, 'about/careers.html', {})
# Pricing
def pricing(request):
return render(request, 'pricing.html', {})
# Services
def services(request):
return render(request, 'services.html', {})
# API
def quoteAPI(request,symbol):
if nse.is_valid_code(symbol):
context_all = equitydataAPI(symbol)
return JsonResponse(context_all, safe=False)
else:
return handler404(request)
# Pages
def equitylist(request):
data = eqlist()
return render(request, 'equitylist.html', context= {'data':data})
# Equity
def autoRefreshEqu(request,symbol):
if nse.is_valid_code(symbol):
context_all = equitydataRefresh(symbol)
return render(request,'equity/autorefequ.html',context_all )
def equity(request,symbol):
import pandas as pd
from datetime import datetime, date, time
from nsepy import get_history
if nse.is_valid_code(symbol):
x = equitydataAPI(symbol)
x = x['data'][0]
start_date = request.GET.get('start_date')
end_date = request.GET.get('end_date')
if start_date and end_date:
context = equitydataCharts(symbol,start_date,end_date)
else:
context = equitydataChartsdef(symbol)
context_all = {
'symbol': symbol,
'compDet': equitywiki(symbol),
'quoteNews' : equitydataNews(symbol),
'peerlist': equitydataPeer(symbol),
'x': x,
'quote':quote,
#'changeQuote':changeQuote,
}
context_all = merge_dicts(context, context_all)
template = loader.get_template('equity/equity.html')
response_body = template.render(context_all,request)
return HttpResponse(response_body)
'''
def quote(request):
# Getting Value From HTML Input
if request.method == "POST":
#quote = request.POST["quote"]
quote = request.POST.get('quote', '').strip().split(' ')[0].strip().upper()
print("Requested Quote : ",quote)
# Getting Value's From NSE API
x = nse.get_quote(quote)
print(x)
changeQuote = float(x['change'])
cName = (x['companyName'])
print(cName)
cURL = "https://newsapi.org/v2/everything?q="+ cName +"&apiKey=<KEY>"
#print(cURL)
quoteNewsSet = (requests.get(cURL)).json
context_all = {'quote':quote,'x':x,'changeQuote':changeQuote, 'cName':cName, 'quoteNews':quoteNewsSet }
return render(request, 'quote.html', context_all)
else:
return handler404(request)
'''
def quote(request):
# Getting Value From HTML Input
if request.method == "POST":
quote = request.POST["quote"]
if nse.is_valid_code(quote):
return quote_data(request,quote)
else:
return handler404(request)
else:
return handler404(request)
def quote_data(request, symbol):
x = nse.get_quote(symbol)
changeQuote = float(x['change'])
cName = (x['companyName'])
demoname = cName.split()[:int(len(cName)/2)]
#demoname = demoname
demoname = ' '.join(demoname)
descCompany = "https://en.wikipedia.org/w/api.php?format=json&action=query&prop=extracts&exintro&explaintext&redirects=1&titles={}".format(demoname)
wsSet = (requests.get(descCompany)).json()
param = list(wsSet['query']['pages'].keys())
param = "".join(param)
title = wsSet['query']['pages'][param]['title']
extract = wsSet['query']['pages'][param]['extract']
url2 = ("https://en.wikipedia.org/api/rest_v1/page/summary/{}".format(title))
nSet = (requests.get(url2)).text
nsSet = (requests.get(url2)).json()
if 'thumbnail' in nSet:
imgurl = (nsSet['thumbnail']['source'])
else:
imgurl = "data:image/gif;base64,R0lGODlhAQABAIAAAHd3dwAAACH5BAAAAAAALAAAAAABAAEAAAICRAEAOw=="
url2 = (nsSet['content_urls']['desktop']['page'])
compDet = {
'title' : title,
'extract' : extract,
'url2':url2,
'imgurl':imgurl
}
import json
peerurl = ("https://nseindia.com/live_market/dynaContent/live_watch/get_quote/ajaxPeerCompanies.jsp?symbol={}").format(symbol)
r = requests.get(peerurl)
data = r.text
data = data.replace('\r', '')
data = data.replace('\n', '')
data = data.replace('industry', '"industry"', 1)
data = data.replace('data', '"data"', 1)
jdata = json.loads(data)
r = json.dumps(jdata)
loaded_r = json.loads(r)
keys = []
values = []
for key in range(0,len(loaded_r['data'])):
symbol_c = loaded_r['data'][key]["symbol"]
keys.append(symbol_c)
for value in range(0,len(loaded_r['data'])):
symbol_c = loaded_r['data'][value]["name"]
values.append(symbol_c)
peerlist = dict(zip(keys, values))
print(peerlist)
cURL = "https://newsapi.org/v2/everything?q="+ cName +"&apiKey=<KEY>"
quoteNewsSet = (requests.get(cURL)).json
import quandl as qd
import pandas as pd
import datetime
dtnow = datetime.datetime.today().strftime("%d-%b-%Y")
dt_now = datetime.datetime.today().strftime("%d-%b-%Y %H:%M:%S")
qdauth = 'JvPxndekpt7dVpVZnwLR'
equity = 'NSE/' + symbol
#####
start_date = request.GET.get('start_date')
end_date = request.GET.get('end_date')
if start_date and end_date:
df = qd.get(equity,start_date=start_date, end_date=end_date, authtoken = qdauth)
else:
df = qd.get(equity,start_date="2018-01-01", end_date=dtnow, authtoken = qdauth)
df = df.reset_index()
df['Date'] = pd.to_datetime(df['Date'], format='%Y%m%d').dt.strftime("%Y-%m-%d")
dfOpen = df[['Date','Open']]
dfClose = df[['Date','Close']]
dfHigh = df[['Date','High']]
dfLow = df[['Date','Low']]
dfOC = df[['Date','Open', 'Close']]
dfHL = df[['Date','High', 'Low']]
contextOpen = dfOpen.to_json(orient='records')
contextClose = dfClose.to_json(orient='records')
contextHigh = dfHigh.to_json(orient='records')
contextLow = dfLow.to_json(orient='records')
contextOC = dfOC.to_json(orient='records')
contextHL = dfHL.to_json(orient='records')
context_all = {'quote':quote, 'peerlist':peerlist ,'x':x,'changeQuote':changeQuote, 'cName':cName, 'quoteNews':quoteNewsSet, 'contextOpen' : contextOpen,
'contextClose' : contextClose,
'contextHigh' : contextHigh,
'contextLow' : contextLow,
'contextOC': contextOC,
'contextHL' : contextHL,
'dt_now':dt_now,
'compDet':compDet }
template = loader.get_template('quote-data.html')
response_body = template.render(context_all,request)
return HttpResponse(response_body)
#'''
#except ConnectionAbortedError:
# return HttpResponse(''' ConnectionAbortedError ''')
#except :
# return render(request, 'server-error.html', {'symbol':symbol})
#'''
def api_req(request, symbol):
context_all = predictorModel(symbol)
#context_all = {"symbol":symbol}
return JsonResponse(context_all)
def chartstd(request):
import quandl as qd
import pandas as pd
import datetime
dt_now = datetime.datetime.today().strftime('%Y-%m-%d')
qdauth = 'JvPxndekpt7dVpVZnwLR'
#####
start_date = request.GET.get('start_date')
end_date = request.GET.get('end_date')
if start_date and end_date:
df = qd.get('NSE/BAJFINANCE',start_date=start_date, end_date=end_date, authtoken = qdauth)
else:
df = qd.get('NSE/BAJFINANCE',start_date="2018-01-01", end_date=dt_now, authtoken = qdauth)
df = df.reset_index()
df['Date'] = | pd.to_datetime(df['Date'], format='%Y%m%d') | pandas.to_datetime |
from typing import Iterable, Dict, Union, List
from json import dumps
from requests import get
from http import HTTPStatus
import pandas as pd
import zipfile
import requests
import io
import os
import re
import scipy
import scipy.stats
import numpy.random as random
import numpy as np

# Type alias for the `structure` parameter; needed by the annotation inside the
# paging loop below.
StructureType = Dict[str, Union[dict, str]]
def get_paginated_dataset(filters: Iterable[str], structure: Dict[str, Union[dict, str]] = None,
start_page = 1, end_page=None) -> pd.DataFrame:
"""This is lifted from the NHSE website: https://coronavirus.data.gov.uk/developers-guide
The "filters" param is used to determine what geographical level you will pull,
whilst the "structure" param describes the fields you will pull. The function will loop
over all the pages requested (or all pages if none specified).
ISSUES: The API seems to time out for large datasets (i.e. UTLA), so you might need to pull
in multiple small batches of 5 or 10 pages at a time.
-------
Params
-------
filters : list(str,...)
The geographic area you want. Example: ["areaType=nation;areaName=england"]
You can choose to not include areaName: ['areaType=nation"].
Options for areaType: overview, nation, region, nhsRegion, utla, ltla
structure : dict(str / dict(str))
The columns you want. You specify it as either just a dictionary full of columm
names (the key of the dict defines what the column comes out as for you, so below,
the areaName column comes out as "name"):
{"date": "date",
"areatype": "areaType",
"name": "areaName",
"code": "areaCode",
"newAdmissions": "newAdmissions"}
The options you can take are:
# date - the date of the data point
# areaType - the area type
# areaName - area name
# areaCode - area code (ONS format, i.e. E0000000001).
# newCasesByPublishDate - New cases by publish date
# cumCasesByPublishDate - Cumulative cases by publish date
# cumCasesBySpecimenDateRate - Rate of cumulative cases by publish date per 100k resident population
# newCasesBySpecimenDate - New cases by specimen date
# cumCasesBySpecimenDateRate - Rate of cumulative cases by specimen date per 100k resident population
# cumCasesBySpecimenDate - Cumulative cases by specimen date
# maleCases - Male cases (by age)
# femaleCases - Female cases (by age)
# newPillarOneTestsByPublishDate - New pillar one tests by publish date
# cumPillarOneTestsByPublishDate - Cumulative pillar one tests by publish date
# newPillarTwoTestsByPublishDate - New pillar two tests by publish date
# cumPillarTwoTestsByPublishDate - Cumulative pillar two tests by publish date
# newPillarThreeTestsByPublishDate - New pillar three tests by publish date
# cumPillarThreeTestsByPublishDate - Cumulative pillar three tests by publish date
# newPillarFourTestsByPublishDate - New pillar four tests by publish date
# cumPillarFourTestsByPublishDate - Cumulative pillar four tests by publish date
# newAdmissions - New admissions
# cumAdmissions - Cumulative number of admissions
# cumAdmissionsByAge - Cumulative admissions by age
# cumTestsByPublishDate - Cumulative tests by publish date
# newTestsByPublishDate - New tests by publish date
# covidOccupiedMVBeds - COVID-19 occupied beds with mechanical ventilators
# hospitalCases - Hospital cases
# plannedCapacityByPublishDate - Planned capacity by publish date
# newDeaths28DaysByPublishDate - Deaths within 28 days of positive test
# cumDeaths28DaysByPublishDate - Cumulative deaths within 28 days of positive test
# cumDeaths28DaysByPublishDateRate - Rate of cumulative deaths within 28 days of positive test per 100k resident population
# newDeaths28DaysByDeathDate - Deaths within 28 days of positive test by death date
# cumDeaths28DaysByDeathDate - Cumulative deaths within 28 days of positive test by death date
# cumDeaths28DaysByDeathDateRate - Rate of cumulative deaths within 28 days of positive test by death date per 100k resident population
"""
if structure is None:
structure = {"date": "date",
"areatype": "areaType",
"name": "areaName",
"code": "areaCode",
'newCasesByPublishDate' : 'newCasesByPublishDate',
'cumCasesByPublishDate' : 'cumCasesByPublishDate',
'cumCasesBySpecimenDateRate' : 'cumCasesBySpecimenDateRate',
'newCasesBySpecimenDate' : 'newCasesBySpecimenDate',
'cumCasesBySpecimenDateRate' : 'cumCasesBySpecimenDateRate',
'cumCasesBySpecimenDate' : 'cumCasesBySpecimenDate',
'maleCases' : 'maleCases',
'femaleCases' : 'femaleCases',
'newPillarOneTestsByPublishDate' : 'newPillarOneTestsByPublishDate',
'cumPillarOneTestsByPublishDate' : 'cumPillarOneTestsByPublishDate',
'newPillarTwoTestsByPublishDate' : 'newPillarTwoTestsByPublishDate',
'cumPillarTwoTestsByPublishDate' : 'cumPillarTwoTestsByPublishDate',
'newPillarThreeTestsByPublishDate' : 'newPillarThreeTestsByPublishDate',
'cumPillarThreeTestsByPublishDate' : 'cumPillarThreeTestsByPublishDate',
'newPillarFourTestsByPublishDate' : 'newPillarFourTestsByPublishDate',
'cumPillarFourTestsByPublishDate' : 'cumPillarFourTestsByPublishDate',
'newAdmissions' : 'newAdmissions',
'cumAdmissions' : 'cumAdmissions',
'cumAdmissionsByAge' : 'cumAdmissionsByAge',
'cumTestsByPublishDate' : 'cumTestsByPublishDate',
'newTestsByPublishDate' : 'newTestsByPublishDate',
'covidOccupiedMVBeds' : 'covidOccupiedMVBeds',
'hospitalCases' : 'hospitalCases',
'plannedCapacityByPublishDate' : 'plannedCapacityByPublishDate',
'newDeaths28DaysByPublishDate' : 'newDeaths28DaysByPublishDate',
'cumDeaths28DaysByPublishDate' : 'cumDeaths28DaysByPublishDate',
'cumDeaths28DaysByPublishDateRate' : 'cumDeaths28DaysByPublishDateRate',
'newDeaths28DaysByDeathDate' : 'newDeaths28DaysByDeathDate',
'cumDeaths28DaysByDeathDate' : 'cumDeaths28DaysByDeathDate',
'cumDeaths28DaysByDeathDateRate' : 'cumDeaths28DaysByDeathDateRate',
}
endpoint = "https://api.coronavirus.data.gov.uk/v1/data"
api_params = dict(filters=str.join(";", filters),
structure=dumps(structure, separators=(",", ":")),
format="json", page=1)
data = list()
page_number = start_page
current_data = dict(pagination={'next':True}) # dummy initial "next" pagination
while current_data["pagination"]["next"] is not None:
api_params["page"] = page_number
if page_number == end_page: break
try:
response = get(endpoint, params=api_params, timeout=10)
except Exception as error:
print(f" Trying page {page_number} again...")
continue
if response.status_code >= HTTPStatus.BAD_REQUEST:
raise RuntimeError(f'Request failed: {response.text}')
elif response.status_code == HTTPStatus.NO_CONTENT:
break
current_data = response.json()
page_data: List[StructureType] = current_data['data']
data.extend(page_data)
print(f'{str.join(";", filters)} page {page_number}: {response.url}')
page_number += 1
return | pd.DataFrame(data) | pandas.DataFrame |
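
# Illustrative usage sketch (not part of the original script): the filter and
# structure values below come straight from the docstring above, and end_page
# simply caps the number of pages pulled for a quick check.
if __name__ == "__main__":
    england_filters = ["areaType=nation;areaName=england"]
    small_structure = {"date": "date",
                       "name": "areaName",
                       "newAdmissions": "newAdmissions"}
    df_england = get_paginated_dataset(england_filters, small_structure,
                                       start_page=1, end_page=3)
    print(df_england.head())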
import requests
import re
import json
from datetime import datetime
from tqdm import tqdm
import pandas as pd
class JobCrawler(object):
# prefix for 104 website
PRE_LINK = "https://www.104.com.tw/jobs/search/"
# area-area_code mapping file path
MAP_PATH = "./mapping.json"
def __init__(self):
self.page_source_code = None
self.url = None
self.mapping = None
def get(self, url):
'''get response.'''
res = requests.get(url)
if res.status_code == 200:
self.page_source_code = res.text
return
def _parse_all_count(self):
'''parse page source code and return counts of jobs of different types.'''
# Found counts of job on the page
text = re.search(r'initFilter =({(.*?)});', self.page_source_code).group(1)
values = json.loads(text)['count'][:5]
keys = ['all', 'fulltime', 'parttime', 'highend', 'temp']
res = dict([(k, v) for k, v in zip(keys, values)])
return res
def _get_area_mapping(self):
'''obtain area mapping from preprocessing work.'''
with open(self.MAP_PATH, 'r', encoding='utf-8') as fin:
self.mapping = json.load(fin)
return
def crawl(self):
'''
Main crawler function.
- compose urls for different areas and job experience criteria
- get the number of counts of each job type
- save results to data directory
'''
# get area code
self._get_area_mapping()
# final results list
result = []
# for each area
for area in tqdm(self.mapping):
# for each job experience criteria
for exp in [1, 3, 5, 10, 99]:
# compose url with area and experience condition
url = f"https://www.104.com.tw/jobs/search/?area={area['area_code']}&jobexp={exp}&isnew=0"
# obtain response and parse the number of counts
self.get(url)
counts = self._parse_all_count()
# compose subset result
res = {
"area_name" : area['area_name'],
"area_code" : area['area_code'],
"job_exp" : exp
}
# add counts of job under different types to res
res.update(counts)
# append subset result to list for all results
result.append(res)
# prepare output file destination, add time information
outputfile = "data/result-" + datetime.today().strftime('%Y-%m-%d') + '.csv'
# convert into csv format with pandas modules
df = | pd.DataFrame(result) | pandas.DataFrame |
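
# Usage sketch (assumptions: mapping.json sits next to this script and a data/
# directory exists for the CSV output, as the class constants above expect):
if __name__ == "__main__":
    crawler = JobCrawler()
    crawler.crawl()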
"""<NAME>0.
MLearner Machine Learning Library Extensions
Author:<NAME><www.linkedin.com/in/jaisenbe>
License: MIT
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import datetime
import time
import joblib
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import xgboost as xgb
from xgboost import XGBClassifier
import seaborn as sns
from mlearner.training import Training
from mlearner.utils import ParamsManager
import warnings
warnings.filterwarnings("ignore")
param_file = "mlearner/classifier/config/models.json"
class modelXGBoost(Training, BaseEstimator, ClassifierMixin):
"""
XGBoost is an optimized distributed gradient boosting library designed to be highly efficient,
flexible and portable. It implements machine learning algorithms under the Gradient Boosting framework.
XGBoost provides a parallel tree boosting (also known as GBDT, GBM) that solve many data science
problems in a fast and accurate way. The same code runs on major distributed environment
(Hadoop, SGE, MPI) and can solve problems beyond billions of examples.
Parameters
----------
"min_child_weight": [ Minimum sum of instance weight (hessian) needed in a child.
"objective": learning task.
"eval_metric": Evaluation metrics for validation data.
"max_depth": Maximum depth of a tree. Increasing this value will make the model more complex and more likely to overfit
"max_delta_step": /Maximum delta step we allow each leaf output to be. If the value is set to 0, it means there is no constraint.
"sampling_method": The method to use to sample the training instances.
"subsample": Subsample ratio of the training instances. Setting it to 0.5 means that XGBoost would randomly sample half of the training data prior to growing trees. and this will prevent overfitting.
"eta": tep size shrinkage used in update to prevents overfitting.
"gamma": Minimum loss reduction required to make a further partition on a leaf node of the tree.
"lambda": L2 regularization term on weights. Increasing this value will make model more conservative.
"alpha": L1 regularization term on weights. Increasing this value will make model more conservative.
"tree_method": he tree construction algorithm used in XGBoost.
"predictor": The type of predictor algorithm to use.
"num_parallel_tree": umber of parallel trees constructed during each iteration.
...
Documentation
-------------
https://xgboost.readthedocs.io/en/latest/
https://www.analyticsvidhya.com/blog/2016/03/complete-guide-parameter-tuning-xgboost-with-codes-python/
"""
def __init__(self, name="XGB", random_state=99, train_dir="", params=None, *args, **kwargs):
self.name = name
self.train_dir = train_dir + "/" + "model_" + str(self.name) + "/"
self.random_state = random_state
if params is None:
self.get_params_json()
self.params.update({
'model_dir': self.train_dir,
"seed": self.random_state})
else:
# if isinstance(params)
self.params = params
self.model = XGBClassifier(**self.params)
super().__init__(self.model, random_state=self.random_state)
def get_params_json(self):
self.manager_models = ParamsManager(param_file, key_read="Models")
self.params = self.manager_models.get_params()["XGBoost"]
self.manager_finetune = ParamsManager(param_file, key_read="FineTune")
self.params_finetune = self.manager_finetune.get_params()["XGBoost"]
def dataset(self, X, y, categorical_columns_indices=None, test_size=0.2, *args, **kwarg):
self.categorical_columns_indices = categorical_columns_indices
self.X = X
self.columns = list(X)
self.y, self.cat_replace = self.replace_multiclass(y)
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.X, self.y, test_size=test_size, random_state=self.random_state)
self.dtrain = xgb.DMatrix(self.X_train, label=self.y_train)
self.dvalid = xgb.DMatrix(self.X_test, label=self.y_test)
self.all_train_data = xgb.DMatrix(self.X, label=self.y)
def set_dataset_nosplit(self, X_train, X_test, y_train, y_test, categorical_columns_indices=None, *args, **kwarg):
self.categorical_columns_indices = categorical_columns_indices
self.columns = list(X_train)
_ytrain, _ = self.replace_multiclass(y_train)
_ytest, _ = self.replace_multiclass(y_test)
self.X_train = X_train
self.X_test = X_test
self.y_train = y_train
self.y_test = y_test
self.X = pd.concat([X_train, X_test], axis=0)
self.y = | pd.concat([y_train, y_test], axis=0) | pandas.concat |
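
# Minimal usage sketch (assumptions: the params JSON referenced by `param_file`
# is present so the default-parameter path in __init__ works, and the toy data
# below stands in for a real feature table / label column):
if __name__ == "__main__":
    from sklearn.datasets import make_classification
    X_demo, y_demo = make_classification(n_samples=200, n_features=10, random_state=99)
    X_demo = pd.DataFrame(X_demo)
    y_demo = pd.Series(y_demo)
    clf = modelXGBoost(name="XGB", random_state=99, train_dir="./runs")
    clf.dataset(X_demo, y_demo, test_size=0.2)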
"""
Run assays and get information on molecules.
"""
import os
import arcade
import pandas as pd
from combine import MolChoose
from descriptors import get_descriptors, lipinski
from filters import run_filters
from rdkit import Chem
import global_vars
from analysis import AnalysisView
# Constants
SCREEN_WIDTH = 1000
SCREEN_HEIGHT = 650
SCREEN_TITLE = "Feedback"
# button names (and costs/duration for assays)
ASSAYS = {'pic50': {'cost': 70, 'duration': 0.5, 'name': 'pIC50'},
'cl_mouse': {'cost': 7000, 'duration': 3, 'name': 'Clearance (mouse)'},
'cl_human': {'cost': 9000, 'duration': 3.5, 'name': 'Clearance (humans)'},
'logd': {'cost': 1000, 'duration': 1.5, 'name': 'LogD'},
'pampa': {'cost': 700, 'duration': 1, 'name': 'PAMPA'}}
ACTIONS = ['run_assays', 'clear_choices']
CALCULATIONS = ['calculate_descriptors', 'run_filters']
DESC_NAMES = {'MW': 'Molecular weight',
'logP': 'LogP',
'TPSA': 'TPSA',
'HA': 'Heavy atom count',
'h_acc': 'Number of HBAs',
'h_don': 'Number of HBDs',
'rings': 'Number of rings'}
class Button(arcade.Sprite):
"""Sprite button class"""
def __init__(self, mol, button, scale=1):
# hold the button name and image
self.button = button
self.image_file_name = os.path.join('images', 'button_pngs', f'{self.button}.png')
# get molecule information
self.chosen_mol = mol
# call the parent class
super().__init__(self.image_file_name, scale)
def get_result(self):
"""
:Returns the assay result when an assay button is clicked.
:return: assay result for the chosen molecule
:rtype: string
"""
# retrieves the appropriate column name
if self.button == 'cl_human':
col = 'clearance_human'
elif self.button == 'cl_mouse':
col = 'clearance_mouse'
else:
col = self.button
result = self.chosen_mol.at[0, col]
return str(result)
def get_cost(self):
"""
:Returns the cost of the assay when the assay button is clicked
:return: assay cost
:rtype: string
"""
cost = ASSAYS[self.button]['cost']
return cost
def get_duration(self):
"""
:Returns the duration of the assay when the assay button is clicked
:return: assay duration
:rtype: string
"""
duration = ASSAYS[self.button]['duration']
return duration
def get_desc(self):
"""
:Returns a dictionary of descriptors calculated using the descriptors.py script
:return: calculated descriptors
:rtype: string
"""
descriptors = get_descriptors(self.chosen_mol.at[0, 'mol'])
descriptors.pop('mol')
for key, val in descriptors.items(): # round to 1 dp
if key not in ['HA', 'h_acc', 'h_don', 'rings']:
descriptors[key] = round(float(val), 1)
return descriptors
def run_filt(self):
"""
        : Runs the filters in filters.py (PAINS, NIH, BRENK, ZINC)
:return: whether the molecule passes the filter and describes any violations
:rtype: string
"""
filter_res = run_filters(Chem.MolFromSmiles(self.chosen_mol.at[0, 'mol']))
return filter_res
class FeedbackView(arcade.View):
"""
Feedback view class
"""
def __init__(self, mol_view=None):
# call the parent class and set up the window
super().__init__()
self.mol_view = mol_view
# list to hold the button sprites
self.button_list = None
# list to hold the mol sprite
self.mol_sprite_list = None
# tracks the assays chosen and the results
self.assay_choices = None # stores the assays chosen by the player
self.assay_results = None # stores the assay results of assays chosen
self.assay_choices_print = None # stores the assays to be displayed (i.e. all assays already run)
self.assay_results_print = None # stores the assay results to be displayed (as above)
# track the total cost and duration of the assays selected
self.total_cost = None
self.total_duration = None
# stores the descriptor and filter results
self.descriptor_results = None
self.filter_results = None
# stores the path to the font file
self.font = os.path.join('fonts', 'arial.ttf')
# sets the background color
arcade.set_background_color(arcade.color.OXFORD_BLUE)
# store the R group tags (will be updated by the molecule builder)
self.tags = ['A01', 'B01'] # initialise
for i, t in enumerate(self.mol_view.current_rs):
if t.tag == 0:
self.tags[i] = 0
else:
self.tags[i] = t.tag
# track which sprite we're near for displaying help text
self.hovered = None
self.hover_time = 0
self.location = (0, 0)
self.display_hover = False
# stores the molecule info
self.mol = None
self.setup()
def make_coordinates(self, sprite_no):
"""
Function to make the coordinates for the assay button sprites.
:param sprite_no: button number (i.e. 1-5)
:type sprite_no: int
:return: two numbers representing 2D coordinates for each sprite
:rtype: int, int
"""
y_slot = SCREEN_HEIGHT / 10
x_slot_width = SCREEN_WIDTH / 5
x_slot = (sprite_no * x_slot_width) - (x_slot_width / 2) + x_slot_width
return x_slot, y_slot
def check_assays_run(self):
"""
Function to check which assays have already been run so that the information is displayed
(prevents user from trying to run assays twice). Checks the df containing all the assay info
for the R group tags and the assays run.
"""
if (len(self.mol_view.assay_df) > 0) and (len(self.assay_choices_print) == 0): # if assays have been run and no assays have been printed to the assay view
try:
                row = self.mol_view.assay_df.loc[(self.mol_view.assay_df['atag'] == self.tags[0]) & (self.mol_view.assay_df['btag'] == self.tags[1])] # find the row in assay_df corresponding to the current molecule
for assay in ASSAYS.keys():
                    if not pd.isnull(row[assay].values[0]): # if there is an assay result (cell is not nan)
self.assay_choices_print.append(assay) # add name of assay to be displayed
self.assay_results_print.append(row[assay].values[0]) # add value of assay
except IndexError: # catch situations where there is no row in the df matching the A and B tags (no assays have been run on that mol)
pass
def setup(self):
"""
Function to set up the feedback view. Creates the molecule and button sprites
and sets their positions.
"""
# initialise the variables
self.button_list = arcade.SpriteList()
self.assay_results = []
self.assay_choices = []
self.assay_results_print = []
self.assay_choices_print = []
self.total_cost = 0
self.total_duration = []
self.descriptor_results = {}
self.filter_results = {}
self.mol_sprite_list = arcade.SpriteList()
# retrieve molecule information from the input df using the r group tags
for tag in self.tags:
if 'A' in tag:
atag = tag
elif 'B' in tag:
btag = tag
self.mol = MolChoose(atag, btag, DataSource=os.path.join('data', 'r_group_decomp.csv'))
self.mol = self.mol.reset_index(drop=True)
# make the molecule sprite using the saved image
mol_sprite = arcade.Sprite(os.path.join('images', 'game_loop_images',
f'scaffold{self.mol_view.round_count}.png'))
mol_sprite.position = (SCREEN_WIDTH - (SCREEN_WIDTH / 6)), (SCREEN_HEIGHT - 150)
self.mol_sprite_list.append(mol_sprite)
# make the assay buttons (at bottom of the screen)
for i, assay in enumerate(ASSAYS.keys()):
assay_button = Button(self.mol, assay, 1)
assay_button.position = self.make_coordinates(i)
self.button_list.append(assay_button)
# make the other four buttons (at top of the screen)
for i, action in enumerate(ACTIONS + CALCULATIONS):
action_button = Button(self.mol, action, 0.6)
action_button.position = (i + (i + 1)) / 18 * SCREEN_WIDTH, (SCREEN_HEIGHT - 90)
self.button_list.append(action_button)
# run function when the screen is first loaded to check which assays have already been run for this molecule
self.check_assays_run()
def split_text(self, n_words, text):
"""
Split the hover text, [text], into multiple lines of length [n_words] so it fits on the screen.
:param n_words: the number of words to put on each line
:type n_words: int
:param text: the help text to appear when hovering over the button
:type text: string
:return: a list of strings representing each line of text
:rtype: list
"""
words = text.split() # make words a list of each individual word in text
split = [words[i:i + n_words] for i in range(0, len(words), n_words)] # join words into lines of length n_words
split = [" ".join(lst) for lst in split]
return split
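    # Example sketch of how split_text chunks a help string into lines of at
    # most n_words words:
    #   split_text(self, 3, "pIC50 is a measure of potency")
    #   -> ["pIC50 is a", "measure of potency"]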
def draw_hover(self):
"""
Draw the hover-over help text for the assay buttons
"""
# specify the help text for each button
text_dict = {'pic50': 'pIC50 represents the negative log of the IC50 (half-maximal inhibitory concentration), a measure of potency',
'cl_mouse': 'This assay measures the metabolic clearance of the drug in mice',
'cl_human': 'This assay measures the metabolic clearance of the drug in humans',
'logd': 'LogD represents the distribution coefficient used to measure lipophilicity',
'pampa': 'This assay measures the permeability of the compounds (parallel artificial membrane permeability assay)',
'calculate_descriptors': 'Calculate molecular properties of the molecule',
'run_filters': 'Run substructure filters (PAINS, NIH, BRENK, ZINC) on the molecule to identify compounds that may result in false positives',
'run_assays': 'Run selected assays. This will deduct time and money from your overall balance',
'clear_choices': 'Clear selected assays'}
full_text = text_dict[self.hovered.button] # what text to write out
lines = self.split_text(5, full_text) # split the text into multiple lines so it fits screen
# set the position of the help text
if self.hovered.button != 'pampa': # pampa button text drawn towards the left of the button due to position on the screen
line_locs = [(self.hovered.position[0] + 40, self.hovered.position[1] - 20 * i) for i in range(len(lines))]
else:
line_locs = [(self.hovered.position[0] - 150, self.hovered.position[1] - 20 * i) for i in range(len(lines))]
# create the text sprite
for i, line in enumerate(lines):
text_sprite = arcade.draw_text(line, line_locs[i][0], line_locs[i][1], color=arcade.color.BLACK, font_size=10, font_name=self.font)
# draw yellow background for the text
width = text_sprite.width
height = text_sprite.height
arcade.draw_rectangle_filled(line_locs[i][0] + width * 0.5, line_locs[i][1] + height * 0.5,
width + 10, height + 10,
color=arcade.color.YELLOW)
# draw the text
text_sprite.draw()
def add_units(self, descs):
"""
Function adds units to molecular weight and TPSA in the descriptor results.
"""
        for key, val in descs.items(): # append units to the relevant descriptors
if key == 'MW':
descs[key] = str(val) + ' Da'
if key == 'TPSA':
descs[key] = str(val) + ' Å\u00b2'
return descs
def on_draw(self):
"""
Render the screen
"""
# clear the screen to the background colour
arcade.start_render()
# draw the chosen molecule section
arcade.draw_rectangle_filled((SCREEN_WIDTH - (SCREEN_WIDTH / 6)),
(SCREEN_HEIGHT - (4 / 5 * SCREEN_HEIGHT) / 2),
(SCREEN_WIDTH / 3),
(4 / 5 * SCREEN_HEIGHT),
color=arcade.color.BLACK)
arcade.draw_rectangle_filled((SCREEN_WIDTH - (SCREEN_WIDTH / 6)),
(SCREEN_HEIGHT - (4 / 5 * SCREEN_HEIGHT) / 2),
(SCREEN_WIDTH / 3 - 10),
(4 / 5 * SCREEN_HEIGHT - 10),
color=arcade.color.WHITE)
arcade.draw_text('Chosen molecule',
SCREEN_WIDTH - 260,
0.93 * SCREEN_HEIGHT,
color=arcade.color.BLACK,
font_size=15,
font_name=self.font,
align='center')
arcade.draw_text(f"Chosen R groups: {self.mol.at[0, 'atag']}, {self.mol.at[0, 'btag']}",
4 / 6 * SCREEN_WIDTH + 20,
390,
font_size=15,
font_name=self.font,
color=arcade.color.BLACK)
# draw the chosen molecule
self.mol_sprite_list.draw()
# draw text showing total balances
arcade.draw_text(f"Total balance: ${global_vars.balance}",
4 / 6 * SCREEN_WIDTH + 20,
360,
font_size=15,
font_name=self.font,
color=arcade.color.BLACK)
if global_vars.balance <= 0: # if balance is negative, text appears in red
arcade.draw_text(f"Total balance: ${global_vars.balance}",
4 / 6 * SCREEN_WIDTH + 20,
360,
font_size=15,
font_name=self.font,
color=arcade.color.DARK_CANDY_APPLE_RED)
arcade.draw_text(f"Time remaining: {global_vars.time} weeks",
4 / 6 * SCREEN_WIDTH + 20,
330,
font_size=15,
font_name=self.font,
color=arcade.color.BLACK)
if global_vars.time <= 0: # if time remaining is negative, text appears in red
arcade.draw_text(f"Time remaining: {global_vars.time} weeks",
4 / 6 * SCREEN_WIDTH + 20,
330,
font_size=15,
font_name=self.font,
color=arcade.color.DARK_CANDY_APPLE_RED)
# draw the molecule report section
arcade.draw_rectangle_filled((1 / 3 * SCREEN_WIDTH),
(1 / 2 * SCREEN_HEIGHT),
(2 / 3 * SCREEN_WIDTH),
(3 / 5 * SCREEN_HEIGHT),
color=arcade.color.BLACK)
arcade.draw_rectangle_filled((1 / 3 * SCREEN_WIDTH),
(1 / 2 * SCREEN_HEIGHT),
(2 / 3 * SCREEN_WIDTH),
(3 / 5 * SCREEN_HEIGHT - 10),
color=arcade.color.WHITE)
arcade.draw_text('Molecule report',
SCREEN_WIDTH / 3 - 100,
SCREEN_HEIGHT * 3 / 4 - 10,
font_size=18,
font_name=self.font,
color=arcade.color.BLACK)
# draw the instructions text (separated into lines so that they fit on the screen)
instructions = ['Welcome to the feedback screen. Here you',
'can run assays on your chosen molecule,',
'calculate descriptors and run substructure',
'filters. Running assays costs time and money,',
'which will be deducted from your total bal-',
                        'ance above. Press the R key to see a sum-',
'mary of all molecules made so far, or press',
'the L key to return to the molecule builder.']
for i, t in enumerate(instructions):
arcade.draw_text(t, 4 / 6 * SCREEN_WIDTH + 20, 320 - (i + 1) * 20, color=arcade.color.BLACK, font_name=self.font)
# draw the assay results
arcade.draw_text('Assay results:',
30,
SCREEN_HEIGHT * 7 / 10 - 25,
font_size=15,
font_name=self.font,
color=arcade.color.BLACK)
for i, (assa, res) in enumerate(zip(self.assay_choices_print, self.assay_results_print)):
arcade.draw_text(ASSAYS[assa]['name'],
30,
SCREEN_HEIGHT - 240 - (i * 20),
color=arcade.color.BLACK,
font_size=10,
font_name=self.font)
arcade.draw_text(res,
160,
SCREEN_HEIGHT - 240 - (i * 20),
color=arcade.color.BLACK,
font_size=10,
font_name=self.font)
# draw the top command buttons
arcade.draw_text('Commands',
10,
SCREEN_HEIGHT - 50,
font_size=15,
font_name=self.font,
color=arcade.color.WHITE)
arcade.draw_text('Free calculations',
2 / 9 * SCREEN_WIDTH + 10,
SCREEN_HEIGHT - 50,
font_size=15,
font_name=self.font,
color=arcade.color.WHITE)
self.button_list.draw()
# draw the total cost and duration of the selected assays
arcade.draw_text('Total cost to run assays',
4 / 9 * SCREEN_WIDTH + 10,
SCREEN_HEIGHT - 50,
font_size=15,
font_name=self.font,
color=arcade.color.WHITE)
cost_text = f"Total cost: ${self.total_cost}"
arcade.draw_text(cost_text,
4 / 9 * SCREEN_WIDTH + 10,
SCREEN_HEIGHT - 75,
color=arcade.color.WHITE,
font_size=10,
font_name=self.font)
if self.total_duration == []:
duration_text = "Total duration: 0 weeks"
else: # assumes assays are run in parallel (records the longest assay in the selection)
duration_text = f"Total duration: {max(self.total_duration)} weeks"
arcade.draw_text(duration_text,
4 / 9 * SCREEN_WIDTH + 10,
SCREEN_HEIGHT - 100,
color=arcade.color.WHITE,
font_size=10,
font_name=self.font)
# draw descriptor calculations
arcade.draw_text('Descriptors:',
SCREEN_WIDTH * 1 / 3 + 10,
SCREEN_HEIGHT * 7 / 10 - 25,
color=arcade.color.BLACK,
font_size=15,
font_name=self.font)
desc_dict = self.descriptor_results.copy() # copy dictionary
desc_dict = self.add_units(desc_dict) # add units to MW and TPSA
for i, (desc, val) in enumerate(desc_dict.items()):
arcade.draw_text(DESC_NAMES[desc],
SCREEN_WIDTH * 1 / 3 + 10,
SCREEN_HEIGHT - 240 - (i * 20),
color=arcade.color.BLACK,
font_size=10,
font_name=self.font)
arcade.draw_text(str(val),
SCREEN_WIDTH * 1 / 3 + 130,
SCREEN_HEIGHT - 240 - (i * 20),
color=arcade.color.BLACK,
font_size=10,
font_name=self.font)
# draw filter results
arcade.draw_text('Filters:',
30,
SCREEN_HEIGHT * 3 / 7 - 30,
color=arcade.color.BLACK,
font_size=15,
font_name=self.font)
for i, (filt, val) in enumerate(self.filter_results.items()):
if isinstance(val, list):
val = ', '.join(val)
val = val.replace('_', ' ').replace('[', '').replace(']', '')
arcade.draw_text(filt,
30,
SCREEN_HEIGHT / 2 - 100 - i * 20,
color=arcade.color.BLACK,
font_size=10,
font_name=self.font)
arcade.draw_text(str(val),
160,
SCREEN_HEIGHT / 2 - 100 - i * 20,
color=arcade.color.BLACK,
font_size=10,
font_name=self.font)
# give result of Lipinski's ro5
if self.descriptor_results != {}:
ro5_v, ro5_res = lipinski(self.descriptor_results)
lipinski_text = f"Molecule {ro5_res} Lipinski filter ({ro5_v} rules broken)"
arcade.draw_text(lipinski_text,
30,
SCREEN_HEIGHT / 2 - 100 - 4 * 20,
color=arcade.color.BLACK,
font_size=10,
font_name=self.font)
# draw hover text
if self.display_hover:
self.draw_hover()
def on_update(self, delta_time: float):
"""
Checks to see if the user is hovering over a sprite looking for help
"""
# specify which sprites have help text
hovered = arcade.get_sprites_at_point(self.location, self.button_list)
self.display_hover = False
if len(hovered) == 1:
if self.hovered != hovered[-1]: # if hovering over something new
self.hovered = hovered[-1] # store the sprite that's being hovered over
self.hover_time = 0
else:
self.hover_time += delta_time
if self.hover_time > 1:
self.display_hover = True # feeds back into on_draw()
def on_mouse_motion(self, x: float, y: float, dx: float, dy: float):
"""
Update mouse location
"""
self.location = (x, y)
def on_mouse_press(self, x, y, button, modifiers):
"""
Called when the user presses a mouse button. Used for determining what happens
when the user clicks on a button.
"""
# identifies what button the user clicks on
clicked = arcade.get_sprites_at_point((x, y), self.button_list)
if len(clicked) > 0: # checks a button has been clicked
choice = clicked[0]
# checks if the button is for an assay
# the assay name, result, cost and duration are stored
if choice.button in ASSAYS.keys():
if choice.button not in self.assay_choices: # check the assay hasn't already been clicked
# if the user has run out of time or money, they cannot run any more assays
if (global_vars.balance <= 0) or (global_vars.time <= 0):
print('You have run out of resources. You cannot run any more assays.')
# if the user does not have enough time or money remaining for that assay, they cannot run the assay
elif (global_vars.balance - choice.get_cost() < 0) or (global_vars.time - choice.get_duration() < 0):
print('You do not have enough resources to run that assay.')
# if the user has the time and money required for the assay, they can select it
else:
# if the assay df is empty or if no assays have been run on the mol, then add the chosen assay to the assay list as normal
if (self.mol_view.assay_df.empty or
self.mol_view.assay_df.loc[(self.mol_view.assay_df['atag'] == self.tags[0]) & (self.mol_view.assay_df['btag'] == self.tags[1]), 'atag'].values.size == 0):
choice._set_color(arcade.color.YELLOW) # selected buttons are changed to yellow
self.assay_choices.append(choice.button)
self.assay_results.append(choice.get_result())
self.total_cost += choice.get_cost() # tally the cost and duration of the selected assays
self.total_duration.append(choice.get_duration())
# if assays have been run but not the assay that has been selected, get the information of both the assays already run and the assay to run (so all info can be displayed)
elif | pd.isnull(self.mol_view.assay_df.loc[(self.mol_view.assay_df['atag'] == self.tags[0]) & (self.mol_view.assay_df['btag'] == self.tags[1]), choice.button].values[0]) | pandas.isnull |
__author__ = 'lucabasa'
__version__ = '5.1.0'
__status__ = 'development'
import pandas as pd
import numpy as np
from source.aggregated_stats import process_details, full_stats, rolling_stats
from source.add_info import add_seed, add_rank, highlow_seed, add_stage, add_quality
def make_teams_target(data, league):
'''
Take the playoff compact data and double the dataframe by inverting W and L
It also creates the ID column
data: playoff compact results
league: men or women, useful to know when to cut the data
'''
if league == 'men':
limit = 2003
else:
limit = 2010
df = data[data.Season >= limit].copy()
df['Team1'] = np.where((df.WTeamID < df.LTeamID), df.WTeamID, df.LTeamID)
df['Team2'] = np.where((df.WTeamID > df.LTeamID), df.WTeamID, df.LTeamID)
df['target'] = np.where((df['WTeamID'] < df['LTeamID']), 1, 0)
df['target_points'] = np.where((df['WTeamID'] < df['LTeamID']), df.WScore - df.LScore, df.LScore - df.WScore)
df.loc[df.WLoc == 'N', 'LLoc'] = 'N'
df.loc[df.WLoc == 'H', 'LLoc'] = 'A'
df.loc[df.WLoc == 'A', 'LLoc'] = 'H'
df['T1_Loc'] = np.where((df.WTeamID < df.LTeamID), df.WLoc, df.LLoc)
df['T2_Loc'] = np.where((df.WTeamID > df.LTeamID), df.WLoc, df.LLoc)
df['T1_Loc'] = df['T1_Loc'].map({'H': 1, 'A': -1, 'N': 0})
df['T2_Loc'] = df['T2_Loc'].map({'H': 1, 'A': -1, 'N': 0})
reverse = data[data.Season >= limit].copy()
reverse['Team1'] = np.where((reverse.WTeamID > reverse.LTeamID), reverse.WTeamID, reverse.LTeamID)
reverse['Team2'] = np.where((reverse.WTeamID < reverse.LTeamID), reverse.WTeamID, reverse.LTeamID)
reverse['target'] = np.where((reverse['WTeamID'] > reverse['LTeamID']),1,0)
reverse['target_points'] = np.where((reverse['WTeamID'] > reverse['LTeamID']),
reverse.WScore - reverse.LScore,
reverse.LScore - reverse.WScore)
reverse.loc[reverse.WLoc == 'N', 'LLoc'] = 'N'
reverse.loc[reverse.WLoc == 'H', 'LLoc'] = 'A'
reverse.loc[reverse.WLoc == 'A', 'LLoc'] = 'H'
reverse['T1_Loc'] = np.where((reverse.WTeamID > reverse.LTeamID), reverse.WLoc, reverse.LLoc)
reverse['T2_Loc'] = np.where((reverse.WTeamID < reverse.LTeamID), reverse.WLoc, reverse.LLoc)
reverse['T1_Loc'] = reverse['T1_Loc'].map({'H': 1, 'A': -1, 'N': 0})
reverse['T2_Loc'] = reverse['T2_Loc'].map({'H': 1, 'A': -1, 'N': 0})
df = pd.concat([df, reverse], ignore_index=True)
to_drop = ['WScore','WTeamID', 'LTeamID', 'LScore', 'WLoc', 'LLoc', 'NumOT']
for col in to_drop:
del df[col]
df.loc[:,'ID'] = df.Season.astype(str) + '_' + df.Team1.astype(str) + '_' + df.Team2.astype(str)
return df
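# Example usage sketch; the file name below is an assumption - any compact tourney
# results file with the standard columns (Season, DayNum, WTeamID, WScore, LTeamID,
# LScore, WLoc, NumOT) would work:
#
#   compact = pd.read_csv('data/raw_men/MDataFiles_Stage2/MNCAATourneyCompactResults.csv')
#   targets = make_teams_target(compact, 'men')
#   # every game now appears twice (once per team ordering) with a binary `target`,
#   # a signed `target_points` margin and an ID of the form Season_Team1_Team2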
def make_training_data(details, targets):
'''
details: seasonal stats by team
targets: result of make_teams_target with each playoff game present twice
Add the prefix T1_ and T2_ to the seasonal stats and add it to the playoff game
This creates the core training set where we use seasonal stats to predict the playoff games
Add the delta_ statistics, given by the difference between T1_ and T2_
'''
tmp = details.copy()
tmp.columns = ['Season', 'Team1'] + \
['T1_'+col for col in tmp.columns if col not in ['Season', 'TeamID']]
total = pd.merge(targets, tmp, on=['Season', 'Team1'], how='left')
tmp = details.copy()
tmp.columns = ['Season', 'Team2'] + \
['T2_'+col for col in tmp.columns if col not in ['Season', 'TeamID']]
total = pd.merge(total, tmp, on=['Season', 'Team2'], how='left')
if total.isnull().any().any():
print(total.columns[total.isnull().any()])
raise ValueError('Something went wrong')
stats = [col[3:] for col in total.columns if 'T1_' in col and 'region' not in col]
for stat in stats:
total['delta_'+stat] = total['T1_'+stat] - total['T2_'+stat]
try:
total['delta_off_edge'] = total['T1_off_rating'] - total['T2_def_rating']
total['delta_def_edge'] = total['T2_off_rating'] - total['T1_def_rating']
except KeyError:
pass
return total
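# Example sketch of the column layout produced above; 'Score' is a hypothetical
# stat name used only for illustration:
#
#   total = make_training_data(regular_stats, targets)
#   # columns now include T1_Score, T2_Score and delta_Score (= T1_Score - T2_Score),
#   # plus delta_off_edge = T1_off_rating - T2_def_rating when ratings are present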
def prepare_data(league):
save_loc = 'processed_data/' + league + '/'
if league == 'women':
regular_season = 'data/raw_women/WDataFiles_Stage2/WRegularSeasonDetailedResults.csv'
playoff = 'data/raw_women/WDataFiles_Stage2/WNCAATourneyDetailedResults.csv'
playoff_compact = 'data/raw_women/WDataFiles_Stage2/WNCAATourneyCompactResults.csv'
seed = 'data/raw_women/WDataFiles_Stage2/WNCAATourneySeeds.csv'
rank = None
stage2 = 'data/raw_women/WDataFiles_Stage2/WSampleSubmissionStage2.csv'
stage2_yr = 2021
save_loc = 'data/processed_women/'
else:
regular_season = 'data/raw_men/MDataFiles_Stage2/MRegularSeasonDetailedResults.csv'
playoff = 'data/raw_men/MDataFiles_Stage2/MNCAATourneyDetailedResults.csv'
playoff_compact = 'data/raw_men/MDataFiles_Stage2/MNCAATourneyCompactResults.csv'
seed = 'data/raw_men/MDataFiles_Stage2/MNCAATourneySeeds.csv'
rank = 'data/raw_men/MDataFiles_Stage2/MMasseyOrdinals.csv'
stage2 = 'data/raw_men/MDataFiles_Stage2/MSampleSubmissionStage2.csv'
stage2_yr = 2021
save_loc = 'data/processed_men/'
# Season stats
reg = pd.read_csv(regular_season)
reg = process_details(reg, rank)
reg.to_csv(save_loc + 'game_details_regular_extended.csv', index=False)
regular_stats = full_stats(reg)
# Last 2 weeks stats
last2weeks = reg[reg.DayNum >= 118].copy()
last2weeks = full_stats(last2weeks)
last2weeks.columns = ['L2W_' + col for col in last2weeks]
last2weeks.rename(columns={'L2W_Season': 'Season', 'L2W_TeamID': 'TeamID'}, inplace=True)
regular_stats = pd.merge(regular_stats, last2weeks, on=['Season', 'TeamID'], how='left')
regular_stats = add_seed(seed, regular_stats)
# Playoff stats
play = pd.read_csv(playoff)
play = process_details(play)
play.to_csv(save_loc + 'game_details_playoff_extended.csv', index=False)
playoff_stats = full_stats(play)
playoff_stats = add_seed(seed, playoff_stats)
if rank:
regular_stats = add_rank(rank, regular_stats)
playoff_stats = add_rank(rank, playoff_stats)
# Target data generation
target_data = pd.read_csv(playoff_compact)
target_data = make_teams_target(target_data, league)
# Add high and low seed wins perc
regular_stats = highlow_seed(regular_stats, reg, seed)
all_reg = make_training_data(regular_stats, target_data)
all_reg = all_reg[all_reg.DayNum >= 136] # remove pre tourney
all_reg = add_stage(all_reg)
all_reg = add_quality(all_reg, reg)
all_reg.to_csv(save_loc + 'training_data.csv', index=False)
playoff_stats.to_csv(save_loc + 'playoff_stats.csv', index=False)
if stage2:
test_data_reg = regular_stats[regular_stats.Season == stage2_yr].copy()
sub = pd.read_csv(stage2)
sub['Team1'] = sub['ID'].apply(lambda x: int(x[5:9]))
sub['Team2'] = sub['ID'].apply(lambda x: int(x[10:]))
tmp = sub.copy()
tmp = tmp.rename(columns={'Team1': 'Team2', 'Team2': 'Team1'})
tmp = tmp[['Team1', 'Team2', 'Pred']]
sub = pd.concat([sub[['Team1', 'Team2', 'Pred']], tmp], ignore_index=True)
sub['Season'] = stage2_yr
test_data = make_training_data(test_data_reg, sub)
test_data = add_stage(test_data)
test_data = add_quality(test_data, reg[reg.Season == stage2_yr])
test_data.to_csv(save_loc + f'{stage2_yr}_test_data.csv', index=False)
return all_reg, test_data
return all_reg
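# Example usage sketch, assuming the raw csv files referenced above are present on disk:
#
#   train_men, test_men = prepare_data('men')
#   train_women, test_women = prepare_data('women')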
def prepare_competitive(league):
if league == 'women':
regular_season = 'data/raw_women/WDataFiles_Stage2/WRegularSeasonDetailedResults.csv'
playoff = 'data/raw_women/WDataFiles_Stage2/WNCAATourneyDetailedResults.csv'
rank = None
season_info = 'data/raw_women/WDataFiles_Stage2/WSeasons.csv'
events_data = 'data/processed_women/events.csv'
save_loc = 'data/processed_women/'
else:
regular_season = 'data/raw_men/MDataFiles_Stage2/MRegularSeasonDetailedResults.csv'
playoff = 'data/raw_men/MDataFiles_Stage2/MNCAATourneyDetailedResults.csv'
playoff_compact = 'data/raw_men/MDataFiles_Stage2/MNCAATourneyCompactResults.csv'
rank = 'data/raw_men/MDataFiles_Stage2/MMasseyOrdinals.csv'
season_info = 'data/raw_men/MDataFiles_Stage2/MSeasons.csv'
events_data = 'data/processed_men/events.csv'
save_loc = 'data/processed_men/'
reg = pd.read_csv(regular_season)
reg = process_details(reg, rank)
play = pd.read_csv(playoff)
play = process_details(play)
full = pd.concat([reg, play])
events = pd.read_csv(events_data)
to_use = [col for col in events if not col.endswith('_game') and
'FinalScore' not in col and
'n_OT' not in col and
'_difference' not in col]
full = | pd.merge(full, events[to_use], on=['Season', 'DayNum', 'WTeamID', 'LTeamID']) | pandas.merge |
import click
import os
import csv
import re
import functools
import pandas as pd
import numpy as np
import datetime
import common
import shutil
class InvalidSubscenario(Exception):pass
class CSVLocation(object):
"""Documentation for CSVLocation
    class that acts as a wrapper over the folder csv_location
"""
def __init__(self, csv_location):
"""
        param csv_location - a path where all csvs are stored in gridpath format
"""
self.csv_location = csv_location
def get_scenarios_csv(self):
return os.path.join(self.csv_location, "scenarios.csv")
def get_csv_data_master(self):
return os.path.join(self.csv_location, "csv_data_master.csv")
class Scenario(CSVLocation):
"""Documentation for Scenario
    Stores all subscenarios of a given scenario.
"""
def __init__(self, csv_location, scenario_name):
super().__init__(csv_location)
scenarios_csv = self.get_scenarios_csv()
self.scenario_name = scenario_name
self.subscenarios = {}
with open(scenarios_csv) as f:
csvf = csv.DictReader(f)
for row in csvf:
subscenario_name = row['optional_feature_or_subscenarios']
subscenario_id = row[scenario_name]
if subscenario_id.strip()!="":
self.subscenarios[subscenario_name] = int(subscenario_id)
setattr(self, subscenario_name, int(subscenario_id))
def get_subscenarios(self):
return [Subscenario(name, v, self.csv_location) for name, v in self.subscenarios.items()]
def get_subscenario(self, name):
if name in self.subscenarios:
return Subscenario(name, self.subscenarios[name], self.csv_location)
else:
raise KeyError(f"Scenario {self.scenario_name} does not have subscenario {name}")
def __str__(self):
return f"Senario<{self.scenario_name}>"
def __repr__(self):
return str(self)
def test_scenario_class():
rpo30 = Scenario("/home/vikrant/programming/work/publicgit/gridpath/db/csvs_mh", "rpo30")
assert rpo30.scenario_name == "rpo30"
assert rpo30.csv_location == "/home/vikrant/programming/work/publicgit/gridpath/db/csvs_mh"
assert rpo30.temporal_scenario_id == 5
assert rpo30.load_zone_scenario_id == 1
assert rpo30.load_scenario_id == 1
assert rpo30.project_portfolio_scenario_id == 1
assert rpo30.project_operational_chars_scenario_id == 3
assert rpo30.project_availability_scenario_id == 3
assert rpo30.project_load_zone_scenario_id == 1
assert rpo30.project_specified_capacity_scenario_id == 1
assert rpo30.project_specified_fixed_cost_scenario_id == 1
assert rpo30.solver_options_id == 1
assert rpo30.temporal_scenario_id == 5
class Subscenario(CSVLocation):
"""Documentation for Scenario
"""
def __init__(self, name, id_, csv_location):
super().__init__(csv_location)
self.name = name
self.id_ = id_
try:
self.__find_files()
except Exception as e:
print(e)
print("Creating empty Subscenario")
@functools.lru_cache(maxsize=None)
def __getattr__(self, name):
files = [os.path.basename(f) for f in self.files]
attrs = [".".join(f.split(".")[:-1]) for f in files]
if name in attrs:
file = [f for f in self.get_files() if f.endswith(f"{name}.csv")][0]
return pd.read_csv(file)
elif name == "data":
file = [f for f in self.get_files()][0]
return | pd.read_csv(file) | pandas.read_csv |
#import the necessary packages
import requests
import pandas as pd
import numpy as np
import json
import time
import os
import datetime
import re
def jprint(obj): #create function so it directly prints
    #create a formatted string of the Python object
text = json.dumps(obj, sort_keys=True, indent=4)
print(text)
#second step, check the publication date and the Presidential ID
#exclude rules not under the incoming President
# set file path
filePath =r'C:/Users/16192/OneDrive/Documents/'
# load json file
#fileName = 'clinton_bush_midnight_rules.json'
#fileName = 'bush_obama_midnight_rules.json'
#fileName = 'obama_trump_midnight_rules.json'
fileName = 'trump_biden_midnight_rules.json'
with open(filePath+fileName, 'r', encoding='utf-8') as loadfile:
dctsRules = json.load(loadfile)
print('Retrieval date: '+dctsRules['dateRetrieved'])
# create df
#replace agency column with just agency names
for i in range(len(dctsRules['results'])):
string = dctsRules['results'][i]['agency_names']
dctsRules['results'][i]['agency_names'] = ', '.join(string)
#for i in range(len(dctsRules['results'])):
#dctsRules['results'][i]['president'] = dctsRules['results'][i]['president']['name'] #replace president column with just the name
for i in range(len(dctsRules['results'])):
string = dctsRules['results'][i]['regulation_id_numbers']
dctsRules['results'][i]['regulation_id_numbers'] = ', '.join(string)
#regular_list = dctsRules['results'][i]['regulation_id_numbers'] #need to flatten this list, https://stackabuse.com/python-how-to-flatten-list-of-lists/
#flat_list = [item for sublist in regular_list for item in sublist]
#dctsRules['results'][i]['regulation_id_numbers'] = ', '.join(flat_list)
for i in range(len(dctsRules['results'])): #need to turn the string or else Excel messes up the format (doesn't work for csv)
dctsRules['results'][i]['document_number'] = str(dctsRules['results'][i]['document_number'])
#need to combine the names (sometimes multiple agencies) into a single string: https://stackoverflow.com/questions/12453580/how-to-concatenate-items-in-a-list-to-a-single-string
#to xlsx: https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_excel.html
dfRules = | pd.DataFrame(dctsRules['results']) | pandas.DataFrame |
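# Example export sketch following the to_excel reference above; the output file name
# is an assumption:
#
#   dfRules.to_excel(filePath + 'trump_biden_midnight_rules.xlsx', index=False)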
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 14 21:34:50 2020
@author: balderrama
"""
import pandas as pd
from random import shuffle
import numpy as np
#%%
data = pd.read_csv('Bolivia/Database_new.csv')
# creation of population for the years 2020 and 2030
test = pd.DataFrame()
test['Slopes'] = data['Pop2025High']/data['Pop']
b = (data['Pop2025High'] - data['Pop'])/(2025 - 2012)
test['Interceptors'] = data['Pop2025High'] - b*2025
test['Pop2025High'] = data['Pop2025High']
test['Pop2025Test'] = test['Interceptors'] + b*2025
test['Pop'] = data['Pop']
test['PopTest'] = test['Interceptors'] + b*2012
test['Pop2020'] = test['Interceptors'] + b*2020
test['Pop2030'] = test['Interceptors'] + b*2030
#print(test['Pop'].sum())
#print(test['Pop2020'].sum())
#print(test['Pop2025High'].sum())
#print(test['Pop2030'].sum())
#print(test['Pop2025High'].sum() - test['Pop2020'].sum())
#print(test['Pop2030'].sum() - test['Pop2025High'].sum())
data['Pop2020High'] = test['Pop2020']
data['Pop2030High'] = test['Pop2030']
# Change the name of column Elecpop to ElecPopCalib
data['ElecPopCalib'] = data['ElecPop']
data.to_csv('Bolivia/Database_new_1.csv')
data = data.drop('ElecPop',axis=1)
# change the wind
data['WindVel'] = 5
data['WindCF'] = 0.3
# Change small mistakes in elecstart 2012
data.loc[7797, 'ElecStart'] = 0
data.loc[9620, 'ElecStart'] = 0
data.loc[13070, 'ElecStart'] = 0
data.to_csv('Bolivia/Database_new_1.csv')
#%%
# analize the ElecStart/ not done yet
data = pd.read_csv('Bolivia/Database_new_1.csv')
test = | pd.DataFrame() | pandas.DataFrame |
from draw import draw_confusion_matrix, draw_correlation_heatmap, draw_roc, draw_rfecv, draw_feature_importance, plot_roc_values
from data_preprocess import feature_reduction
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Data Processing:
from sklearn.preprocessing import StandardScaler, scale
from sklearn.feature_selection import RFECV
from sklearn.model_selection import train_test_split, cross_val_score, KFold, cross_val_predict
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import GridSearchCV
names = [
"Nearest Neighbors", # 0.63
#"Decision Tree", # 0.54
"Neural Net", # 0.61
"AdaBoost", # 0.64
'LDA', # 0.62
'LogReg', # 0.68
#'XGBoost', # 0.59
#"QDA", # 0.55
#"Naive Bayes", # 0.52
#"Random Forest" # 0.53
]
tuned_parameters = [
{'n_neighbors': [3, 9, 14, 17, 18, 19, 25], 'p' : [1, 2]},
#{'max_depth': [5, 10, 20]},
{'alpha': [0.01, 0.1, 1,], 'activation' : ['identity', 'logistic', 'tanh', 'relu'], 'hidden_layer_sizes': [100, 200, 300]},
{'n_estimators': [10, 30, 50, 100, 200], 'learning_rate': [0.01, 0.1, 1, 10, 100]},
{'n_components': [2, 6, 12, 30]},
{'C': [0.01, 0.1, 1, 10, 100, 1000, 3000], 'penalty': ['l1', 'l2']},
#{'n_estimators': [3, 10, 100, 300], 'learning_rate': [0.001, 0.01, 0.1, 1, 10]},
#{'tol': [1.0e-3, 1.0e-4, 1.0e-5]}, # QDA
#{}, # Naive Bayes
#{'n_estimators': [3, 10, 30, 90], 'max_features': ['auto', 'log2', None]} # Random Forest
]
classifiers_default = [
KNeighborsClassifier(), #{n_neighbors = 18, p = 1}
#DecisionTreeClassifier(), #max_depth=10
MLPClassifier(), #{activation = 'tanh', alpha=0.01, hidden_layer_sizes = 200}
AdaBoostClassifier(), #{learning_rate = 0.1, n_estimators = 200}
LinearDiscriminantAnalysis(), #{n_components=2}
LogisticRegression(), #{C = 1000, penalty = 'l2'}
GradientBoostingClassifier(), #{n_estimators=300,learning_rate=0.1}
#QuadraticDiscriminantAnalysis(),
#GaussianNB(),
#RandomForestClassifier() #
]
classifiers_first = [
KNeighborsClassifier(n_neighbors=25, p=1), # n_neighbors=25, p=1
#DecisionTreeClassifier(), #max_depth=10
MLPClassifier(activation='tanh', alpha=0.01, hidden_layer_sizes=300), # activation='logistic', alpha=0.1, hidden_layer_sizes=300
AdaBoostClassifier(learning_rate = 1, n_estimators = 100), # learning_rate = 1, n_estimators = 30
LinearDiscriminantAnalysis(n_components=2), # n_components=2
LogisticRegression(C = 1, penalty = 'l2'), # C = 0.01, penalty = 'l2'
#GradientBoostingClassifier(n_estimators=300,learning_rate=0.01), #{n_estimators=300,learning_rate=0.01}
#QuadraticDiscriminantAnalysis(), # 'tol': 0.001
#GaussianNB(),
#RandomForestClassifier() # 'max_features': 'auto', 'n_estimators': 90
]
classifiers_second = [
KNeighborsClassifier(n_neighbors=18, p=1), # n_neighbors=25, p=1
#DecisionTreeClassifier(), #max_depth=5
MLPClassifier(activation='tanh', alpha=1, hidden_layer_sizes=200), # activation='logistic', alpha=0.1, hidden_layer_sizes=300
AdaBoostClassifier(learning_rate = 0.01, n_estimators = 10), # learning_rate = 1, n_estimators = 30
LinearDiscriminantAnalysis(n_components=2), # n_components=2
LogisticRegression(C = 0.01, penalty = 'l2'), # C = 0.01, penalty = 'l2'
#GradientBoostingClassifier(n_estimators=300,learning_rate=0.01), #{n_estimators=300,learning_rate=0.01}
#QuadraticDiscriminantAnalysis(), # 'tol': 0.001
#GaussianNB(),
#RandomForestClassifier() # 'max_features': 'auto', 'n_estimators': 90
]
classifiers_third = [
KNeighborsClassifier(n_neighbors=25, p=1), # n_neighbors=25, p=1
#DecisionTreeClassifier(), #max_depth=5
MLPClassifier(activation='logistic', alpha=0.1, hidden_layer_sizes=300), # activation='logistic', alpha=0.1, hidden_layer_sizes=300
AdaBoostClassifier(learning_rate = 1, n_estimators = 30), # learning_rate = 1, n_estimators = 30
LinearDiscriminantAnalysis(n_components=2), # n_components=2
LogisticRegression(C = 0.01, penalty = 'l2'), # C = 0.01, penalty = 'l2'
#GradientBoostingClassifier(n_estimators=300,learning_rate=0.01), #{n_estimators=300,learning_rate=0.01}
#QuadraticDiscriminantAnalysis(), # 'tol': 0.001
#GaussianNB(),
#RandomForestClassifier() # 'max_features': 'auto', 'n_estimators': 90
]
def default_classifiers(X_train, X_test, y_train, y_test, data_name):
cross_valid = 10
#no_rfecv = ['Nearest Neighbors', 'RBF SVM', 'Neural Net', 'Naive Bayes', 'QDA']
for clf, name in zip(classifiers_default, names):
print('######## {} - with CV #########'.format(name))
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
print(score,'Test')
cv_scores = cross_val_score(clf, X_train, y_train, cv=cross_valid)
print(np.mean(cv_scores), 'CV')
# Plot ROC values:
plt.figure(2)
plot_roc_values(clf, X_test, y_test, name)
# Print Classification Report:
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
# Draw Confusion Matrix:
if name == 'XGBoost':
cm = confusion_matrix(y_test, y_pred)
plt.figure(3)
draw_confusion_matrix(cm, title='{} | {} - Confusion matrix'.format(data_name, name))
print_voting(X_train, y_train, X_test, y_test, data_name, parameters=1)
plt.figure(2)
draw_roc(data_name)
return
def grid_search_classifiers(X_train, X_test, y_train, y_test, data_name):
optimal_parameters =[]
cross_validation = 10
scores = [
'precision'
]
for clf_orig, name, tuned_parameter in zip(classifiers_default, names, tuned_parameters):
        if name == '':
return
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print('########## {} ##########'.format(name))
clf = GridSearchCV(clf_orig, tuned_parameter, cv=cross_validation,
scoring='%s_macro' % score)
clf.fit(X_train, y_train)
optimal_parameters.append(clf.best_params_)
print("Best parameters set found on development set:")
print(clf.best_params_)
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
return optimal_parameters
def print_voting(X, y, X_test, y_test, data_name, parameters=None):
print('###### Voting - {} ######'.format(data_name))
# Voting Ensemble for Classification
from sklearn.ensemble import VotingClassifier
kfold = KFold(n_splits=10, random_state=42)
# Create the sub models
estimators = []
for name, clf in zip(names, classifiers_default):
estimators.append((name, clf))
# Create the ensemble model
ensemble = VotingClassifier(estimators, voting='soft')
results = cross_val_score(ensemble, X, y, cv=kfold)
ensemble.fit(X, y)
# Plot Voting data for ROC curve
if parameters == 1:
plt.figure(2)
plot_roc_values(ensemble, X_test, y_test, 'Voting')
return
y_true, y_pred = y_test, ensemble.predict(X_test)
print("Detailed classification report:")
print()
print(classification_report(y_true, y_pred))
print('CV-result', results.mean())
print()
print('-----------------')
print()
# Draw Voting Confusion Matrix
cm = confusion_matrix(y_test, y_pred)
draw_confusion_matrix(cm, title='{} | {} - Confusion matrix'.format(data_name, 'Voting'))
if __name__ == '__main__':
# Import data to pandas
df = pd.read_csv('finaldata.csv')
#####
# Shaping Data
first_drop = ['Id', 'Age', 'AgeBinEW', 'Balance', 'BalanceBin', 'LastContactDay', 'WithinMonth', 'PrevAttempsToDaysPassed', 'Id_duplicate', 'PAtDPEF'] # Waste
second_drop = ['Communication', 'LastContactMonth', "NoOfContacts", "CallDuration", "CallHour", 'WithinMonthEF']
third_drop = ["PrevAttempts", "Outcome", "DaysPassed"]
df_first_data = df.drop(first_drop, axis=1)
df_second_data = df_first_data.drop(second_drop, axis=1)
df_third_data = df_second_data.drop(third_drop, axis=1)
datas = [df_first_data, df_second_data, df_third_data]
data_names = ['First Data', 'Second Data', 'Third Data']
#####
for data, data_name in zip(datas, data_names):
print('\n\n>>>>>>>>>>>>>>>> {} <<<<<<<<<<<<<<<<<\n'.format(data_name))
data = | pd.get_dummies(data, drop_first=True) | pandas.get_dummies |
import pandas as pd
import numpy as np
import sys
sys.path.append('../')
from tqdm import tqdm
import networkx as nx
import markov_clustering as mc
import random
from sklearn.model_selection import KFold
from code_py.DIAMOnD import *
from sklearn.preprocessing import normalize
from operator import itemgetter
from scipy.stats import hypergeom
import pickle
import statistics
import math
class Human_Genes_Graph_Analysis:
def __init__(self,folder_path,disease_ID):
self.folder_path = folder_path
self.data_path = folder_path + "data/"
self.disease_ID = disease_ID
super(Human_Genes_Graph_Analysis, self).__init__()
# ============================ PREPROCESSING
def preprocessing_dataset(self, homo_sap=True, drop_duplicates=True, remove_self_loops=True, write_txt = True):
"""
        filtering dataset
        :param homo_sap: bool -> filters the dataset to homo sapiens genes
        :param drop_duplicates: bool -> removes the duplicates in the dataset
        :param remove_self_loops: bool -> removes the self loops from the ppi
        :param write_txt: bool -> writes output txt file
"""
self.homo_sapiens_genes = pd.read_csv(self.data_path+'BIOGRID-ORGANISM-Homo_sapiens-4.4.204.tab3.txt', sep='\t', header=0,low_memory=False)
if homo_sap:
self.homo_sapiens_genes = self.homo_sapiens_genes[(self.homo_sapiens_genes["Experimental System Type"]=='physical')]
self.homo_sapiens_genes = self.homo_sapiens_genes[(self.homo_sapiens_genes["Organism ID Interactor A"]==9606) & (self.homo_sapiens_genes["Organism ID Interactor B"]==9606)]
self.trial = self.homo_sapiens_genes
if write_txt:
self.homo_sapiens_genes[['Official Symbol Interactor A', 'Official Symbol Interactor B']].to_csv(self.folder_path +'data/Biogrid_4.4.204.txt', header=None, index=None, sep=' ', mode='a')
if drop_duplicates:
self.homo_sapiens_genes = self.homo_sapiens_genes.drop_duplicates()
if remove_self_loops:
self.homo_sapiens_genes = self.homo_sapiens_genes[(self.homo_sapiens_genes['Official Symbol Interactor A'] != self.homo_sapiens_genes['Official Symbol Interactor B'])]
return self.homo_sapiens_genes
# ============================ QUERY DISEASE GENES
def query_disease_genes(self):
"""
        Filter the curated gene-disease associations dataset according to the input disease query
"""
self.diseases = pd.read_csv(self.data_path+"curated_gene_disease_associations.tsv", sep='\t')
self.disease_query = self.diseases[self.diseases["diseaseId"]==self.disease_ID]
self.disease_list = list(self.disease_query['geneSymbol'])
print("Found " + str(len(self.disease_list)) + ' disease genes in ' + str(self.disease_query['diseaseName'].values[0]))
return self.disease_query,self.disease_list
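    # Example usage sketch; the folder path and disease ID below are assumptions:
    #
    #   analysis = Human_Genes_Graph_Analysis('path/to/project/', 'C0036341')
    #   disease_df, disease_genes = analysis.query_disease_genes()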
# ============================ QUERY DISEASE GENES EXTENDED
def query_disease_genes_extendend(self):
"""
        Filter the all-gene disease associations dataset according to the input disease query
"""
self.diseases_ex = | pd.read_csv(self.data_path+"all_gene_disease_associations.tsv", sep='\t') | pandas.read_csv |
# ========================================================================
#
# Imports
#
# ========================================================================
import os
import re
import numpy as np
import numpy.testing as npt
import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt
import pandas as pd
import unittest
# ========================================================================
#
# Some defaults variables
#
# ========================================================================
# plt.rc('text', usetex=True)
# plt.rc('font', family='serif', serif='Times')
cmap_med = [
"#F15A60",
"#7AC36A",
"#5A9BD4",
"#FAA75B",
"#9E67AB",
"#CE7058",
"#D77FB4",
"#737373",
]
cmap = [
"#EE2E2F",
"#008C48",
"#185AA9",
"#F47D23",
"#662C91",
"#A21D21",
"#B43894",
"#010202",
]
dashseq = [
(None, None),
[10, 5],
[10, 4, 3, 4],
[3, 3],
[10, 4, 3, 4, 3, 4],
[3, 3],
[3, 3],
]
markertype = ["s", "d", "o", "p", "h"]
# ========================================================================
#
# Function definitions
#
# ========================================================================
def load_pelec_error(fdir, theory_order):
"""Load the error for each resolution"""
lst = []
resolutions = sorted(
[
int(f)
for f in os.listdir(fdir)
if os.path.isdir(os.path.join(fdir, f)) and re.match("^[0-9]+$", f)
],
key=int,
)
resdirs = [os.path.join(fdir, str(res)) for res in resolutions]
for k, (res, resdir) in enumerate(zip(resolutions, resdirs)):
fname = os.path.join(resdir, "mmslog")
df = pd.read_csv(fname, delim_whitespace=True)
idx = -1
print(
"Loading {0:d} at t = {1:e} (step = {2:d})".format(
res, df["time"].iloc[idx], df.index[idx]
)
)
lst.append(
[
res,
1.0 / res,
df["rho_mms_err"].iloc[idx],
df["u_mms_err"].iloc[idx],
df["v_mms_err"].iloc[idx],
df["w_mms_err"].iloc[idx],
df["p_mms_err"].iloc[idx],
]
)
edf = pd.DataFrame(
lst,
columns=[
"resolution",
"dx",
"rho_mms_err",
"u_mms_err",
"v_mms_err",
"w_mms_err",
"p_mms_err",
],
)
# Theoretical error
idx = 1
edf["rho_theory"] = (
edf["rho_mms_err"].iloc[idx]
* (edf["resolution"].iloc[idx] / edf["resolution"]) ** theory_order
)
edf["u_theory"] = (
edf["u_mms_err"].iloc[idx]
* (edf["resolution"].iloc[idx] / edf["resolution"]) ** theory_order
)
edf["v_theory"] = (
edf["v_mms_err"].iloc[idx]
* (edf["resolution"].iloc[idx] / edf["resolution"]) ** theory_order
)
edf["w_theory"] = (
edf["w_mms_err"].iloc[idx]
* (edf["resolution"].iloc[idx] / edf["resolution"]) ** theory_order
)
edf["p_theory"] = (
edf["p_mms_err"].iloc[idx]
* (edf["resolution"].iloc[idx] / edf["resolution"]) ** theory_order
)
return edf.loc[:, (edf != 0).any(axis=0)]
def calculate_ooa(edf):
"""Calculate the order of accuracy given an error dataframe."""
sfx_mms = "_mms_err"
fields = [re.sub(sfx_mms, "", col) for col in edf.columns if col.endswith(sfx_mms)]
columns = []
data = np.zeros((len(edf["resolution"]) - 1, len(fields)))
for k, field in enumerate(fields):
columns.append(field + "_ooa")
data[:, k] = -np.diff(np.log(edf[field + sfx_mms])) / np.diff(
np.log(edf["resolution"])
)
ooa = | pd.DataFrame(data, columns=columns) | pandas.DataFrame |
import random
import pandas as pd
import pytest
from suda import suda, find_msu
@pytest.fixture
def data():
persons = [
{'gender': 'female', 'region': 'urban', 'education': 'secondary incomplete', 'labourstatus': 'employed'},
{'gender': 'female', 'region': 'urban', 'education': 'secondary incomplete', 'labourstatus': 'employed'},
{'gender': 'female', 'region': 'urban', 'education': 'primary incomplete', 'labourstatus': 'non-LF'},
{'gender': 'male', 'region': 'urban', 'education': 'secondary complete', 'labourstatus': 'employed'},
{'gender': 'female', 'region': 'rural', 'education': 'secondary complete', 'labourstatus': 'unemployed'},
{'gender': 'male', 'region': 'urban', 'education': 'secondary complete', 'labourstatus': 'employed'},
{'gender': 'female', 'region': 'urban', 'education': 'primary complete', 'labourstatus': 'non-LF'},
{'gender': 'male', 'region': 'urban', 'education': 'post-secondary', 'labourstatus': 'unemployed'},
{'gender': 'female', 'region': 'urban', 'education': 'secondary incomplete', 'labourstatus': 'non-LF'},
{'gender': 'female', 'region': 'urban', 'education': 'secondary incomplete', 'labourstatus': 'non-LF'},
{'gender': 'female', 'region': 'urban', 'education': 'secondary complete', 'labourstatus': 'non-LF'}
]
return pd.DataFrame(persons)
@pytest.fixture
def large_data():
return pd.read_csv('test_data.csv')
# def test_performance(large_data):
# suda(large_data, 4)
def test_msu(data):
groups = [['gender', 'region']]
aggregations = {'msu': 'min', 'suda': 'sum', 'fK': 'min', 'fM': 'sum'}
for column in data.columns:
aggregations[column] = 'max'
results = find_msu(data, groups=groups, aggregations=aggregations, att=4)
results = results.fillna(0)
assert (results.loc[0].msu == 0)
assert (results.loc[1].msu == 0)
assert (results.loc[2].msu == 0)
assert(results.loc[3].msu == 0)
assert (results.loc[4].msu == 2)
assert (results.loc[5].msu == 0)
assert (results.loc[6].msu == 0)
assert(results.loc[7].msu == 0)
assert (results.loc[8].msu == 0)
assert (results.loc[9].msu == 0)
assert (results.loc[10].msu == 0)
def test_suda(data):
results = suda(data, max_msu=3)
print(results)
assert (results.loc[0].msu == 0)
assert (results.loc[1].msu == 0)
assert (results.loc[2].msu == 1)
assert(results.loc[3].msu == 0)
assert (results.loc[4].msu == 1)
assert (results.loc[5].msu == 0)
assert (results.loc[6].msu == 1)
assert(results.loc[7].msu == 1)
assert (results.loc[8].msu == 0)
assert (results.loc[9].msu == 0)
assert (results.loc[10].msu == 2)
assert (results.loc[0].suda == 0)
assert (results.loc[1].suda == 0)
assert (results.loc[2].suda == 15)
assert(results.loc[3].suda == 0)
assert (results.loc[4].suda == 20)
assert (results.loc[5].suda == 0)
assert (results.loc[6].suda == 15)
assert(results.loc[7].suda == 20)
assert (results.loc[8].suda == 0)
assert (results.loc[9].suda == 0)
assert (results.loc[10].suda == 5)
def test_suda_with_columns(data):
results = suda(data, max_msu=2, columns=['gender', 'region', 'education'])
# check we get back columns we didn't include in SUDA calcs
assert(results.loc[0].labourstatus == 'employed')
assert (results.loc[0].msu == 0)
assert (results.loc[1].msu == 0)
assert (results.loc[2].msu == 1)
assert(results.loc[3].msu == 0)
assert (results.loc[4].msu == 1)
assert (results.loc[5].msu == 0)
assert (results.loc[6].msu == 1)
assert(results.loc[7].msu == 1)
assert (results.loc[8].msu == 0)
assert (results.loc[9].msu == 0)
assert (results.loc[10].msu == 0)
assert (results.loc[0].suda == 0)
assert (results.loc[1].suda == 0)
assert (results.loc[2].suda == 4)
assert(results.loc[3].suda == 0)
assert (results.loc[4].suda == 4)
assert (results.loc[5].suda == 0)
assert (results.loc[6].suda == 4)
assert(results.loc[7].suda == 4)
assert (results.loc[8].suda == 0)
assert (results.loc[9].suda == 0)
assert (results.loc[10].suda == 0)
def test_suda_no_uniques():
persons = [
{'gender': 'female', 'region': 'urban', 'education': 'secondary incomplete', 'labourstatus': 'employed'},
{'gender': 'female', 'region': 'urban', 'education': 'secondary incomplete', 'labourstatus': 'employed'}, {'gender': 'female', 'region': 'urban', 'education': 'secondary incomplete', 'labourstatus': 'employed'},
{'gender': 'female', 'region': 'urban', 'education': 'secondary incomplete', 'labourstatus': 'employed'},
{'gender': 'female', 'region': 'urban', 'education': 'secondary incomplete', 'labourstatus': 'employed'},
{'gender': 'female', 'region': 'urban', 'education': 'secondary incomplete', 'labourstatus': 'employed'},
{'gender': 'female', 'region': 'urban', 'education': 'secondary incomplete', 'labourstatus': 'employed'},
{'gender': 'female', 'region': 'urban', 'education': 'secondary incomplete', 'labourstatus': 'employed'},
{'gender': 'female', 'region': 'urban', 'education': 'secondary incomplete', 'labourstatus': 'employed'},
{'gender': 'female', 'region': 'urban', 'education': 'secondary incomplete', 'labourstatus': 'employed'}
]
df = | pd.DataFrame(persons) | pandas.DataFrame |
import pandas as pd
def CSVWriter (iterable, outLoc, header="", ):
"""
Writes an iterable to a CSV file.
:param iterable: List of list
:param outLoc: file location. Where to place it.
:param header: header of the CSV file
:return: 1
"""
if not iterable:
print ("nothing to write")
return 0
out = open(outLoc, 'w')
if header:
out.write(header+'\n')
#Only works if iterable is a nested list
for member in iterable:
for item in member:
out.write(str(item)+',')
out.write('\n')
print("write to "+outLoc+" successful.")
return 1
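# Example usage sketch; the rows and file name are assumptions:
#
#   rows = [['geneA', 12, 3], ['geneB', 7, 2]]
#   CSVWriter(rows, 'counts.csv', header='read,total,unique')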
def seqParser(seqLoc):
"""
Takes a FASTA formatted list of sequences and returns a properly formatted nested list
:param seqLoc: fasta formatted file
:return: nested list in the form [[name, seq],...]
"""
f=open(seqLoc,'r')
RNAseqs = f.readlines()
f.close()
RNAseqlist = []
for i in range(0, len(RNAseqs)):
if RNAseqs[i][0] == ">":
RNAseqlist.append([RNAseqs[i].rstrip()[1:],RNAseqs[i+1].rstrip()])
return RNAseqlist
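# Example sketch of the expected two-line FASTA format: a file containing
#   >seq1
#   ACGUACGU
# is returned as [['seq1', 'ACGUACGU']].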
def countsin(inLoc):
"""
Takes saved count file and reads it into a counts nested list.
:param inLoc: counts file
:return: nested list. counts nested list. [[read, total number, unique number],...]
"""
countFile = open(inLoc, "r").readlines()
counts=[]
for i in range(1, len(countFile)):
temp = countFile[i].rstrip().split(",")
counts.append([temp[0][8:], temp[1], temp[2]])
return counts
def tailParser(inLoc):
"""
parses .tail file into a nested list usable by other modules
:param inLoc: CSV input .tail file as produced by aligner.tailcalc
:return: nested list. [[sequence, #reads, gene, 3'end, tail len, tail seq],...]
"""
f = open(inLoc, 'r')
tails = f.readlines()
f.close()
tailList = []
for i in range(len(tails)):
if i==0: continue #skips the header
line = tails[i].rstrip().split(',')
tailList.append(line)
return tailList
def repeater(item, list, reps):
'''Takes an item and a list and then adds a copy of the item to the list reps number of times.'''
for i in range(reps):
list.append(item)
return
def pdTailMaker(inLoc):
"""
Takes standard tail file and returns a pandas dataframe
"""
tails = tailParser(inLoc)
pdTails = []
for tail in tails:
type = tail[2][tail[2].find("|")+1:]
name = tail[2][:tail[2].find("|")]
repeater([name,tail[3],tail[4],tail[5],type],pdTails,int(tail[1]))
df = | pd.DataFrame(pdTails,columns=['Gene','3Loc','TailLength','TailSeq', 'Type']) | pandas.DataFrame |
import os
import numpy as np
import pandas as pd
from tqdm import tqdm
def load_hydroSystem(file_path):
'''
    Processes and imports the Condition Monitoring of Hydraulic Systems dataset.
    I/O:
        file_path: a string containing the directory where the sensor datasets are stored;
        return: a NumPy array of shape ((number of instances, timestamp, features), label)
'''
    # List the files containing the sensor data
load_names = os.listdir(file_path)
load_names.remove('description.txt')
load_names.remove('documentation.txt')
    # Column indices used to upsample the lower-rate variables onto the 6000-sample grid
cols_1 = np.arange(0, 6000, 100)
cols_10 = np.arange(0, 6000, 10)
    # Import the data contained in the ".txt" files
# Features
pressure = []
flow = []
temp = []
    print('Loading the datasets:')
for name in tqdm(load_names):
if 'PS' in name and name != 'EPS1.txt':
ps = pd.read_csv(f'{file_path}{name}', delimiter='\t', header=None)
pressure.append(ps)
elif 'FS' in name:
aux = pd.read_csv(f'{file_path}{name}', delimiter='\t', header=None)
fs = pd.DataFrame(data=np.nan*np.ones((aux.shape[0], 6000)))
fs[cols_10] = aux.values
fs = fs.interpolate(axis='columns')
flow.append(fs)
elif 'TS' in name:
aux = pd.read_csv(f'{file_path}{name}', delimiter='\t', header=None)
t = pd.DataFrame(data=np.nan*np.ones((aux.shape[0], 6000)))
t[cols_1] = aux.values
t = t.interpolate(axis='columns')
temp.append(t)
eps = pd.read_csv(f'{file_path}EPS1.txt', delimiter='\t', header=None)
vs = pd.read_csv(f'{file_path}VS1.txt', delimiter='\t', header=None)
ce = pd.read_csv(f'{file_path}CE.txt', delimiter='\t', header=None)
cp = pd.read_csv(f'{file_path}CP.txt', delimiter='\t', header=None)
se = pd.read_csv(f'{file_path}SE.txt', delimiter='\t', header=None)
aux_dfs = [vs, ce, cp, se]
mod_dfs = []
for df in aux_dfs:
aux = df.copy()
aux_df = pd.DataFrame(data=np.nan*np.ones((aux.shape[0], 6000)))
aux_df[cols_1] = aux.values
aux_df = aux_df.interpolate(axis='columns')
mod_dfs.append(aux_df)
# Labels
labels = pd.read_csv(f'{file_path}profile.txt', delimiter='\t', header=None)
labels = labels[0].copy()
    # Concatenate the data
    data = []
    print('Processing the data:')
for cycle in tqdm(range(2205)):
example = np.c_[
pressure[0].loc[cycle, :].values,
pressure[1].loc[cycle, :].values,
pressure[2].loc[cycle, :].values,
pressure[3].loc[cycle, :].values,
pressure[4].loc[cycle, :].values,
pressure[5].loc[cycle, :].values,
flow[0].loc[cycle, :].values,
flow[1].loc[cycle, :].values,
temp[0].loc[cycle, :].values,
temp[1].loc[cycle, :].values,
temp[2].loc[cycle, :].values,
temp[3].loc[cycle, :].values,
eps.loc[cycle, :].values,
mod_dfs[0].loc[cycle, :].values,
mod_dfs[1].loc[cycle, :].values,
mod_dfs[2].loc[cycle, :].values,
mod_dfs[3].loc[cycle, :].values]
data.append(example)
return np.array(data), labels
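# Example usage sketch; the directory below is an assumption and must contain the
# sensor text files (PS1..PS6, FS1, FS2, TS1..TS4, EPS1, VS1, CE, CP, SE, profile.txt):
#
#   data, labels = load_hydroSystem('./hydraulic/')
#   # data.shape -> (2205, 6000, 17); labels is the first column of profile.txt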
def create_hydrodf(array, labels):
'''
    Organises the Hydraulic Systems dataset into tabular form.
    I/O:
        array: numpy array containing the data in a three-dimensional format;
        labels: list, numpy array or pandas series containing the labels of the dataset.
'''
df = pd.DataFrame()
label_exp = []
label = []
i = 0
    # concatenate the data
for exp in tqdm(array):
df = pd.concat([df, | pd.DataFrame(exp) | pandas.DataFrame |
### ----------------- IMPORTS ----------------- ###
import os
from beartype import beartype
import numpy as np
import pandas as pd
from backend.adi_parse import AdiParse
from backend import search_function
from backend.get_all_comments import GetComments
### ------------------------------------------- ###
@beartype
def get_file_data(folder_path:str, channel_structures:dict):
"""
Get file data in dataframe
Parameters
----------
folder_path : str
channel_structures : dict, keys = total channels, values = channel list
Returns
-------
file_data : pd.DataFrame
"""
# make lower string and path type
    folder_path = os.path.normpath(folder_path.lower())
file_data = pd.DataFrame()
cntr = 0
# walk through all folders
for root, dirs, files in os.walk(folder_path):
# get labchart file list
filelist = list(filter(lambda k: '.adicht' in k, files))
for file in filelist: # iterate over list
# initiate adi parse object
adi_parse = AdiParse(os.path.join(root, file), channel_structures)
# get all file data in dataframe
temp_file_data = adi_parse.get_all_file_properties()
# add folder path
temp_file_data['folder_path'] = os.path.normcase(root)
# apppend to dataframe
file_data = file_data.append(temp_file_data, ignore_index = True)
cntr+=1
# convert data frame to lower case
file_data = file_data.apply(lambda x: x.astype(str).str.lower())
# convert file length to int
file_data['file_length'] = file_data['file_length'].astype(np.int64)
# make paths relative
file_data.folder_path = file_data.folder_path.str.replace(folder_path, '', regex=False)
file_data.folder_path = file_data.folder_path.map(lambda x: x.lstrip('\\'))
return file_data
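# Example usage (a minimal sketch; the folder path and channel layout below are
# hypothetical and only illustrate the expected inputs):
#   channel_structures = {4: ['frontal', 'parietal', 'hippocampus', 'emg']}
#   file_data = get_file_data(r'C:\experiments\labchart_files', channel_structures)
#   print(file_data[['folder_path', 'file_length']].head())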
def get_channel_structures(user_data):
"""
Get channel structure from labchart files based on user data
Parameters
----------
user_data : Dataframe with user data for SAKE input
Returns
-------
order : List with channels in order
"""
# define separator
separtor = '-'
# get data containing channel order
channel_structures = user_data[user_data['Source'] == 'total_channels'].reset_index().drop(['index'], axis = 1)
regions = {}
for i in range(len(channel_structures)):
# retrieve channel names
channel_names = channel_structures['Assigned Group Name'][i]
# get list of channels for each total channels entry
region_list = channel_names.split(separtor)
regions.update({int(channel_structures['Search Value'][i]): region_list})
return regions
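# Illustrative input/output (values are made up): a user_data row with
# Source='total_channels', 'Search Value'='4' and
# 'Assigned Group Name'='frontal-parietal-hippocampus-emg' produces
#   {4: ['frontal', 'parietal', 'hippocampus', 'emg']}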
def add_animal_id(file_data, user_data):
"""
Add animal id from channel name to labchart data
Parameters
----------
file_data : pd.DataFrame
user_data : Dataframe with user data for SAKE input
Returns
-------
    file_data : pd.DataFrame with an animal id column added
user_data: Dataframe with user data for SAKE input
"""
# get data containing channel order
drop_idx = user_data['Search Function'] == 'within'
animal_id = user_data[drop_idx].reset_index().drop(['index'], axis = 1)
# check if present
if len(animal_id) > 1:
raise(Exception('Only one Search Function with -within- is allowed!\n'))
if len(animal_id) == 0:
raise(Exception('Search Function -within- is required!\n'))
# convert to dictionary
ids = animal_id.loc[0].to_dict()
# define separator
sep = ids['Search Value']
# get file name
# ids['Category']
file_data['animal_id'] = ''
for i,name in enumerate(file_data[ids['Source']]):
if sep in name:
file_data.at[i, ids['Category']] = sep + name.split(sep)[1] + sep
return file_data, user_data.drop(np.where(drop_idx)[0], axis = 0)
def get_categories(user_data):
"""
Get unique categories and groups in dictionary.
Parameters
----------
user_data : pd.DataFrame, with user group inputs.
Returns
-------
groups : dict, keys are unique categories and groups.
"""
# get unique categories
unique_categories = user_data['Category'].unique()
groups = {} # create group dictionary
for category in unique_categories: # iterate over categories
# which groups exist in categories
groups.update({category: list(user_data['Assigned Group Name'][user_data['Category'] == category]) })
return groups
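# Illustrative output (categories and group names are made up): user_data rows
# with Category 'sex' (groups 'male'/'female') and Category 'treatment'
# (groups 'saline'/'drug') yield
#   {'sex': ['male', 'female'], 'treatment': ['saline', 'drug']}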
def reverse_hot_encoding(sort_df):
"""
Reverse hot coding in dataframe and replace with column names or nan
Parameters
----------
sort_df : pd.DataFrame, with columns in one hot encoding format
Returns
-------
col_labels: 1D np.array with columns retrieved from one hot encoded format
"""
# get columns
labels = np.array(sort_df.columns)
# find index where column is True #np.argmax(np.array(sort_df), axis = 1)
idx_array = np.array(sort_df)
col_labels = np.zeros(len(sort_df), dtype=object)
for i in range(idx_array.shape[0]): # iterate over idx_array
# find which column
idx = np.where(idx_array[i] == True)[0]
if len(idx) == 0: # if no True value present
col_labels[i] = np.NaN
elif len(idx) > 1: # if more than one True value present
col_labels[i] = np.NaN
elif len(idx) == 1: # if one True value present
col_labels[i] = labels[idx[0]]
return col_labels
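# Illustrative example (made-up one-hot frame):
#   sort_df = pd.DataFrame({'male': [True, False, False],
#                           'female': [False, True, False]})
#   reverse_hot_encoding(sort_df)  # -> array(['male', 'female', nan], dtype=object)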
def convert_logicdf_to_groups(index_df, logic_index_df, groups_ids:dict):
"""
    Convert logic from logic_index_df to groups and append to index_df
Parameters
----------
index_df : pd.DataFrame, to append categories
logic_index_df : pd.DataFrame, containing logic
    groups_ids : dict, containing categories as keys and groups as values
Returns
-------
index_df : pd.DataFrame
"""
# convert logic to groups
for category, groups in groups_ids.items():
# check if all groups present in dataframe
groups_present = all(elem in logic_index_df.columns for elem in groups)
if (groups_present == True): # are all groups present in dataframe?
if (logic_index_df[groups].any().any() == True): # was any group detected?
# convert logic to groups
index_df[category] = reverse_hot_encoding(logic_index_df[groups])
return index_df
def get_source_logic(file_data, user_data, source:str):
"""
Find which unique groups exist and return as dataframe
Parameters
----------
user_data : pd.DataFrame
source : str, source destination
Returns
-------
index : pd.DataFrame
"""
# get only user data form source
user_data = user_data[user_data['Source'] == source].reset_index()
index = {}
for i in range(len(user_data)): # iterate over user data entries
# find index for specified source and match string
idx = getattr(search_function, user_data.at[i, 'Search Function'])(file_data[source], user_data.at[i, 'Search Value'])
# append to index dictionary
index.update({user_data.at[i, 'Assigned Group Name']: idx})
    return pd.DataFrame(index)
__author__ = 'lucabasa'
__version__ = '1.0.6'
__status__ = 'development'
'''
This script contains methods to preserve the DataFrame structure inside of a pipeline.
In this way, it is possible to create custom transformers that create or delete features inside of the pipeline.
Moreover, the creation of dummies takes care of checking if the train and test set have the same dummies,
preventing the annoying error when a model is called and the shape of the data is not consistent.
Examples with the use of these methods can be found here https://www.kaggle.com/lucabasa/understand-and-use-a-pipeline
'''
import pandas as pd
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import FeatureUnion
from sklearn.preprocessing import StandardScaler, RobustScaler
from sklearn.impute import SimpleImputer
import warnings
class feat_sel(BaseEstimator, TransformerMixin):
'''
This transformer selects either numerical or categorical features.
In this way we can build separate pipelines for separate data types.
'''
def __init__(self, dtype='numeric'):
        self.dtype = dtype # do not use parameters like _dtype as it doesn't play nice with GridSearch
def fit( self, X, y=None ):
return self
def transform(self, X, y=None):
if self.dtype == 'numeric':
num_cols = X.columns[X.dtypes != object].tolist()
return X[num_cols]
elif self.dtype == 'category':
cat_cols = X.columns[X.dtypes == object].tolist()
return X[cat_cols]
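# Minimal usage sketch (column names and values are hypothetical):
#   X = pd.DataFrame({'age': [25, 31], 'city': ['rome', 'milan']})
#   feat_sel('numeric').fit_transform(X)   # keeps only the 'age' column
#   feat_sel('category').fit_transform(X)  # keeps only the 'city' column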
class df_imputer(TransformerMixin, BaseEstimator):
'''
Just a wrapper for the SimpleImputer that keeps the dataframe structure
'''
def __init__(self, strategy='mean'):
self.strategy = strategy
self.imp = None
self.statistics_ = None
def fit(self, X, y=None):
self.imp = SimpleImputer(strategy=self.strategy)
self.imp.fit(X)
self.statistics_ = pd.Series(self.imp.statistics_, index=X.columns)
return self
def transform(self, X):
# assumes X is a DataFrame
Ximp = self.imp.transform(X)
Xfilled = pd.DataFrame(Ximp, index=X.index, columns=X.columns)
return Xfilled
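# Minimal usage sketch (made-up values):
#   X = pd.DataFrame({'a': [1.0, np.nan, 3.0]})
#   df_imputer(strategy='mean').fit_transform(X)  # returns a DataFrame, 'a' filled with 2.0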
class df_scaler(TransformerMixin, BaseEstimator):
'''
Wrapper of StandardScaler or RobustScaler
'''
def __init__(self, method='standard'):
self.scl = None
self.scale_ = None
self.method = method
        if self.method == 'standard':
self.mean_ = None
elif method == 'robust':
self.center_ = None
self.columns = None # this is useful when it is the last step of a pipeline before the model
def fit(self, X, y=None):
if self.method == 'standard':
self.scl = StandardScaler()
self.scl.fit(X)
self.mean_ = pd.Series(self.scl.mean_, index=X.columns)
elif self.method == 'robust':
self.scl = RobustScaler()
self.scl.fit(X)
self.center_ = pd.Series(self.scl.center_, index=X.columns)
            self.scale_ = pd.Series(self.scl.scale_, index=X.columns)
class resource_database():
import pandas as pd
import ujson as json
from io import StringIO
from multiprocessing import Pool
from functools import partial
import ast
import os
import re
import glob
import textwrap
from contextlib import suppress
from pandas.errors import EmptyDataError
from selenium.common.exceptions import WebDriverException
import gnureadline
from prompt_toolkit import PromptSession
global tag_aliases,db,families,cat_files,wrapper,suppress,directory,id_to_cat,ps
global pd,json,StringIO,Pool,partial,ast,os,re,textwrap,WebDriverException,glob,EmptyDataError,suppress
#global open_cat,close_cat,close_all_cats,add_cat,add_cat_attributes
#global get_tag_aliases,add_alias,find,add_family,add_ref,save,end,show
ps = PromptSession()
wrapper = textwrap.TextWrapper(initial_indent=" ")
directory = os.path.dirname(os.path.realpath(__file__)) + '/'
with open(directory+'ID_to_cat.txt') as file:
id_to_cat = ast.literal_eval(file.read())
#print(var)
with open(directory+'tag_aliases.csv', 'r') as file:
tag_aliases = [set(line[:-1].split(',')) for line in file.readlines()]
with open(directory+'families.txt', 'r') as file:
families = json.loads(file.read())
#for key,lst in families.items():
# families[key] = set(lst)
cat_files = {}
import os
for file_name in os.listdir(directory+"categories"):
if not file_name.startswith('.'):
cat_name = file_name[:-4]
cat_files[cat_name] = None
@classmethod
def get_ID_to_cat(self,ID):
global id_to_cat
if id_to_cat is None:
with open(directory+"ID_to_cat.txt","r") as file:
id_to_cat = ast.literal_eval(file.read())
try:
return id_to_cat[str(ID)]
except KeyError:
print("No ref with specified ID was found!")
return []
@classmethod
def add_ref_to_id_to_cat(self,ID,cats):
global id_to_cat
if id_to_cat is None:
with open(directory+"ID_to_cat.txt","r") as file:
id_to_cat = ast.literal_eval(file.read())
id_to_cat[str(ID)] = cats
def is_a_cat(cat):
return cat in cat_files
def get_input(query):
while True:
user_input = ps.prompt(query).lower()
lst_input = re.split("[, ]+",user_input)
if lst_input[0] == "show":
print()
attr = lst_input[1] if len(lst_input) > 1 else re.split("[, ]+",ps.prompt("Attribute to show: "))[0]
if attr == "tag":
cats = ""
while True:
cats = ps.prompt("Categories to search for tags (type 'all' to include all tags): ")
if cats == "show":
resource_database.show(["cats"])
else:
break
resource_database.show(["tags",re.split("[, ]+", cats)])
elif attr == "alias":
resource_database.show(["aliases"])
elif attr == "cat":
resource_database.show(["cats"])
elif attr == "fam":
resource_database.show(["families"])
else:
print("Field '"+attr+"' does not exist.")
"""
if lst_input[1] == "key":
query = ["keys",re.split("[, ]+",input(
"Categories to search for keys (type 'all' to include all keys): "))]
resource_database.show(query)
"""
print()
else:
return user_input.lower()
@classmethod
def SetParser(self,data):
return ast.literal_eval(data)
@classmethod
def load_tags(self):
with open(directory+'tag_aliases.csv', 'r') as file:
tag_aliases = [set(line.split(',')) for line in file.readlines()]
@classmethod
def load_families(self):
with open(directory+'families.txt', 'r') as file:
families = json.loads(file.read())
@classmethod
def open_cat(self,cat_name):
if cat_name in cat_files and cat_files[cat_name] is not None:
return True
try:
converters = {s: (lambda data : None if data=="" else ast.literal_eval(data)) for s in
['keys','tags']}
cat_files[cat_name] = pd.read_csv(directory + "categories/"+cat_name+".csv",
converters=converters,index_col=0)
return True
except (FileNotFoundError,EmptyDataError):
temp = self.get_input("Category does not exist. Create a new category? ")
if temp.lower() == "yes":
open(directory+"categories/"+cat_name+".csv","w+").close()
cat_files[cat_name] = pd.DataFrame()#columns=["tags","keys","summary",
#"family","ref type","date","ref"])
return True
else:
print("Okay, category not created.")
return False
@classmethod
def close_cat(self,cat_name):
cat_files[cat_name].to_csv(directory+"categories/"+cat_name+".csv")
cat_files[cat_name] = None
@classmethod
def close_all_cats(self):
for cat_name in cat_files.keys():
            self.close_cat(cat_name)
@classmethod
def add_cat(self,cat_name,cat_attr=None):
if cat_name in cat_files:
return False
f = open(cat_name +".txt","w+")
f.write("{}")
cat_files[cat_name] = None
@classmethod
def edit_cat_attributes(self,cat_name,cat_attr):
self.open_cat(cat_name)
if isinstance(cat_attr, list):
cat_files[cat_name].extend(cat_attr)
else:
cat_files[cat_name].append(cat_attr)
@classmethod
def get_tag_aliases(self,tag):
tag = tag.lower()
for equiv in tag_aliases:
if tag in equiv:
return equiv
@classmethod
def add_alias(self,lst):
final ={i.lower() for i in lst}
for equiv in tag_aliases:
for l in lst:
if l in equiv:
final.update(equiv)
tag_aliases.remove(equiv)
break
tag_aliases.append(final)
@classmethod
def query(self,cats=None,tags=None,families=None,ref_types=None):
if cats == None:
cats = cat_files.keys()
if tags != None:
tags = set(tags)
if ref_types != None:
ref_types = set(ref_types)
hit_ID = []
hits = []
hit_cat_names = []
for cat_name in cats:
if cat_name not in cat_files:
print("\nWarning: "+cat_name+" is not the name of a category.")
continue
if cat_files[cat_name] is None:
self.open_cat(cat_name)
for ID,ref_info in cat_files[cat_name].iterrows():
if ID not in hit_ID:
if tags == None or len(tags.intersection(ref_info['tags'])) > 0:
if families == None or ref_info['family'] in families:
if ref_types == None or ref_info['ref type'] in ref_types:
hit_ID.append(int(ID))
hit_cat_names.append(cat_name)
hits.append(ref_info)
return hits,hit_ID
@classmethod
def add_family(self,family_name,cats=[]):
#families[family_name] = set(cats)
families[family_name] = list(cats)
@classmethod
def add_ref(self,ref,cats=[],tags=None,keys=None,summary=None,family=None,ref_type=None):
if ref in ["download","downloads"]:
old_path = max(glob.iglob(os.path.expanduser('~/Downloads/*')), key=lambda a:os.stat(a).st_birthtime)
new_path = os.path.expanduser("~/resources/downloads/")+ os.path.basename(old_path)
os.rename(old_path,new_path)
ref = new_path
if ref_type == None:
if len(ref) > 3 and (ref[0:4] == "http" or ref[0:4] == "www."):
ref_type = "url"
elif " " not in ref and "/" in ref:
ref_type = "file"
else:
ref_type = "note"
if ref_type == "url":
if ref[0:4] != "www." and ref[0:4] != "http":
ref = "www." + ref
import datetime
t = datetime.date.today().strftime("%B %d, %Y")
if family != None:
if family not in families:
families[family] = list(cats)
else:
for c in cats:
if c not in families[family]:
families[family].append(c)
series = pd.Series({"tags":tags,"keys":keys,"summary":summary,"family":family,
"ref type":ref_type,"date":t,"ref":ref})
with open(directory+"max_ID.txt","r+") as file:
#a = "wow"
curr_max_ID = int(file.read().replace('\x00',''))
curr_max_ID += 1
file.truncate(0)
file.write(str(curr_max_ID))
series.name = str(curr_max_ID)
#with open("resources/ref_ID","a") as file:
# file.write("\n"+ID + ":" + cats)
for cat_name in cats:
self.open_cat(cat_name)
cat_files[cat_name] = cat_files[cat_name].append(series)
#cat_files[cat_name] = pd.DataFrame(series).transpose()#pd.DataFrame(series,columns=["tags","keys","summary",
# "family","type","date","ref"])
self.close_cat(cat_name)
self.add_ref_to_id_to_cat(curr_max_ID,cats)
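    # Hypothetical call, for illustration only (the reference, category and tags
    # below are made up):
    #   resource_database.add_ref('www.python.org/doc', cats=['python'],
    #                             tags=['docs'], summary='official documentation')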
@classmethod
def save(self):
with open(directory+'tag_aliases.csv', 'w') as file:
for i in tag_aliases:
file.write(",".join(i) + "\n")
with open(directory+'families.txt','w') as file:
#file.truncate()
file.write(json.dumps(families))
for cat_name,df in cat_files.items():
if df is not None:
df.to_csv(directory+"categories/" + cat_name+".csv")
if id_to_cat is not None:
with open(directory+'ID_to_cat.txt','w') as file:
#file.truncate()
file.write(json.dumps(id_to_cat))
"""
with open('resources/resources.txt', 'w') as file:
file.truncate()
file.write("{")
for key,df in db.items():
file.write("\""+key+ "\":" + df.to_csv(sep="`"))
file.write("}")
"""
@classmethod
def end(self):
self.save()
exit()
@classmethod
def show(self,query):
#query = [q.lower() for q in query]
if query[0] in ["cats","cat","categories","category"]:
print(self.get_contents(list(cat_files.keys())))
elif query[0] == "alias" or query[0] == "aliases":
for t in tag_aliases:
print(t)
elif query[0] == "tags":
if query[1] == ["all"]:
query[1] = cat_files.keys()
tags = set()
failed_cats = []
for cat in query[1]:
self.open_cat(cat)
try:
tags.update({t for ref_tags in cat_files[cat].loc[:,"tags"] for t in ref_tags})
except KeyError:
failed_cats.append(cat)
self.close_cat(cat)
print("\n" + self.get_contents(tags))
if len(failed_cats) > 0:
print("\n Note that the following were not valid categories, and thus were skipped:")
print(wrapper.fill(self.get_contents(failed_cats)))
elif query[0] == "family" or query[0] == "families":
print(self.get_contents(families))
@classmethod
def get(self,num_hits="all",features=None,cats=None,tags=None,families=None,ref_types=None):
ordered_cols = ["date","family","keys","ref type","summary","tags","ref"]
display_columns = []
if features is None:
features = ["keys","tags","family","summary","ref"]
for i in ordered_cols:
if features == "all" or i in features:
display_columns.append(i)
hits,hit_IDs = self.query(cats,tags,families,ref_types)
#df = pd.concat(hits, axis=1, keys=[hit.name for hit in hits])
#df["cat"] = hit_cat_names
if len(hits) == 0:
            return pd.DataFrame()
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler, PolynomialFeatures, OneHotEncoder
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, Lasso, LassoCV, Ridge, RidgeCV
from sklearn.metrics import r2_score, mean_squared_error
"""
1. get_Xy(df): Separate features and target variable
2. get_score(X_train,X_val,y_train,y_val)
3. categorical(X_train,X_val,X_test,cat_variable)
"""
def get_Xy(df):
df = df.dropna()
target = 'opening_weekend_usa'
all_column = df.columns.values.tolist()
all_column.remove(target)
y = df[target]
X = df[all_column]
return X, y
def get_score(X_train,X_val,y_train,y_val):
# fit linear regression to training data
lr_model = LinearRegression()
lr_model.fit(X_train, y_train)
y_pred = lr_model.predict(X_val)
# score fit model on validation data
train_score = lr_model.score(X_train, y_train)
val_score = lr_model.score(X_val, y_val)
rmse = np.sqrt(mean_squared_error(y_val, y_pred))
# report results
print('\nTrain R^2 score was:', train_score)
print('Validation R^2 score was:', val_score)
print(f'RMSE: {rmse:.2f} \n')
# print('Feature coefficient results:')
# for feature, coef in zip(X.columns, lr_model.coef_):
# print(feature, ':', f'{coef:.2f}')
# Visualization
fig, ax = plt.subplots(1, 1)
plt.scatter(y_val, y_pred, alpha=0.4)
ax.set_xlabel('Opening weekend revenue ($ in millions)',fontsize=20)
ax.set_ylabel('Prediction ($ in millions)',fontsize=20)
ax.set_title('R$^2$: %0.2f' % val_score, fontsize=20)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
x=np.linspace(0,0.7e2,50)
# x=np.linspace(4,9,50)
y=x
plt.plot(x,y,color='firebrick',linewidth=3,alpha=0.6)
plt.ylim(0,)
plt.xlim(0,)
return fig, lr_model, y_pred
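# Illustrative usage (assumes X_train, X_val, y_train, y_val already exist,
# e.g. from train_test_split applied to the output of get_Xy):
#   fig, lr_model, y_pred = get_score(X_train, X_val, y_train, y_val)
#   fig.savefig('lr_validation.png')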
def categorical_multilabel(X_train,X_val,X_test,cat_variable):
"""
Input: X_train,X_val,X_test,categorical_variable
Processing: preprocessing the three sets separately:
1. Separate continuous and categorical variable
2. Scaling + polynomial fit the conitnuous variables and get_dummies on the categorical variable
3. Combine back the continuous and categorical data
Return: tranformed X_train, X_val, X_test
"""
scaler = StandardScaler()
poly = PolynomialFeatures(degree=2,interaction_only = False)
# Train set
# Convert genre to dummies
X_train_genre = X_train[cat_variable].str.join(sep='*').str.get_dummies(sep='*')
known_columns = X_train_genre.columns
# Scaling continuous variables
X_train_con = X_train[con_feature]
X_train_con_scaled = scaler.fit_transform(X_train_con)
X_train_con_scaled_df = pd.DataFrame(X_train_con_scaled, columns=X_train_con.columns, index=X_train_con.index)
X_train_poly = poly.fit_transform(X_train_con_scaled)
X_train_poly_df = pd.DataFrame(X_train_poly, columns=poly.get_feature_names(X_train_con.columns), index=X_train_con.index)
#Combine
# X_train = pd.concat([X_train_genre,X_train_con_scaled_df],axis=1)
    X_train = pd.concat([X_train_genre,X_train_poly_df],axis=1)
import pandas as pd
import numpy as np
import re
from nltk import word_tokenize
import nltk
from others.logging_utils import init_logger
from itertools import chain
import geojson
import json
from geopy import distance
from tqdm import tqdm
import os
import gc
def free_space(del_list):
for name in del_list:
if not name.startswith('_'):
del globals()[name]
gc.collect()
def sd(col, max_loss_limit=0.001, avg_loss_limit=0.001, na_loss_limit=0, n_uniq_loss_limit=0, fillna=0):
"""
    max_loss_limit - don't allow any float to lose precision more than this value. Any values are ok for GBT algorithms as long as you don't rely on exact unique values.
See https://en.wikipedia.org/wiki/Half-precision_floating-point_format#Precision_limitations_on_decimal_values_in_[0,_1]
avg_loss_limit - same but calculates avg throughout the series.
na_loss_limit - not really useful.
n_uniq_loss_limit - very important parameter. If you have a float field with very high cardinality you can set this value to something like n_records * 0.01 in order to allow some field relaxing.
"""
is_float = str(col.dtypes)[:5] == 'float'
na_count = col.isna().sum()
n_uniq = col.nunique(dropna=False)
try_types = ['float16', 'float32']
if na_count <= na_loss_limit:
try_types = ['int8', 'int16', 'float16', 'int32', 'float32']
for type in try_types:
col_tmp = col
# float to int conversion => try to round to minimize casting error
if is_float and (str(type)[:3] == 'int'):
col_tmp = col_tmp.copy().fillna(fillna).round()
col_tmp = col_tmp.astype(type)
max_loss = (col_tmp - col).abs().max()
avg_loss = (col_tmp - col).abs().mean()
na_loss = np.abs(na_count - col_tmp.isna().sum())
n_uniq_loss = np.abs(n_uniq - col_tmp.nunique(dropna=False))
if max_loss <= max_loss_limit and avg_loss <= avg_loss_limit and na_loss <= na_loss_limit and n_uniq_loss <= n_uniq_loss_limit:
return col_tmp
# field can't be converted
return col
def reduce_mem_usage_sd(df, deep=True, verbose=False, obj_to_cat=False):
numerics = ['int16', 'uint16', 'int32', 'uint32', 'int64', 'uint64', 'float16', 'float32', 'float64']
start_mem = df.memory_usage(deep=deep).sum() / 1024 ** 2
for col in tqdm(df.columns):
col_type = df[col].dtypes
# collect stats
na_count = df[col].isna().sum()
n_uniq = df[col].nunique(dropna=False)
# numerics
if col_type in numerics:
df[col] = sd(df[col])
# strings
if (col_type == 'object') and obj_to_cat:
df[col] = df[col].astype('category')
if verbose:
print(f'Column {col}: {col_type} -> {df[col].dtypes}, na_count={na_count}, n_uniq={n_uniq}')
new_na_count = df[col].isna().sum()
if (na_count != new_na_count):
print(f'Warning: column {col}, {col_type} -> {df[col].dtypes} lost na values. Before: {na_count}, after: {new_na_count}')
new_n_uniq = df[col].nunique(dropna=False)
if (n_uniq != new_n_uniq):
print(f'Warning: column {col}, {col_type} -> {df[col].dtypes} lost unique values. Before: {n_uniq}, after: {new_n_uniq}')
end_mem = df.memory_usage(deep=deep).sum() / 1024 ** 2
percent = 100 * (start_mem - end_mem) / start_mem
print('Mem. usage decreased from {:5.2f} Mb to {:5.2f} Mb ({:.1f}% reduction)'.format(start_mem, end_mem, percent))
return df
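# Illustrative usage (the file name is hypothetical):
#   df = pd.read_csv('transactions.csv')
#   df = reduce_mem_usage_sd(df, verbose=True)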
def etl_1(data, url_):
    #function which returns anno (year) as a number, otherwise NaN
def Anno_cleaner(x):
try:
return(float(x))
except:
return(np.nan)
#check if price has da inside price and return --> "Asta" otherwise "no_asta"
def asta(x):
asta = 'no_asta'
try:
if 'da' in x:
asta = 'asta'
except:
return(asta)
return(asta)
#Clean price from.. (Da, Symbol, .)
def clean_price(text):
try:
text = re.sub("da", "", text)
text = re.sub("€", "", text)
text = re.sub(r'\.', '', text)
except:
return(text)
return(text)
#Function which clean sconto by taking out parenthesis, %, -
def clean_sconto(text):
try:
text = re.sub(r"\(", "", text)
text = re.sub(r"\)", "", text)
text = re.sub(r'%', '', text)
text = re.sub(r'-', '', text)
except:
return(text)
return(text)
#Function which clean metri by taking out m2
def clean_metri(text):
try:
text = re.sub(r'm2','', text)
except:
return(text)
return(text)
#function which fill NA with mancante
# def missing_filler(data, char, label = 'mancante'):
# for col in char:
# data[col] = data[col].fillna('mancante')
# return(data)
#Clean out from every special character in special_list
def clean_special(x):
special_list = [r'\:', r'\.', r'\-', r'\_', r'\;', r'\,', r'\'']
for symbol in special_list:
x = re.sub(symbol, ' ', x)
return(x)
#find position from description
def position_cleaner(x):
def cl1(x):
x = re.sub(r'\,', '', x)
x = re.sub(r' +', ' ', x)
return(x)
x = re.sub(r'(\,) +\d+', lambda s: cl1(s.group()), x)
return(x)
#clean string
def formatter(x):
x = x.strip()
x = re.sub(r'\s+', ' ', x)
return(x)
#Clean error from short name
def error_cleaner(x):
x = re.sub(r'v\.le', 'viale', x)
return(x)
#
def address_exctractor(x):
termini_ = ['via privata', 'via', 'viale', 'piazzetta', 'foro', 'cavalcavia',
'giardino', 'vicolo', 'passaggio', 'sito', 'parco', 'sottopasso',
'piazza', 'piazzale', 'largo', 'corso', 'alzaia', 'strada', 'ripa',
'galleria', 'foro', 'bastioni']
x = x.lower()
#find position
x = position_cleaner(x)
#clean error
x = error_cleaner(x)
#find address after termini_
address = ''
for lab_ in termini_:
#search for match
temp = re.search(r'\b%s\b' %lab_, x)
#find address by matching
if (temp is not None):
temp = re.search(r'%s (.*?)\,' %lab_, x)
try:
address_regex = temp.group(0) #if lab_ is not inside the name of the address continue else skip
address = clean_special(address_regex)
except:
pass
#clean ending string
address = formatter(address)
return(address)
#take out number from address to get nome via
def nome_via(x):
return(formatter(re.sub(r'\d+', '', x)))
#take out text and keep number
def numero_via(x):
x = x.lower()
x = re.sub('via 8 ottobre 2001', '', x) #via 8 ottobre exception
digit = re.search(r'\d+', x)
try:
x = digit.group()
except:
return('')
return(re.sub(r'\s+', '', x))
# char = ['Stanze', 'Bagni', 'Piano', 'Garantito', 'stato', 'classe_energetica', 'piano']
data = data.reset_index(drop = True)
url_ = url_.reset_index(drop = True)
#Clean Anno
url_['Anno_Costruzione'] = url_['Anno_Costruzione'].apply(lambda x: Anno_cleaner(x))
url_['Anno_Costruzione'] = url_['Anno_Costruzione'].convert_dtypes()
data = pd.concat([data, url_], axis = 1)
#Clean Prezzo
data['asta'] = data['Prezzo'].apply(lambda s: asta(s))
data['Prezzo'] = data['Prezzo'].apply(lambda s: clean_price(s)).astype(float)
data['Prezzo_Vecchio'] = data['Prezzo_Vecchio'].apply(lambda s: clean_price(s)).astype(float)
data['Sconto'] = data['Sconto'].apply(lambda s: clean_sconto(s)).astype(float)
#Clean Metri
data['Metri'] = data['Metri'].apply(lambda s: clean_metri(s)).astype(float)
data['Prezzo_al_mq'] = data['Prezzo']/data['Metri']
#Clean Piano
data['Piano'] = data['Piano'].replace({'T': 'Terra', 'R': 'Piano Rialzato', 'S': 'Seminterrato', 'A': 'Ultimo'})
# data = missing_filler(data, char)
#extract Indirizzo, Nome Via and numero via
data['indirizzo'] = data['Posizione'].apply(lambda x: address_exctractor(x))
data['nome_via'] = data.indirizzo.apply(lambda s: nome_via(s))
data['numero_via'] = data.indirizzo.apply(lambda s: numero_via(s))
return(data)
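# Illustrative usage (assumes `data` holds the scraped listing table and `url_`
# the per-listing detail fields, aligned row by row):
#   cleaned = etl_1(data, url_)
#   cleaned[['Prezzo', 'Metri', 'Prezzo_al_mq', 'indirizzo', 'numero_via']].head()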
def etl_2(args, data):
#Function which calculate intersection score betweem
def scorer(segment_1, segment_2, missing_pos, indirizzo, original, logger):
vec = []
#cycle over each missing position
for m_1 in missing_pos:
vec_2 = np.zeros(indirizzo.shape[0])
#calculate intersection between segment_1, segment_1 to normalize
intersection_top = segment_1[m_1] & segment_1[m_1]
#calculate score of intersection to normalize
top_ = score_intersection(intersection_top)
#iterate over each indirizzo to calculate score of intersection
for m_2 in range(indirizzo.shape[0]):
#calculate intersection set
intersection_try = segment_1[m_1] & segment_2[m_2]
#calculate score
vec_2[m_2] = score_intersection(intersection_try)
#find max
max_ = np.max(vec_2)
#count how many are equal to max score
len_max = np.sum(vec_2 == max_)
#if normalize score assign new indirizzo
if max_/top_ > args.treshold:
if len_max>1:
#in case of ties take indirizzo with nearest number address
number_ = number_intersection(segment_1[m_1], segment_2[vec_2 == max_].values)
#find which address is selected
pos = (np.where(vec_2 == max_)[0])[number_]
#add indirizzo
vec += [indirizzo[pos]]
#print correction with score
logger.info('Segmento errore: {}; via scelta: {}; Match: {}'.format(original[m_1], indirizzo[pos], max_/top_))
else:
#assign indirizzo with max score
vec += [indirizzo[np.argmax(vec_2)]]
logger.info('Via originale: {}; Via scelta: {}; Match: {}'.format(original[m_1],
indirizzo[np.argmax(vec_2)], max_/top_))
else:
vec += [np.nan]
logger.info('errore no match, score {} -- via originale: {}; Matched: {}'.format(max_/top_, original[m_1],
indirizzo[np.argmax(vec_2)]))
#this home didn't find any real address to match up
        logger.info('\n\n{} homes deleted because of errors in address typing\n\n'.format(np.sum([pd.isna(x) for x in vec])))
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas as pd
from pandas.api.types import is_scalar
from pandas.util._validators import validate_bool_kwarg
from pandas.core.index import _ensure_index_from_sequences
from pandas._libs import lib
from pandas.core.dtypes.cast import maybe_upcast_putmask
from pandas.compat import lzip
from pandas.core.dtypes.common import (
is_bool_dtype,
is_numeric_dtype,
is_timedelta64_dtype)
import warnings
import numpy as np
import ray
import itertools
class DataFrame(object):
def __init__(self, df, columns, index=None):
"""Distributed DataFrame object backed by Pandas dataframes.
Args:
df ([ObjectID]): The list of ObjectIDs that contain the dataframe
partitions.
columns (pandas.Index): The column names for this dataframe, in
pandas Index object.
index (pandas.Index or list): The row index for this dataframe.
"""
assert(len(df) > 0)
self._df = df
self.columns = columns
# this _index object is a pd.DataFrame
# and we use that DataFrame's Index to index the rows.
self._lengths, self._index = _compute_length_and_index.remote(self._df)
if index is not None:
self.index = index
def __str__(self):
return repr(self)
def __repr__(self):
if sum(self._lengths) < 40:
result = repr(to_pandas(self))
return result
head = repr(to_pandas(self.head(20)))
tail = repr(to_pandas(self.tail(20)))
result = head + "\n...\n" + tail
return result
def _get_index(self):
"""Get the index for this DataFrame.
Returns:
The union of all indexes across the partitions.
"""
return self._index.index
def _set_index(self, new_index):
"""Set the index for this DataFrame.
Args:
new_index: The new index to set this
"""
self._index.index = new_index
index = property(_get_index, _set_index)
def _get__index(self):
"""Get the _index for this DataFrame.
Returns:
The default index.
"""
if isinstance(self._index_cache, ray.local_scheduler.ObjectID):
self._index_cache = ray.get(self._index_cache)
return self._index_cache
def _set__index(self, new__index):
"""Set the _index for this DataFrame.
Args:
new__index: The new default index to set.
"""
self._index_cache = new__index
_index = property(_get__index, _set__index)
def _compute_lengths(self):
"""Updates the stored lengths of DataFrame partions
"""
self._lengths = [_deploy_func.remote(_get_lengths, d)
for d in self._df]
def _get_lengths(self):
"""Gets the lengths for each partition and caches it if it wasn't.
Returns:
A list of integers representing the length of each partition.
"""
if isinstance(self._length_cache, ray.local_scheduler.ObjectID):
self._length_cache = ray.get(self._length_cache)
elif isinstance(self._length_cache, list) and \
isinstance(self._length_cache[0],
ray.local_scheduler.ObjectID):
self._length_cache = ray.get(self._length_cache)
return self._length_cache
def _set_lengths(self, lengths):
"""Sets the lengths of each partition for this DataFrame.
We use this because we can compute it when creating the DataFrame.
Args:
lengths ([ObjectID or Int]): A list of lengths for each
partition, in order.
"""
self._length_cache = lengths
_lengths = property(_get_lengths, _set_lengths)
@property
def size(self):
"""Get the number of elements in the DataFrame.
Returns:
The number of elements in the DataFrame.
"""
return len(self.index) * len(self.columns)
@property
def ndim(self):
"""Get the number of dimensions for this DataFrame.
Returns:
The number of dimensions for this DataFrame.
"""
# The number of dimensions is common across all partitions.
# The first partition will be enough.
return ray.get(_deploy_func.remote(lambda df: df.ndim, self._df[0]))
@property
def ftypes(self):
"""Get the ftypes for this DataFrame.
Returns:
The ftypes for this DataFrame.
"""
# The ftypes are common across all partitions.
# The first partition will be enough.
return ray.get(_deploy_func.remote(lambda df: df.ftypes, self._df[0]))
@property
def dtypes(self):
"""Get the dtypes for this DataFrame.
Returns:
The dtypes for this DataFrame.
"""
# The dtypes are common across all partitions.
# The first partition will be enough.
return ray.get(_deploy_func.remote(lambda df: df.dtypes, self._df[0]))
@property
def empty(self):
"""Determines if the DataFrame is empty.
Returns:
True if the DataFrame is empty.
False otherwise.
"""
all_empty = ray.get(self._map_partitions(lambda df: df.empty)._df)
return False not in all_empty
@property
def values(self):
"""Create a numpy array with the values from this DataFrame.
Returns:
The numpy representation of this DataFrame.
"""
return np.concatenate(
ray.get(self._map_partitions(lambda df: df.values)._df))
@property
def axes(self):
"""Get the axes for the DataFrame.
Returns:
The axes for the DataFrame.
"""
return [self.index, self.columns]
@property
def shape(self):
"""Get the size of each of the dimensions in the DataFrame.
Returns:
A tuple with the size of each dimension as they appear in axes().
"""
return (len(self.index), len(self.columns))
def _map_partitions(self, func, index=None):
"""Apply a function on each partition.
Args:
func (callable): The function to Apply.
Returns:
A new DataFrame containing the result of the function.
"""
assert(callable(func))
new_df = [_deploy_func.remote(func, part) for part in self._df]
if index is None:
index = self.index
return DataFrame(new_df, self.columns, index=index)
def _update_inplace(self, df=None, columns=None, index=None):
"""Updates the current DataFrame inplace
"""
assert(len(df) > 0)
if df:
self._df = df
if columns:
self.columns = columns
if index:
self.index = index
self._lengths, self._index = _compute_length_and_index.remote(self._df)
def add_prefix(self, prefix):
"""Add a prefix to each of the column names.
Returns:
A new DataFrame containing the new column names.
"""
new_cols = self.columns.map(lambda x: str(prefix) + str(x))
return DataFrame(self._df, new_cols, index=self.index)
def add_suffix(self, suffix):
"""Add a suffix to each of the column names.
Returns:
A new DataFrame containing the new column names.
"""
new_cols = self.columns.map(lambda x: str(x) + str(suffix))
return DataFrame(self._df, new_cols, index=self.index)
def applymap(self, func):
"""Apply a function to a DataFrame elementwise.
Args:
func (callable): The function to apply.
"""
assert(callable(func))
return self._map_partitions(lambda df: df.applymap(lambda x: func(x)))
def copy(self, deep=True):
"""Creates a shallow copy of the DataFrame.
Returns:
A new DataFrame pointing to the same partitions as this one.
"""
return DataFrame(self._df, self.columns, index=self.index)
def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True,
group_keys=True, squeeze=False, **kwargs):
"""Apply a groupby to this DataFrame. See _groupby() remote task.
Args:
by: The value to groupby.
axis: The axis to groupby.
level: The level of the groupby.
as_index: Whether or not to store result as index.
group_keys: Whether or not to group the keys.
squeeze: Whether or not to squeeze.
Returns:
A new DataFrame resulting from the groupby.
"""
indices = self.index.unique()
chunksize = int(len(indices) / len(self._df))
partitions = [_shuffle.remote(df, indices, chunksize)
for df in self._df]
partitions = ray.get(partitions)
# Transpose the list of dataframes
# TODO find a better way
shuffle = []
for i in range(len(partitions[0])):
shuffle.append([])
for j in range(len(partitions)):
shuffle[i].append(partitions[j][i])
new_dfs = [_local_groupby.remote(part, axis=axis) for part in shuffle]
return DataFrame(new_dfs, self.columns, index=indices)
def reduce_by_index(self, func, axis=0):
"""Perform a reduction based on the row index.
Args:
func (callable): The function to call on the partition
after the groupby.
Returns:
A new DataFrame with the result of the reduction.
"""
return self.groupby(axis=axis)._map_partitions(
func, index=pd.unique(self.index))
def sum(self, axis=None, skipna=True, level=None, numeric_only=None):
"""Perform a sum across the DataFrame.
Args:
axis (int): The axis to sum on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The sum of the DataFrame.
"""
intermediate_index = [idx
for _ in range(len(self._df))
for idx in self.columns]
sum_of_partitions = self._map_partitions(
lambda df: df.sum(axis=axis, skipna=skipna, level=level,
numeric_only=numeric_only),
index=intermediate_index)
return sum_of_partitions.reduce_by_index(
lambda df: df.sum(axis=axis, skipna=skipna, level=level,
numeric_only=numeric_only))
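    # Illustrative usage (assuming `rdf` is a DataFrame built from partitioned
    # pandas frames elsewhere in this module):
    #   rdf.sum(axis=0)  # per-partition sums are reduced by index into column totals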
def abs(self):
"""Apply an absolute value function to all numberic columns.
Returns:
A new DataFrame with the applied absolute value.
"""
for t in self.dtypes:
if np.dtype('O') == t:
# TODO Give a more accurate error to Pandas
raise TypeError("bad operand type for abs():", "str")
return self._map_partitions(lambda df: df.abs())
def isin(self, values):
"""Fill a DataFrame with booleans for cells contained in values.
Args:
values (iterable, DataFrame, Series, or dict): The values to find.
Returns:
A new DataFrame with booleans representing whether or not a cell
is in values.
True: cell is contained in values.
False: otherwise
"""
return self._map_partitions(lambda df: df.isin(values))
def isna(self):
"""Fill a DataFrame with booleans for cells containing NA.
Returns:
A new DataFrame with booleans representing whether or not a cell
is NA.
True: cell contains NA.
False: otherwise.
"""
return self._map_partitions(lambda df: df.isna())
def isnull(self):
"""Fill a DataFrame with booleans for cells containing a null value.
Returns:
A new DataFrame with booleans representing whether or not a cell
is null.
True: cell contains null.
False: otherwise.
"""
        return self._map_partitions(lambda df: df.isnull())
def keys(self):
"""Get the info axis for the DataFrame.
Returns:
A pandas Index for this DataFrame.
"""
# Each partition should have the same index, so we'll use 0's
return self.columns
def transpose(self, *args, **kwargs):
"""Transpose columns and rows for the DataFrame.
Note: Triggers a shuffle.
Returns:
A new DataFrame transposed from this DataFrame.
"""
temp_index = [idx
for _ in range(len(self._df))
for idx in self.columns]
temp_columns = self.index
local_transpose = self._map_partitions(
lambda df: df.transpose(*args, **kwargs), index=temp_index)
local_transpose.columns = temp_columns
# Sum will collapse the NAs from the groupby
df = local_transpose.reduce_by_index(
lambda df: df.apply(lambda x: x), axis=1)
# Reassign the columns within partition to self.index.
# We have to use _depoly_func instead of _map_partition due to
# new_labels argument
def _reassign_columns(df, new_labels):
df.columns = new_labels
return df
df._df = [
_deploy_func.remote(
_reassign_columns,
part,
self.index) for part in df._df]
return df
T = property(transpose)
def dropna(self, axis, how, thresh=None, subset=[], inplace=False):
"""Create a new DataFrame from the removed NA values from this one.
Args:
axis (int, tuple, or list): The axis to apply the drop.
how (str): How to drop the NA values.
'all': drop the label if all values are NA.
'any': drop the label if any values are NA.
thresh (int): The minimum number of NAs to require.
subset ([label]): Labels to consider from other axis.
inplace (bool): Change this DataFrame or return a new DataFrame.
True: Modify the data for this DataFrame, return None.
False: Create a new DataFrame and return it.
Returns:
If inplace is set to True, returns None, otherwise returns a new
DataFrame with the dropna applied.
"""
raise NotImplementedError("Not yet")
if how != 'any' and how != 'all':
raise ValueError("<how> not correctly set.")
def add(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def agg(self, func, axis=0, *args, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def aggregate(self, func, axis=0, *args, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def align(self, other, join='outer', axis=None, level=None, copy=True,
fill_value=None, method=None, limit=None, fill_axis=0,
broadcast_axis=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def all(self, axis=None, bool_only=None, skipna=None, level=None,
**kwargs):
"""Return whether all elements are True over requested axis
Note:
If axis=None or axis=0, this call applies df.all(axis=1)
to the transpose of df.
"""
if axis is None or axis == 0:
df = self.T
axis = 1
else:
df = self
mapped = df._map_partitions(lambda df: df.all(axis,
bool_only,
skipna,
level,
**kwargs))
return to_pandas(mapped)
def any(self, axis=None, bool_only=None, skipna=None, level=None,
**kwargs):
"""Return whether all elements are True over requested axis
Note:
            If axis=None or axis=0, this call applies df.any(axis=1)
to the transpose of df.
"""
if axis is None or axis == 0:
df = self.T
axis = 1
else:
df = self
mapped = df._map_partitions(lambda df: df.any(axis,
bool_only,
skipna,
level,
**kwargs))
return to_pandas(mapped)
def append(self, other, ignore_index=False, verify_integrity=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None,
args=(), **kwds):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def as_blocks(self, copy=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def as_matrix(self, columns=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def asfreq(self, freq, method=None, how=None, normalize=False,
fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def asof(self, where, subset=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def assign(self, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def astype(self, dtype, copy=True, errors='raise', **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def at_time(self, time, asof=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def between_time(self, start_time, end_time, include_start=True,
include_end=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def bfill(self, axis=None, inplace=False, limit=None, downcast=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def bool(self):
"""Return the bool of a single element PandasObject.
This must be a boolean scalar value, either True or False. Raise a
ValueError if the PandasObject does not have exactly 1 element, or that
element is not boolean
"""
shape = self.shape
if shape != (1,) and shape != (1, 1):
raise ValueError("""The PandasObject does not have exactly
1 element. Return the bool of a single
element PandasObject. The truth value is
ambiguous. Use a.empty, a.item(), a.any()
or a.all().""")
else:
return to_pandas(self).bool()
def boxplot(self, column=None, by=None, ax=None, fontsize=None, rot=0,
grid=True, figsize=None, layout=None, return_type=None,
**kwds):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def clip(self, lower=None, upper=None, axis=None, inplace=False, *args,
**kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def clip_lower(self, threshold, axis=None, inplace=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def clip_upper(self, threshold, axis=None, inplace=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def combine(self, other, func, fill_value=None, overwrite=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def combine_first(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def compound(self, axis=None, skipna=None, level=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def consolidate(self, inplace=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def convert_objects(self, convert_dates=True, convert_numeric=False,
convert_timedeltas=True, copy=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def corr(self, method='pearson', min_periods=1):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def corrwith(self, other, axis=0, drop=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def count(self, axis=0, level=None, numeric_only=False):
if axis == 1:
return self.T.count(axis=0,
level=level,
numeric_only=numeric_only)
else:
temp_index = [idx
for _ in range(len(self._df))
for idx in self.columns]
collapsed_df = sum(
ray.get(
self._map_partitions(
lambda df: df.count(
axis=axis,
level=level,
numeric_only=numeric_only),
index=temp_index)._df))
return collapsed_df
def cov(self, min_periods=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def cummax(self, axis=None, skipna=True, *args, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def cummin(self, axis=None, skipna=True, *args, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def cumprod(self, axis=None, skipna=True, *args, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def cumsum(self, axis=None, skipna=True, *args, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def describe(self, percentiles=None, include=None, exclude=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def diff(self, periods=1, axis=0):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def div(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def divide(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def dot(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def drop(self, labels=None, axis=0, index=None, columns=None, level=None,
inplace=False, errors='raise'):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def drop_duplicates(self, subset=None, keep='first', inplace=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def duplicated(self, subset=None, keep='first'):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def eq(self, other, axis='columns', level=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def equals(self, other):
"""
Checks if other DataFrame is elementwise equal to the current one
Returns:
Boolean: True if equal, otherwise False
"""
def helper(df, index, other_series):
return df.iloc[index['index_within_partition']] \
.equals(other_series)
results = []
other_partition = None
other_df = None
for i, idx in other._index.iterrows():
if idx['partition'] != other_partition:
other_df = ray.get(other._df[idx['partition']])
other_partition = idx['partition']
# TODO: group series here into full df partitions to reduce
# the number of remote calls to helper
other_series = other_df.iloc[idx['index_within_partition']]
curr_index = self._index.iloc[i]
curr_df = self._df[int(curr_index['partition'])]
results.append(_deploy_func.remote(helper,
curr_df,
curr_index,
other_series))
for r in results:
if not ray.get(r):
return False
return True
def eval(self, expr, inplace=False, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def ewm(self, com=None, span=None, halflife=None, alpha=None,
min_periods=0, freq=None, adjust=True, ignore_na=False, axis=0):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def expanding(self, min_periods=1, freq=None, center=False, axis=0):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def ffill(self, axis=None, inplace=False, limit=None, downcast=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def fillna(self, value=None, method=None, axis=None, inplace=False,
limit=None, downcast=None, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def filter(self, items=None, like=None, regex=None, axis=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def first(self, offset):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def first_valid_index(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def floordiv(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
@classmethod
def from_csv(self, path, header=0, sep=', ', index_col=0,
parse_dates=True, encoding=None, tupleize_cols=None,
infer_datetime_format=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
@classmethod
def from_dict(self, data, orient='columns', dtype=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
@classmethod
def from_items(self, items, columns=None, orient='columns'):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
@classmethod
def from_records(self, data, index=None, exclude=None, columns=None,
coerce_float=False, nrows=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def ge(self, other, axis='columns', level=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def get(self, key, default=None):
"""Get item from object for given key (DataFrame column, Panel
slice, etc.). Returns default value if not found.
Args:
key (DataFrame column, Panel slice) : the key for which value
to get
Returns:
value (type of items contained in object) : A value that is
stored at the key
"""
temp_df = self._map_partitions(lambda df: df.get(key, default=default))
return to_pandas(temp_df)
def get_dtype_counts(self):
"""Get the counts of dtypes in this object.
Returns:
The counts of dtypes in this object.
"""
return ray.get(
_deploy_func.remote(
lambda df: df.get_dtype_counts(), self._df[0]
)
)
def get_ftype_counts(self):
"""Get the counts of ftypes in this object.
Returns:
The counts of ftypes in this object.
"""
return ray.get(
_deploy_func.remote(
lambda df: df.get_ftype_counts(), self._df[0]
)
)
def get_value(self, index, col, takeable=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def get_values(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def gt(self, other, axis='columns', level=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def head(self, n=5):
"""Get the first n rows of the dataframe.
Args:
n (int): The number of rows to return.
Returns:
A new dataframe with the first n rows of the dataframe.
"""
sizes = self._lengths
if n >= sum(sizes):
return self
cumulative = np.cumsum(np.array(sizes))
new_dfs = [self._df[i]
for i in range(len(cumulative))
if cumulative[i] < n]
last_index = len(new_dfs)
# this happens when we only need from the first partition
if last_index == 0:
num_to_transfer = n
else:
num_to_transfer = n - cumulative[last_index - 1]
new_dfs.append(_deploy_func.remote(lambda df: df.head(num_to_transfer),
self._df[last_index]))
index = self._index.head(n).index
return DataFrame(new_dfs, self.columns, index=index)
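    # Illustrative usage (assuming `rdf` is an existing DataFrame):
    #   rdf.head(3)  # only the partitions holding the first 3 rows are materialized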
def hist(self, data, column=None, by=None, grid=True, xlabelsize=None,
xrot=None, ylabelsize=None, yrot=None, ax=None, sharex=False,
sharey=False, figsize=None, layout=None, bins=10, **kwds):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def idxmax(self, axis=0, skipna=True):
"""Get the index of the first occurrence of the max value of the axis.
Args:
axis (int): Identify the max over the rows (1) or columns (0).
skipna (bool): Whether or not to skip NA values.
Returns:
A Series with the index for each maximum value for the axis
specified.
"""
for t in self.dtypes:
if np.dtype('O') == t:
# TODO Give a more accurate error to Pandas
raise TypeError("bad operand type for abs():", "str")
if axis == 1:
return to_pandas(self._map_partitions(
lambda df: df.idxmax(axis=axis, skipna=skipna)))
else:
return self.T.idxmax(axis=1, skipna=skipna)
def idxmin(self, axis=0, skipna=True):
"""Get the index of the first occurrence of the min value of the axis.
Args:
axis (int): Identify the min over the rows (1) or columns (0).
skipna (bool): Whether or not to skip NA values.
Returns:
A Series with the index for each minimum value for the axis
specified.
"""
for t in self.dtypes:
if np.dtype('O') == t:
# TODO Give a more accurate error to Pandas
raise TypeError("bad operand type for abs():", "str")
if axis == 1:
return to_pandas(self._map_partitions(
lambda df: df.idxmin(axis=axis, skipna=skipna)))
else:
return self.T.idxmin(axis=1, skipna=skipna)
def infer_objects(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None,
null_counts=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def insert(self, loc, column, value, allow_duplicates=False):
"""Insert column into DataFrame at specified location.
Args:
loc (int): Insertion index. Must verify 0 <= loc <= len(columns).
column (hashable object): Label of the inserted column.
value (int, Series, or array-like): The values to insert.
allow_duplicates (bool): Whether to allow duplicate column names.
"""
try:
len(value)
except TypeError:
value = [value for _ in range(len(self.index))]
if len(value) != len(self.index):
raise ValueError(
"Column length provided does not match DataFrame length.")
if loc < 0 or loc > len(self.columns):
raise ValueError(
"Location provided must be higher than 0 and lower than the "
"number of columns.")
if not allow_duplicates and column in self.columns:
raise ValueError(
"Column {} already exists in DataFrame.".format(column))
cumulative = np.cumsum(self._lengths)
partitions = [value[cumulative[i-1]:cumulative[i]]
for i in range(len(cumulative))
if i != 0]
partitions.insert(0, value[:cumulative[0]])
# Because insert is always inplace, we have to create this temp fn.
def _insert(_df, _loc, _column, _part, _allow_duplicates):
_df.insert(_loc, _column, _part, _allow_duplicates)
return _df
self._df = \
[_deploy_func.remote(_insert,
self._df[i],
loc,
column,
partitions[i],
allow_duplicates)
for i in range(len(self._df))]
self.columns = self.columns.insert(loc, column)
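    # A hedged illustration (not part of the original API): the partition
    # arithmetic used by ``insert`` above and ``head`` earlier, with
    # ``lengths`` standing in for ``self._lengths``.
    @staticmethod
    def _example_insert_partition_split(value, lengths):
        """Split ``value`` across partitions by cumulative row counts
        (illustrative helper only)."""
        cumulative = np.cumsum(lengths)
        partitions = [value[cumulative[i - 1]:cumulative[i]]
                      for i in range(len(cumulative))
                      if i != 0]
        # The first slice has no preceding cumulative count, so prepend it.
        partitions.insert(0, value[:cumulative[0]])
        return partitions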
def interpolate(self, method='linear', axis=0, limit=None, inplace=False,
limit_direction='forward', downcast=None, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def iterrows(self):
"""Iterate over DataFrame rows as (index, Series) pairs.
Note:
            Generators can't be pickled so from the remote function
we expand the generator into a list before getting it.
This is not that ideal.
Returns:
A generator that iterates over the rows of the frame.
"""
iters = ray.get([
_deploy_func.remote(
lambda df: list(df.iterrows()), part) for part in self._df])
iters = itertools.chain.from_iterable(iters)
series = map(lambda idx_series_tuple: idx_series_tuple[1], iters)
return zip(self.index, series)
def items(self):
"""Iterator over (column name, Series) pairs.
Note:
            Generators can't be pickled so from the remote function
we expand the generator into a list before getting it.
This is not that ideal.
Returns:
A generator that iterates over the columns of the frame.
"""
iters = ray.get([_deploy_func.remote(
lambda df: list(df.items()), part) for part in self._df])
def concat_iters(iterables):
for partitions in zip(*iterables):
series = pd.concat([_series for _, _series in partitions])
series.index = self.index
yield (series.name, series)
return concat_iters(iters)
def iteritems(self):
"""Iterator over (column name, Series) pairs.
Note:
Returns the same thing as .items()
Returns:
A generator that iterates over the columns of the frame.
"""
return self.items()
def itertuples(self, index=True, name='Pandas'):
"""Iterate over DataFrame rows as namedtuples.
Args:
index (boolean, default True): If True, return the index as the
first element of the tuple.
name (string, default "Pandas"): The name of the returned
namedtuples or None to return regular tuples.
Note:
            Generators can't be pickled so from the remote function
we expand the generator into a list before getting it.
This is not that ideal.
Returns:
A tuple representing row data. See args for varying tuples.
"""
iters = ray.get([
_deploy_func.remote(
lambda df: list(df.itertuples(index=index, name=name)),
part) for part in self._df])
iters = itertools.chain.from_iterable(iters)
def _replace_index(row_tuple, idx):
# We need to use try-except here because
# isinstance(row_tuple, namedtuple) won't work.
try:
row_tuple = row_tuple._replace(Index=idx)
except AttributeError: # Tuple not namedtuple
row_tuple = (idx,) + row_tuple[1:]
return row_tuple
if index:
iters = itertools.starmap(_replace_index, zip(iters, self.index))
return iters
def join(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def kurt(self, axis=None, skipna=None, level=None, numeric_only=None,
**kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def kurtosis(self, axis=None, skipna=None, level=None, numeric_only=None,
**kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def last(self, offset):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def last_valid_index(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def le(self, other, axis='columns', level=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def lookup(self, row_labels, col_labels):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def lt(self, other, axis='columns', level=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def mad(self, axis=None, skipna=None, level=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def mask(self, cond, other=np.nan, inplace=False, axis=None, level=None,
errors='raise', try_cast=False, raise_on_error=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def max(self, axis=None, skipna=None, level=None, numeric_only=None,
**kwargs):
"""Perform max across the DataFrame.
Args:
axis (int): The axis to take the max on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The max of the DataFrame.
"""
if(axis == 1):
return self._map_partitions(
lambda df: df.max(axis=axis, skipna=skipna, level=level,
numeric_only=numeric_only, **kwargs))
else:
            return self.T.max(axis=1, skipna=skipna, level=level,
                              numeric_only=numeric_only, **kwargs)
def mean(self, axis=None, skipna=None, level=None, numeric_only=None,
**kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def median(self, axis=None, skipna=None, level=None, numeric_only=None,
**kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def melt(self, id_vars=None, value_vars=None, var_name=None,
value_name='value', col_level=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def memory_usage(self, index=True, deep=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def merge(self, right, how='inner', on=None, left_on=None, right_on=None,
left_index=False, right_index=False, sort=False,
suffixes=('_x', '_y'), copy=True, indicator=False,
validate=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def min(self, axis=None, skipna=None, level=None, numeric_only=None,
**kwargs):
"""Perform min across the DataFrame.
Args:
axis (int): The axis to take the min on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The min of the DataFrame.
"""
if(axis == 1):
return self._map_partitions(
lambda df: df.min(axis=axis, skipna=skipna, level=level,
numeric_only=numeric_only, **kwargs))
else:
return self.T.min(axis=1, skipna=skipna, level=level,
numeric_only=numeric_only, **kwargs)
def mod(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def mode(self, axis=0, numeric_only=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def mul(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def multiply(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def ne(self, other, axis='columns', level=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def nlargest(self, n, columns, keep='first'):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def notna(self):
"""Perform notna across the DataFrame.
Args:
None
Returns:
Boolean DataFrame where value is False if corresponding
value is NaN, True otherwise
"""
return self._map_partitions(lambda df: df.notna())
def notnull(self):
"""Perform notnull across the DataFrame.
Args:
None
Returns:
Boolean DataFrame where value is False if corresponding
value is NaN, True otherwise
"""
return self._map_partitions(lambda df: df.notnull())
def nsmallest(self, n, columns, keep='first'):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def nunique(self, axis=0, dropna=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None,
**kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def pipe(self, func, *args, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def pivot(self, index=None, columns=None, values=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def pivot_table(self, values=None, index=None, columns=None,
aggfunc='mean', fill_value=None, margins=False,
dropna=True, margins_name='All'):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def plot(self, x=None, y=None, kind='line', ax=None, subplots=False,
sharex=None, sharey=False, layout=None, figsize=None,
use_index=True, title=None, grid=None, legend=True, style=None,
logx=False, logy=False, loglog=False, xticks=None, yticks=None,
xlim=None, ylim=None, rot=None, fontsize=None, colormap=None,
table=False, yerr=None, xerr=None, secondary_y=False,
sort_columns=False, **kwds):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def pop(self, item):
"""Pops an item from this DataFrame and returns it.
Args:
item (str): Column label to be popped
Returns:
A Series containing the popped values. Also modifies this
DataFrame.
"""
popped = to_pandas(self._map_partitions(
lambda df: df.pop(item)))
self._df = self._map_partitions(lambda df: df.drop([item], axis=1))._df
self.columns = self.columns.drop(item)
return popped
def pow(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def prod(self, axis=None, skipna=None, level=None, numeric_only=None,
min_count=0, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def product(self, axis=None, skipna=None, level=None, numeric_only=None,
min_count=0, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def quantile(self, q=0.5, axis=0, numeric_only=True,
interpolation='linear'):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def query(self, expr, inplace=False, **kwargs):
"""Queries the Dataframe with a boolean expression
Returns:
A new DataFrame if inplace=False
"""
new_dfs = [_deploy_func.remote(lambda df: df.query(expr, **kwargs),
part) for part in self._df]
if inplace:
self._update_inplace(new_dfs)
else:
return DataFrame(new_dfs, self.columns)
def radd(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def rank(self, axis=0, method='average', numeric_only=None,
na_option='keep', ascending=True, pct=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def rdiv(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def reindex(self, labels=None, index=None, columns=None, axis=None,
method=None, copy=True, level=None, fill_value=np.nan,
limit=None, tolerance=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
limit=None, fill_value=np.nan):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def reindex_like(self, other, method=None, copy=True, limit=None,
tolerance=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def rename(self, mapper=None, index=None, columns=None, axis=None,
copy=True, inplace=False, level=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def rename_axis(self, mapper, axis=0, copy=True, inplace=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def reorder_levels(self, order, axis=0):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def replace(self, to_replace=None, value=None, inplace=False, limit=None,
regex=False, method='pad', axis=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def resample(self, rule, how=None, axis=0, fill_method=None, closed=None,
label=None, convention='start', kind=None, loffset=None,
limit=None, base=0, on=None, level=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def reset_index(self, level=None, drop=False, inplace=False, col_level=0,
col_fill=''):
"""Reset this index to default and create column from current index.
Args:
level: Only remove the given levels from the index. Removes all
levels by default
drop: Do not try to insert index into dataframe columns. This
resets the index to the default integer index.
inplace: Modify the DataFrame in place (do not create a new object)
col_level : If the columns have multiple levels, determines which
level the labels are inserted into. By default it is inserted
into the first level.
col_fill: If the columns have multiple levels, determines how the
other levels are named. If None then the index name is
repeated.
Returns:
A new DataFrame if inplace is False, None otherwise.
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if inplace:
new_obj = self
else:
new_obj = self.copy()
def _maybe_casted_values(index, labels=None):
if isinstance(index, pd.PeriodIndex):
values = index.asobject.values
elif isinstance(index, pd.DatetimeIndex) and index.tz is not None:
values = index
else:
values = index.values
if values.dtype == np.object_:
values = lib.maybe_convert_objects(values)
# if we have the labels, extract the values with a mask
if labels is not None:
mask = labels == -1
# we can have situations where the whole mask is -1,
# meaning there is nothing found in labels, so make all nan's
if mask.all():
values = np.empty(len(mask))
values.fill(np.nan)
else:
values = values.take(labels)
if mask.any():
values, changed = maybe_upcast_putmask(
values, mask, np.nan)
return values
_, new_index = _compute_length_and_index.remote(new_obj._df)
new_index = ray.get(new_index).index
if level is not None:
if not isinstance(level, (tuple, list)):
level = [level]
level = [self.index._get_level_number(lev) for lev in level]
if isinstance(self.index, pd.MultiIndex):
if len(level) < self.index.nlevels:
new_index = self.index.droplevel(level)
if not drop:
if isinstance(self.index, pd.MultiIndex):
names = [n if n is not None else ('level_%d' % i)
for (i, n) in enumerate(self.index.names)]
to_insert = lzip(self.index.levels, self.index.labels)
else:
default = 'index' if 'index' not in self else 'level_0'
names = ([default] if self.index.name is None
else [self.index.name])
to_insert = ((self.index, None),)
multi_col = isinstance(self.columns, pd.MultiIndex)
for i, (lev, lab) in reversed(list(enumerate(to_insert))):
if not (level is None or i in level):
continue
name = names[i]
if multi_col:
col_name = (list(name) if isinstance(name, tuple)
else [name])
if col_fill is None:
if len(col_name) not in (1, self.columns.nlevels):
raise ValueError("col_fill=None is incompatible "
"with incomplete column name "
"{}".format(name))
col_fill = col_name[0]
lev_num = self.columns._get_level_number(col_level)
name_lst = [col_fill] * lev_num + col_name
missing = self.columns.nlevels - len(name_lst)
name_lst += [col_fill] * missing
name = tuple(name_lst)
# to ndarray and maybe infer different dtype
level_values = _maybe_casted_values(lev, lab)
new_obj.insert(0, name, level_values)
new_obj.index = new_index
if not inplace:
return new_obj
def rfloordiv(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def rmod(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def rmul(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def rolling(self, window, min_periods=None, freq=None, center=False,
win_type=None, on=None, axis=0, closed=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def round(self, decimals=0, *args, **kwargs):
return self._map_partitions(lambda df: df.round(decimals=decimals,
*args,
**kwargs))
def rpow(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def rsub(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def rtruediv(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def sample(self, n=None, frac=None, replace=False, weights=None,
random_state=None, axis=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def select(self, crit, axis=0):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def select_dtypes(self, include=None, exclude=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def sem(self, axis=None, skipna=None, level=None, ddof=1,
numeric_only=None, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def set_axis(self, labels, axis=0, inplace=None):
"""Assign desired index to given axis.
Args:
labels (pd.Index or list-like): The Index to assign.
axis (string or int): The axis to reassign.
inplace (bool): Whether to make these modifications inplace.
Returns:
If inplace is False, returns a new DataFrame, otherwise None.
"""
        if is_scalar(labels):  # api: pandas.api.types.is_scalar
import warnings
from functools import reduce
import os
import json
import numpy as np
import pandas as pd
from tqdm import tqdm
from qualipy.project import Project
from qualipy.util import set_value_type, set_metric_id
from qualipy.anomaly._isolation_forest import IsolationForestModel
from qualipy.anomaly._prophet import ProphetModel
from qualipy.anomaly._std import STDCheck
from qualipy.anomaly.base import LoadedModel
from qualipy.anomaly.trend_rules import trend_rules
anomaly_columns = [
"column_name",
"date",
"metric",
"arguments",
"return_format",
"value",
"severity",
"batch_name",
"insert_time",
"trend_function_name",
]
MODS = {
"IsolationForest": IsolationForestModel,
"prophet": ProphetModel,
"std": STDCheck,
}
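# Hedged sketch (not part of qualipy itself): how the MODS lookup above is used
# by GenerateAnomalies below — the configured model class is built per metric,
# fit on that metric's history, saved, then asked for predictions. Argument
# names mirror the calls inside _num_train_and_save; the values are whatever
# the caller supplies.
def _example_model_lookup(config_dir, project_name, metric_id, data, model_type="std"):
    mod = MODS[model_type](
        config_dir=config_dir,
        metric_name=metric_id,
        project_name=project_name,
    )
    mod.fit(data)
    mod.save()
    return mod.predict(data)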
class GenerateAnomalies:
def __init__(self, project_name, config_dir):
self.config_dir = config_dir
with open(os.path.join(config_dir, "config.json"), "r") as conf_file:
config = json.load(conf_file)
self.model_type = config[project_name].get("ANOMALY_MODEL", "std")
self.anom_args = config[project_name].get("ANOMALY_ARGS", {})
self.specific = self.anom_args.pop("specific", {})
self.project_name = project_name
self.project = Project(project_name, config_dir=config_dir, re_init=True)
df = self.project.get_project_table()
df["floored_datetime"] = df.date.dt.floor("T")
df = (
df.groupby("floored_datetime", as_index=False)
.apply(lambda g: g[g.insert_time == g.insert_time.max()])
.reset_index(drop=True)
)
df = df.drop("floored_datetime", axis=1)
df.column_name = df.column_name + "_" + df.run_name
df["metric_name"] = (
df.column_name
+ "_"
+ df.metric.astype(str)
+ "_"
+ np.where(df.arguments.isnull(), "", df.arguments)
)
df = set_metric_id(df)
df = df.sort_values("date")
self.df = df
def _num_train_and_save(self, data, all_rows, metric_name):
try:
metric_id = data.metric_id.iloc[0]
mod = MODS[self.model_type](
config_dir=self.config_dir,
metric_name=metric_id,
project_name=self.project_name,
)
mod.fit(data)
mod.save()
preds = mod.predict(data)
if isinstance(preds, tuple):
severity = preds[1]
preds = preds[0]
outlier_rows = data[preds == -1].copy()
outlier_rows["severity"] = severity[preds == -1]
else:
outlier_rows = data[preds == -1]
outlier_rows["severity"] = np.NaN
if outlier_rows.shape[0] > 0:
all_rows.append(outlier_rows)
except Exception as e:
print(str(e))
warnings.warn(f"Unable to create anomaly model for {metric_name}")
return all_rows
def _num_from_loaded_model(self, data, all_rows):
mod = LoadedModel(config_dir=self.config_dir)
mod.load(data.metric_id.iloc[0])
preds = mod.predict(data)
if isinstance(preds, tuple):
severity = preds[1]
preds = preds[0]
outlier_rows = data[preds == -1].copy()
outlier_rows["severity"] = severity[preds == -1]
else:
outlier_rows = data[preds == -1]
outlier_rows["severity"] = np.NaN
if outlier_rows.shape[0] > 0:
all_rows.append(outlier_rows)
return all_rows
def create_anom_num_table(self, retrain=False):
df = self.df.copy()
df = df[
(df["type"] == "numerical")
| (df["column_name"].isin(["rows", "columns"]))
| (df["metric"].isin(["perc_missing", "count"]))
]
df.value = df.value.astype(float)
all_rows = []
if self.model_type != "ignore":
for metric_name, data in tqdm(df.groupby("metric_name")):
if not retrain:
try:
all_rows = self._num_from_loaded_model(data, all_rows)
except ValueError:
warnings.warn(f"Unable to load anomaly model for {metric_name}")
except FileNotFoundError:
all_rows = self._num_train_and_save(data, all_rows, metric_name)
else:
all_rows = self._num_train_and_save(data, all_rows, metric_name)
try:
data = pd.concat(all_rows).sort_values("date", ascending=False)
data["trend_function_name"] = np.NaN
data = data[anomaly_columns]
data.value = data.value.astype(str)
        except Exception:
data = pd.DataFrame([], columns=anomaly_columns)
return data
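    @staticmethod
    def _example_category_drift(shares):
        """Hedged illustration, not part of the original class: the per-category
        drift score used by ``create_anom_cat_table`` below — the distance of a
        category's share from its 5-point rolling mean, summed in absolute
        value. ``shares`` is any sequence of per-batch category proportions.
        """
        values = pd.Series(shares)
        running_means = values.rolling(window=5).mean()
        differences = values - running_means
        return differences, np.abs(differences).sum()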
def create_anom_cat_table(self, retrain=False):
df = self.df
df = df[df["type"] == "categorical"]
all_rows = []
if self.model_type != "ignore":
for metric_id, data in tqdm(df.groupby("metric_id")):
data = set_value_type(data.copy())
try:
data_values = [
(pd.Series(c) / pd.Series(c).sum()).to_dict()
for c in data["value"]
]
unique_vals = reduce(
lambda x, y: x.union(y), [set(i.keys()) for i in data_values]
)
non_diff_lines = []
potential_lines = []
for cat in unique_vals:
values = pd.Series([i.get(cat, 0) for i in data_values])
running_means = values.rolling(window=5).mean()
differences = values - running_means
sum_abs = np.abs(differences).sum()
potential_lines.append((cat, differences, sum_abs))
non_diff_lines.append((cat, values))
potential_lines = sorted(
potential_lines, key=lambda v: v[2], reverse=True
)
diffs_df = pd.DataFrame({i[0]: i[1] for i in potential_lines})
diffs_df["sum_of_changes"] = diffs_df.abs().sum(axis=1)
all_non_diff_lines = pd.DataFrame(
{i[0]: i[1] for i in non_diff_lines}
)
for col in all_non_diff_lines.columns:
mean = all_non_diff_lines[col].mean()
std = all_non_diff_lines[col].std()
if std > 0.05:
all_non_diff_lines[f"{col}_below"] = np.where(
all_non_diff_lines[col] < (mean - (4 * std)), 1, 0
)
all_non_diff_lines[f"{col}_above"] = np.where(
all_non_diff_lines[col] > (mean + (4 * std)), 1, 0
)
else:
all_non_diff_lines[f"{col}_below"] = 0
all_non_diff_lines[f"{col}_above"] = 0
std_sums = all_non_diff_lines[
[
col
for col in all_non_diff_lines.columns
if "_below" in str(col) or "_above" in str(col)
]
].sum(axis=1)
mod = IsolationForestModel(
config_dir=self.config_dir,
metric_name=metric_id,
arguments={
"contamination": 0.01,
"n_estimators": 50,
"multivariate": True,
"check_for_std": True,
},
)
outliers = mod.train_predict(all_non_diff_lines)
all_non_diff_lines["iso_outlier"] = outliers
data["severity"] = diffs_df.sum_of_changes.values
sample_size = data.value.apply(lambda v: sum(v.values()))
outlier_rows = data[
(outliers == -1) & (std_sums.values > 0) & (sample_size > 10)
]
if outlier_rows.shape[0] > 0:
all_rows.append(outlier_rows)
except ValueError:
pass
try:
            data = pd.concat(all_rows)  # api: pandas.concat
"""
The tests in this package are to ensure the proper resultant dtypes of
set operations.
"""
import numpy as np
import pytest
from pandas.core.dtypes.common import is_dtype_equal
import pandas as pd
from pandas import (
CategoricalIndex,
DatetimeIndex,
Float64Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
TimedeltaIndex,
UInt64Index,
)
import pandas._testing as tm
from pandas.api.types import is_datetime64tz_dtype, pandas_dtype
COMPATIBLE_INCONSISTENT_PAIRS = {
(Int64Index, RangeIndex): (tm.makeIntIndex, tm.makeRangeIndex),
(Float64Index, Int64Index): (tm.makeFloatIndex, tm.makeIntIndex),
(Float64Index, RangeIndex): (tm.makeFloatIndex, tm.makeIntIndex),
(Float64Index, UInt64Index): (tm.makeFloatIndex, tm.makeUIntIndex),
}
def test_union_same_types(index):
# Union with a non-unique, non-monotonic index raises error
# Only needed for bool index factory
idx1 = index.sort_values()
idx2 = index.sort_values()
assert idx1.union(idx2).dtype == idx1.dtype
def test_union_different_types(index, index_fixture2):
# This test only considers combinations of indices
# GH 23525
idx1, idx2 = index, index_fixture2
type_pair = tuple(sorted([type(idx1), type(idx2)], key=lambda x: str(x)))
if type_pair in COMPATIBLE_INCONSISTENT_PAIRS:
pytest.xfail("This test only considers non compatible indexes.")
if any(isinstance(idx, pd.MultiIndex) for idx in (idx1, idx2)):
pytest.xfail("This test doesn't consider multiindixes.")
if is_dtype_equal(idx1.dtype, idx2.dtype):
pytest.xfail("This test only considers non matching dtypes.")
# A union with a CategoricalIndex (even as dtype('O')) and a
# non-CategoricalIndex can only be made if both indices are monotonic.
# This is true before this PR as well.
# Union with a non-unique, non-monotonic index raises error
# This applies to the boolean index
idx1 = idx1.sort_values()
idx2 = idx2.sort_values()
assert idx1.union(idx2).dtype == np.dtype("O")
assert idx2.union(idx1).dtype == np.dtype("O")
@pytest.mark.parametrize("idx_fact1,idx_fact2", COMPATIBLE_INCONSISTENT_PAIRS.values())
def test_compatible_inconsistent_pairs(idx_fact1, idx_fact2):
# GH 23525
idx1 = idx_fact1(10)
idx2 = idx_fact2(20)
res1 = idx1.union(idx2)
res2 = idx2.union(idx1)
assert res1.dtype in (idx1.dtype, idx2.dtype)
assert res2.dtype in (idx1.dtype, idx2.dtype)
@pytest.mark.parametrize(
"left, right, expected",
[
("int64", "int64", "int64"),
("int64", "uint64", "object"),
("int64", "float64", "float64"),
("uint64", "float64", "float64"),
("uint64", "uint64", "uint64"),
("float64", "float64", "float64"),
("datetime64[ns]", "int64", "object"),
("datetime64[ns]", "uint64", "object"),
("datetime64[ns]", "float64", "object"),
("datetime64[ns, CET]", "int64", "object"),
("datetime64[ns, CET]", "uint64", "object"),
("datetime64[ns, CET]", "float64", "object"),
("Period[D]", "int64", "object"),
("Period[D]", "uint64", "object"),
("Period[D]", "float64", "object"),
],
)
@pytest.mark.parametrize("names", [("foo", "foo", "foo"), ("foo", "bar", None)])
def test_union_dtypes(left, right, expected, names):
left = pandas_dtype(left)
right = pandas_dtype(right)
a = pd.Index([], dtype=left, name=names[0])
b = pd.Index([], dtype=right, name=names[1])
result = a.union(b)
assert result.dtype == expected
assert result.name == names[2]
# Testing name retention
# TODO: pin down desired dtype; do we want it to be commutative?
result = a.intersection(b)
assert result.name == names[2]
def test_dunder_inplace_setops_deprecated(index):
# GH#37374 these will become logical ops, not setops
with tm.assert_produces_warning(FutureWarning):
index |= index
with tm.assert_produces_warning(FutureWarning):
index &= index
with tm.assert_produces_warning(FutureWarning):
index ^= index
@pytest.mark.parametrize("values", [[1, 2, 2, 3], [3, 3]])
def test_intersection_duplicates(values):
# GH#31326
a = pd.Index(values)
b = pd.Index([3, 3])
result = a.intersection(b)
expected = pd.Index([3])
tm.assert_index_equal(result, expected)
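# Hedged, standalone illustration of the parametrized test above: one concrete
# compatible-but-inconsistent pair spelled out. The helper name is illustrative
# and not part of the pandas test suite.
def _example_compatible_pair():
    idx1 = tm.makeIntIndex(10)    # Int64Index
    idx2 = tm.makeRangeIndex(20)  # RangeIndex
    result = idx1.union(idx2)
    # The union keeps one of the two input dtypes, as asserted in
    # test_compatible_inconsistent_pairs.
    assert result.dtype in (idx1.dtype, idx2.dtype)
    return result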
class TestSetOps:
# Set operation tests shared by all indexes in the `index` fixture
@pytest.mark.parametrize("case", [0.5, "xxx"])
@pytest.mark.parametrize(
"method", ["intersection", "union", "difference", "symmetric_difference"]
)
def test_set_ops_error_cases(self, case, method, index):
# non-iterable input
msg = "Input must be Index or array-like"
with pytest.raises(TypeError, match=msg):
getattr(index, method)(case)
def test_intersection_base(self, index):
if isinstance(index, CategoricalIndex):
return
first = index[:5]
second = index[:3]
intersect = first.intersection(second)
assert tm.equalContents(intersect, second)
if is_datetime64tz_dtype(index.dtype):
# The second.values below will drop tz, so the rest of this test
# is not applicable.
return
# GH#10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = first.intersection(case)
assert tm.equalContents(result, second)
if isinstance(index, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.intersection([1, 2, 3])
def test_union_base(self, index):
first = index[3:]
second = index[:5]
everything = index
union = first.union(second)
assert tm.equalContents(union, everything)
if is_datetime64tz_dtype(index.dtype):
# The second.values below will drop tz, so the rest of this test
# is not applicable.
return
# GH#10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
if not isinstance(index, CategoricalIndex):
result = first.union(case)
assert tm.equalContents(result, everything), (
result,
everything,
type(case),
)
if isinstance(index, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.union([1, 2, 3])
def test_difference_base(self, sort, index):
first = index[2:]
second = index[:4]
if isinstance(index, CategoricalIndex) or index.is_boolean():
answer = []
else:
answer = index[4:]
result = first.difference(second, sort)
assert tm.equalContents(result, answer)
# GH#10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
if isinstance(index, (DatetimeIndex, TimedeltaIndex)):
assert type(result) == type(answer)
tm.assert_numpy_array_equal(
result.sort_values().asi8, answer.sort_values().asi8
)
else:
result = first.difference(case, sort)
assert tm.equalContents(result, answer)
if isinstance(index, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.difference([1, 2, 3], sort)
def test_symmetric_difference(self, index):
if isinstance(index, CategoricalIndex):
return
if len(index) < 2:
return
if index[0] in index[1:] or index[-1] in index[:-1]:
# index fixture has e.g. an index of bools that does not satisfy this,
# another with [0, 0, 1, 1, 2, 2]
return
first = index[1:]
second = index[:-1]
answer = index[[0, -1]]
result = first.symmetric_difference(second)
assert tm.equalContents(result, answer)
# GH#10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = first.symmetric_difference(case)
if is_datetime64tz_dtype(first):
# second.values casts to tznaive
expected = first.union(case)
tm.assert_index_equal(result, expected)
continue
assert tm.equalContents(result, answer)
if isinstance(index, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.symmetric_difference([1, 2, 3])
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_corner_union(self, index, fname, sname, expected_name):
# GH#9943, GH#9862
# Test unions with various name combinations
# Do not test MultiIndex or repeats
if isinstance(index, MultiIndex) or not index.is_unique:
pytest.skip("Not for MultiIndex or repeated indices")
# Test copy.union(copy)
first = index.copy().set_names(fname)
second = index.copy().set_names(sname)
union = first.union(second)
expected = index.copy().set_names(expected_name)
tm.assert_index_equal(union, expected)
# Test copy.union(empty)
first = index.copy().set_names(fname)
second = index.drop(index).set_names(sname)
union = first.union(second)
expected = index.copy().set_names(expected_name)
tm.assert_index_equal(union, expected)
# Test empty.union(copy)
first = index.drop(index).set_names(fname)
second = index.copy().set_names(sname)
union = first.union(second)
expected = index.copy().set_names(expected_name)
tm.assert_index_equal(union, expected)
# Test empty.union(empty)
first = index.drop(index).set_names(fname)
second = index.drop(index).set_names(sname)
union = first.union(second)
expected = index.drop(index).set_names(expected_name)
tm.assert_index_equal(union, expected)
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_union_unequal(self, index, fname, sname, expected_name):
if isinstance(index, MultiIndex) or not index.is_unique:
pytest.skip("Not for MultiIndex or repeated indices")
# test copy.union(subset) - need sort for unicode and string
first = index.copy().set_names(fname)
second = index[1:].set_names(sname)
union = first.union(second).sort_values()
expected = index.set_names(expected_name).sort_values()
tm.assert_index_equal(union, expected)
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_corner_intersect(self, index, fname, sname, expected_name):
# GH#35847
# Test intersections with various name combinations
if isinstance(index, MultiIndex) or not index.is_unique:
pytest.skip("Not for MultiIndex or repeated indices")
# Test copy.intersection(copy)
first = index.copy().set_names(fname)
second = index.copy().set_names(sname)
intersect = first.intersection(second)
expected = index.copy().set_names(expected_name)
tm.assert_index_equal(intersect, expected)
# Test copy.intersection(empty)
first = index.copy().set_names(fname)
second = index.drop(index).set_names(sname)
intersect = first.intersection(second)
expected = index.drop(index).set_names(expected_name)
tm.assert_index_equal(intersect, expected)
# Test empty.intersection(copy)
first = index.drop(index).set_names(fname)
second = index.copy().set_names(sname)
intersect = first.intersection(second)
expected = index.drop(index).set_names(expected_name)
tm.assert_index_equal(intersect, expected)
# Test empty.intersection(empty)
first = index.drop(index).set_names(fname)
second = index.drop(index).set_names(sname)
intersect = first.intersection(second)
expected = index.drop(index).set_names(expected_name)
tm.assert_index_equal(intersect, expected)
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_intersect_unequal(self, index, fname, sname, expected_name):
if isinstance(index, MultiIndex) or not index.is_unique:
pytest.skip("Not for MultiIndex or repeated indices")
# test copy.intersection(subset) - need sort for unicode and string
first = index.copy().set_names(fname)
second = index[1:].set_names(sname)
intersect = first.intersection(second).sort_values()
expected = index[1:].set_names(expected_name).sort_values()
tm.assert_index_equal(intersect, expected)
def test_intersection_name_retention_with_nameless(self, index):
if isinstance(index, MultiIndex):
index = index.rename(list(range(index.nlevels)))
else:
index = index.rename("foo")
other = np.asarray(index)
result = index.intersection(other)
assert result.name == index.name
# empty other, same dtype
result = index.intersection(other[:0])
assert result.name == index.name
# empty `self`
result = index[:0].intersection(other)
assert result.name == index.name
def test_difference_preserves_type_empty(self, index, sort):
# GH#20040
# If taking difference of a set and itself, it
# needs to preserve the type of the index
if not index.is_unique:
return
result = index.difference(index, sort=sort)
expected = index[:0]
tm.assert_index_equal(result, expected, exact=True)
def test_difference_name_retention_equals(self, index, sort, names):
if isinstance(index, MultiIndex):
names = [[x] * index.nlevels for x in names]
index = index.rename(names[0])
other = index.rename(names[1])
assert index.equals(other)
result = index.difference(other)
expected = index[:0].rename(names[2])
        tm.assert_index_equal(result, expected)  # api: pandas._testing.assert_index_equal
from collections import deque
import numpy as np
import pandas as pd
import itertools
from numpy.random import default_rng
def _get_n(iterable, n):
return list(itertools.islice(iterable, n))
def generate_ma(*coeffs, std=1.0, seed=12345):
rng = default_rng(seed=seed)
n = len(coeffs)
past_terms = deque(maxlen=n)
past_terms.extend([0.0]*n)
coeffs = tuple(reversed(coeffs))
while True:
err = rng.normal(0, std)
yield err + sum(c*e for c, e in zip(coeffs, past_terms))
past_terms.append(err)
def generate_ar(*coeffs, const=0.0, start=0.0):
n = len(coeffs)
past_terms = deque(maxlen=n)
past_terms.extend([0.0]*(n-1))
past_terms.append(start)
coeffs = tuple(reversed(coeffs))
while True:
curr = const + sum(c*t for c, t in zip(coeffs, past_terms))
yield curr
past_terms.append(curr)
def generate_arma(ar_coeffs=(0.9,), const=0.0, start=0.0,
ma_coeffs=(), noise_std=1.0, seed=None):
n = len(ar_coeffs)
past_terms = deque(maxlen=n)
past_terms.extend([0.0]*(n-1))
past_terms.append(start)
coeffs = tuple(reversed(ar_coeffs))
yield start
ma_proc = generate_ma(*ma_coeffs, std=noise_std, seed=seed)
for err in ma_proc:
curr = const + err + sum(c*t for c, t in zip(coeffs, past_terms))
yield curr
past_terms.append(curr)
def undifference(iterable):
tot = next(iterable) # first term
for cur in iterable:
yield tot
tot += cur
def add_season_ar(iterable, period=7, coeffs=(0.7,)):
n = len(coeffs)
coeffs = tuple(reversed(coeffs))
N = n + period - 1
past_vals = deque(maxlen=N)
past_vals.extend([0.0]*N)
for item in iterable:
new = item + sum(coeffs[i]*past_vals[i] for i in range(n))
yield new
past_vals.append(new)
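# Hedged usage sketch (not in the original module): draw a short sample from
# the ARMA generator above. The coefficients simply mirror the ones that
# generate_sample_data below passes in; n is arbitrary.
def _example_arma_draw(n=10):
    gen = generate_arma(seed=12345, const=0.0, ar_coeffs=(0.8,), ma_coeffs=(-0.5,))
    return _get_n(gen, n)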
def generate_sample_data(train=366, test=50, trend=0.0, undiff=False, seasonal=False):
gen = generate_arma(seed=12345, const=trend, ar_coeffs=(0.8,), ma_coeffs=(-0.5,))
if seasonal:
gen = add_season_ar(gen)
if undiff:
gen = undifference(gen)
indices = pd.date_range("2020-01-01", periods=train+test)
data = _get_n(gen, train+test)
    return (pd.Series(data[:-test], index=indices[:-test]),  # api: pandas.Series
            pd.Series(data[-test:], index=indices[-test:]))
"""
Simplest aggregation algorithms tests on toy YSDA dataset
Testing all boundary conditions and asserts
"""
import numpy as np
import pandas as pd
import pytest
from crowdkit.aggregation import DawidSkene
from pandas.testing import assert_frame_equal, assert_series_equal
def test_aggregate_ds_on_toy_ysda(toy_answers_df, toy_ground_truth_df):
np.random.seed(42)
assert_series_equal(
DawidSkene(10).fit(toy_answers_df).labels_.sort_index(),
toy_ground_truth_df.sort_index(),
)
def test_aggregate_ds_on_simple(simple_answers_df, simple_ground_truth_df):
np.random.seed(42)
assert_series_equal(
DawidSkene(10).fit(simple_answers_df).labels_.sort_index(),
simple_ground_truth_df.sort_index(),
)
def _make_probas(data):
# TODO: column should not be an index!
columns = pd.Index(['task', 'no', 'yes'], name='label')
return pd.DataFrame(data, columns=columns).set_index('task')
def _make_tasks_labels(data):
# TODO: should task be indexed?
return pd.DataFrame(data, columns=['task', 'label']).set_index('task').squeeze().rename()
def _make_errors(data):
return pd.DataFrame(
data,
columns=['performer', 'label', 'no', 'yes'],
).set_index(['performer', 'label'])
@pytest.fixture
def data():
return pd.DataFrame(
[
['t1', 'w1', 'no'],
['t1', 'w2', 'yes'],
# ['t1', 'w3', np.NaN],
['t1', 'w4', 'yes'],
['t1', 'w5', 'no'],
['t2', 'w1', 'yes'],
['t2', 'w2', 'yes'],
['t2', 'w3', 'yes'],
['t2', 'w4', 'no'],
['t2', 'w5', 'no'],
['t3', 'w1', 'yes'],
['t3', 'w2', 'no'],
['t3', 'w3', 'no'],
['t3', 'w4', 'yes'],
['t3', 'w5', 'no'],
['t4', 'w1', 'yes'],
['t4', 'w2', 'yes'],
['t4', 'w3', 'yes'],
['t4', 'w4', 'yes'],
['t4', 'w5', 'yes'],
['t5', 'w1', 'yes'],
['t5', 'w2', 'no'],
['t5', 'w3', 'no'],
['t5', 'w4', 'no'],
['t5', 'w5', 'no'],
],
columns=['task', 'performer', 'label']
)
@pytest.fixture
def probas_iter_0():
return _make_probas([
['t1', 0.5, 0.5],
['t2', 0.4, 0.6],
['t3', 0.6, 0.4],
['t4', 0.0, 1.0],
['t5', 0.8, 0.2],
])
@pytest.fixture
def priors_iter_0():
return pd.Series([0.46, 0.54], pd.Index(['no', 'yes'], name='label'))
@pytest.fixture
def tasks_labels_iter_0():
return _make_tasks_labels([
['t1', 'no'],
['t2', 'yes'],
['t3', 'no'],
['t4', 'yes'],
['t5', 'no'],
])
@pytest.fixture
def errors_iter_0():
return _make_errors([
['w1', 'no', 0.22, 0.19],
['w1', 'yes', 0.78, 0.81],
['w2', 'no', 0.61, 0.22],
['w2', 'yes', 0.39, 0.78],
['w3', 'no', 0.78, 0.27],
['w3', 'yes', 0.22, 0.73],
['w4', 'no', 0.52, 0.30],
['w4', 'yes', 0.48, 0.70],
['w5', 'no', 1.00, 0.63],
['w5', 'yes', 0.00, 0.37],
])
@pytest.fixture
def probas_iter_1():
return _make_probas([
['t1', 0.35, 0.65],
['t2', 0.26, 0.74],
['t3', 0.87, 0.13],
['t4', 0.00, 1.00],
['t5', 0.95, 0.05],
])
@pytest.fixture
def priors_iter_1():
# return pd.Series([0.49, 0.51], pd.Index(['no', 'yes'], name='label'))
return pd.Series([0.49, 0.51], pd.Index(['no', 'yes']))
@pytest.fixture
def tasks_labels_iter_1():
return _make_tasks_labels([
['t1', 'yes'],
['t2', 'yes'],
['t3', 'no'],
['t4', 'yes'],
['t5', 'no'],
])
@pytest.fixture
def errors_iter_1():
return _make_errors([
['w1', 'no', 0.14, 0.25],
['w1', 'yes', 0.86, 0.75],
['w2', 'no', 0.75, 0.07],
['w2', 'yes', 0.25, 0.93],
['w3', 'no', 0.87, 0.09],
['w3', 'yes', 0.13, 0.91],
['w4', 'no', 0.50, 0.31],
['w4', 'yes', 0.50, 0.69],
['w5', 'no', 1.00, 0.61],
['w5', 'yes', 0.00, 0.39],
])
@pytest.mark.parametrize('n_iter', [0, 1])
def test_dawid_skene_step_by_step(request, data, n_iter):
probas = request.getfixturevalue(f'probas_iter_{n_iter}')
labels = request.getfixturevalue(f'tasks_labels_iter_{n_iter}')
errors = request.getfixturevalue(f'errors_iter_{n_iter}')
priors = request.getfixturevalue(f'priors_iter_{n_iter}')
ds = DawidSkene(n_iter).fit(data)
assert_frame_equal(probas, ds.probas_, check_like=True, atol=0.005)
assert_frame_equal(errors, ds.errors_, check_like=True, atol=0.005)
assert_series_equal(priors, ds.priors_, atol=0.005)
assert_series_equal(labels, ds.labels_, atol=0.005)
def test_dawid_skene_on_empty_input(request, data):
ds = DawidSkene(10).fit(pd.DataFrame([], columns=['task', 'performer', 'label']))
assert_frame_equal(pd.DataFrame(), ds.probas_, check_like=True, atol=0.005)
assert_frame_equal(pd.DataFrame(), ds.errors_, check_like=True, atol=0.005)
assert_series_equal(pd.Series(), ds.priors_, atol=0.005)
assert_series_equal(pd.Series(), ds.labels_, atol=0.005)
@pytest.mark.parametrize('overlap', [3, 300, 30000])
def test_dawid_skene_overlap(overlap):
data = pd.DataFrame([
{
'task': f't{task_id}',
'performer': f'p{perf_id}',
'label': 'yes' if (perf_id - task_id) % 3 else 'no',
}
for perf_id in range(overlap)
for task_id in range(3)
])
ds = DawidSkene(20).fit(data)
expected_probas = _make_probas([[f't{task_id}', 1/3., 2/3] for task_id in range(3)])
expected_labels = _make_tasks_labels([[f't{task_id}', 'yes'] for task_id in range(3)])
# TODO: check errors_
assert_frame_equal(expected_probas, ds.probas_, check_like=True, atol=0.005)
assert_series_equal(expected_labels, ds.labels_, atol=0.005)
    assert_series_equal(pd.Series({'no': 1/3, 'yes': 2/3}), ds.priors_, atol=0.005)  # api: pandas.Series
import numpy as np
import pandas as pd
import datetime
import argparse
def readCSV(dt):
"""
Read the CSV file into a dataframe for a YYYY-MM (dt)
Do preliminary cleaning
arg: dt -- string with format YYYY-MM
return df: dataframe containing data from csv
"""
folder = 'raw_data/'
filename = 'output-' + str(dt) + '-01T00_00_00+00_00.csv'
df = pd.read_csv(folder+filename)
df.when_captured = pd.to_datetime(df.when_captured)
# Need to change the format of the Time Stamp for all the measurements in the raw data
df.service_uploaded = df.service_uploaded.apply(lambda x: \
datetime.datetime.strptime(x, '%b %d, %Y @ %H:%M:%S.%f')\
.replace(tzinfo=datetime.timezone.utc))
#### Add a column for the year
df['year'] = pd.DatetimeIndex(df['when_captured']).year
#### Need to correct for the format of the PM numeric values.
df['pms_pm01_0'] = df['pms_pm01_0'].astype(str).str.replace(',', '').astype(float)
df['pms_pm10_0'] = df['pms_pm10_0'].astype(str).str.replace(',', '').astype(float)
df['pms_pm02_5'] = df['pms_pm02_5'].astype(str).str.replace(',', '').astype(float)
return df
def findBadData(df):
'''
    return the badRecords, i.e. the (device, when_captured) keys for records that have more than
    one record for the same key (as this should not be physically possible)
'''
temp_df = df.groupby(['device','when_captured']).size().to_frame('size').\
reset_index().sort_values('size', ascending=False)
print("bad device data counts: ")
badRecords = temp_df[(temp_df['size']>1)]
print(badRecords)
print("all bad device list: ")
    # Devices that have misbehaved at some point - more than one data value per time stamp
print(np.unique(temp_df[temp_df['size']>1]['device'].values)) # devices that have misbehaved
return badRecords
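# Hedged illustration of the duplicate check in findBadData above, run on a
# tiny made-up frame: device 1 reports two values for the same timestamp.
def _example_find_duplicate_keys():
    df = pd.DataFrame({
        'device': [1, 1, 2],
        'when_captured': pd.to_datetime(['2020-01-01', '2020-01-01', '2020-01-01']),
        'pms_pm02_5': [10.0, 12.0, 9.0],
    })
    counts = (df.groupby(['device', 'when_captured']).size()
                .to_frame('size').reset_index()
                .sort_values('size', ascending=False))
    # Keys with more than one record are the "bad" ones.
    return counts[counts['size'] > 1]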
def rmInvalidTimeStamps(df):
"""
remove invalid time stamped records
## remove records with NULL `when_captured`
## remove records where `when_captured` is an invalid
## remove records where gap of `service_uploaded` and `when_captured` > 7 days
"""
## remove records with NULL `when_captured`
print("Null date records to remove: ", df['when_captured'].isna().sum())
df = df[df['when_captured'].notna()]
print("df shape after remove records with NULL `when_captured` : ",df.shape)
## remove records where `when_captured` is an invalid
    boolean_condition = df['when_captured'] > pd.to_datetime("2000-01-19", infer_datetime_format=True)  # api: pandas.to_datetime
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 28 09:27:49 2020
@author: <NAME>
"""
import pickle
import pandas as pd
import numpy as np
from country import country
from scipy.integrate import solve_ivp
from scipy.optimize import minimize
from scipy.optimize import dual_annealing
from scipy.optimize import brute
from scipy.interpolate import interp1d
from scipy.ndimage.filters import uniform_filter1d
import psutil
from functools import partial
import multiprocessing as mp
from tqdm import tqdm_notebook as tqdm
import pdb
from datetime import date, datetime, timedelta
import time
from pathlib import Path
from matplotlib import pyplot as plt
import statsmodels.api as sm
from sklearn import linear_model
import matplotlib.patches as mpatches
import country_converter as coco
import math
import seaborn as sns
# --------------------------------------------------------
# Global variables, chosen cohorts of data and estimates
# --------------------------------------------------------
from param_simple import *
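# Hedged arithmetic note (not in the original file): the treatment-case death
# probability below decays with factor pdth_theta = exp(-ln 2 / pdth_halflife),
# so after `pdth_halflife` days the remaining gap to the assumed minimum is
# exactly halved.
def _example_halflife_factor(halflife=60):
    theta = np.exp(-np.log(2) / halflife)
    return theta, theta ** halflife   # second value is 0.5 (up to floating point)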
# ----------------------
# Main class
# ----------------------
class solveCovid:
    def __init__(self, iso2: str):  # e.g. 'US'
self.iso2 = iso2
# Policy strategies for forecast
self.policy = 'optim' # ['optim', 'linear']
self.phi_option = 'fit' # ['fit','exo']: Fit phi to latest data or specify as exogenous
self.phi_exo = 2.5e-9 # weight on mobility in social welfare function
self.phi_min = 1e-13 # Lowerbound for phi - authorities care about output
# Infection rate model for forecast
self.gamma_tilde_model = 'AR1' # ['AR1','AR2','shock']
self.gamma_shock_length = 10 # Shock gamma_tilde for x days
self.gamma_shock_depth = 0.5 # Daily increment of gamma
self.default_init_single = default_init_single
self.default_bounds_single = default_bounds_single
# Vaccine assumptions
self.vac_assump = 'vac_base' # Vaccination scenarios: ['vac_base','vac_worse','vac_better']
self.vac_receiver = 'S+R' # Vaccines given to S or S+R? ['S only','S+R']
self.effi_one = 0.5 # Efficacy after one dose in %
self.effi_two = 0.95 # Efficacy after two doses in %
self.target_weight = 0.7 # How targeted vaccine distribution is (1 = sequenced from eldest to youngest, 0 is random)
self.vac_base_cover = 1 # Baseline: (already started): % of effective coverage by December 2021 (to be controlled by country-specific scaling factor below)
self.vac_base_delayedstart = '2021-06-30' # Baseline: (hasn't started): first date of vaccination
self.vac_base_delayedcover = 0.75 # Baseline: (hasn't started): % of contracted dosages deployed by December 2021
self.vac_worse_cover = 0.3 # Worse (started): Use by end of 2021
self.vac_worse_delayedstart = '2021-09-30' # Worse (hasn't started): Starting date
self.vac_worse_delayedcover = 0.3 # Worse (hasn't started): Use by end of 2021
self.vac_better_cover = 1.3
self.vac_better_delayedstart = '2021-06-30'
self.vac_better_delayedcover = 1
# Reinfection and loss of immunity
self.reinfect = 'immune' # ['immune','reinfect']
self.r_re1_R = np.log(2)/10000 # Baseline: R loses immunity after 3 years
self.r_re1_V = np.log(2)/10000 # Baseline: V loses immunity after 3 years
self.r_re2_R = np.log(2)/60 # Downside risk: R loses immunity after 60 days, approx 1% of R lose immunity each day
self.r_re2_V = np.log(2)/60 # Downside risk: V loses immunity after 60 days, approx 1% of V lose immunity each day
# Death probabilities
self.pdth_assump = 'martingale' # ['martingale','treatment']
self.pdth_min = 0.005 # Lowerbound on death probability - countries with very few cases still think there is death probability
self.pdth_halflife = 60 # Halflife for treatment case; no. of days it takes to close half the gap of current and assumed minimum death prob
self.pdth_theta = np.exp(-np.log(2)/self.pdth_halflife)
# --------------- 1. Preliminary: Get the data ------------------------
def prelim(self):
iso2 = self.iso2
self.N = df1.fillna(method='ffill')['population'][iso2].iloc[-1]
df2 = df1.iloc[:,df1.columns.get_level_values(1)==iso2][[
'total_cases','total_deaths','new_cases','new_deaths',
'google_smooth','icu_patients','hosp_patients','reproduction_rate',
'new_tests','tests_per_case','aged_70_older',
'vac_total','vac_people',
'vac_fully']][df1['total_cases'][iso2] > virus_thres]
df2 = df2.droplevel('iso2',axis=1)
df2['vac_total'] = df2['vac_total'].interpolate()
df2['vac_people'] = df2['vac_people'].interpolate()
if iso2 == 'AU' or iso2 == 'SA': # Countries with no breakdowns; do manual approximation
df2['vac_partial'] = 0.8 * df2['vac_total']
df2['vac_fully'] = 0.2 * df2['vac_total']
else : # For most countries,
date1 = df2['vac_fully'].first_valid_index() # Next 2 lines fill NA in 'vac_fully', so vac_partial is defined
df2['vac_fully'].iloc[:df2.index.get_loc(date1)-1] = 0
df2['vac_fully'] = df2['vac_fully'].interpolate()
df2['vac_partial'] = df2['vac_people'] - df2['vac_fully']
df2 = df2.fillna(0) # Replace NaN by 0 - deaths and vaccinations
PopulationI = df2['total_cases'][0]
PopulationD = df2['total_deaths'][0]
if PopulationD==0:
PopulationD = 0
PopulationR = 5
else:
PopulationR = PopulationD * 5
PopulationCI = PopulationI - PopulationD - PopulationR # Undetected and infectious cases
self.cases_data_fit = df2['total_cases'].tolist()
self.deaths_data_fit = df2['total_deaths'].tolist()
self.newcases_data_fit = df2['new_cases'].tolist()
self.newdeaths_data_fit = df2['new_deaths'].tolist()
self.balance = self.cases_data_fit[-1] / max(self.deaths_data_fit[-1], 10) / 3
        date_day_since100 = pd.to_datetime(df2.index[0])  # api: pandas.to_datetime
import pickle
import numpy as np
import utils
from pathlib import Path
import pandas as pd
from itertools import combinations, product
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.colors import TwoSlopeNorm
from itertools import groupby
def add_line(ax, xpos, ypos, horizontal):
if horizontal:
line = plt.Line2D([ypos, ypos + .25], [xpos, xpos], color='black', transform=ax.transAxes, linewidth=0.1)
else:
line = plt.Line2D([xpos + .01, xpos + .01], [ypos - .09, ypos + .16], color='black', transform=ax.transAxes,
linewidth=0.1)
line.set_clip_on(False)
ax.add_line(line)
def label_len(my_index, level):
labels = my_index.get_level_values(level)
return [(k, sum(1 for i in g)) for k, g in groupby(labels)]
def label_group_bar_table(ax, df, horizontal):
if horizontal:
xpos = -.15
else:
lypos = -.15
scale = 1. / df.index.size
for level in range(df.index.nlevels):
pos = df.index.size if horizontal else -0.5
prev_line = 0.
for label, rpos in label_len(df.index, level):
if level == 1:
pos -= rpos if horizontal else -rpos
size = 5 if rpos == 1 else 7 if rpos == 2 else 9 if rpos < 5 else 11
if horizontal:
add_line(ax, pos * scale, xpos, horizontal)
lypos = (pos + .5 * rpos) * scale - 0.005
ax.text(xpos + .1, lypos, label, ha='center', transform=ax.transAxes, fontsize=size)
else:
add_line(ax, pos * scale, lypos, horizontal)
xpos = (pos + .0025 * rpos) * scale
text_x = prev_line + (pos * scale - prev_line) / 2
prev_line = pos * scale
ax.text(text_x + 0.01, lypos, label, ha='center', va='center', transform=ax.transAxes,
fontsize=size, rotation='vertical')
else:
for _ in range(rpos):
pos -= 1 if horizontal else -1
if horizontal:
lypos = (pos + .5) * scale - 0.005
ax.text(xpos + .1, lypos, label, ha='center', transform=ax.transAxes, fontsize=5)
else:
xpos = (pos + .5) * scale - 0.005
ax.text(xpos, lypos + .1, label, ha='center', va='center', transform=ax.transAxes, fontsize=5,
rotation='vertical')
# add_line(ax, pos*scale , xpos)
if horizontal:
xpos -= .1
else:
lypos -= .01
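# Hedged illustration (not in the original script): what label_len above
# returns for one level of a small, made-up two-level index.
def _example_label_len():
    idx = pd.MultiIndex.from_tuples(
        [('eng', 'Gender'), ('eng', 'POS'), ('fra', 'Gender')],
        names=['first', 'second'])
    frame = pd.DataFrame(index=idx)
    return label_len(frame.index, 0)   # [('eng', 2), ('fra', 1)]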
def obtain_ranking(model_type, lan, att, layer, ranking):
pkl_path = Path('pickles', 'UM', model_type, lan, att)
res_path = Path('results', 'UM', model_type, lan, att)
try:
if ranking == 'ttb linear':
label_to_idx_path = Path(pkl_path, 'label_to_idx.pkl')
with open(label_to_idx_path, 'rb') as f:
label_to_idx = pickle.load(f)
num_labels = len(label_to_idx)
linear_model_path = Path(pkl_path, 'best_model_whole_vector_layer_' + str(layer))
neurons = utils.sort_neurons_by_avg_weights(linear_model_path.__str__(), num_labels)
if ranking == 'ttb gaussian':
res_file_dir = Path(res_path, 'layer ' + str(layer))
bayes_res_path = Path(res_file_dir, 'bayes by bayes mi')
neurons = utils.sort_neurons_by_bayes_mi(bayes_res_path)
if ranking == 'ttb probeless':
cluster_ranking_path = Path(pkl_path, str(layer), 'cluster_ranking.pkl')
neurons = utils.sort_neurons_by_clusters(cluster_ranking_path)
    except Exception:
return None
return neurons
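# Hedged usage sketch for obtain_ranking above. The model type, language,
# attribute and layer below are hypothetical placeholders in the directory
# layout this script expects (pickles/UM/<model>/<lan>/<att>).
def _example_obtain_ranking():
    neurons = obtain_ranking('bert', 'eng', 'Gender', layer=7, ranking='ttb probeless')
    return None if neurons is None else neurons[:10]   # top-10 neuron indices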
def get_all_rankings(model_type, num_neurons: int = 768):
res_root = Path('results', 'UM', model_type)
languages = [name.name for name in res_root.glob('*') if name.is_dir()]
# languages = ['eng']
attributes = set([att.name for lan in languages for att in Path(res_root, lan).glob('*') if att.is_dir()])
layers = [2, 7, 12]
rankings = ['ttb linear', 'ttb gaussian', 'ttb probeless']
# cols = pd.MultiIndex.from_product([languages, attributes, layers])
# rows = pd.MultiIndex.from_product([rankings])
# df = pd.DataFrame(index=rows, columns=cols).sort_index().sort_index(axis=1)
all_rankings = dict()
for lan in languages:
print(lan)
all_rankings[lan] = dict()
for att in attributes:
if not Path('pickles', 'UM', model_type, lan, att).exists():
continue
all_rankings[lan][att] = dict()
for layer in layers:
all_rankings[lan][att][layer] = dict()
for ranking in rankings:
all_rankings[lan][att][layer][ranking] = obtain_ranking(model_type, lan, att, layer, ranking)
with open(Path('pickles', 'UM', model_type, 'all_rankings.pkl'), 'wb+') as f:
pickle.dump(all_rankings, f)
def rename_att(att):
new_att = 'Gender' if att.startswith('Gender') else 'POS' if att.startswith('Part') else att
return new_att
def plot_heatmap(model_type, num_neurons):
with open(Path('pickles', 'UM', model_type, 'all_rankings.pkl'), 'rb') as f:
all_rankings: dict = pickle.load(f)
top_neurons = {(rename_att(att), lan, layer, ranking): values[:num_neurons] if values else None for lan, lan_dict in
all_rankings.items() for
att, att_dict in lan_dict.items()
for layer, layer_dict in att_dict.items() for ranking, values in layer_dict.items()}
top_neurons = pd.DataFrame(top_neurons)
attributes, languages, layers, rankings = top_neurons.axes[1].levels
indices = []
languages = ['eng', 'spa', 'fra', 'fin', 'bul', 'rus', 'hin', 'ara', 'tur']
idx = pd.IndexSlice
for att in attributes:
for lan in languages:
if att not in set([a for a, _, _, _ in top_neurons.loc[:, idx[:, [lan]]].columns.values]):
continue
indices.append(f'{att}, {lan}')
# indices.sort(key=lambda x:x[1])
# matrix = pd.DataFrame(index=indices, columns=indices)
atts = [label[:label.index(',')] for label in indices]
lans = [label[label.index(' ') + 1:] for label in indices]
tuples = list(zip(lans, atts))
index = pd.MultiIndex.from_tuples(tuples, names=['first', 'second'])
matrix = | pd.DataFrame(index=index, columns=index) | pandas.DataFrame |
import os
import gc
import sys
import time
import click
import random
import sklearn
import numpy as np
import pandas as pd
import lightgbm as lgb
from tqdm import tqdm
from pprint import pprint
from functools import reduce
from lightgbm import LGBMClassifier
from sklearn.metrics import roc_auc_score, roc_curve
from config import read_config, KEY_FEATURE_MAP, KEY_MODEL_MAP
from utils import timer
from features.base import Base
from features.stacking import StackingFeaturesWithPasses
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
def get_train_test(conf):
df = Base.get_df(conf) # pd.DataFrame
feature_classes = [KEY_FEATURE_MAP[key] for key in conf.features]
features = [df]
for feature in feature_classes:
with timer(f"load (or create) {feature.__name__}"):
f = feature.get_df(conf)
features.append(f)
with timer("join on SK_ID_CURR"):
df = reduce(lambda lhs, rhs: lhs.merge(rhs, how='left', on='SK_ID_CURR'), features)
del features
gc.collect()
train_df = df[df['TARGET'].notnull()].copy()
test_df = df[df['TARGET'].isnull()].copy()
del df
gc.collect()
return train_df, test_df
def get_feature_importances(data, shuffle, seed=None):
# Gather real features
train_features = [f for f in data.columns if f not in ([
'TARGET', 'SK_ID_CURR', 'SK_ID_BUREAU', 'SK_ID_PREV', 'index'
])]
# Go over fold and keep track of CV score (train and valid) and feature importances
# Shuffle target if required
y = data['TARGET'].copy()
if shuffle:
# Here you could as well use a binomial distribution
y = data['TARGET'].copy().sample(frac=1.0)
# Fit LightGBM in RF mode, yes it's quicker than sklearn RandomForest
dtrain = lgb.Dataset(data[train_features], y, free_raw_data=False, silent=True)
lgb_params = {
'objective': 'binary',
'boosting_type': 'rf',
'subsample': 0.623,
'colsample_bytree': 0.7,
'num_leaves': 127,
'max_depth': 8,
'seed': seed,
'bagging_freq': 1,
'num_threads': 4,
'verbose': -1
}
# Fit the model
clf = lgb.train(params=lgb_params, train_set=dtrain, num_boost_round=600)
# Get feature importances
imp_df = pd.DataFrame()
imp_df["feature"] = list(train_features)
imp_df["importance_gain"] = clf.feature_importance(importance_type='gain')
imp_df["importance_split"] = clf.feature_importance(importance_type='split')
imp_df['trn_score'] = roc_auc_score(y, clf.predict(data[train_features]))
return imp_df
def score_feature_selection(df=None, train_features=None, target=None):
# Fit LightGBM
dtrain = lgb.Dataset(df[train_features], target, free_raw_data=False, silent=True)
lgb_params = {
'objective': 'binary',
'boosting_type': 'gbdt',
'learning_rate': .1,
'subsample': 0.8,
'colsample_bytree': 0.8,
'num_leaves': 31,
'max_depth': -1,
'seed': 13,
'num_threads': 4,
'min_split_gain': .00001,
'reg_alpha': .00001,
'reg_lambda': .00001,
'metric': 'auc'
}
# Fit the model
hist = lgb.cv(
params=lgb_params,
train_set=dtrain,
num_boost_round=2000,
nfold=5,
stratified=True,
shuffle=True,
early_stopping_rounds=50,
verbose_eval=500,
seed=47
)
# Return the last mean / std values
return hist['auc-mean'][-1], hist['auc-stdv'][-1]
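# Hypothetical usage sketch (names below are illustrative, not from the original script):
# once null-importance scores are computed, a candidate feature subset could be validated
# with the CV helper above, e.g.
# selected_feats = [f for f, split_score, gain_score in feature_scores if gain_score > 0]
# auc_mean, auc_std = score_feature_selection(df=data, train_features=selected_feats, target=data['TARGET'])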
@click.command()
@click.option('--config_file', type=str, default='./configs/lgbm_0.json')
def main(config_file):
np.random.seed(47)
conf = read_config(config_file)
print("config:")
pprint(conf)
data, _ = get_train_test(conf)
with timer("calc actual importance"):
if os.path.exists("misc/actual_imp_df.pkl"):
actual_imp_df = pd.read_pickle("misc/actual_imp_df.pkl")
else:
actual_imp_df = get_feature_importances(data=data, shuffle=False)
actual_imp_df.to_pickle("misc/actual_imp_df.pkl")
print(actual_imp_df.head())
with timer("calc null importance"):
nb_runs = 100
if os.path.exists(f"misc/null_imp_df_run{nb_runs}time.pkl"):
null_imp_df = pd.read_pickle(f"misc/null_imp_df_run{nb_runs}time.pkl")
else:
null_imp_df = pd.DataFrame()
for i in range(nb_runs):
start = time.time()
# Get current run importances
imp_df = get_feature_importances(data=data, shuffle=True)
imp_df['run'] = i + 1
# Concat the latest importances with the old ones
null_imp_df = pd.concat([null_imp_df, imp_df], axis=0)
# Display current run and time used
spent = (time.time() - start) / 60
dsp = '\rDone with %4d of %4d (Spent %5.1f min)' % (i + 1, nb_runs, spent)
print(dsp, end='', flush=True)
null_imp_df.to_pickle(f"misc/null_imp_df_run{nb_runs}time.pkl")
print(null_imp_df.head())
with timer('score features'):
feature_scores = []
for _f in actual_imp_df['feature'].unique():
f_null_imps_gain = null_imp_df.loc[null_imp_df['feature'] == _f, 'importance_gain'].values
f_act_imps_gain = actual_imp_df.loc[actual_imp_df['feature'] == _f, 'importance_gain'].mean()
gain_score = np.log(1e-10 + f_act_imps_gain / (1 + np.percentile(f_null_imps_gain, 75)))  # Avoid divide by zero
f_null_imps_split = null_imp_df.loc[null_imp_df['feature'] == _f, 'importance_split'].values
f_act_imps_split = actual_imp_df.loc[actual_imp_df['feature'] == _f, 'importance_split'].mean()
split_score = np.log(1e-10 + f_act_imps_split / (1 + np.percentile(f_null_imps_split, 75)))  # Avoid divide by zero
feature_scores.append((_f, split_score, gain_score))
scores_df = | pd.DataFrame(feature_scores, columns=['feature', 'split_score', 'gain_score']) | pandas.DataFrame |
import sys
import numpy as np
import pandas as pd
from optparse import OptionParser
import os
from scipy.stats import entropy
from scipy import signal
import scipy.stats as spstats
import fnmatch
from datetime import datetime
from scipy.stats import skew
from scipy.stats import kurtosis
from scipy.stats import t
from scipy.optimize import fsolve
import scipy.special as sc
# Extracts aggregate features per run from raw eye tracking and oculomotor event data,
# and builds a single feature matrix for use as input to train and validate a predictive
# model. If the feature matrix file already exists from a prior run of getFeatureMatrix(),
# you can save time by specifying useExisting=True to load it directly from the file
# rather than recomputing it from scratch.
# Research was sponsored by the United States Air Force Research Laboratory and the
# United States Air Force Artificial Intelligence Accelerator and was accomplished
# under Cooperative Agreement Number FA8750-19-2-1000. The views and conclusions
# contained in this document are those of the authors and should not be interpreted
# as representing the official policies, either expressed or implied, of the United
# States Air Force or the U.S. Government. The U.S. Government is authorized to
# reproduce and distribute reprints for Government purposes notwithstanding any
# copyright notation herein.
# def main():
# parser = OptionParser()
# parser.add_option('-d', '--dataDir', action="store", dest="dataDir", default=None, help="The top level data directory containing all the raw signal files for each subject.")
# parser.add_option('-o', '--outFilePath', action="store", dest="outFilePath", default=None, help="File to write full feature matrix.");
# (options, args) = parser.parse_args()
# getFeatureMatrix(options.dataDir, options.outFilePath);
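# Illustrative direct call (hypothetical paths), mirroring the commented-out CLI entry point above:
# featMatDF = getFeatureMatrix("/path/to/raw_data", "/path/to/feature_matrix.csv", useExisting=True)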
def getFeatureMatrix(dataDir, filePath, useExisting):
if useExisting:
if os.path.exists(filePath):
print("Found precomputed feature matrix.")
featMatDF = pd.read_csv(filePath)
print("Loaded into a dataFrame.")
return featMatDF
else:
print(
"Cannot use existing feature matrix because specified file was not found. Recomputing it from scratch."
)
subjDirs = [f.path for f in os.scandir(dataDir) if f.is_dir()]
dfHeader = [
"Subject",
"Session",
"Run",
"OverallGazeEntropyLX",
"psdMaxLX",
"psdFreqOfMaxLX",
"OverallGazeEntropyLY",
"psdMaxLY",
"psdFreqOfMaxLY",
"OverallGazeEntropyLZ",
"psdMaxLZ",
"psdFreqOfMaxLZ",
"OverallGazeEntropyRX",
"psdMaxRX",
"psdFreqOfMaxRX",
"OverallGazeEntropyRY",
"psdMaxRY",
"psdFreqOfMaxRY",
"OverallGazeEntropyRZ",
"psdMaxRZ",
"psdFreqOfMaxRZ",
"EyesClosedFractionL",
"EyesClosedFractionR",
"PupilDiamMeanL",
"PupilDiamStdevL",
"PupilDiamSkewL",
"PupilDiamKurtL",
"PupilDiamMeanR",
"PupilDiamStdevR",
"PupilDiamSkewR",
"PupilDiamKurtR",
"FixDurMean",
"FixDurStdev",
"FixDurSkew",
"FixDurKurt",
"FixDensityMean",
"FixDensityStdev",
"FixDensitySkew",
"FixDensityKurt",
"SacMainSeqMean",
"SacMainSeqStdev",
"SacPeakVelMean",
"SacPeakVelStdev",
]
# walks through the directory structure of the raw data
featMat = []
ctr = 1
for subjd in subjDirs:
sessDirs = [f.path for f in os.scandir(subjd) if f.is_dir()]
print(
"Processing subject "
+ str(ctr)
+ " of "
+ str(len(subjDirs))
+ ": "
+ os.path.basename(subjd)
)
ctr = ctr + 1
for sessd in sessDirs:
runDirs = [f.path for f in os.scandir(sessd) if f.is_dir()]
for rund in runDirs:
dataFiles = [f.path for f in os.scandir(rund) if f.is_file()]
toks = rund.split(os.path.sep)
subj = toks[-3]
sess = toks[-2]
run = toks[-1]
rawEyeFile = fnmatch.filter(dataFiles, "*lslhtcviveeye*.csv")
dfraw = | pd.read_csv(rawEyeFile[0]) | pandas.read_csv |
'''
In order to generate Betti_0 and Betti_1 of the 2017 dailyAmountMatrices, change the format of all matrices according to the format described at http://people.maths.ox.ac.uk/nanda/perseus/
format:
3: the ambient dimension, i.e., the number of coordinates per vertex.
1 0.01 100: the radius scaling factor k=1, the step size s=0.01, the number of steps N=100
1.2 3.4 -0.9 0.5: the vertex (1.2, 3.4, -0.9) with associated radius r = 0.5
2.0 -6.6 4.1 0.3: the vertex (2.0, -6.6, 4.1) with associated radius r = 0.3
and so on!
example:
(http://people.maths.ox.ac.uk/nanda/source/distmat.txt)
then use the following command to convert a matrix:
(path to perseus executable) (complex type) (input filename) (output file string)
command example:
./perseus distmat ../data/dailyAmoMatrices/amo2017001.csv ../data/random_regression
'''
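# Minimal sketch (not part of the original pipeline) of writing a point cloud with
# per-vertex radii in the Perseus layout described in the docstring above; the file
# name, points and radii used below are hypothetical examples.
def write_perseus_point_cloud(path, points, radii, k=1, step=0.01, n_steps=100):
    with open(path, "w") as f:
        f.write("3\n")                      # ambient dimension (coordinates per vertex)
        f.write(f"{k} {step} {n_steps}\n")  # radius scaling factor, step size, number of steps
        for (x, y, z), r in zip(points, radii):
            f.write(f"{x} {y} {z} {r}\n")   # vertex coordinates followed by the associated radius
# write_perseus_point_cloud("example_cloud.txt", [(1.2, 3.4, -0.9), (2.0, -6.6, 4.1)], [0.5, 0.3])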
import pandas as pd
import os, shutil
import numpy as np
YEAR = 2017
input_data_path = "C:/Users/wang.yuhao/Documents/ChainNet/data/original_data/dailyAmoMatrices/"
output_data_path = "C:/Users/wang.yuhao/Documents/ChainNet/data/processed_data/dailyVrAmoMatrices/"
def clean_folder(folder_name):
for filename in os.listdir(folder_name):
file_path = os.path.join(folder_name, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
print("Remove {} successful.".format(file_path) )
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print('Failed to delete %s. Reason: %s' % (file_path, e))
def read_csv(file_name, day):
names=[]
for i in range(20):
names.append(str(i))
data = pd.read_csv(file_name, header=None, names=names)
data = data/(10**8) + 1  # 10**8, not 10^8 (which is bitwise XOR in Python)
data = data.apply(np.log)
row_count = | pd.DataFrame({"0": [data.shape[0]]}) | pandas.DataFrame |
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A few common classes to be used for the library."""
import dataclasses
import enum
from typing import Any, Callable, Dict, NamedTuple
import pandas as pd
class GeoAssignment(enum.IntEnum):
"""Defines the values for Treatment/Control assignment."""
CONTROL = 0
TREATMENT = 1
EXCLUDED = -1
class GeoXType(enum.Enum):
"""Defines the types of experimental arms."""
CONTROL = enum.auto() # business as usual
GO_DARK = enum.auto() # stop ad spend (0 during test, >0 during pretest)
HEAVY_UP = enum.auto() # increase ad spend
HEAVY_DOWN = enum.auto() # decrease ad spend
HOLD_BACK = enum.auto() # start ad spend during test (0 during pretest)
GO_DARK_TREATMENT_NOT_BAU_CONTROL = enum.auto(
) # 0 ad spend in treatment, changed (up/down) ad spend in control
class GeoLevelData(NamedTuple):
"""Geo level data."""
geo: int
response: float
spend: float
class GeoLevelPotentialOutcomes(NamedTuple):
"""Two potential outcomes."""
controlled: GeoLevelData
treated: GeoLevelData
@dataclasses.dataclass
class TimeWindow:
"""Defines a time window using first day and last day."""
first_day: pd.Timestamp
last_day: pd.Timestamp
def __post_init__(self):
if not isinstance(self.first_day, pd.Timestamp):
self.first_day = | pd.Timestamp(self.first_day) | pandas.Timestamp |
"""
https://pandas.pydata.org/pandas-docs/stable/getting_started/overview.html
pandas is a Python package providing fast, flexible, and expressive data structures
designed to make working with “relational” or “labeled” data both easy and intuitive.
It aims to be the fundamental high-level building block for doing practical,
real world data analysis in Python.
Additionally, it has the broader goal of becoming the most powerful
and flexible open source data analysis / manipulation tool available in any language.
It is already well on its way toward this goal.
pandas is well suited for many different kinds of data:
Tabular data with heterogeneously-typed columns, as in an SQL table or Excel spreadsheet
Ordered and unordered (not necessarily fixed-frequency) time series data.
Arbitrary matrix data (homogeneously typed or heterogeneous) with row and column labels
Any other form of observational / statistical data sets. The data actually need not be labeled at all to be placed into a pandas data structure
The two primary data structures of pandas, Series (1-dimensional) and DataFrame (2-dimensional),
handle the vast majority of typical use cases in
finance, statistics, social science, and many areas of engineering.
For R users, DataFrame provides everything that R’s data.frame provides and much more.
pandas is built on top of NumPy and is intended to integrate well within
a scientific computing environment with many other 3rd party libraries.
Here are just a few of the things that pandas does well:
Easy handling of missing data (represented as NaN) in floating point
as well as non-floating point data
Size mutability: columns can be inserted and deleted from
DataFrame and higher dimensional objects
Automatic and explicit data alignment: objects can be explicitly aligned to a set of labels,
or the user can simply ignore the labels and let Series, DataFrame, etc.
automatically align the data for you in computations
Powerful, flexible group by functionality to perform split-apply-combine operations
on data sets, for both aggregating and transforming data
Make it easy to convert ragged, differently-indexed data in other Python
and NumPy data structures into DataFrame objects
Intelligent label-based slicing, fancy indexing, and subsetting of large data sets
Intuitive merging and joining data sets
Flexible reshaping and pivoting of data sets
Hierarchical labeling of axes (possible to have multiple labels per tick)
Robust IO tools for loading data from flat files (CSV and delimited),
Excel files, databases, and saving / loading data from the ultrafast HDF5 format
Time series-specific functionality: date range generation and frequency conversion,
moving window statistics, moving window linear regressions, date shifting and lagging, etc.
Many of these principles are here to address the shortcomings frequently
experienced using other languages / scientific research environments.
For data scientists, working with data is typically divided into multiple stages:
munging and cleaning data, analyzing / modeling it, then organizing the results
of the analysis into a form suitable for plotting or tabular display.
pandas is the ideal tool for all of these tasks.
Some other notes
pandas is fast. Many of the low-level algorithmic bits have been extensively tweaked
in Cython code. However, as with anything else generalization usually sacrifices performance.
So if you focus on one feature for your application you may be able
to create a faster specialized tool.
pandas is a dependency of statsmodels, making it an important part of
the statistical computing ecosystem in Python.
pandas has been used extensively in production in financial applications.
Data Structures
Dimensions Name Description
1 Series 1D labeled homogeneously-typed array
2 DataFrame General 2D labeled, size-mutable tabular structure with potentially heterogeneously-typed columns
Why more than one data structure?
The best way to think about the pandas data structures is as flexible containers
for lower dimensional data. For example, DataFrame is a container for Series,
and Series is a container for scalars. We would like to be able to insert and remove
objects from these containers in a dictionary-like fashion.
Also, we would like sensible default behaviors for the common API functions which take
into account the typical orientation of time series and cross-sectional data sets.
When using ndarrays to store 2- and 3-dimensional data, a burden is placed on the user
to consider the orientation of the data set when writing functions;
axes are considered more or less equivalent
(except when C- or Fortran-contiguousness matters for performance).
In pandas, the axes are intended to lend more semantic meaning to the data;
i.e., for a particular data set there is likely to be a “right” way to orient the data.
The goal, then, is to reduce the amount of mental effort required to code up data
transformations in downstream functions.
For example, with tabular data (DataFrame) it is more semantically helpful to think of the index
(the rows) and the columns rather than axis 0 and axis 1.
Iterating through the columns of the DataFrame thus results in more readable code:
for col in df.columns:
series = df[col]
# do something with series
Mutability and copying of data
All pandas data structures are value-mutable
(the values they contain can be altered) but not always size-mutable.
The length of a Series cannot be changed, but, for example, columns
can be inserted into a DataFrame. However, the vast majority of methods produce new objects
and leave the input data untouched. In general we like to favor immutability where sensible.
"""
import numpy as np
import pandas as pd
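# Illustrative sketch (added, not part of the original walkthrough): pandas objects are
# value-mutable but a Series is not size-mutable, and most methods return new objects
# rather than modifying the input in place.
s_demo = pd.Series([1, 2, 3])
s_demo.iloc[0] = 10             # values can be altered in place
s_shifted = s_demo.add(1)       # add() returns a new Series; s_demo itself is unchanged
df_demo = pd.DataFrame({'A': s_demo})
df_demo['B'] = s_demo * 2       # columns can be inserted into a DataFrame (size-mutable)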
# Object Creation
# Creating a Series by passing a list of values, letting pandas create a default integer index
s = pd.Series([1, 3, 5, np.nan, 6, 8])
print(s)
# Creating a DataFrame by passing a NumPy array, with a datetime index and labeled columns
dates = pd.date_range('20130101', periods=6)
print(dates)
# Creating a DataFrame by passing a dict of objects that can be converted to series-like
df2 = pd.DataFrame({
'A': 1.,
'B': | pd.Timestamp('20130102') | pandas.Timestamp |
import pytest
import numpy as np
import pandas as pd
from pandas import Categorical, Series, CategoricalIndex
from pandas.core.dtypes.concat import union_categoricals
from pandas.util import testing as tm
class TestUnionCategoricals(object):
def test_union_categorical(self):
# GH 13361
data = [
(list('abc'), list('abd'), list('abcabd')),
([0, 1, 2], [2, 3, 4], [0, 1, 2, 2, 3, 4]),
([0, 1.2, 2], [2, 3.4, 4], [0, 1.2, 2, 2, 3.4, 4]),
(['b', 'b', np.nan, 'a'], ['a', np.nan, 'c'],
['b', 'b', np.nan, 'a', 'a', np.nan, 'c']),
(pd.date_range('2014-01-01', '2014-01-05'),
pd.date_range('2014-01-06', '2014-01-07'),
pd.date_range('2014-01-01', '2014-01-07')),
(pd.date_range('2014-01-01', '2014-01-05', tz='US/Central'),
pd.date_range('2014-01-06', '2014-01-07', tz='US/Central'),
pd.date_range('2014-01-01', '2014-01-07', tz='US/Central')),
(pd.period_range('2014-01-01', '2014-01-05'),
pd.period_range('2014-01-06', '2014-01-07'),
pd.period_range('2014-01-01', '2014-01-07')),
]
for a, b, combined in data:
for box in [Categorical, CategoricalIndex, Series]:
result = union_categoricals([box(Categorical(a)),
box(Categorical(b))])
expected = Categorical(combined)
tm.assert_categorical_equal(result, expected,
check_category_order=True)
# new categories ordered by appearance
s = Categorical(['x', 'y', 'z'])
s2 = Categorical(['a', 'b', 'c'])
result = union_categoricals([s, s2])
expected = Categorical(['x', 'y', 'z', 'a', 'b', 'c'],
categories=['x', 'y', 'z', 'a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
s = Categorical([0, 1.2, 2], ordered=True)
s2 = Categorical([0, 1.2, 2], ordered=True)
result = union_categoricals([s, s2])
expected = Categorical([0, 1.2, 2, 0, 1.2, 2], ordered=True)
tm.assert_categorical_equal(result, expected)
# must exactly match types
s = Categorical([0, 1.2, 2])
s2 = Categorical([2, 3, 4])
msg = 'dtype of categories must be the same'
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([s, s2])
msg = 'No Categoricals to union'
with tm.assert_raises_regex(ValueError, msg):
union_categoricals([])
def test_union_categoricals_nan(self):
# GH 13759
res = union_categoricals([pd.Categorical([1, 2, np.nan]),
pd.Categorical([3, 2, np.nan])])
exp = Categorical([1, 2, np.nan, 3, 2, np.nan])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([pd.Categorical(['A', 'B']),
pd.Categorical(['B', 'B', np.nan])])
exp = Categorical(['A', 'B', 'B', 'B', np.nan])
tm.assert_categorical_equal(res, exp)
val1 = [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-03-01'),
pd.NaT]
val2 = [pd.NaT, pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-02-01')]
res = union_categoricals([pd.Categorical(val1), pd.Categorical(val2)])
exp = Categorical(val1 + val2,
categories=[pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-03-01'),
pd.Timestamp('2011-02-01')])
tm.assert_categorical_equal(res, exp)
# all NaN
res = union_categoricals([pd.Categorical([np.nan, np.nan]),
pd.Categorical(['X'])])
exp = Categorical([np.nan, np.nan, 'X'])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([pd.Categorical([np.nan, np.nan]),
pd.Categorical([np.nan, np.nan])])
exp = Categorical([np.nan, np.nan, np.nan, np.nan])
tm.assert_categorical_equal(res, exp)
def test_union_categoricals_empty(self):
# GH 13759
res = union_categoricals([pd.Categorical([]),
pd.Categorical([])])
exp = Categorical([])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([pd.Categorical([]),
pd.Categorical([1.0])])
exp = Categorical([1.0])
tm.assert_categorical_equal(res, exp)
# to make dtype equal
nanc = pd.Categorical(np.array([np.nan], dtype=np.float64))
res = union_categoricals([nanc,
pd.Categorical([])])
tm.assert_categorical_equal(res, nanc)
def test_union_categorical_same_category(self):
# check fastpath
c1 = Categorical([1, 2, 3, 4], categories=[1, 2, 3, 4])
c2 = Categorical([3, 2, 1, np.nan], categories=[1, 2, 3, 4])
res = union_categoricals([c1, c2])
exp = Categorical([1, 2, 3, 4, 3, 2, 1, np.nan],
categories=[1, 2, 3, 4])
tm.assert_categorical_equal(res, exp)
c1 = Categorical(['z', 'z', 'z'], categories=['x', 'y', 'z'])
c2 = Categorical(['x', 'x', 'x'], categories=['x', 'y', 'z'])
res = union_categoricals([c1, c2])
exp = Categorical(['z', 'z', 'z', 'x', 'x', 'x'],
categories=['x', 'y', 'z'])
tm.assert_categorical_equal(res, exp)
def test_union_categoricals_ordered(self):
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([1, 2, 3], ordered=False)
msg = 'Categorical.ordered must be the same'
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2])
res = union_categoricals([c1, c1])
exp = Categorical([1, 2, 3, 1, 2, 3], ordered=True)
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3, np.nan], ordered=True)
c2 = Categorical([3, 2], categories=[1, 2, 3], ordered=True)
res = union_categoricals([c1, c2])
exp = Categorical([1, 2, 3, np.nan, 3, 2], ordered=True)
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([1, 2, 3], categories=[3, 2, 1], ordered=True)
msg = "to union ordered Categoricals, all categories must be the same"
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2])
def test_union_categoricals_ignore_order(self):
# GH 15219
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([1, 2, 3], ordered=False)
res = union_categoricals([c1, c2], ignore_order=True)
exp = Categorical([1, 2, 3, 1, 2, 3])
| tm.assert_categorical_equal(res, exp) | pandas.util.testing.assert_categorical_equal |
# This code extracts the features from the raw joined dataset (data.csv)
# and save it in the LibSVM format.
# Usage: python construct_features.py
import pandas as pd
import numpy as np
from sklearn.datasets import dump_svmlight_file
df = pd.read_csv("data.csv", low_memory=False)
# NPU
NPU = df.NPU.copy()
NPU[NPU == ' '] = np.nan
NPU = pd.get_dummies(NPU, prefix="NPU")
# SiteZip
SiteZip = df.SiteZip.copy()
SiteZip = SiteZip.str.replace(',','')
SiteZip = SiteZip.str.replace(r'\.00','')
SiteZip = SiteZip.replace('0',np.nan)
SiteZip = pd.get_dummies(SiteZip, prefix="SiteZip")
# Submarket1
Submarket1 = df.Submarket1.copy()
Submarket1 = pd.get_dummies(Submarket1, prefix="Submarket1")
# TAX_DISTR
TAX_DISTR = df.TAX_DISTR.copy()
TAX_DISTR[TAX_DISTR == ' '] = np.nan
TAX_DISTR = pd.get_dummies(TAX_DISTR, prefix="TAX_DISTR")
# NBHD
NBHD = df.NBHD.copy()
NBHD[NBHD == ' '] = np.nan
NBHD = pd.get_dummies(NBHD, prefix="NBHD")
# ZONING_NUM
ZONING_NUM = df.ZONING_NUM.copy()
ZONING_NUM[ZONING_NUM == ' '] = np.nan
ZONING_NUM = pd.get_dummies(ZONING_NUM, prefix="ZONING_NUM")
# building_c
building_c = df.building_c.copy()
building_c[building_c == ' '] = np.nan
building_c = pd.get_dummies(building_c, prefix="building_c")
# PROP_CLASS
PROP_CLASS = df.PROP_CLASS.copy()
PROP_CLASS[PROP_CLASS == ' '] = np.nan
PROP_CLASS = | pd.get_dummies(PROP_CLASS, prefix="PROP_CLASS") | pandas.get_dummies |
#!/usr/bin/env python
"""Module containing functions for converting messages to dataframe."""
import collections
import datetime
import stat
from typing import Text, Sequence, List, Any, Dict, Optional
import pandas as pd
from google.protobuf import descriptor
from google.protobuf import message
from grr_response_proto import osquery_pb2
from grr_response_proto import semantic_pb2
def from_sequence(seq: Sequence[Any]) -> pd.DataFrame:
"""Converts sequence of objects to a dataframe.
Args:
seq: Sequence of objects to convert.
Returns:
Pandas dataframe representing given sequence of objects.
"""
dframes = [from_object(obj) for obj in seq]
if not dframes:
return pd.DataFrame()
return pd.concat(dframes, ignore_index=True, sort=False)
def from_object(obj: Any) -> pd.DataFrame:
"""Converts object to a dataframe.
Args:
obj: Object to convert.
Returns:
Pandas dataframe representing given object.
"""
if isinstance(obj, message.Message):
return from_message(obj)
return pd.DataFrame(data=[obj])
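# Hypothetical usage sketch: non-protobuf objects are each wrapped in a one-row frame by
# from_object() and concatenated by from_sequence(), so e.g. from_sequence([1, 2, 3])
# yields a three-row, single-column dataframe.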
def from_message(msg: message.Message,
components: Optional[List[Text]] = None) -> pd.DataFrame:
"""Converts protobuf message to a dataframe.
Args:
msg: Protobuf message to convert.
components: Prefixes for column names.
Returns:
Pandas dataframe representing given message.
"""
if components is None:
components = []
data = {}
for desc, value in msg.ListFields():
if isinstance(value, message.Message):
data.update(from_message(value, components + [desc.name]))
else:
data.update(_get_pretty_value(value, desc, components))
return | pd.DataFrame(data=data) | pandas.DataFrame |
from __future__ import print_function
from __future__ import division
from builtins import zip
from builtins import range
from builtins import object
from past.utils import old_div
__author__ = 'grburgess'
import collections
import os
import numpy as np
import pandas as pd
from pandas import HDFStore
from threeML.exceptions.custom_exceptions import custom_warnings
from threeML.io.file_utils import sanitize_filename
from threeML.utils.spectrum.binned_spectrum import Quality
from threeML.utils.time_interval import TimeIntervalSet
from threeML.utils.time_series.polynomial import polyfit, unbinned_polyfit, Polynomial
class ReducingNumberOfThreads(Warning):
pass
class ReducingNumberOfSteps(Warning):
pass
class OverLappingIntervals(RuntimeError):
pass
# find out how many splits we need to make
def ceildiv(a, b):
return -(-a // b)
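# e.g. ceildiv(10, 3) == 4 and ceildiv(9, 3) == 3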
class TimeSeries(object):
def __init__(self, start_time, stop_time, n_channels, native_quality=None,
first_channel=1, ra=None, dec=None, mission=None, instrument=None, verbose=True, edges=None):
"""
The EventList is a container for event data that is tagged in time and in PHA/energy. It handles event selection,
temporal polynomial fitting, temporal binning, and exposure calculations (in subclasses). Once events are selected
and/or polynomials are fit, the selections can be extracted via a PHAContainer which is can be read by an OGIPLike
instance and translated into a PHA instance.
:param n_channels: Number of detector channels
:param start_time: start time of the event list
:param stop_time: stop time of the event list
:param first_channel: where detchans begin indexing
:param rsp_file: the response file corresponding to these events
:param arrival_times: list of event arrival times
:param energies: list of event energies or pha channels
:param native_quality: native pha quality flags
:param edges: The histogram boundaries if not specified by a response
:param mission:
:param instrument:
:param verbose:
:param ra:
:param dec:
"""
self._verbose = verbose
self._n_channels = n_channels
self._first_channel = first_channel
self._native_quality = native_quality
# we haven't made selections yet
self._time_intervals = None
self._poly_intervals = None
self._counts = None
self._exposure = None
self._poly_counts = None
self._poly_count_err = None
self._poly_selected_counts= None
self._poly_exposure = None
# ebounds for objects w/o a response
self._edges = edges
if native_quality is not None:
assert len(
native_quality) == n_channels, "the native quality has length %d but you specified there were %d channels" % (
len(native_quality), n_channels)
self._start_time = start_time
self._stop_time = stop_time
# name the instrument if there is not one
if instrument is None:
custom_warnings.warn('No instrument name is given. Setting to UNKNOWN')
self._instrument = "UNKNOWN"
else:
self._instrument = instrument
if mission is None:
custom_warnings.warn('No mission name is given. Setting to UNKNOWN')
self._mission = "UNKNOWN"
else:
self._mission = mission
self._user_poly_order = -1
self._time_selection_exists = False
self._poly_fit_exists = False
self._fit_method_info = {"bin type": None, 'fit method': None}
def set_active_time_intervals(self, *args):
raise RuntimeError("Must be implemented in subclass")
@property
def poly_fit_exists(self):
return self._poly_fit_exists
@property
def n_channels(self):
return self._n_channels
@property
def poly_intervals(self):
return self._poly_intervals
@property
def polynomials(self):
""" Returns polynomial is they exist"""
if self._poly_fit_exists:
return self._polynomials
else:
RuntimeError('A polynomial fit has not been made.')
def get_poly_info(self):
"""
Return a dict of pandas DataFrames with the polynomial coefficients
and errors
Returns:
a dict with 'coefficients' and 'error' DataFrames
"""
if self._poly_fit_exists:
coeff = []
err = []
for poly in self._polynomials:
coeff.append(poly.coefficients)
err.append(poly.error)
df_coeff = pd.DataFrame(coeff)
df_err = pd.DataFrame(err)
# print('Coefficients')
#
# display(df_coeff)
#
# print('Coefficient Error')
#
# display(df_err)
pan = {'coefficients': df_coeff, 'error': df_err}
return pan
else:
RuntimeError('A polynomial fit has not been made.')
def get_total_poly_count(self, start, stop, mask=None):
"""
Get the total poly counts
:param start:
:param stop:
:return:
"""
if mask is None:
mask = np.ones_like(self._polynomials, dtype=bool)
total_counts = 0
for p in np.asarray(self._polynomials)[mask]:
total_counts += p.integral(start, stop)
return total_counts
def get_total_poly_error(self, start, stop, mask=None):
"""
Get the total poly error
:param start:
:param stop:
:return:
"""
if mask is None:
mask = np.ones_like(self._polynomials, dtype=bool)
total_counts = 0
for p in np.asarray(self._polynomials)[mask]:
total_counts += p.integral_error(start, stop) ** 2
return np.sqrt(total_counts)
@property
def bins(self):
if self._temporal_binner is not None:
return self._temporal_binner
else:
raise RuntimeError('This EventList has no binning specified')
def __set_poly_order(self, value):
""" Set poly order only in allowed range and redo fit """
assert type(value) is int, "Polynomial order must be integer"
assert -1 <= value <= 4, "Polynomial order must be 0-4 or -1 to have it determined"
self._user_poly_order = value
if self._poly_fit_exists:
print('Refitting background with new polynomial order (%d) and existing selections' % value)
if self._time_selection_exists:
self.set_polynomial_fit_interval(*self._poly_intervals.to_string().split(','), unbinned=self._unbinned)
else:
RuntimeError("This is a bug. Should never get here")
def ___set_poly_order(self, value):
""" Indirect poly order setter """
self.__set_poly_order(value)
def __get_poly_order(self):
""" get the poly order """
return self._optimal_polynomial_grade
def ___get_poly_order(self):
""" Indirect poly order getter """
return self.__get_poly_order()
poly_order = property(___get_poly_order, ___set_poly_order,
doc="Get or set the polynomial order")
@property
def time_intervals(self):
"""
the time intervals of the events
:return:
"""
return self._time_intervals
def exposure_over_interval(self, tmin, tmax):
""" calculate the exposure over a given interval """
raise RuntimeError("Must be implemented in sub class")
def counts_over_interval(self, start, stop):
"""
return the number of counts in the selected interval
:param start: start of interval
:param stop: stop of interval
:return:
"""
# this will be a boolean list and the sum will be the
# number of events
raise RuntimeError("Must be implemented in sub class")
def count_per_channel_over_interval(self, start, stop):
"""
:param start:
:param stop:
:return:
"""
raise RuntimeError("Must be implemented in sub class")
def set_polynomial_fit_interval(self, *time_intervals, **options):
"""Set the time interval to fit the background.
Multiple intervals can be input as separate arguments
Specified as 'tmin-tmax'. Intervals are in seconds. Example:
set_polynomial_fit_interval("-10.0-0.0","10.-15.")
:param time_intervals: intervals to fit on
:param options:
"""
# Find out if we want to binned or unbinned.
# TODO: add the option to config file
if 'unbinned' in options:
unbinned = options.pop('unbinned')
assert type(unbinned) == bool, 'unbinned option must be True or False'
else:
# assuming unbinned
# could use config file here
# unbinned = threeML_config['ogip']['use-unbinned-poly-fitting']
unbinned = True
# we create some time intervals
poly_intervals = TimeIntervalSet.from_strings(*time_intervals)
# adjust the selections to the data
new_intervals = []
self._poly_selected_counts = []
self._poly_exposure = 0.
for i, time_interval in enumerate(poly_intervals):
t1 = time_interval.start_time
t2 = time_interval.stop_time
if (self._stop_time <= t1) or (t2 <= self._start_time):
custom_warnings.warn(
"The time interval %f-%f is out side of the arrival times and will be dropped" % (
t1, t2))
else:
if t1 < self._start_time:
custom_warnings.warn(
"The time interval %f-%f started before the first arrival time (%f), so we are changing the intervals to %f-%f" % (
t1, t2, self._start_time, self._start_time, t2))
t1 = self._start_time # + 1
if t2 > self._stop_time:
custom_warnings.warn(
"The time interval %f-%f ended after the last arrival time (%f), so we are changing the intervals to %f-%f" % (
t1, t2, self._stop_time, t1, self._stop_time))
t2 = self._stop_time # - 1.
new_intervals.append('%f-%f' % (t1, t2))
self._poly_selected_counts.append(self.count_per_channel_over_interval(t1,t2))
self._poly_exposure += self.exposure_over_interval(t1,t2)
# make new intervals after checks
poly_intervals = TimeIntervalSet.from_strings(*new_intervals)
self._poly_selected_counts = np.sum(self._poly_selected_counts, axis=0)
# set the poly intervals as an attribute
self._poly_intervals = poly_intervals
# Fit the events with the given intervals
if unbinned:
self._unbinned = True # keep track!
self._unbinned_fit_polynomials()
else:
self._unbinned = False
self._fit_polynomials()
# we have a fit now
self._poly_fit_exists = True
if self._verbose:
print("%s %d-order polynomial fit with the %s method" % (
self._fit_method_info['bin type'], self._optimal_polynomial_grade, self._fit_method_info['fit method']))
print('\n')
# recalculate the selected counts
if self._time_selection_exists:
self.set_active_time_intervals(*self._time_intervals.to_string().split(','))
def get_information_dict(self, use_poly=False, extract=False):
"""
Return a PHAContainer that can be read by different builders
:param use_poly: (bool) choose to build from the polynomial fits
"""
if not self._time_selection_exists:
raise RuntimeError('No time selection exists! Cannot calculate rates')
if extract:
is_poisson = True
counts_err = None
counts = self._poly_selected_counts
rates = old_div(self._counts, self._poly_exposure)
rate_err = None
exposure = self._poly_exposure
elif use_poly:
is_poisson = False
counts_err = self._poly_count_err
counts = self._poly_counts
rate_err = old_div(self._poly_count_err, self._exposure)
rates = old_div(self._poly_counts, self._exposure)
exposure = self._exposure
# removing negative counts
idx = counts < 0.
counts[idx] = 0.
counts_err[idx] = 0.
rates[idx] = 0.
rate_err[idx] = 0.
else:
is_poisson = True
counts_err = None
counts = self._counts
rates = old_div(self._counts, self._exposure)
rate_err = None
exposure = self._exposure
if self._native_quality is None:
quality = np.zeros_like(counts, dtype=int)
else:
quality = self._native_quality
container_dict = {}
container_dict['instrument'] = self._instrument
container_dict['telescope'] = self._mission
container_dict['tstart'] = self._time_intervals.absolute_start_time
container_dict['telapse'] = self._time_intervals.absolute_stop_time - self._time_intervals.absolute_start_time
container_dict['channel'] = np.arange(self._n_channels) + self._first_channel
container_dict['counts'] = counts
container_dict['counts error'] = counts_err
container_dict['rates'] = rates
container_dict['rate error'] = rate_err
container_dict['edges'] = self._edges
# check to see if we already have a quality object
if isinstance(quality, Quality):
container_dict['quality'] = quality
else:
container_dict['quality'] = Quality.from_ogip(quality)
# TODO: make sure the grouping makes sense
container_dict['backfile'] = 'NONE'
container_dict['grouping'] = np.ones(self._n_channels)
container_dict['exposure'] = exposure
# container_dict['response'] = self._response
return container_dict
def __repr__(self):
"""
Examine the currently selected info as well other things.
"""
return self._output().to_string()
def _output(self):
info_dict = collections.OrderedDict()
for i, interval in enumerate(self.time_intervals):
info_dict['active selection (%d)' % (i + 1)] = interval.__repr__()
info_dict['active deadtime'] = self._active_dead_time
if self._poly_fit_exists:
for i, interval in enumerate(self.poly_intervals):
info_dict['polynomial selection (%d)' % (i + 1)] = interval.__repr__()
info_dict['polynomial order'] = self._optimal_polynomial_grade
info_dict['polynomial fit type'] = self._fit_method_info['bin type']
info_dict['polynomial fit method'] = self._fit_method_info['fit method']
return pd.Series(info_dict, index=list(info_dict.keys()))
def _fit_global_and_determine_optimum_grade(self, cnts, bins, exposure):
"""
Provides the ability to find the optimum polynomial grade for *binned* counts by fitting the
total (all channels) to 0-4 order polynomials and then comparing them via a likelihood ratio test.
:param cnts: counts per bin
:param bins: the bins used
:param exposure: exposure per bin
:return: polynomial grade
"""
min_grade = 0
max_grade = 4
log_likelihoods = []
for grade in range(min_grade, max_grade + 1):
polynomial, log_like = polyfit(bins, cnts, grade, exposure)
log_likelihoods.append(log_like)
# Found the best one
delta_loglike = np.array([2 * (x[0] - x[1]) for x in zip(log_likelihoods[:-1], log_likelihoods[1:])])
# print("\ndelta log-likelihoods:")
# for i in range(max_grade):
# print("%s -> %s: delta Log-likelihood = %s" % (i, i + 1, deltaLoglike[i]))
# print("")
delta_threshold = 9.0
mask = (delta_loglike >= delta_threshold)
if (len(mask.nonzero()[0]) == 0):
# best grade is zero!
best_grade = 0
else:
best_grade = mask.nonzero()[0][-1] + 1
return best_grade
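# Worked example (illustrative numbers only): with log-likelihood gains
# delta_loglike = [50.0, 12.0, 3.0, 1.0], the mask (>= 9.0) is [True, True, False, False],
# so the last significant improvement is the step from grade 1 to 2 and best_grade = 2.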
def _unbinned_fit_global_and_determine_optimum_grade(self, events, exposure):
"""
Provides the ability to find the optimum polynomial grade for *unbinned* events by fitting the
total (all channels) to 0-4 order polynomials and then comparing them via a likelihood ratio test.
:param events: an event list
:param exposure: the exposure per event
:return: polynomial grade
"""
# Fit the sum of all the channels to determine the optimal polynomial
# grade
min_grade = 0
max_grade = 4
log_likelihoods = []
t_start = self._poly_intervals.start_times
t_stop = self._poly_intervals.stop_times
for grade in range(min_grade, max_grade + 1):
polynomial, log_like = unbinned_polyfit(events, grade, t_start, t_stop, exposure)
log_likelihoods.append(log_like)
# Found the best one
delta_loglike = np.array([2 * (x[0] - x[1]) for x in zip(log_likelihoods[:-1], log_likelihoods[1:])])
delta_threshold = 9.0
mask = (delta_loglike >= delta_threshold)
if (len(mask.nonzero()[0]) == 0):
# best grade is zero!
best_grade = 0
else:
best_grade = mask.nonzero()[0][-1] + 1
return best_grade
def _fit_polynomials(self):
raise NotImplementedError('this must be implemented in a subclass')
def _unbinned_fit_polynomials(self):
raise NotImplementedError('this must be implemented in a subclass')
def save_background(self, filename, overwrite=False):
"""
save the background to an HDF5 file
:param filename:
:return:
"""
# make the file name proper
filename = os.path.splitext(filename)
filename = "%s.h5" % filename[0]
filename_sanitized = sanitize_filename(filename)
# Check that it does not exists
if os.path.exists(filename_sanitized):
if overwrite:
try:
os.remove(filename_sanitized)
except:
raise IOError("The file %s already exists and cannot be removed (maybe you do not have "
"permissions to do so?). " % filename_sanitized)
else:
raise IOError("The file %s already exists!" % filename_sanitized)
with HDFStore(filename_sanitized) as store:
# extract the polynomial information and save it
if self._poly_fit_exists:
coeff = []
err = []
for poly in self._polynomials:
coeff.append(poly.coefficients)
err.append(poly.covariance_matrix)
df_coeff = pd.Series(coeff)
df_err = pd.Series(err)
else:
raise RuntimeError('the polynomials have not been fit yet')
df_coeff.to_hdf(store, 'coefficients')
df_err.to_hdf(store, 'covariance')
store.get_storer('coefficients').attrs.metadata = {'poly_order': self._optimal_polynomial_grade,
'poly_selections': list(zip(self._poly_intervals.start_times,
self._poly_intervals.stop_times)),
'unbinned': self._unbinned,
'fit_method': self._fit_method_info['fit method']}
if self._verbose:
print("\nSaved fitted background to %s.\n" % filename)
def restore_fit(self, filename):
filename_sanitized = sanitize_filename(filename)
with | HDFStore(filename_sanitized) | pandas.HDFStore |
import logging
from operator import itemgetter
from logging.config import dictConfig
from datetime import datetime, timedelta, date
from math import ceil
import dash
import dash_table
from dash_table.Format import Format, Scheme
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
import plotly.express as px
import pandas as pd
from chinese_calendar import get_holidays
import plotly.graph_objects as go
import numpy as np
from keysersoze.models import (
Deal,
Asset,
AssetMarketHistory,
)
from keysersoze.utils import (
get_accounts_history,
get_accounts_summary,
)
from keysersoze.apps.app import APP
from keysersoze.apps.utils import make_card_component
LOGGER = logging.getLogger(__name__)
dictConfig({
'version': 1,
'formatters': {
'simple': {
'format': '%(asctime)s - %(filename)s:%(lineno)s: %(message)s',
}
},
'handlers': {
'default': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple',
"stream": "ext://sys.stdout",
},
},
'loggers': {
'__main__': {
'handlers': ['default'],
'level': 'DEBUG',
'propagate': True
},
'keysersoze': {
'handlers': ['default'],
'level': 'DEBUG',
'propagate': True
}
}
})
pd.options.mode.chained_assignment = 'raise'
COLUMN_MAPPINGS = {
'code': '代码',
'name': '名称',
'ratio': '占比',
'return_rate': '收益率',
'cost': '投入',
'avg_cost': '成本',
'price': '价格',
'price_date': '价格日期',
'amount': '份额',
'money': '金额',
'return': '收益',
'action': '操作',
'account': '账户',
'date': '日期',
'time': '时间',
'fee': '费用',
'position': '仓位',
'day_return': '日收益',
}
FORMATS = {
'价格日期': {'type': 'datetime', 'format': Format(nully='N/A')},
'日期': {'type': 'datetime', 'format': Format(nully='N/A')},
'时间': {'type': 'datetime', 'format': Format(nully='N/A')},
'占比': {'type': 'numeric', 'format': Format(scheme='%', precision=2)},
'收益率': {'type': 'numeric', 'format': Format(nully='N/A', scheme='%', precision=2)},
'份额': {'type': 'numeric', 'format': Format(nully='N/A', precision=2, scheme=Scheme.fixed)},
'金额': {'type': 'numeric', 'format': Format(nully='N/A', precision=2, scheme=Scheme.fixed)},
'费用': {'type': 'numeric', 'format': Format(nully='N/A', precision=2, scheme=Scheme.fixed)},
'投入': {'type': 'numeric', 'format': Format(nully='N/A', precision=2, scheme=Scheme.fixed)},
'成本': {'type': 'numeric', 'format': Format(nully='N/A', precision=4, scheme=Scheme.fixed)},
'价格': {'type': 'numeric', 'format': Format(nully='N/A', precision=4, scheme=Scheme.fixed)},
'收益': {'type': 'numeric', 'format': Format(nully='N/A', precision=2, scheme=Scheme.fixed)},
}
ACCOUNT_PRIORITIES = {
'长期投资': 0,
'长赢定投': 1,
'U定投': 2,
'投资实证': 3,
'稳健投资': 4,
'证券账户': 6,
'蛋卷基金': 7,
}
all_accounts = [deal.account for deal in Deal.select(Deal.account).distinct()]
all_accounts.sort(key=lambda name: ACCOUNT_PRIORITIES.get(name, 1000))
layout = html.Div(
[
dcc.Store(id='assets'),
dcc.Store(id='stats'),
dcc.Store(id='accounts_history'),
dcc.Store(id='index_history'),
dcc.Store(id='deals'),
dcc.Store(id='start-date'),
dcc.Store(id='end-date'),
html.H3('投资账户概览'),
dbc.Checklist(
id='show-money',
options=[{'label': '显示金额', 'value': 'show'}],
value=[],
switch=True,
),
html.Hr(),
dbc.InputGroup(
[
dbc.InputGroupAddon('选择账户', addon_type='prepend', className='mr-2'),
dbc.Checklist(
id='checklist',
options=[{'label': a, 'value': a} for a in all_accounts],
value=[all_accounts[0]],
inline=True,
className='my-auto'
),
],
className='my-2',
),
html.Div(id='account-summary'),
html.Br(),
dbc.Tabs([
dbc.Tab(
label='资产走势',
children=[
dcc.Graph(
id='asset-history-chart',
config={
'displayModeBar': False,
}
),
]
),
dbc.Tab(
label='累计收益走势',
children=[
dcc.Graph(
id="total-return-chart",
config={
'displayModeBar': False
}
),
]
),
dbc.Tab(
label='累计收益率走势',
children=[
dbc.InputGroup(
[
dbc.InputGroupAddon('比较基准', addon_type='prepend', className='mr-2'),
dbc.Checklist(
id='compare',
options=[
{'label': '中证全指', 'value': '000985.CSI'},
{'label': '上证指数', 'value': '000001.SH'},
{'label': '深证成指', 'value': '399001.SZ'},
{'label': '沪深300', 'value': '000300.SH'},
{'label': '中证500', 'value': '000905.SH'},
],
value=['000985.CSI'],
inline=True,
className='my-auto'
),
],
className='my-2',
),
dcc.Graph(
id="return-curve-chart",
config={
'displayModeBar': False
}
),
]
),
dbc.Tab(
label='日收益历史',
children=[
dcc.Graph(
id="day-return-chart",
config={
'displayModeBar': False
},
),
]
),
]),
html.Center(
[
dbc.RadioItems(
id="date-range",
className='btn-group',
labelClassName='btn btn-light border',
labelCheckedClassName='active',
options=[
{"label": "近一月", "value": "1m"},
{"label": "近三月", "value": "3m"},
{"label": "近半年", "value": "6m"},
{"label": "近一年", "value": "12m"},
{"label": "今年以来", "value": "thisyear"},
{"label": "本月", "value": "thismonth"},
{"label": "本周", "value": "thisweek"},
{"label": "所有", "value": "all"},
{"label": "自定义", "value": "customized"},
],
value="thisyear",
),
],
className='radio-group',
),
html.Div(
id='customized-date-range-container',
children=[
dcc.RangeSlider(
id='customized-date-range',
min=2018,
max=2022,
step=None,
marks={year: str(year) for year in range(2018, 2023)},
value=[2018, 2022],
)
],
className='my-auto ml-0 mr-0',
style={'max-width': '100%', 'display': 'none'}
),
html.Hr(),
dbc.Tabs([
dbc.Tab(
label='持仓明细',
children=[
html.Br(),
dbc.Checklist(
id='show-cleared',
options=[{'label': '显示清仓品种', 'value': 'show'}],
value=[],
switch=True,
),
html.Div(id='assets_cards'),
html.Center(
[
dbc.RadioItems(
id="assets-pagination",
className="btn-group",
labelClassName="btn btn-secondary",
labelCheckedClassName="active",
options=[
{"label": "1", "value": 0},
],
value=0,
),
],
className='radio-group',
),
]
),
dbc.Tab(
label='交易记录',
children=[
html.Br(),
html.Div(id='deals_table'),
html.Center(
[
dbc.RadioItems(
id="deals-pagination",
className="btn-group",
labelClassName="btn btn-secondary",
labelCheckedClassName="active",
options=[
{"label": "1", "value": 0},
],
value=0,
),
],
className='radio-group',
),
]
),
])
],
)
@APP.callback(
[
dash.dependencies.Output('assets', 'data'),
dash.dependencies.Output('stats', 'data'),
dash.dependencies.Output('accounts_history', 'data'),
dash.dependencies.Output('index_history', 'data'),
dash.dependencies.Output('deals', 'data'),
dash.dependencies.Output('deals-pagination', 'options'),
dash.dependencies.Output('assets-pagination', 'options'),
],
[
dash.dependencies.Input('checklist', 'value'),
dash.dependencies.Input('compare', 'value'),
],
)
def update_after_check(accounts, index_codes):
accounts = accounts or all_accounts
summary_data, assets_data = get_accounts_summary(accounts)
history = get_accounts_history(accounts).to_dict('records')
history.sort(key=itemgetter('account', 'date'))
index_history = []
for index_code in index_codes:
index = Asset.get(zs_code=index_code)
for record in index.history:
index_history.append({
'account': index.name,
'date': record.date,
'price': record.close_price
})
index_history.sort(key=itemgetter('account', 'date'))
deals = []
for record in Deal.get_deals(accounts):
deals.append({
'account': record.account,
'time': record.time,
'code': record.asset.zs_code,
'name': record.asset.name,
'action': record.action,
'amount': record.amount,
'price': record.price,
'money': record.money,
'fee': record.fee,
})
deals.sort(key=itemgetter('time'), reverse=True)
valid_deals_count = 0
for item in deals:
if item['action'] == 'fix_cash':
continue
if item['code'] == 'CASH' and item['action'] == 'reinvest':
continue
valid_deals_count += 1
pagination_options = [
{'label': idx + 1, 'value': idx}
for idx in range(ceil(valid_deals_count / 100))
]
assets_pagination_options = []
return (
assets_data,
summary_data,
history,
index_history,
deals,
pagination_options,
assets_pagination_options
)
@APP.callback(
dash.dependencies.Output('account-summary', 'children'),
[
dash.dependencies.Input('stats', 'data'),
dash.dependencies.Input('show-money', 'value')
]
)
def update_summary(stats, show_money):
body_content = []
body_content.append(
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': '总资产',
'color': 'bg-primary',
},
{
'item_cls': html.H4,
'type': 'money',
'content': stats['money'],
'color': 'bg-primary',
},
],
show_money=show_money,
inverse=True
)
)
body_content.append(
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': '日收益',
'color': 'bg-primary',
},
{
'item_cls': html.H4,
'type': 'money',
'content': stats['day_return'],
'color': 'bg-primary',
},
{
'item_cls': html.P,
'type': 'percent',
'content': stats['day_return_rate'],
'color': 'bg-primary',
},
],
show_money=show_money,
inverse=True
)
)
body_content.append(
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': '累计收益',
'color': 'bg-primary',
},
{
'item_cls': html.H4,
'type': 'money',
'content': stats['return'],
'color': 'bg-primary',
},
{
'item_cls': html.P,
'type': 'percent',
'content': stats['return_rate'] if stats['amount'] > 0 else 'N/A(已清仓)',
'color': 'bg-primary',
},
],
show_money=show_money,
inverse=True
)
)
body_content.append(
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': '年化收益率',
'color': 'bg-primary',
},
{
'item_cls': html.H4,
'type': 'percent',
'content': stats['annualized_return'],
'color': 'bg-primary',
},
],
show_money=show_money,
inverse=True,
)
)
body_content.append(
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': '现金',
'color': 'bg-primary',
},
{
'item_cls': html.H4,
'type': 'money',
'content': stats['cash'],
'color': 'bg-primary',
},
],
show_money=show_money,
inverse=True
)
)
body_content.append(
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': '仓位',
'color': 'bg-primary',
},
{
'item_cls': html.H4,
'type': 'percent',
'content': stats['position'],
'color': 'bg-primary',
},
],
show_money=show_money,
inverse=True
)
)
card = dbc.Card(
[
dbc.CardBody(
dbc.Row(
[dbc.Col([card_component]) for card_component in body_content],
),
className='py-2',
)
],
className='my-auto',
color='primary',
)
return [card]
@APP.callback(
dash.dependencies.Output('assets_cards', 'children'),
[
dash.dependencies.Input('assets', 'data'),
dash.dependencies.Input('show-money', 'value'),
dash.dependencies.Input('show-cleared', 'value'),
]
)
def update_assets_table(assets_data, show_money, show_cleared):
cards = [html.Hr()]
for row in assets_data:
if not show_cleared and abs(row['amount']) <= 0.001:
continue
if row["code"] in ('CASH', 'WZZNCK'):
continue
cards.append(make_asset_card(row, show_money))
cards.append(html.Br())
return cards
def make_asset_card(asset_info, show_money=True):
def get_color(value):
if not isinstance(value, (float, int)):
return None
if value > 0:
return 'text-danger'
if value < 0:
return 'text-success'
return None
header = dbc.CardHeader([
html.H5(
html.A(
f'{asset_info["name"]}({asset_info["code"]})',
href=f'/asset/{asset_info["code"].replace(".", "").lower()}',
target='_blank'
),
className='mb-0'
),
html.P(f'更新日期 {asset_info["price_date"]}', className='mb-0'),
])
body_content = []
body_content.append(
make_card_component(
[
{'item_cls': html.P, 'type': 'text', 'content': '持有金额/份额'},
{'item_cls': html.H4, 'type': 'money', 'content': asset_info['money']},
{'item_cls': html.P, 'type': 'amount', 'content': asset_info['amount']}
],
show_money=show_money,
)
)
body_content.append(
make_card_component(
[
{'item_cls': html.P, 'type': 'text', 'content': '日收益'},
{
'item_cls': html.H4,
'type': 'money',
'content': asset_info['day_return'],
'color': get_color(asset_info['day_return']),
},
{
'item_cls': html.P,
'type': 'percent',
'content': asset_info['day_return_rate'],
'color': get_color(asset_info['day_return']),
}
],
show_money=show_money,
)
)
body_content.append(
make_card_component(
[
{'item_cls': html.P, 'type': 'text', 'content': '现价/成本'},
{'item_cls': html.H4, 'type': 'price', 'content': asset_info['price']},
{'item_cls': html.P, 'type': 'price', 'content': asset_info['avg_cost'] or 'N/A'}
],
show_money=show_money,
)
)
asset = Asset.get(zs_code=asset_info['code'])
prices = []
for item in asset.history.order_by(AssetMarketHistory.date.desc()).limit(10):
if item.close_price is not None:
prices.append({
'date': item.date,
'price': item.close_price,
})
else:
prices.append({
'date': item.date,
'price': item.nav,
})
if len(prices) >= 10:
break
prices.sort(key=itemgetter('date'))
df = pd.DataFrame(prices)
df['date'] = pd.to_datetime(df['date'])
fig = go.Figure()
fig.add_trace(
go.Scatter(
x=df['date'],
y=df['price'],
showlegend=False,
marker={'color': 'orange'},
mode='lines+markers',
)
)
fig.update_layout(
width=150,
height=100,
margin={'l': 4, 'r': 4, 'b': 20, 't': 10, 'pad': 4},
xaxis={'showticklabels': False, 'showgrid': False, 'fixedrange': True},
yaxis={'showticklabels': False, 'showgrid': False, 'fixedrange': True},
)
fig.update_xaxes(
rangebreaks=[
{'bounds': ["sat", "mon"]},
{
'values': get_holidays(df.date.min(), df.date.max(), False)
}
]
)
body_content.append(
make_card_component(
[
{'item_cls': html.P, 'type': 'text', 'content': '十日走势'},
{
'item_cls': None,
'type': 'figure',
'content': fig
}
],
show_money=show_money
)
)
body_content.append(
make_card_component(
[
{'item_cls': html.P, 'type': 'text', 'content': '累计收益'},
{
'item_cls': html.H4,
'type': 'money',
'content': asset_info['return'],
'color': get_color(asset_info['return']),
},
{
'item_cls': html.P,
'type': 'percent',
'content': asset_info['return_rate'],
'color': get_color(asset_info['return']),
}
],
show_money=show_money,
)
)
body_content.append(
make_card_component(
[
{'item_cls': html.P, 'type': 'text', 'content': '占比'},
{'item_cls': html.H4, 'type': 'percent', 'content': asset_info['position']},
],
show_money=show_money,
)
)
card = dbc.Card(
[
header,
dbc.CardBody(
dbc.Row(
[dbc.Col([card_component]) for card_component in body_content],
),
className='py-2',
)
],
className='my-auto'
)
return card
@APP.callback(
dash.dependencies.Output('return-curve-chart', 'figure'),
[
dash.dependencies.Input('accounts_history', 'data'),
dash.dependencies.Input('index_history', 'data'),
dash.dependencies.Input('start-date', 'data'),
dash.dependencies.Input('end-date', 'data'),
]
)
def draw_return_chart(accounts_history, index_history, start_date, end_date):
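    # Cumulative return of the aggregate account, rebased to the first date in
    # the selected window, with any benchmark index histories overlaid.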
df = pd.DataFrame(accounts_history)[['amount', 'account', 'date', 'nav']]
df['date'] = pd.to_datetime(df['date'])
if start_date is not None:
df = df[df['date'] >= pd.to_datetime(start_date)]
if end_date is not None:
df = df[df['date'] < pd.to_datetime(end_date)]
df = df[df['account'] == '总计']
df['account'] = '我的'
fig = go.Figure()
if len(df) > 0:
start_nav = float(df[df['date'] == df['date'].min()].nav)
df.loc[:, 'nav'] = df['nav'] / start_nav - 1.0
df.rename(columns={'nav': 'return'}, inplace=True)
df = df.drop(df[df['amount'] <= 0].index)[['account', 'date', 'return']]
start_date = df.date.min()
fig.add_trace(
go.Scatter(
x=df['date'],
y=df['return'],
marker={'color': 'orange'},
name='我的',
mode='lines',
)
)
index_df = None
if index_history:
index_history = pd.DataFrame(index_history)
index_history['date'] = pd.to_datetime(index_history['date'])
if start_date is not None:
index_history = index_history[index_history['date'] >= pd.to_datetime(start_date)]
if end_date is not None:
index_history = index_history[index_history['date'] < pd.to_datetime(end_date)]
index_names = set(index_history.account)
for name in index_names:
cur_df = index_history[index_history['account'] == name].copy()
cur_df.loc[:, 'price'] = cur_df['price'] / cur_df.iloc[0].price - 1.0
cur_df.rename(columns={'price': 'return'}, inplace=True)
if index_df is None:
index_df = cur_df
else:
index_df = pd.concat([index_df, cur_df], ignore_index=True)
fig.add_trace(
go.Scatter(x=cur_df['date'], y=cur_df['return'], name=name)
)
fig.update_layout(
legend_title_text='',
legend=dict(yanchor="top", y=0.99, xanchor="left", x=0.01, font_size=14),
margin={'l': 4, 'r': 4, 'b': 20, 't': 10, 'pad': 4},
yaxis_tickformat='%',
xaxis_tickformat="%m/%d\n%Y",
hovermode='x unified',
xaxis={'fixedrange': True},
yaxis={'fixedrange': True},
)
return fig
@APP.callback(
[
dash.dependencies.Output('profit_detail_graph', 'figure'),
dash.dependencies.Output('loss_detail_graph', 'figure'),
dash.dependencies.Output('quit_profits_table', 'columns'),
dash.dependencies.Output('quit_profits_table', 'data'),
dash.dependencies.Output('quit_loss_table', 'columns'),
dash.dependencies.Output('quit_loss_table', 'data'),
],
[
dash.dependencies.Input('stats', 'data'),
dash.dependencies.Input('assets', 'data'),
dash.dependencies.Input('show-money', 'value')
]
)
def update_return_details(stats_data, assets_data, show_money):
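    # Split per-asset returns into profit and loss treemaps (realized vs.
    # floating) plus the two tables; absolute values are rescaled when the
    # money display is switched off.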
stats = stats_data
total_return = stats['money'] - stats['amount']
assets = pd.DataFrame(assets_data)
profits, loss, total_profit = [], [], 0
for _, row in assets.iterrows():
if row['code'] == 'CASH':
continue
return_value = row['return']
if abs(return_value) < 0.001:
continue
if return_value > 0:
profits.append({
'code': row['code'],
'name': row['name'],
'branch': '盈利',
'return_value': return_value,
'category': '实盈' if row['amount'] <= 0 else '浮盈',
})
else:
loss.append({
'code': row['code'],
'name': row['name'],
'branch': '亏损',
'return_value': abs(return_value),
'category': '实亏' if row['amount'] <= 0 else '浮亏',
})
total_profit += return_value
if abs(total_return - total_profit) > 0.001:
profits.append({
'category': '实盈',
'code': 'CASH',
'name': '现金',
'branch': '盈利',
'return_value': round(total_return - total_profit, 2),
})
if not show_money:
profit_sum = sum([item['return_value'] for item in profits])
for item in profits:
item['return_value'] = round(10000 * item['return_value'] / profit_sum, 2)
loss_sum = sum([item['return_value'] for item in loss])
for item in loss:
item['return_value'] = round(10000 * item['return_value'] / loss_sum, 2)
profits = profits or [{
'code': '',
'name': '',
'branch': '盈利',
'category': '实盈',
'return_value': 0,
}]
profits = pd.DataFrame(profits)
if not show_money:
profits.loc[:, 'return_value'] = profits['return_value'] / 10000
profit_fig = px.treemap(
profits,
path=['branch', 'category', 'name'],
values='return_value',
branchvalues="total",
color='name',
)
profit_fig.update_layout(margin={'l': 4, 'r': 4, 'b': 20, 't': 10, 'pad': 4})
loss = loss or [{
'code': '',
'name': '',
'branch': '亏损: 无',
'category': '实亏',
'return_value': 0,
}]
loss = pd.DataFrame(loss)
if not show_money:
loss.loc[:, 'return_value'] = loss['return_value'] / 10000
loss_fig = px.treemap(
loss,
path=['branch', 'category', 'name'],
values='return_value',
branchvalues="total",
color='name',
)
loss_fig.update_layout(margin={'l': 4, 'r': 4, 'b': 20, 't': 10, 'pad': 4})
df = profits[['code', 'name', 'return_value']]
df = df.rename(columns={'return_value': '盈利', **COLUMN_MAPPINGS})
columns1, columns2 = [], []
for name in df.columns:
if name != '盈利':
columns1.append({'id': name, 'name': name})
columns2.append({'id': name, 'name': name})
continue
column = {'type': 'numeric'}
if not show_money:
column['format'] = Format(scheme='%', precision=2)
else:
column['format'] = Format(scheme=Scheme.fixed, precision=2)
columns1.append({'id': '盈利', 'name': '盈利', **column})
columns2.append({'id': '亏损', 'name': '亏损', **column})
data1 = df.to_dict('records')
data1.sort(key=itemgetter('盈利'), reverse=True)
df = loss[['code', 'name', 'return_value']]
df = df.rename(columns={'return_value': '亏损', **COLUMN_MAPPINGS})
data2 = [item for item in df.to_dict('records') if item['名称']]
data2.sort(key=itemgetter('亏损'), reverse=True)
return profit_fig, loss_fig, columns1, data1, columns2, data2
@APP.callback(
dash.dependencies.Output('deals_table', 'children'),
[
dash.dependencies.Input('deals', 'data'),
dash.dependencies.Input('show-money', 'value'),
dash.dependencies.Input('deals-pagination', 'value'),
]
)
def add_deal_record(deals, show_money, page_num):
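    # One card per trade, 100 per page; internal cash adjustments and cash
    # reinvestment records are skipped.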
cards = []
deals = [
item for item in deals
if item['action'] != 'fix_cash' and not (
item['code'] == 'CASH' and item['action'] == 'reinvest'
)
]
for row in deals[page_num * 100:(page_num + 1) * 100]:
cards.append(make_deal_card(row, show_money))
cards.append(html.Br())
return cards
def make_deal_card(deal_info, show_money=False):
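    # A single trade card: action, asset link and timestamp on the left,
    # followed by shares/price and money/fee columns.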
action_mappings = {
'transfer_in': '转入',
'transfer_out': '转出',
'buy': '买入',
'sell': '卖出',
'reinvest': '红利再投资',
'bonus': '现金分红',
'spin_off': '拆分/合并'
}
body_content = []
if deal_info['code'] not in ('CASH', 'WZZNCK'):
body_content.append(
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': f'{action_mappings[deal_info["action"]]}',
},
{
'item_cls': html.H5,
'type': 'text',
'content': html.A(
f'{deal_info["name"]}({deal_info["code"]})',
href=f'/asset/{deal_info["code"].replace(".", "").lower()}',
target='_blank'
),
},
{
'item_cls': html.P,
'type': 'text',
'content': pd.to_datetime(deal_info['time']).strftime('%Y-%m-%d %H:%M:%S'),
}
],
show_money=show_money
)
)
else:
body_content.append(
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': f'{action_mappings[deal_info["action"]]}',
},
{
'item_cls': html.H5,
'type': 'text',
'content': deal_info['name'],
},
{
'item_cls': html.P,
'type': 'text',
'content': pd.to_datetime(deal_info['time']).strftime('%Y-%m-%d %H:%M:%S'),
}
],
show_money=show_money
)
)
body_content.extend([
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': '份额/价格',
},
{
'item_cls': html.H5,
'type': 'amount',
'content': deal_info['amount'],
},
{
'item_cls': html.P,
'type': 'price',
'content': deal_info['price'],
}
],
show_money=show_money
),
make_card_component(
[
{
'item_cls': html.P,
'type': 'text',
'content': '金额/费用',
},
{
'item_cls': html.H5,
'type': 'money',
'content': deal_info['money'],
},
{
'item_cls': html.P,
'type': 'money',
'content': deal_info['fee'],
}
],
show_money=show_money
)
])
card = dbc.Card(
[
dbc.CardBody(
dbc.Row(
[
dbc.Col([card_component], width=6 if idx == 0 else 3)
for idx, card_component in enumerate(body_content)
],
),
className='py-2',
)
],
className='my-auto'
)
return card
@APP.callback(
dash.dependencies.Output('customized-date-range-container', 'style'),
dash.dependencies.Input('date-range', 'value'),
)
def toggle_datepicker(date_range):
if date_range == 'customized':
return {'display': 'block'}
return {'display': 'none'}
@APP.callback(
[
dash.dependencies.Output('start-date', 'data'),
dash.dependencies.Output('end-date', 'data'),
],
[
dash.dependencies.Input('date-range', 'value'),
dash.dependencies.Input('customized-date-range', 'value'),
]
)
def update_return_range(date_range, customized_date_range):
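    # Map the selected range key to a concrete [start, end) date window;
    # 'customized' interprets the year-range slider.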
start_date, end_date = None, None
if date_range == '1m':
start_date = (datetime.now() - timedelta(days=30)).date()
elif date_range == '3m':
        start_date = (datetime.now() - timedelta(days=90)).date()
elif date_range == '6m':
start_date = (datetime.now() - timedelta(days=180)).date()
elif date_range == '12m':
start_date = (datetime.now() - timedelta(days=365)).date()
elif date_range == 'thisyear':
start_date = datetime.now().replace(month=1, day=1).date()
elif date_range == 'thismonth':
start_date = datetime.now().replace(day=1).date()
elif date_range == 'thisweek':
today = datetime.now().date()
start_date = today - timedelta(days=today.weekday())
elif date_range == 'customized' and customized_date_range:
start_year, end_year = customized_date_range
start_date = date(start_year, 1, 1)
end_date = date(end_year, 1, 1)
return start_date, end_date
@APP.callback(
dash.dependencies.Output('asset-history-chart', 'figure'),
[
dash.dependencies.Input('accounts_history', 'data'),
dash.dependencies.Input('show-money', 'value'),
dash.dependencies.Input('start-date', 'data'),
dash.dependencies.Input('end-date', 'data'),
]
)
def draw_asset_history(accounts_history, show_money, start_date, end_date):
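    # Area chart of total invested amount vs. total asset value; both series are
    # rebased to the first record when the money display is switched off.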
accounts_history.sort(key=itemgetter('date'))
df = pd.DataFrame(accounts_history)
df = df[df['account'] == '总计']
df.date = pd.to_datetime(df.date)
if start_date is not None:
df = df[df['date'] >= pd.to_datetime(start_date)]
if end_date is not None:
df = df[df['date'] < pd.to_datetime(end_date)]
if not show_money:
df.loc[:, "amount"] = df.amount / accounts_history[0]['amount']
df.loc[:, "money"] = df.money / accounts_history[0]['amount']
df["color"] = np.where(df.money > df.amount, 'red', 'green')
fig = go.Figure()
fig.add_trace(
go.Scatter(
x=df.date,
y=df.amount,
name='总投入',
marker={'color': 'green'},
mode='lines',
)
)
fig.add_trace(
go.Scatter(
x=df.date,
y=df.money,
name='总资产',
fill='tonexty',
marker={'color': 'red'},
mode='lines',
)
)
fig.update_layout(
xaxis_rangeslider_visible=False,
legend=dict(yanchor="top", y=0.99, xanchor="left", x=0.01, font_size=14),
margin={'l': 4, 'r': 4, 'b': 20, 't': 10, 'pad': 4},
xaxis={'fixedrange': True},
yaxis={'fixedrange': True},
hovermode='x unified',
)
fig.update_xaxes(tickformat="%m/%d\n%Y")
return fig
@APP.callback(
dash.dependencies.Output('portfolio-analysis', 'children'),
dash.dependencies.Input('assets', 'data'),
)
def update_porfolio_analysis(assets):
return html.P("hello")
@APP.callback(
dash.dependencies.Output('total-return-chart', 'figure'),
[
dash.dependencies.Input('accounts_history', 'data'),
dash.dependencies.Input('start-date', 'data'),
dash.dependencies.Input('end-date', 'data'),
dash.dependencies.Input('show-money', 'value')
]
)
def draw_total_return_chart(accounts_history, start_date, end_date, show_money):
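    # Cumulative return relative to the first point in the window, with the
    # maximum annotated; values are scaled down when the money display is off.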
df = pd.DataFrame(accounts_history)
df['date'] = pd.to_datetime(df['date'])
if start_date is not None:
df = df[df['date'] >= pd.to_datetime(start_date)]
if end_date is not None:
df = df[df['date'] < pd.to_datetime(end_date)]
df = df[df['account'] == '总计']
df.loc[:, 'return'] -= df.iloc[0]['return']
df['account'] = '我的'
if not show_money:
max_return = df['return'].abs().max()
df.loc[:, 'return'] = df['return'] / max_return
fig = go.Figure()
fig.add_trace(
go.Scatter(
x=df['date'],
y=df['return'],
marker={'color': 'orange'},
mode='lines',
)
)
max_idx = df['return'].argmax()
fig.add_annotation(
x=df.iloc[max_idx]['date'],
y=df.iloc[max_idx]['return'],
text=f'最大值: {df.iloc[max_idx]["return"]:0.2f}',
showarrow=True,
arrowhead=1
)
fig.update_layout(
legend_title_text='',
xaxis_tickformat='%m/%d\n%Y',
margin={'l': 4, 'r': 4, 'b': 20, 't': 10, 'pad': 4},
xaxis={'fixedrange': True},
yaxis={'fixedrange': True},
hovermode='x unified',
)
return fig
@APP.callback(
dash.dependencies.Output('day-return-chart', 'figure'),
[
dash.dependencies.Input('accounts_history', 'data'),
dash.dependencies.Input('start-date', 'data'),
dash.dependencies.Input('end-date', 'data'),
dash.dependencies.Input('show-money', 'value')
]
)
def draw_day_return_chart(accounts_history, start_date, end_date, show_money):
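    # Daily return of the aggregate account over the selected date window.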
df = pd.DataFrame(accounts_history)
df['date'] = pd.to_datetime(df['date'])
if start_date is not None:
df = df[df['date'] >= pd.to_datetime(start_date)]
if end_date is not None:
        df = df[df['date'] < pd.to_datetime(end_date)]
# LSA
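# Latent Semantic Analysis on the Enron bag-of-words corpus: build a
# document-term matrix from the docword/vocab files, keep the top 100 singular
# components and reconstruct a low-rank approximation of the counts.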
import pandas as pd
import numpy as np
from scipy import linalg
#import set
edata = pd.read_csv("docword.enron.txt", skiprows=3, sep = ' ', header=None)
evocab = pd.read_csv("vocab.enron.txt", header=None)
evocab.columns = ['word']
edata.columns = ['docid','wordid','freq']
# Taking a sample data set
edata = edata.iloc[:10000,:]
evocab.index = evocab.index + 1
wc = edata.groupby('wordid')['freq'].sum()
# Applying pivot
bag_of_words =edata.pivot(index='docid', columns='wordid', values='freq')
bag_of_words = bag_of_words.fillna(0)
# DataFrame.to_sparse() no longer exists in current pandas, and scipy.linalg.svd
# operates on a dense array anyway, so use the dense document-term matrix.
U, s, V = linalg.svd(bag_of_words.to_numpy(), full_matrices=False)
red_U = U[:,:100]
red_V = V[:100,:]
red_s = np.diag(s[:100])
reconstructedMatrix = np.dot(np.dot(red_U,red_s),red_V)
df_trans = pd.DataFrame(reconstructedMatrix, columns=bag_of_words.columns)
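# --- illustrative extension (not part of the original script) ---
# One way the vocab table loaded above could be used: list the ten terms that
# weigh most heavily on the first latent component. `top_terms` is an assumed
# name; everything else comes from the variables defined in this script.
first_component = pd.Series(red_V[0, :], index=bag_of_words.columns)
top_terms = evocab.loc[first_component.abs().sort_values(ascending=False).head(10).index, 'word']
print(top_terms)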
#!/usr/bin/env python
# -*- coding:utf-8 -*-
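# Map PCAWG tumour subtypes to the TCGA (US) projects they contain: keep rows
# with a Tier 2 organ system, explode the comma-separated contributing projects,
# select the *-US cohorts, add two manual matches, and record the subtypes with
# no TCGA counterpart before loading the curated signature/cancer-type table.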
import pandas as pd
metadata = pd.read_csv('pcawg_download/WGS.metadata.tsv', sep='\t')
histo_data = pd.read_excel('pcawg_download/pcawg_specimen_histology_August2016_v9.xlsx')
match_cohort = pd.read_excel('pcawg_download/tumour_subtype_consolidation_map.tsv.xlsx', sheet_name=1)
match_cohort = match_cohort.dropna(subset=['Tier 2 (organ system)'])
match_cohort2 = match_cohort.join(
match_cohort.pop('Contributing projects').str.split(',',expand=True))
cols = match_cohort.columns.to_list()
match_cohort_ext = pd.melt(match_cohort2, id_vars=cols, value_name='cohort').\
dropna(subset=['cohort'])
TCGA_cohorts = match_cohort_ext[match_cohort_ext.cohort.str.contains('US')]
manual_additions = {'Abbreviation': ['Eso-AdenoCA', 'Panc-AdenoCA'],
'cohort': ['ESCA-US', 'PAAD-US']}
TCGA_cohorts_complete = pd.concat(
[TCGA_cohorts,
pd.DataFrame(manual_additions, columns=TCGA_cohorts.columns)])
missing_cohorts = set(match_cohort_ext.Abbreviation).\
difference(TCGA_cohorts_complete.Abbreviation)
'''
{'Biliary-AdenoCA',
'Bone-Benign',
'Bone-Epith',
'Bone-Osteosarc',
'CNS-Medullo',
'CNS-PiloAstro',
'Lymph-CLL',
'Myeloid-MPN',
'Panc-Endocrine'}
'''
TCGA_signature_match = pd.read_csv('external_data/curated_match_signature_cancertype_tcgawes_literature.csv', sep='\t')
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 30 10:31:31 2021
@author: Administrator
"""
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 22 11:25:22 2021
@author: Administrator
"""
import h5py
# from pyram.PyRAM import PyRAM
from scipy import interpolate
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime as dt
import glob
import os
import sys
os.chdir(r'D:\passive_acoustics\propagation_modelling')
import gsw
from netCDF4 import Dataset
import pandas as pd
import cartopy
import cartopy.crs as ccrs
from scipy.ndimage import gaussian_filter
import arlpy.uwapm as pm
modelfrec=500
# load data and slice out region of interest
# read mapdata
latlim=[-62,-56]
lonlim=[-(46+5),-(46-5)]
spacer=1
gebcofile=r"C:\Users\a5278\Documents\gebco_2020_netcdf\GEBCO_2020.nc"
gebco = Dataset(gebcofile, mode='r')
g_lons = gebco.variables['lon'][:]
g_lon_inds = np.where((g_lons>=lonlim[0]) & (g_lons<=lonlim[1]))[0]
# jump over entries to reduce data
g_lon_inds=g_lon_inds[::spacer]
g_lons = g_lons[g_lon_inds]
g_lats = gebco.variables['lat'][:]
g_lat_inds = np.where((g_lats>=latlim[0]) & (g_lats<=latlim[1]))[0]
# jump over entries to reduce data
g_lat_inds=g_lat_inds[::spacer]
g_lats = g_lats[g_lat_inds]
d = gebco.variables['elevation'][g_lat_inds, g_lon_inds]
gebco.close()
#%% get bathymetry slices
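# For each of 360 bearings from the mooring location, step 500 points at 1 km
# spacing along the geodesic, look up the nearest GEBCO depth with pyresample's
# KD-tree, and store a range/depth profile per bearing for the propagation model.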
import pyresample
lo,la=np.meshgrid(g_lons, g_lats)
grid = pyresample.geometry.GridDefinition(lats=la, lons=lo)
m_loc=[-( 45+57.548/60) , -(60+24.297/60)]
from pyproj import Geod
geod = Geod("+ellps=WGS84")
bearings=np.arange(360)
bathy_dict={}
points_lat=pd.DataFrame()
points_lon=pd.DataFrame()
for b in bearings:
print(b)
points = geod.fwd_intermediate(lon1=m_loc[0],lat1=m_loc[1],azi1=b,npts=500,del_s=1000 )
p_lon=points[3]
p_lat=points[4]
points_lat=pd.concat( [points_lat,pd.DataFrame(p_lat)],ignore_index=True,axis=1 )
points_lon=pd.concat( [points_lon,pd.DataFrame(p_lon)],ignore_index=True,axis=1 )
swath = pyresample.geometry.SwathDefinition(lons=p_lon, lats=p_lat)
# Determine nearest (w.r.t. great circle distance) neighbour in the grid.
_, _, index_array, distance_array = pyresample.kd_tree.get_neighbour_info(
source_geo_def=grid, target_geo_def=swath, radius_of_influence=500000,
neighbours=1)
# get_neighbour_info() returns indices in the flattened lat/lon grid. Compute
# the 2D grid indices:
index_array_2d = np.unravel_index(index_array, grid.shape)
value = d[index_array_2d[0],index_array_2d[1]]
dvec=np.arange(0,1000*500,1000)
bb=np.transpose(np.array([dvec,-value.data]))
bathy_dict[b]= bb.tolist()
timevec = pd.Series(pd.date_range(start=pd.Timestamp('2016-01-01')
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_integer
import pandas as pd
from pandas import (
Series,
Timestamp,
date_range,
isna,
)
import pandas._testing as tm
def test_where_unsafe_int(any_signed_int_numpy_dtype):
s = Series(np.arange(10), dtype=any_signed_int_numpy_dtype)
mask = s < 5
s[mask] = range(2, 7)
expected = Series(
list(range(2, 7)) + list(range(5, 10)),
dtype=any_signed_int_numpy_dtype,
)
tm.assert_series_equal(s, expected)
def test_where_unsafe_float(float_numpy_dtype):
s = Series(np.arange(10), dtype=float_numpy_dtype)
mask = s < 5
s[mask] = range(2, 7)
data = list(range(2, 7)) + list(range(5, 10))
expected = Series(data, dtype=float_numpy_dtype)
tm.assert_series_equal(s, expected)
@pytest.mark.parametrize(
"dtype,expected_dtype",
[
(np.int8, np.float64),
(np.int16, np.float64),
(np.int32, np.float64),
(np.int64, np.float64),
(np.float32, np.float32),
(np.float64, np.float64),
],
)
def test_where_unsafe_upcast(dtype, expected_dtype):
# see gh-9743
s = Series(np.arange(10), dtype=dtype)
values = [2.5, 3.5, 4.5, 5.5, 6.5]
mask = s < 5
expected = Series(values + list(range(5, 10)), dtype=expected_dtype)
s[mask] = values
tm.assert_series_equal(s, expected)
def test_where_unsafe():
# see gh-9731
s = Series(np.arange(10), dtype="int64")
values = [2.5, 3.5, 4.5, 5.5]
mask = s > 5
expected = Series(list(range(6)) + values, dtype="float64")
s[mask] = values
tm.assert_series_equal(s, expected)
# see gh-3235
s = Series(np.arange(10), dtype="int64")
mask = s < 5
s[mask] = range(2, 7)
expected = Series(list(range(2, 7)) + list(range(5, 10)), dtype="int64")
tm.assert_series_equal(s, expected)
assert s.dtype == expected.dtype
s = Series(np.arange(10), dtype="int64")
mask = s > 5
s[mask] = [0] * 4
expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype="int64")
tm.assert_series_equal(s, expected)
s = Series(np.arange(10))
mask = s > 5
msg = "cannot assign mismatch length to masked array"
with pytest.raises(ValueError, match=msg):
s[mask] = [5, 4, 3, 2, 1]
with pytest.raises(ValueError, match=msg):
s[mask] = [0] * 5
# dtype changes
s = Series([1, 2, 3, 4])
result = s.where(s > 2, np.nan)
expected = Series([np.nan, np.nan, 3, 4])
tm.assert_series_equal(result, expected)
# GH 4667
# setting with None changes dtype
s = Series(range(10)).astype(float)
s[8] = None
result = s[8]
assert isna(result)
s = Series(range(10)).astype(float)
s[s > 8] = None
result = s[isna(s)]
expected = Series(np.nan, index=[9])
tm.assert_series_equal(result, expected)
def test_where():
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(cond).dropna()
rs2 = s[cond]
tm.assert_series_equal(rs, rs2)
rs = s.where(cond, -s)
tm.assert_series_equal(rs, s.abs())
rs = s.where(cond)
assert s.shape == rs.shape
assert rs is not s
# test alignment
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
rs = s2.where(cond[:3])
tm.assert_series_equal(rs, expected)
expected = s2.abs()
expected.iloc[0] = s2[0]
rs = s2.where(cond[:3], -s2)
tm.assert_series_equal(rs, expected)
def test_where_non_keyword_deprecation():
# GH 41485
s = Series(range(5))
msg = (
"In a future version of pandas all arguments of "
"Series.where except for the arguments 'cond' "
"and 'other' will be keyword-only"
)
with tm.assert_produces_warning(FutureWarning, match=msg):
result = s.where(s > 1, 10, False)
expected = Series([10, 10, 2, 3, 4])
tm.assert_series_equal(expected, result)
def test_where_error():
s = Series(np.random.randn(5))
cond = s > 0
msg = "Array conditional must be same shape as self"
with pytest.raises(ValueError, match=msg):
s.where(1)
with pytest.raises(ValueError, match=msg):
s.where(cond[:3].values, -s)
# GH 2745
s = Series([1, 2])
s[[True, False]] = [0, 1]
expected = Series([0, 2])
tm.assert_series_equal(s, expected)
# failures
msg = "cannot assign mismatch length to masked array"
with pytest.raises(ValueError, match=msg):
s[[True, False]] = [0, 2, 3]
msg = (
"NumPy boolean array indexing assignment cannot assign 0 input "
"values to the 1 output values where the mask is true"
)
with pytest.raises(ValueError, match=msg):
s[[True, False]] = []
@pytest.mark.parametrize("klass", [list, tuple, np.array, Series])
def test_where_array_like(klass):
# see gh-15414
s = Series([1, 2, 3])
cond = [False, True, True]
expected = Series([np.nan, 2, 3])
result = s.where(klass(cond))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"cond",
[
[1, 0, 1],
Series([2, 5, 7]),
["True", "False", "True"],
[Timestamp("2017-01-01"), pd.NaT, Timestamp("2017-01-02")],
],
)
def test_where_invalid_input(cond):
# see gh-15414: only boolean arrays accepted
s = Series([1, 2, 3])
msg = "Boolean array expected for the condition"
with pytest.raises(ValueError, match=msg):
s.where(cond)
msg = "Array conditional must be same shape as self"
with pytest.raises(ValueError, match=msg):
s.where([True])
def test_where_ndframe_align():
msg = "Array conditional must be same shape as self"
s = Series([1, 2, 3])
cond = [True]
with pytest.raises(ValueError, match=msg):
s.where(cond)
expected = Series([1, np.nan, np.nan])
out = s.where(Series(cond))
tm.assert_series_equal(out, expected)
cond = np.array([False, True, False, True])
with pytest.raises(ValueError, match=msg):
s.where(cond)
expected = Series([np.nan, 2, np.nan])
out = s.where(Series(cond))
tm.assert_series_equal(out, expected)
def test_where_setitem_invalid():
# GH 2702
# make sure correct exceptions are raised on invalid list assignment
msg = (
lambda x: f"cannot set using a {x} indexer with a "
"different length than the value"
)
# slice
s = Series(list("abc"))
with pytest.raises(ValueError, match=msg("slice")):
s[0:3] = list(range(27))
s[0:3] = list(range(3))
expected = Series([0, 1, 2])
tm.assert_series_equal(s.astype(np.int64), expected)
# slice with step
s = Series(list("abcdef"))
with pytest.raises(ValueError, match=msg("slice")):
s[0:4:2] = list(range(27))
s = Series(list("abcdef"))
s[0:4:2] = list(range(2))
expected = Series([0, "b", 1, "d", "e", "f"])
tm.assert_series_equal(s, expected)
# neg slices
s = Series(list("abcdef"))
with pytest.raises(ValueError, match=msg("slice")):
s[:-1] = list(range(27))
s[-3:-1] = list(range(2))
expected = Series(["a", "b", "c", 0, 1, "f"])
tm.assert_series_equal(s, expected)
# list
s = Series(list("abc"))
with pytest.raises(ValueError, match=msg("list-like")):
s[[0, 1, 2]] = list(range(27))
s = Series(list("abc"))
with pytest.raises(ValueError, match=msg("list-like")):
s[[0, 1, 2]] = list(range(2))
# scalar
s = Series(list("abc"))
s[0] = list(range(10))
expected = Series([list(range(10)), "b", "c"])
tm.assert_series_equal(s, expected)
@pytest.mark.parametrize("size", range(2, 6))
@pytest.mark.parametrize(
"mask", [[True, False, False, False, False], [True, False], [False]]
)
@pytest.mark.parametrize(
"item", [2.0, np.nan, np.finfo(float).max, np.finfo(float).min]
)
# Test numpy arrays, lists and tuples as the input to be
# broadcast
@pytest.mark.parametrize(
"box", [lambda x: np.array([x]), lambda x: [x], lambda x: (x,)]
)
def test_broadcast(size, mask, item, box):
selection = np.resize(mask, size)
data = np.arange(size, dtype=float)
# Construct the expected series by taking the source
# data or item based on the selection
expected = Series(
[item if use_item else data[i] for i, use_item in enumerate(selection)]
)
s = Series(data)
s[selection] = box(item)
tm.assert_series_equal(s, expected)
s = Series(data)
result = s.where(~selection, box(item))
tm.assert_series_equal(result, expected)
s = Series(data)
result = s.mask(selection, box(item))
tm.assert_series_equal(result, expected)
def test_where_inplace():
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.where(cond, inplace=True)
tm.assert_series_equal(rs.dropna(), s[cond])
tm.assert_series_equal(rs, s.where(cond))
rs = s.copy()
rs.where(cond, -s, inplace=True)
tm.assert_series_equal(rs, s.where(cond, -s))
def test_where_dups():
# GH 4550
# where crashes with dups in index
s1 = Series(list(range(3)))
s2 = Series(list(range(3)))
comb = pd.concat([s1, s2])
result = comb.where(comb < 2)
expected = Series([0, 1, np.nan, 0, 1, np.nan], index=[0, 1, 2, 0, 1, 2])
tm.assert_series_equal(result, expected)
# GH 4548
# inplace updating not working with dups
comb[comb < 1] = 5
expected = Series([5, 1, 2, 5, 1, 2], index=[0, 1, 2, 0, 1, 2])
tm.assert_series_equal(comb, expected)
comb[comb < 2] += 10
expected = Series([5, 11, 2, 5, 11, 2], index=[0, 1, 2, 0, 1, 2])
tm.assert_series_equal(comb, expected)
def test_where_numeric_with_string():
# GH 9280
s = Series([1, 2, 3])
w = s.where(s > 1, "X")
assert not is_integer(w[0])
assert is_integer(w[1])
assert is_integer(w[2])
assert isinstance(w[0], str)
assert w.dtype == "object"
w = s.where(s > 1, ["X", "Y", "Z"])
assert not is_integer(w[0])
assert is_integer(w[1])
assert is_integer(w[2])
assert isinstance(w[0], str)
assert w.dtype == "object"
w = s.where(s > 1, np.array(["X", "Y", "Z"]))
assert not is_integer(w[0])
assert is_integer(w[1])
assert is_integer(w[2])
assert isinstance(w[0], str)
assert w.dtype == "object"
def test_where_timedelta_coerce():
s = Series([1, 2], dtype="timedelta64[ns]")
expected = Series([10, 10])
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
tm.assert_series_equal(rs, expected)
rs = s.where(mask, 10)
tm.assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
tm.assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
tm.assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype="object")
tm.assert_series_equal(rs, expected)
def test_where_datetime_conversion():
s = Series(date_range("20130102", periods=2))
expected = Series([10, 10])
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
tm.assert_series_equal(rs, expected)
rs = s.where(mask, 10)
tm.assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
tm.assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
tm.assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype="object")
tm.assert_series_equal(rs, expected)
# GH 15701
timestamps = ["2016-12-31 12:00:04+00:00", "2016-12-31 12:00:04.010000+00:00"]
s = Series([Timestamp(t) for t in timestamps])
rs = s.where(Series([False, True]))
expected = Series([pd.NaT, s[1]])
tm.assert_series_equal(rs, expected)
def test_where_dt_tz_values(tz_naive_fixture):
ser1 = Series(
pd.DatetimeIndex(["20150101", "20150102", "20150103"], tz=tz_naive_fixture)
)
ser2 = Series(
pd.DatetimeIndex(["20160514", "20160515", "20160516"], tz=tz_naive_fixture)
)
mask = Series([True, True, False])
result = ser1.where(mask, ser2)
exp = Series(
pd.DatetimeIndex(["20150101", "20150102", "20160516"], tz=tz_naive_fixture)
)
tm.assert_series_equal(exp, result)
def test_where_sparse():
# GH#17198 make sure we dont get an AttributeError for sp_index
ser = Series(pd.arrays.SparseArray([1, 2]))
result = ser.where(ser >= 2, 0)
expected = Series(pd.arrays.SparseArray([0, 2]))
tm.assert_series_equal(result, expected)
def test_where_empty_series_and_empty_cond_having_non_bool_dtypes():
# https://github.com/pandas-dev/pandas/issues/34592
ser = Series([], dtype=float)
result = ser.where([])
tm.assert_series_equal(result, ser)
@pytest.mark.parametrize("klass", [Series, pd.DataFrame])
def test_where_categorical(klass):
# https://github.com/pandas-dev/pandas/issues/18888
exp = klass(
pd.Categorical(["A", "A", "B", "B", np.nan], categories=["A", "B", "C"]),
dtype="category",
)
df = klass(["A", "A", "B", "B", "C"], dtype="category")
res = df.where(df != "C")
tm.assert_equal(exp, res)
# TODO(ArrayManager) DataFrame.values not yet correctly returning datetime array
# for categorical with datetime categories
@td.skip_array_manager_not_yet_implemented
def test_where_datetimelike_categorical(tz_naive_fixture):
# GH#37682
tz = tz_naive_fixture
dr = date_range("2001-01-01", periods=3, tz=tz)._with_freq(None)
lvals = pd.DatetimeIndex([dr[0], dr[1], pd.NaT])
rvals = pd.Categorical([dr[0], pd.NaT, dr[2]])
mask = np.array([True, True, False])
# DatetimeIndex.where
res = lvals.where(mask, rvals)
tm.assert_index_equal(res, dr)
# DatetimeArray.where
res = lvals._data.where(mask, rvals)
tm.assert_datetime_array_equal(res, dr._data)
# Series.where
res = Series(lvals).where(mask, rvals)
tm.assert_series_equal(res, Series(dr))
# DataFrame.where
    res = pd.DataFrame(lvals).where(mask[:, None], pd.DataFrame(rvals))
#### FEATURE ENGINEERING MODULE ####
#- Decomposition features
# - PCA
# - ICA
# - TSVD
# - GRP
# - SRP
# - ...
#- Clustering output features
# - KMeans
# - ...
#- Deterministic features
# - Binning
# - ...
from sklearn.cluster import KMeans
import sklearn.decomposition as decomposition
from sklearn.preprocessing import StandardScaler, MinMaxScaler
import sklearn.random_projection as rp
import pandas as pd
import re
class feature_engineering_class():
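    """
    Convenience wrapper around train/validation feature generation: scales the
    two frames, fits several decomposition models (PCA/ICA/TSVD/GRP/SRP) on the
    scaled training data, and can merge the resulting projections back onto the
    original frames.

    Usage sketch (illustrative only; assumes `train` and `valid` are numeric
    DataFrames with the same columns):

        fe = feature_engineering_class(train, valid, scaler_method='ss')
        fe.decomp_various(n=5)
        train_fe, valid_fe = fe.return_combined(train, valid)
    """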
def __init__(self, train, valid, scaler_method='ss', decomposition_methods=['PCA'], ):
self.t = train
self.v = valid
self.scm = scaler_method
self.dcm = decomposition_methods
def scalers(self):
"""
Standard scaler = 'ss'
MinMax scaler = 'mm'
"""
sc = StandardScaler() if self.scm == 'ss' else MinMaxScaler()
sc.fit(self.t)
return pd.DataFrame(sc.transform(self.t), columns=self.t.columns.values), pd.DataFrame(sc.transform(self.v), columns=self.v.columns.values)
def decomp_various(self, n):
decomp_dfs = {}
decomp_methods = ['PCA', 'FastICA', 'TruncatedSVD', 'GaussianRandomProjection', 'SparseRandomProjection']
for i in decomp_methods:
if i == 'PCA':
decomp_obj = getattr(decomposition, i)
decomp_obj = decomp_obj(n_components=.75)
elif i in ['FastICA', 'TruncatedSVD']:
decomp_obj = getattr(decomposition, i)
decomp_obj = decomp_obj(n_components=n)
else:
decomp_obj = getattr(rp, i)
decomp_obj = decomp_obj(n_components=n, eps=0.3)
# perform the multiple decomposition techniques
t, v = self.scalers()
            decomp_obj.fit(t)
            decomp_train = pd.DataFrame(decomp_obj.transform(t))
            decomp_valid = pd.DataFrame(decomp_obj.transform(v))
            cols = [str(i) + '_' + str(c) for c in decomp_train.columns]
            decomp_train.columns = cols
            decomp_valid.columns = cols
decomp_dfs[i + '_train'] = decomp_train
decomp_dfs[i + '_valid'] = decomp_valid
        self.df = decomp_dfs
return None
def return_combined(self, train, valid):
#self.df
for i in list(self.df.keys()):
if bool(re.search('train', i)):
train = pd.concat([train.reset_index(drop=True), self.df[i]], axis=1)
else:
valid = pd.concat([valid.reset_index(drop=True), self.df[i]], axis=1)
return train, valid
def kmeans_clusterer(train_df, valid_df, n):
clusterer = KMeans(n, random_state=1, init='k-means++')
# fit the clusterer
clusterer.fit(train_df)
train_clusters = clusterer.predict(train_df)
valid_clusters = clusterer.predict(valid_df)
return train_clusters, valid_clusters
def kmeans_feats(train_df, valid_df, m=5):
print('m is ', m, '\n')
for i in range(2, m):
t, v = feat_eng.kmeans_clusterer(train_df, valid_df, n=i)
col_name = str('kmeans_' + str(i))
        t = pd.DataFrame({col_name: t})
import operator
from shutil import get_terminal_size
from typing import Dict, Hashable, List, Type, Union, cast
from warnings import warn
import numpy as np
from pandas._config import get_option
from pandas._libs import algos as libalgos, hashtable as htable
from pandas._typing import ArrayLike, Dtype, Ordered, Scalar
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender,
Substitution,
cache_readonly,
deprecate_kwarg,
doc,
)
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.dtypes.cast import (
coerce_indexer_dtype,
maybe_cast_to_extension_array,
maybe_infer_to_datetimelike,
)
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
is_categorical_dtype,
is_datetime64_dtype,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
is_integer_dtype,
is_iterator,
is_list_like,
is_object_dtype,
is_scalar,
is_sequence,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.missing import isna, notna
from pandas.core import ops
from pandas.core.accessor import PandasDelegate, delegate_names
import pandas.core.algorithms as algorithms
from pandas.core.algorithms import _get_data_algo, factorize, take, take_1d, unique1d
from pandas.core.array_algos.transforms import shift
from pandas.core.arrays.base import ExtensionArray, _extension_array_shared_docs
from pandas.core.base import NoNewAttributesMixin, PandasObject, _shared_docs
import pandas.core.common as com
from pandas.core.construction import array, extract_array, sanitize_array
from pandas.core.indexers import check_array_indexer, deprecate_ndim_indexing
from pandas.core.missing import interpolate_2d
from pandas.core.ops.common import unpack_zerodim_and_defer
from pandas.core.sorting import nargsort
from pandas.io.formats import console
def _cat_compare_op(op):
opname = f"__{op.__name__}__"
@unpack_zerodim_and_defer(opname)
def func(self, other):
if is_list_like(other) and len(other) != len(self):
# TODO: Could this fail if the categories are listlike objects?
raise ValueError("Lengths must match.")
if not self.ordered:
if opname in ["__lt__", "__gt__", "__le__", "__ge__"]:
raise TypeError(
"Unordered Categoricals can only compare equality or not"
)
if isinstance(other, Categorical):
# Two Categoricals can only be be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = "Categoricals can only be compared if 'categories' are the same."
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif self.ordered and not (self.categories == other.categories).all():
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError(
"Categoricals can only be compared if 'ordered' is the same"
)
if not self.ordered and not self.categories.equals(other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
f = getattr(self._codes, opname)
ret = f(other_codes)
mask = (self._codes == -1) | (other_codes == -1)
if mask.any():
                # In other series, this leads to False, so do that here too
if opname == "__ne__":
ret[(self._codes == -1) & (other_codes == -1)] = True
else:
ret[mask] = False
return ret
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
ret = getattr(self._codes, opname)(i)
if opname not in {"__eq__", "__ge__", "__gt__"}:
# check for NaN needed if we are not equal or larger
mask = self._codes == -1
ret[mask] = False
return ret
else:
if opname == "__eq__":
return np.zeros(len(self), dtype=bool)
elif opname == "__ne__":
return np.ones(len(self), dtype=bool)
else:
raise TypeError(
f"Cannot compare a Categorical for op {opname} with a "
"scalar, which is not a category."
)
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if opname in ["__eq__", "__ne__"]:
return getattr(np.array(self), opname)(np.array(other))
raise TypeError(
f"Cannot compare a Categorical for op {opname} with "
f"type {type(other)}.\nIf you want to compare values, "
"use 'np.asarray(cat) <op> other'."
)
func.__name__ = opname
return func
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :method:`__contains__`
and :class:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
    cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except (KeyError, TypeError):
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
class Categorical(ExtensionArray, PandasObject):
"""
Represent a categorical variable in classic R / S-plus fashion.
`Categoricals` can only take on only a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : bool, default False
    Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical.
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : bool
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
See Also
--------
CategoricalDtype : Type for categorical data.
CategoricalIndex : An Index with an underlying ``Categorical``.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/categorical.html>`_
for more.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
"""
# For comparisons, so that numpy uses our implementation if the compare
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
# tolist is not actually deprecated, just suppressed in the __dir__
_deprecations = PandasObject._deprecations | frozenset(["tolist"])
_typ = "categorical"
def __init__(
self, values, categories=None, ordered=None, dtype=None, fastpath=False
):
dtype = CategoricalDtype._from_values_or_dtype(
values, categories, ordered, dtype
)
# At this point, dtype is always a CategoricalDtype, but
# we may have dtype.categories be None, and we need to
# infer categories in a factorization step further below
if fastpath:
self._codes = coerce_indexer_dtype(values, dtype.categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
# By convention, empty lists result in object dtype:
sanitize_dtype = np.dtype("O") if len(values) == 0 else None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError as err:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError(
"'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument."
) from err
except ValueError as err:
# FIXME
raise NotImplementedError(
"> 1 ndim Categorical are not supported at this time"
) from err
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values.dtype):
old_codes = (
values._values.codes if isinstance(values, ABCSeries) else values.codes
)
codes = recode_for_categories(
old_codes, values.dtype.categories, dtype.categories
)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = -np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""
The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
        Assigning to `categories` is an in-place operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
        number of new categories is unequal to the number of old categories
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if self.dtype.categories is not None and len(self.dtype.categories) != len(
new_dtype.categories
):
raise ValueError(
"new categories need to have the same number of "
"items as the old categories!"
)
self._dtype = new_dtype
@property
def ordered(self) -> Ordered:
"""
Whether the categories have an ordered relationship.
"""
return self.dtype.ordered
@property
def dtype(self) -> CategoricalDtype:
"""
The :class:`~pandas.api.types.CategoricalDtype` for this instance.
"""
return self._dtype
@property
def _constructor(self) -> Type["Categorical"]:
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def _formatter(self, boxed=False):
# Defer to CategoricalFormatter's formatter.
return None
def copy(self) -> "Categorical":
"""
Copy constructor.
"""
return self._constructor(
values=self._codes.copy(), dtype=self.dtype, fastpath=True
)
def astype(self, dtype: Dtype, copy: bool = True) -> ArrayLike:
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
"""
if is_categorical_dtype(dtype):
dtype = cast(Union[str, CategoricalDtype], dtype)
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
if is_extension_array_dtype(dtype):
return array(self, dtype=dtype, copy=copy) # type: ignore # GH 28770
if is_integer_dtype(dtype) and self.isna().any():
raise ValueError("Cannot convert float NaN to integer")
return np.array(self, dtype=dtype, copy=copy)
@cache_readonly
def size(self) -> int:
"""
Return the len of myself.
"""
return self._codes.size
@cache_readonly
def itemsize(self) -> int:
"""
return the size of a single category
"""
return self.categories.itemsize
def tolist(self) -> List[Scalar]:
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
to_list = tolist
@classmethod
def _from_inferred_categories(
cls, inferred_categories, inferred_codes, dtype, true_values=None
):
"""
Construct a Categorical from inferred values.
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
true_values : list, optional
If none are provided, the default ones are
"True", "TRUE", and "true."
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (
isinstance(dtype, CategoricalDtype) and dtype.categories is not None
)
if known_categories:
# Convert to a specialized type with `dtype` if specified.
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors="coerce")
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors="coerce")
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors="coerce")
elif dtype.categories.is_boolean():
if true_values is None:
true_values = ["True", "TRUE", "true"]
cats = cats.isin(true_values)
if known_categories:
# Recode from observation order to dtype.categories order.
categories = dtype.categories
codes = recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# Sort categories and recode for unknown categories.
unsorted = cats.copy()
categories = cats.sort_values()
codes = recode_for_categories(inferred_codes, unsorted, categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories=None, ordered=None, dtype=None):
"""
Make a Categorical type from codes and categories or dtype.
This constructor is useful if you already have codes and
categories/dtype and so do not need the (computation intensive)
factorization step, which is usually done on the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like of int
An integer array, where each integer points to a category in
categories or dtype.categories, or else is -1 for NaN.
categories : index-like, optional
The categories for the categorical. Items need to be unique.
If the categories are not given here, then they must be provided
in `dtype`.
ordered : bool, optional
Whether or not this categorical is treated as an ordered
categorical. If not given here or in `dtype`, the resulting
categorical will be unordered.
dtype : CategoricalDtype or "category", optional
If :class:`CategoricalDtype`, cannot be used together with
`categories` or `ordered`.
.. versionadded:: 0.24.0
When `dtype` is provided, neither `categories` nor `ordered`
should be provided.
Returns
-------
Categorical
Examples
--------
>>> dtype = pd.CategoricalDtype(['a', 'b'], ordered=True)
>>> pd.Categorical.from_codes(codes=[0, 1, 0, 1], dtype=dtype)
[a, b, a, b]
Categories (2, object): [a < b]
"""
dtype = CategoricalDtype._from_values_or_dtype(
categories=categories, ordered=ordered, dtype=dtype
)
if dtype.categories is None:
msg = (
"The categories must be provided in 'categories' or "
"'dtype'. Both were None."
)
raise ValueError(msg)
if is_extension_array_dtype(codes) and is_integer_dtype(codes):
# Avoid the implicit conversion of Int to object
if isna(codes).any():
raise ValueError("codes cannot contain NA values")
codes = codes.to_numpy(dtype=np.int64)
else:
codes = np.asarray(codes)
if len(codes) and not is_integer_dtype(codes):
raise ValueError("codes need to be array-like integers")
if len(codes) and (codes.max() >= len(dtype.categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and len(categories)-1")
return cls(codes, dtype=dtype, fastpath=True)
@property
def codes(self) -> np.ndarray:
"""
The category codes of this categorical.
Codes are an array of integers which are the positions of the actual
values in the categories array.
There is no setter, use the other categorical methods and the normal item
setter to change values in the categorical.
Returns
-------
ndarray[int]
A non-writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_categories(self, categories, fastpath=False):
"""
Sets new categories inplace
Parameters
----------
fastpath : bool, default False
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories, self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (
not fastpath
and self.dtype.categories is not None
and len(new_dtype.categories) != len(self.dtype.categories)
):
raise ValueError(
"new categories need to have the same number of "
"items than the old categories!"
)
self._dtype = new_dtype
def _set_dtype(self, dtype: CategoricalDtype) -> "Categorical":
"""
Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = recode_for_categories(self.codes, self.categories, dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Set the ordered attribute to the boolean value.
Parameters
----------
value : bool
Set whether this categorical is ordered (True) or not (False).
inplace : bool, default False
Whether or not to set the ordered attribute in-place or return
a copy of this categorical with ordered set to the value.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Set the Categorical to be ordered.
Parameters
----------
inplace : bool, default False
Whether or not to set the ordered attribute in-place or return
a copy of this categorical with ordered set to True.
Returns
-------
Categorical
Ordered Categorical.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Set the Categorical to be unordered.
Parameters
----------
inplace : bool, default False
Whether or not to set the ordered attribute in-place or return
a copy of this categorical with ordered set to False.
Returns
-------
Categorical
Unordered Categorical.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False, inplace=False):
"""
Set the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
        set to NaN). If `rename==True`, the categories will simply be renamed
        (fewer or more items than in the old categories will result in values
        set to NaN or in unused categories respectively).
        This method can be used to perform more than one action of adding,
        removing, and reordering simultaneously and is therefore faster than
        performing the individual steps via the more specialised methods.
        On the other hand, this method does not do checks (e.g., whether the
        old categories are included in the new categories on a reorder), which
        can result in surprising changes, for example when using special string
        dtypes, which do not consider an S1 string equal to a single-character
        Python string.
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : bool, default False
            Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : bool, default False
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : bool, default False
Whether or not to reorder the categories in-place or return a copy
of this categorical with reordered categories.
Returns
-------
Categorical with reordered categories or None if inplace.
Raises
------
ValueError
If new_categories does not validate as categories
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if cat.dtype.categories is not None and len(new_dtype.categories) < len(
cat.dtype.categories
):
# remove all _codes which are larger and set to -1/NaN
cat._codes[cat._codes >= len(new_dtype.categories)] = -1
else:
codes = recode_for_categories(
cat.codes, cat.categories, new_dtype.categories
)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
def rename_categories(self, new_categories, inplace=False):
"""
Rename categories.
Parameters
----------
new_categories : list-like, dict-like or callable
New categories which will replace old categories.
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0.
inplace : bool, default False
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
            items as the current categories or do not validate as categories
See Also
--------
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, "inplace")
cat = self if inplace else self.copy()
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item) for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
"""
Reorder categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : bool, optional
            Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
inplace : bool, default False
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
See Also
--------
rename_categories : Rename categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if set(self.dtype.categories) != set(new_categories):
raise ValueError(
"items in new_categories are not the same as in old categories"
)
return self.set_categories(new_categories, ordered=ordered, inplace=inplace)
def add_categories(self, new_categories, inplace=False):
"""
Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : bool, default False
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
        if not is_list_like(new_categories):
import pandas as pd
import os
from glob import glob
from pandas.core.frame import DataFrame
from tqdm import tqdm
import numpy as np
from numpy.random import randint
from typing import Union, Tuple
from sklearn.ensemble import RandomForestRegressor
from sklearn.base import RegressorMixin
from collections import OrderedDict
from copy import deepcopy
from covid_xprize.nixtamalai.helpers import add_geo_id
from covid_xprize.nixtamalai.analyze_predictor import IP_COLS
from microtc.utils import load_model
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
NUM_PRESCRIPTIONS = 10
# Faster than is_pareto_efficient_simple, but less readable.
def is_pareto_efficient(costs, return_mask = True):
"""
Taken from: https://stackoverflow.com/questions/32791911/fast-calculation-of-pareto-front-in-python
Find the pareto-efficient points
:param costs: An (n_points, n_costs) array
:param return_mask: True to return a mask
:return: An array of indices of pareto-efficient points.
If return_mask is True, this will be an (n_points, ) boolean array
Otherwise it will be a (n_efficient_points, ) integer array of indices.
"""
is_efficient = np.arange(costs.shape[0])
n_points = costs.shape[0]
next_point_index = 0 # Next index in the is_efficient array to search for
while next_point_index<len(costs):
nondominated_point_mask = np.any(costs<costs[next_point_index], axis=1)
nondominated_point_mask[next_point_index] = True
is_efficient = is_efficient[nondominated_point_mask] # Remove dominated points
costs = costs[nondominated_point_mask]
next_point_index = np.sum(nondominated_point_mask[:next_point_index])+1
if return_mask:
is_efficient_mask = np.zeros(n_points, dtype = bool)
is_efficient_mask[is_efficient] = True
return is_efficient_mask
else:
return is_efficient
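# Hedged usage sketch (not part of the original module): shows which rows of a
# small, made-up cost matrix survive is_pareto_efficient. Lower is better on
# both objectives (e.g. prescription stringency vs. predicted cases).
def _demo_is_pareto_efficient():
    costs = np.array([
        [1.0, 9.0],   # best on the first objective
        [9.0, 1.0],   # best on the second objective
        [5.0, 5.0],   # a balanced trade-off, still non-dominated
        [9.0, 9.0],   # dominated by every other row
    ])
    mask = is_pareto_efficient(costs, return_mask=True)
    # mask == array([ True,  True,  True, False])
    return costs[mask]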
def prescription_cases(output: Union[str, None] = "presc-cases.csv") -> pd.DataFrame:
FILES = glob(os.path.join(ROOT_DIR, "..", "..", "prescriptions/*2021-01-28.csv"))
FILES.sort()
    prescriptions = {os.path.basename(fname).split("-")[0]:
                         pd.read_csv(fname, parse_dates=["Date"], index_col=["Date"])
                     for fname in FILES}
"""
Class Features
Name: lib_data_geo_shapefile
Author(s): <NAME> (<EMAIL>)
Date: '20210113'
Version: '1.0.0'
"""
#######################################################################################
# Libraries
import logging
import pandas as pd
import geopandas as gpd
from copy import deepcopy
from lib_info_args import logger_name
# Logging
log_stream = logging.getLogger(logger_name)
# Debug
# import matplotlib.pylab as plt
#######################################################################################
# -------------------------------------------------------------------------------------
# Method to find data section
def find_data_section(section_df, section_name=None, basin_name=None,
tag_column_section_in='section_name', tag_column_basin_in='section_domain',
tag_column_section_out='section_name', tag_column_basin_out='basin_name'):
section_name_ref = section_name.lower()
basin_name_ref = basin_name.lower()
section_name_list = section_df[tag_column_section_in].values
basin_name_list = section_df[tag_column_basin_in].values
section_dict_tmp = {tag_column_section_in: section_name_list, tag_column_basin_in: basin_name_list}
section_df_tmp = pd.DataFrame(data=section_dict_tmp)
section_df_tmp = section_df_tmp.astype(str).apply(lambda x: x.str.lower())
point_idx = section_df_tmp[(section_df_tmp[tag_column_section_in] == section_name_ref) &
(section_df_tmp[tag_column_basin_in] == basin_name_ref)].index
if point_idx.shape[0] == 1:
point_idx = point_idx[0]
point_dict = section_df.iloc[point_idx, :].to_dict()
point_dict[tag_column_section_out] = point_dict.pop(tag_column_section_in)
point_dict[tag_column_basin_out] = point_dict.pop(tag_column_basin_in)
elif point_idx.shape[0] == 0:
log_stream.error(' ===> Section idx not found in the section dictionary.')
raise IOError('Section selection failed; section not found')
else:
        log_stream.error(' ===> Section idx not found. Procedure will exit due to an unexpected error.')
raise NotImplementedError('Section selection failed for unknown reason.')
return point_dict
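# Hedged usage sketch (not part of the original module): find_data_section
# matches a (section, basin) pair case-insensitively and returns that row as a
# dict with the renamed keys. The section and basin names below are invented.
def _demo_find_data_section():
    df = pd.DataFrame({
        'section_domain': ['Magra', 'Vara'],
        'section_name': ['Calamazza', 'Nasceto'],
        'section_code': [1.0, 2.0],
    })
    return find_data_section(df, section_name='CALAMAZZA', basin_name='magra')
    # -> {'section_code': 1.0, 'section_name': 'Calamazza', 'basin_name': 'Magra'}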
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to read shapefile section(s)
def read_data_section(file_name, file_filter=None,
columns_name_expected_in=None, columns_name_expected_out=None, columns_name_type=None):
if columns_name_expected_in is None:
columns_name_expected_in = [
'HMC_X', 'HMC_Y', 'LAT', 'LON', 'BASIN', 'SEC_NAME', 'SEC_RS', 'AREA', 'Q_THR1', 'Q_THR2', 'TYPE']
if columns_name_expected_out is None:
columns_name_expected_out = [
'hmc_idx_x', 'hmc_idx_y', 'latitude', 'longitude', 'section_domain', 'section_name', 'section_code',
'section_drained_area', 'section_discharge_thr_alert', 'section_discharge_thr_alarm', 'section_type']
if columns_name_type is None:
columns_name_type = ['int', 'int', 'float', 'float',
'str', 'str', 'float', 'float', 'float', 'float',
'str']
file_dframe_raw = gpd.read_file(file_name)
file_rows = file_dframe_raw.shape[0]
if file_filter is not None:
file_dframe_step = deepcopy(file_dframe_raw)
for filter_key, filter_value in file_filter.items():
file_columns_check = [x.lower() for x in list(file_dframe_raw.columns)]
if filter_key.lower() in file_columns_check:
if isinstance(filter_value, str):
id_key = file_columns_check.index(filter_key)
filter_column = list(file_dframe_raw.columns)[id_key]
file_dframe_step = file_dframe_step.loc[
file_dframe_step[filter_column].str.lower() == filter_value.lower()]
else:
log_stream.error(' ===> Filter datatype is not allowed.')
raise NotImplementedError('Datatype not implemented yet')
file_dframe_raw = deepcopy(file_dframe_step)
section_obj = {}
for column_name_in, column_name_out, column_type in zip(columns_name_expected_in,
columns_name_expected_out, columns_name_type):
if column_name_in in file_dframe_raw.columns:
column_data = file_dframe_raw[column_name_in].values.tolist()
else:
if column_type == 'int':
column_data = [-9999] * file_rows
elif column_type == 'str':
column_data = [''] * file_rows
elif column_type == 'float':
column_data = [-9999.0] * file_rows
else:
log_stream.error(' ===> Column datatype is not allowed.')
raise NotImplementedError('Datatype not implemented yet')
section_obj[column_name_out] = column_data
    section_df = pd.DataFrame(data=section_obj)
import pandas as pd
import urllib.request
import traceback
from backend.common import *
DATA_PATH = f'{get_root()}/data/world.xlsx'
def consolidate_country_col(df, country_col, country_id_col, covid_df):
"""
    This method adjusts the values in the country field of the passed df
    so that they match those in covid_df whenever possible, allowing the two
    frames to be joined on the country field afterwards.
"""
covid_countries = covid_df[['country_id', 'country']].drop_duplicates()
covid_countries['country_lower'] = covid_countries['country'].str.lower()
covid_countries['country_id_lower'] = covid_countries['country_id'].str.lower()
df = df.rename(columns={
country_col: 'country_other',
country_id_col: 'country_id_other',
})
df['country_other_lower'] = df['country_other'].str.lower()
df['country_id_other_lower'] = df['country_id_other'].str.lower()
def _take_first_non_null_col(_df, _cols):
return _df[_cols].fillna(method='bfill', axis=1).iloc[:, 0]
def _consolidate_on(_df, col):
_join_df = covid_countries.set_index(f'{col}_lower')
_df = _df.join(_join_df, on=f'{col}_other_lower')
_df['country_other'] = _take_first_non_null_col(_df, ['country', 'country_other'])
for c in _join_df.columns:
del _df[c]
return _df
df = _consolidate_on(df, 'country_id')
df = _consolidate_on(df, 'country')
df = df[df['country_other'].isin(covid_countries['country'])]
del df['country_id_other']
del df['country_other_lower']
del df['country_id_other_lower']
df = df.rename(columns={
'country_other': 'country'
})
return df
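# Hedged usage sketch (not part of the original module): shows the intended
# effect of consolidate_country_col on a tiny synthetic mobility frame. All
# values below are invented for illustration only.
def _demo_consolidate_country_col():
    covid_df = pd.DataFrame({
        'country_id': ['US', 'GB'],
        'country': ['United States', 'United Kingdom'],
    })
    other_df = pd.DataFrame({
        'country_region_code': ['US', 'GB'],
        'country_region': ['United States of America', 'United Kingdom'],
        'retail_change': [-12.0, -30.0],
    })
    out = consolidate_country_col(other_df, 'country_region', 'country_region_code', covid_df)
    # out['country'] now uses the covid_df spelling, e.g. 'United States'
    return out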
def get_google_mobility_df(covid_df):
url = 'https://www.gstatic.com/covid19/mobility/Global_Mobility_Report.csv'
df = pd.read_csv(url, nrows=1)
dtypes = {col: 'float' if col.endswith('baseline') else 'object' for col in df.columns}
df = pd.read_csv(url, dtype=dtypes)
del df['iso_3166_2_code']
del df['census_fips_code']
df = consolidate_country_col(df, 'country_region', 'country_region_code', covid_df)
    df = df[pd.isna(df['sub_region_1'])]
from abc import ABC
from dataclasses import dataclass
from typing import Optional, Union, Any, Callable, Tuple, Dict
import numpy as np
import pandas as pd
from django.apps import apps
MODEL_ITEM_FIELD_MAP: Dict[str, Tuple[str]] = {
"IntegerItem": (
"PositiveSmallIntegerField",
"SmallIntegerField",
"PositiveIntegerField",
),
"FloatItem": ("FloatField",),
"StringItem": ("CharField", "TextField", "ForeignKey"),
"DateItem": ("DateField",),
"ForeignKeyItem": ("ForeignKey",),
"DateTimeItem": ("DateTimeField",),
"ChoiceItem": ("CharField",),
"ArrayItem": ("ArrayField",),
"BooleanItem": ("BooleanField", "NullBooleanField"),
}
def parse_int_or(value: str, default: Optional[Any] = None) -> Optional[int]:
try:
return int(value)
except ValueError:
return default
def parse_float_or(value: str, default: Optional[Any] = None) -> Optional[float]:
try:
return float(value)
except ValueError:
return default
def lower_first(value: str) -> str:
assert isinstance(value, str)
return value[:1].lower() + value[1:] if value else ""
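# Hedged sketch (not part of the original module): the parse helpers swallow
# malformed fixed-width fields and fall back to the supplied default, which is
# what lets the Item.transform implementations below emit nullable values.
def _demo_parse_helpers():
    assert parse_int_or("1200") == 1200
    assert parse_int_or("  ", default=None) is None
    assert parse_float_or("3.5") == 3.5
    assert lower_first("RaceHorse") == "raceHorse"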
@dataclass(eq=False, frozen=True)
class Item(ABC):
label: str
width: int
start: int
@property
def key(self) -> str:
return self.label
def transform(self, s: pd.Series) -> Union[pd.Series, pd.DataFrame]:
raise NotImplementedError
def _validate(self) -> None:
assert self.width >= 0, "width must be greater than or equal to 0"
assert self.start >= 0, "start must be greater than or equal to 0"
@dataclass(eq=False, frozen=True)
class ModelItem(Item, ABC):
symbol: str
@property
def key(self) -> str:
value = self.symbol.split(".", maxsplit=1).pop()
return lower_first(value).replace(".", "__")
def get_model(self) -> Any:
model, _ = self.symbol.rsplit(".", maxsplit=1)
return apps.get_model(model)
def get_field(self) -> Any:
_, field = self.symbol.rsplit(".", maxsplit=1)
return self.get_model()._meta.get_field(field)
def _validate(self) -> None:
super()._validate()
assert len(self.symbol.split(".")) == 3, f"invalid symbol <{self.symbol}>"
internal_type = self.get_field().get_internal_type()
assert internal_type in MODEL_ITEM_FIELD_MAP.get(self.__class__.__name__), (
f"field <name: {self.get_field().name}, type: {internal_type}> "
f"not found in MODEL_ITEM_FIELD_MAP['{self.__class__.__name__}']"
)
@dataclass(eq=False, frozen=True)
class IntegerItem(ModelItem):
default: Optional[int] = None
def transform(self, s: pd.Series) -> Union[pd.Series, pd.DataFrame]:
self._validate()
return s.apply(parse_int_or, args=(self.default,)).astype("Int64")
@dataclass(eq=False, frozen=True)
class FloatItem(ModelItem):
default: Optional[float] = np.nan
scale: float = 1.0
def transform(self, s: pd.Series) -> Union[pd.Series, pd.DataFrame]:
self._validate()
return (
s.apply(parse_float_or, args=(self.default,))
.apply(lambda n: n * self.scale)
.astype(float)
)
@dataclass(eq=False, frozen=True)
class ArrayItem(ModelItem):
size: int
mapper: Callable = None
@property
def element_width(self) -> int:
return int(self.width / self.size)
@property
def base_field(self):
return self.get_field().base_field
def transform(self, se: pd.Series) -> Union[pd.Series, pd.DataFrame]:
self._validate()
base_field_type = self.base_field.get_internal_type()
se = se.copy()
if base_field_type in MODEL_ITEM_FIELD_MAP["IntegerItem"]:
se = se.apply(lambda a: [parse_int_or(el) for el in a])
elif base_field_type in MODEL_ITEM_FIELD_MAP["FloatItem"]:
se = se.apply(lambda a: [parse_float_or(el) for el in a])
else:
se = se.map(list).map(
lambda lst: [None if x.replace(" ", "") == "" else x for x in lst]
)
if self.mapper:
se = se.apply(lambda arr: [self.mapper(item) for item in arr])
return se
def _validate(self) -> None:
super()._validate()
assert (
self.size >= 0
), f"size must be greater than or equal to zero <size: {self.size}>"
assert (
self.width % self.size == 0
), f"width must be divisible by size <width: {self.width}, size: {self.size}>"
@dataclass(eq=False, frozen=True)
class ForeignKeyItem(ModelItem):
related_symbol: str
@property
def key(self) -> str:
key = super(ForeignKeyItem, self).key
return "_".join([key, self.get_remote_field().name])
def get_remote_field(self):
model, field = self.related_symbol.rsplit(".", maxsplit=1)
return apps.get_model(model)._meta.get_field(field)
def transform(self, s: pd.Series) -> Union[pd.Series, pd.DataFrame]:
self._validate()
remote_field = self.get_remote_field()
remote_records = remote_field.model.objects.filter(
**{f"{remote_field.name}__in": s}
).values(remote_field.name, "id")
model_name = self.get_model()._meta.model_name
field_name = self.get_field().column
index_name = "__".join((model_name, field_name))
return (
s.map(
{record[remote_field.name]: record["id"] for record in remote_records}
)
.astype("Int64")
.rename(index_name)
)
def _validate(self) -> None:
super()._validate()
symbol_parts = len(self.related_symbol.split("."))
assert symbol_parts == 3, f"invalid related symbol <{self.related_symbol}>"
@dataclass(eq=False, frozen=True)
class DateItem(ModelItem):
format: str = "%Y%m%d"
def transform(self, s: pd.Series) -> Union[pd.Series, pd.DataFrame]:
self._validate()
date = pd.to_datetime(s, format=self.format, errors="coerce").dt.date
return date.astype(object).where(date.notnull(), None)
@dataclass(eq=False, frozen=True)
class DateTimeItem(ModelItem):
format: str = "%Y%m%d%H%M"
tz: str = "Asia/Tokyo"
def transform(self, s: pd.Series) -> Union[pd.Series, pd.DataFrame]:
self._validate()
        return pd.to_datetime(s, format=self.format)
import os
import os.path
import time
import requests
import pandas as pd
import asyncio
import json
from aiohttp import ClientSession
from bs4 import BeautifulSoup
from multiprocessing.pool import ThreadPool
from app.config import DATA_RAW_PATH
STATES_URL = 'https://api.kevalaanalytics.com/geography/states/'
REGIONS_URL = 'http://assessor.keva.la/cleanenergyprogress/geographies?state=%s&type=%s'
HTML_URL = 'http://assessor.keva.la/cleanenergyprogress/analytics?area_type=%s&area_id=%s'
STATES_INPUT_URL = "http://assessor.keva.la/cleanenergyprogress/states?states="
METADATA_CSV = os.path.join(DATA_RAW_PATH % 'jobs', 'jobs_metadata.csv')
STATES_META_OUTPUT_FILE = os.path.join(DATA_RAW_PATH % 'jobs', 'states-cleaned.json')
OUTPUT_CSV = os.path.join(DATA_RAW_PATH % 'jobs', 'all.csv')
REGION_TYPES = [
('county', 'counties'),
('sldu', 'legislativedistrictsupper'),
('sldl', 'legislativedistrictslower'),
('cd', 'congressionaldistricts')]
JOB_STAT_KEYS = [
'countSolarJobs',
'countWindJobs',
'countEnergyJobs',
'totalJobs',
'percentOfStateJobs',
'residentialMWhInvested',
'commercialMWhInvested',
'utilityMWhInvested',
'totalMWhInvested',
'residentialDollarsInvested',
'commercialDollarsInvested',
'utilityDollarsInvested',
'totalDollarsInvested',
'investmentHomesEquivalent',
'countResidentialInstallations',
'countCommercialInstallations',
'countUtilityInstallations',
'countTotalInstallations',
'residentialMWCapacity',
'commercialMWCapacity',
'utilityMWCapacity',
'totalMWCapacity'
]
CSV_KEYS = [
'stateAbbr',
'geoType',
'name',
'geoid',
'sourceURL'
]
CSV_KEYS.extend(JOB_STAT_KEYS)
HTML_STRUCTURE = {
'tables': [
['countSolarJobs', 'countWindJobs', 'countEnergyJobs'],
['residentialDollarsInvested', 'residentialMWhInvested', 'commercialDollarsInvested',
'commercialMWhInvested', 'utilityDollarsInvested', 'utilityMWhInvested'],
['countResidentialInstallations', 'residentialMWCapacity', 'countCommercialInstallations',
'commercialMWCapacity', 'countUtilityInstallations', 'utilityMWCapacity'],
],
'totals': [
['totalJobs', 'percentOfStateJobs'],
['totalDollarsInvested', 'totalMWhInvested', 'investmentHomesEquivalent'],
['countTotalInstallations', 'totalMWCapacity']
]
}
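# Hedged sketch (not part of the original script): illustrates how a keylist in
# HTML_STRUCTURE['tables'] is zipped against td.table_data cells when a page is
# scraped. The HTML snippet is invented for illustration.
def _demo_parse_jobs_tables():
    html = (
        '<div class="analytics_data"><table><tr>'
        '<td class="table_data">10</td>'
        '<td class="table_data">20</td>'
        '<td class="table_data">30</td>'
        '</tr></table></div>'
    )
    soup = BeautifulSoup(html, 'html5lib')
    row = {}
    for keylist, outerdiv in zip(HTML_STRUCTURE['tables'],
                                 soup.find_all('div', class_='analytics_data')):
        tds = outerdiv.find_all('td', class_='table_data')
        for key, value in zip(keylist, (td.text.strip() for td in tds)):
            row[key] = value
    return row  # {'countSolarJobs': '10', 'countWindJobs': '20', 'countEnergyJobs': '30'}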
async def fetch_region_list(url, session, region_type, state_abbr):
async with session.get(url) as response:
resp = await response.json()
print("Got list [%s, %s]" % (region_type, state_abbr))
return (resp, region_type, state_abbr)
async def process_states(states_and_geoids):
fetch_tasks = []
df = pd.DataFrame(columns=['state_abbr', 'region_type', 'geoid', 'name', 'html_url'])
# Fetch all responses within one Client session,
# keep connection alive for all requests.
async with ClientSession() as session:
for state_abbr, state_geoid in states_and_geoids:
df = df.append({
'state_abbr': state_abbr,
'region_type': 'state',
'geoid': state_geoid,
'name': None,
'html_url': HTML_URL % ('state', state_geoid),
}, ignore_index=True)
for (region_type_singular, region_type_plural) in REGION_TYPES:
url = REGIONS_URL % (state_geoid, region_type_plural)
task = asyncio.ensure_future(
fetch_region_list(url, session, region_type_singular, state_abbr))
fetch_tasks.append(task)
responses = await asyncio.gather(*fetch_tasks)
for response_json, region_type, state_abbr in responses:
for idx, region in enumerate(response_json['features']):
if state_abbr in ('DC', 'NE') and region_type == 'sldl':
continue # DC and NE both doesn't have a state house
props = region['properties']
df = df.append({
'state_abbr': state_abbr,
'region_type': region_type,
'geoid': props.get('geoid'),
'name': props.get('name'),
'html_url': HTML_URL % (region_type, props.get('geoid')),
}, ignore_index=True)
print("Appended %d regions" % (idx + 1), end='\r')
return df
def download_jobs_metadata():
states_json = requests.get(STATES_URL).json()
states_and_geoids = [(s['usps'], s['geoid']) for s in states_json]
loop = asyncio.get_event_loop()
future = asyncio.ensure_future(process_states(states_and_geoids))
df = loop.run_until_complete(future)
df.to_csv(METADATA_CSV, index=False)
async def fetch_states_json_from_url(session, url):
async with session.get(url) as resp:
return await resp.json()
async def get_raw_states_json():
async with ClientSession() as session:
return await fetch_states_json_from_url(session, STATES_INPUT_URL)
def scrape_energy_job_states():
states = []
loop = asyncio.get_event_loop()
future = asyncio.ensure_future(get_raw_states_json())
input_data = loop.run_until_complete(future)
for feature in input_data['features']:
state_data = feature['properties']
state_data["html_url"] = HTML_URL % (state_data['geography_type'],
state_data['geoid'])
states.append(state_data)
output_data = {
"states": states
}
with open(STATES_META_OUTPUT_FILE, 'w') as output_file:
json.dump(output_data, output_file)
def scrape(metadata, attempt=1):
url = metadata['html_url']
_idx = metadata['_idx']
with requests.get(url) as response:
row = {
'stateAbbr': metadata['state_abbr'],
'geoid': metadata['geoid'],
'geoType': metadata['region_type'],
'name': metadata['name'],
'sourceURL': metadata['html_url'],
}
unique_key = url.replace('http://assessor.keva.la/cleanenergyprogress', '')
if attempt > 3:
print(f"{_idx}: [{attempt}/3] – {response.status_code} – FAIL – {unique_key}")
return None
if response.status_code >= 400:
print(f"{_idx}: [{attempt}/3] – {response.status_code} – RETRY – {unique_key}")
time.sleep(3)
return scrape(metadata, attempt + 1)
html = response.text
soup = BeautifulSoup(html, 'html5lib')
row['name'] = soup.find('span', id='geography__name').text.strip()
outer_divs = soup.find_all('div', class_='analytics_data')
for keylist, outerdiv in zip(HTML_STRUCTURE['tables'], outer_divs):
tds = outerdiv.find_all('td', class_='table_data')
values = [elem.text.strip() for elem in tds[:len(keylist)]]
for idx, key in enumerate(keylist):
row[key] = values[idx]
li_buckets = soup.find_all('li', class_=None)
if len(li_buckets) != 3:
print(f"{_idx}: [{attempt}/3] – {response.status_code} – PARSE – {unique_key}")
print("li_buckets:", li_buckets)
print(html)
raise ValueError
for keylist, outerli in zip(HTML_STRUCTURE['totals'], li_buckets):
total_spans = outerli.find_all('span', class_='analytics_total_num')
totals = [elem.text.strip() for elem in total_spans]
if metadata['region_type'] == 'state' and keylist[-1] == 'percentOfStateJobs':
keylist = keylist[:-1]
if len(totals) == 0:
for key in keylist:
row[key] = 0
elif len(totals) != len(keylist):
print(f"{_idx}: [{attempt}/3] – {response.status_code} – PARSE – {unique_key}")
print("totals:", totals, keylist)
print(html)
raise ValueError
else:
for idx, key in enumerate(keylist):
row[key] = totals[idx]
print(f"{_idx}: [{attempt}/3] – {response.status_code} – OK – {unique_key}")
return row
def scrape_jobs_data():
jobs_data = None
if os.path.exists(OUTPUT_CSV):
        jobs_data = pd.read_csv(OUTPUT_CSV, encoding='ISO-8859-1')
"""
The point of this file is to (1) see what models predict given a mask,
(2) calculate the precision-recall and average
"""
import pandas as pd
import os, argparse
from tqdm import tqdm
import importlib.util
############################# Helpers! #############################
def parse_args():
parser = argparse.ArgumentParser(description='arguments for analyzing how models perform given challenges')
parser.add_argument('-d', help="Path to the challenges", default="../../data/winoventi_bert_large_final.tsv")
parser.add_argument('-output_probs_path', help="Path to the outcome predicted probabilities", default="./assets/output_predicted_probabilities.tsv")
parser.add_argument('-write_output_probs_path', help="Value to check if we're writing to the output_probs_path or not, ['True', 'False']", default="False")
parser.add_argument('-output_aggregated_path', help="Path to the results (aggregated per model)", default="./assets/output_aggregated.tsv")
parser.add_argument('-write_output_aggregated_path', help="Value to check if we're writing to the output_aggregated_path or not, ['True', 'False']", default="False")
parser.add_argument('-associative_bias_registry_path', help="Path to the associative bias registry", default="./../../data/assets/associativebias_registry.tsv")
# Brown University argument
parser.add_argument('-dept', help="whether we're on the department machine or not", default="True")
return parser.parse_args()
def module_from_file(module_name, file_path):
spec = importlib.util.spec_from_file_location(module_name, file_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
# import the necessary functions from `maskedlm.py`
MASKEDLM_MODULE_PATH = "./../../code/maskedlm/maskedlm.py"
# function `predict_prefix_probability`
predict_prefix_probability = module_from_file('predict_prefix_probability', MASKEDLM_MODULE_PATH).predict_prefix_probability
# list of model names of interest
MODEL_NAMES = module_from_file('MODEL_NAMES', MASKEDLM_MODULE_PATH).MODEL_NAMES
# and the function to load the tokenizer and model
load_model_and_tokenizer = module_from_file('load_model_and_tokenizer', MASKEDLM_MODULE_PATH).load_model_and_tokenizer
huggingface_to_model_name = {
"bert-base-cased": "BERT_base",
"bert-large-cased-whole-word-masking": "BERT_large",
"roberta-base": "RoBERTa_small",
"roberta-large": "RoBERTa_large",
"distilroberta-base": "DistilRoBERTa",
"squeezebert/squeezebert-uncased": "SqueezeBERT",
"google/mobilebert-uncased": "MobileBERT",
"albert-base-v2": "ALBERT_base",
"albert-large-v2": "ALBERT_large",
"albert-xlarge-v2": "ALBERT_xlarge",
"albert-xxlarge-v2": "ALBERT_xxlarge",
"distilbert-base-cased": "DistilBERT"
}
model_name_to_huggingface = {
"BERT_base": "bert-base-cased",
"BERT_large": "bert-large-cased-whole-word-masking",
"RoBERTa_small": "roberta-base",
"RoBERTa_large": "roberta-large",
"DistilRoBERTa": "distilroberta-base",
"DistilBERT": "distilbert-base-cased",
"SqueezeBERT": "squeezebert/squeezebert-uncased",
"MobileBERT": "google/mobilebert-uncased",
"ALBERT_base": "albert-base-v2",
"ALBERT_large": "albert-large-v2",
"ALBERT_xlarge": "albert-xlarge-v2",
"ALBERT_xxlarge": "albert-xxlarge-v2"
}
###########################################################################
############## Functions to use models to add probabilities ###############
def add_probabilities_maskedlm(df, tokenizer, model, model_name):
"""
this function is to add two columns, p_target_(MODEL_NAME)
(e.g. p_target_BERT_large) and p_incorrect_(MODEL_NAME) (e.g. p_incorrect_BERT_large)
"""
def get_probability(masked_prompt, word):
return predict_prefix_probability(tokenizer, model, masked_prompt, word, masked_prefix=masked_prompt)
# This is so that we don't mutate
new_df = df.copy()
# If these two columns are already calculated, we'll just return new_df
p_target_col_name, p_incorrect_col_name = f"p_target_{model_name}", f"p_incorrect_{model_name}"
existing_columns = new_df.columns
# if the probabilities for target and other choice have been calculated, we'll just move on
if p_target_col_name in existing_columns and p_incorrect_col_name in existing_columns:
return new_df
# If not, we will go ahead and calculate things
new_df[p_target_col_name] = new_df.apply(lambda x: get_probability(x["masked_prompt"], x["target"]), axis=1)
new_df[p_incorrect_col_name] = new_df.apply(lambda x: get_probability(x["masked_prompt"], x["incorrect"]), axis=1)
# and then return the new thang
return new_df
###########################################################################
############# Functions to analyze precision-recall-accuracy ##############
def get_mutually_passed_associativebias(to_filter_df, m_names: list):
"""
    get the samples that pass the associative bias requirements for *all
models* in m_names (Word, Associative Bias, Alternative), and then filter
to_filter_df to get the correct ones
Example usage:
get_mutually_passed_associative_bias(
df_that_contains_examples_to_filter,
["BERT_base", "BERT_large", "RoBERTa_small", "RoBERTa_large"]
)
"""
qualified_asscbias_rows = {}
def filter_asscbias_helper(row):
# helper to filter out rows that don't satisfy m_names associative bias reqs
for m in m_names:
if not row[f"{model_name_to_huggingface[m]}"]: return False
return True
def populate_qualified_asscbias_rows(row):
# called by qualified_unflattened_df, to populate qualified_assscbias_rows
qualified_asscbias_rows[(row["Word"], row["Associative Bias"], row["Alternative"])] = 1
def filter_to_filter_df_helper(row):
return (row["Word"], row["Associative Bias"], row["Alternative"]) in qualified_asscbias_rows
    asscbiaspass_df = pd.read_csv(ASSSOCIATIVE_BIAS_REGISTRY_PATH, sep="\t")
import sys
import os.path
sys.path.insert(1,
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
from geopy.distance import geodesic
import os
import pandas as pd
from utils.io import write_json, read_json
from graph.populate_graph import load_member_summaries
def clean_up_json(d):
return {
k: (
{v: sorted(d[k][v])
for v in d[k]}
if isinstance(d[k], dict)
else sorted(d[k])
)
for k in d
}
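# Hedged usage sketch (not part of the original script): clean_up_json turns
# the nested sets built below into sorted lists so they can be serialised as
# JSON. The company and commerce names are invented.
def _demo_clean_up_json():
    raw = {"Acme Ltd": {"buys": {"timber", "steel"}, "sells": {"furniture"}}}
    return clean_up_json(raw)
    # -> {"Acme Ltd": {"buys": ["steel", "timber"], "sells": ["furniture"]}}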
def build_sector_commerces_maps(
paid_member_commerces_filename=os.path.join("directory",
"member_commerces_flat.csv"),
paid_member_commerces_json_filename=os.path.join("directory",
"member_commerces(commerces).json"),
paid_member_json_filename=os.path.join("directory",
"member_commerces(members).json"),
paid_member_sectors_json_filename = os.path.join("directory",
"member_sectors.json")
):
'''
load dicts if they exist
'''
if os.path.exists(paid_member_commerces_filename) and os.path.exists(paid_member_json_filename)\
and os.path.exists(paid_member_sectors_json_filename):
commerces = read_json(paid_member_commerces_json_filename)
sectors = read_json(paid_member_sectors_json_filename)
members = read_json(paid_member_json_filename)
else:
assert os.path.exists(paid_member_commerces_filename)
paid_member_commerces_df = pd.read_csv(paid_member_commerces_filename, index_col=0)
sectors = dict()
commerces = dict()
members = dict()
for _, row in paid_member_commerces_df.iterrows():
commerce_name = row["commerce_name"]
commerce_type = row["commerce_type"]
member_name = row["member_name"]
sector_name = row["member_sector"]
if commerce_name not in commerces:
commerces[commerce_name] = {"buys": set(), "sells": set()}
commerces[commerce_name][commerce_type].add(member_name)
if member_name not in members:
members[member_name] = {"buys": set(), "sells": set(), "sectors": set()}
members[member_name][commerce_type].add(commerce_name)
members[member_name]["sectors"].add(sector_name)
if sector_name not in sectors:
sectors[sector_name] = set()
sectors[sector_name].add(member_name)
# convert set to list
commerces = clean_up_json(commerces)
members = clean_up_json(members)
sectors = clean_up_json(sectors)
# write to file
write_json(commerces, paid_member_commerces_json_filename)
write_json(sectors, paid_member_sectors_json_filename)
write_json(members, paid_member_json_filename)
return commerces, sectors, members
def build_messages_map(
message_df_filename=os.path.join("messages", "production_all_messages_flat.csv"),
all_messages_json_filename=os.path.join("messages", "production_all_messages.json"),
):
if os.path.exists(all_messages_json_filename):
all_messages = read_json(all_messages_json_filename)
else:
messages_df = pd.read_csv("messages/production_all_messages_flat.csv", index_col=0)
all_messages = dict()
for _, row in messages_df.iterrows():
sender_member_name = row["sender_member_name"]
recipient_member_name = row["recipient_member_name"]
key = ":_:".join(sorted([sender_member_name, recipient_member_name]))
if key not in all_messages:
all_messages[key] = set()
all_messages[key].add(row["message"])
all_messages = clean_up_json(all_messages)
write_json(all_messages, all_messages_json_filename)
return all_messages
def build_user_follows_map(
user_follows_filename=os.path.join("users", "all_user_follows_production.csv"),
all_user_follows_json_filename=os.path.join("users", "production_all_user_follows.json"),
):
if os.path.exists(all_user_follows_json_filename):
user_follows = read_json(all_user_follows_json_filename)
else:
user_follows_df = pd.read_csv(user_follows_filename, index_col=0)
user_follows = dict()
for _, row in user_follows_df.iterrows():
user = row["full_name"]
company = row["company_name"]
followed_member = row["followed_member"]
key = ":_:".join(sorted([company, followed_member]))
if key not in user_follows:
user_follows[key] = set()
user_follows[key].add(user)
user_follows = clean_up_json(user_follows)
write_json(user_follows, all_user_follows_json_filename)
return user_follows
def build_event_attendee_map(
event_attendee_filename=os.path.join("events", "all_event_attendees_production.csv"),
event_attendee_json_filename=os.path.join("events", "all_event_attendees_production.json")):
if os.path.exists(event_attendee_json_filename):
event_attendees = read_json(event_attendee_json_filename)
else:
event_attendees_df = pd.read_csv(event_attendee_filename, index_col=0)
event_attendees = dict()
for _, row in event_attendees_df.iterrows():
event_name = row["event_name"]
starts_at = row["starts_at"]
user = row["attendee_name"]
company = row["company"]
key = f"{event_name}_{starts_at}"
if key not in event_attendees:
event_attendees[key] = set()
event_attendees[key].add(company)
event_attendees = clean_up_json(event_attendees)
write_json(event_attendees, event_attendee_json_filename)
return event_attendees
def compute_geographical_distance(
member_pairs,
# member_summaries_filename=os.path.join("paid_member_summaries_production.csv"),
):
# member_summaries = pd.read_csv(member_summaries_filename, index_col=0)
member_summaries = load_member_summaries()
member_to_lat_long = {
row["member_name"]: (row["latitude"], row["longitude"])
for _, row in member_summaries.iterrows()
        if not pd.isnull(row["latitude"])
    }
"""""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""
Markov Chain Montecarlo Simulator of the daily customer flux in a supermarket
""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" ""
# import built-in libraries
import os
import datetime as dt
import time
from colorama import Fore, Style, init
init()
import logging
logging.basicConfig(level=logging.WARNING, format="%(message)s")
# import other libraries
import numpy as np
import pandas as pd
from faker import Faker
from pyfiglet import Figlet
# import scripts
import proba
class Customer:
"""
a single customer that moves through the supermarket
    in an MCMC simulation
"""
def __init__(self, name, state="entrance", budget=100):
self.name = name
self.state = state
self.budget = budget
def __repr__(self):
return f"{self.name} is in {self.state}."
def next_state(self):
"""
Propagates the customer to the next state.
Returns nothing.
"""
# WARNING: CHECK THE ORDER OF THE AISLES WHEN THE ACTUAL MATRIX ARRIVES
aisles = ["checkout", "dairy", "drinks", "fruit", "spices"]
if self.state in aisles:
if self.state == "dairy":
initial_state = np.array([0.0, 1.0, 0.0, 0.0, 0.0])
elif self.state == "drinks":
initial_state = np.array([0.0, 0.0, 1.0, 0.0, 0.0])
elif self.state == "fruit":
initial_state = np.array([0.0, 0.0, 0.0, 1.0, 0.0])
elif self.state == "spices":
initial_state = np.array([0.0, 0.0, 0.0, 0.0, 1.0])
elif self.state == "checkout":
initial_state = np.array([1.0, 0.0, 0.0, 0.0, 0.0])
next_state_prob = np.dot(initial_state, proba.prob)
self.state = np.random.choice(aisles, p=next_state_prob)
else:
self.state = np.random.choice(
["spices", "drinks", "fruit", "dairy"], p=proba.ent_prob
)
def is_active(self):
"""Returns True if the customer has not reached the checkout yet."""
if self.state == "checkout":
return False
else:
return True
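# Hedged usage sketch (not part of the original script): walks one customer
# from the entrance to the checkout, assuming proba.prob / proba.ent_prob are
# the transition matrix and entrance distribution used by Customer.next_state.
def _demo_single_customer_walk(max_steps=100):
    cust = Customer("Demo Customer")
    path = [cust.state]
    for _ in range(max_steps):
        if not cust.is_active():
            break
        cust.next_state()
        path.append(cust.state)
    return path  # e.g. ['entrance', 'fruit', 'dairy', 'checkout']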
class SuperMarket:
"""manages multiple Customer instances that are currently in the market."""
def __init__(self):
"""a list of Customer objects"""
self.customers = []
self.minutes = dt.datetime(today.year, today.month, today.day, 6, 50)
# self.last_id = 0
self.state = "closed"
def __repr__(self):
return (
Fore.CYAN
+ f"\n{self.minutes} – The supermarket is {self.state}: currently, there are {len(self.customers)} customers inside.\n"
+ Style.RESET_ALL
)
def get_time(self):
"""opens and closes the supermarket,
and pushes customers to the checkout,
"""
if (
self.minutes.hour > 22
or self.minutes.hour <= 6
and self.minutes.minute <= 59
):
logging.warning(
f"{self.minutes} - The supermarket is closed. It will reopen at 7 AM."
)
elif self.minutes.hour == 22 and self.minutes.minute == 0:
logging.warning(
Fore.RED
+ f"{self.minutes} - The supermarket has closed."
+ Style.RESET_ALL
)
self.state = "closed"
elif self.minutes.hour == 21 and self.minutes.minute == 57:
for cust in self.customers:
if cust.is_active() == True:
cust.state = "checkout"
elif self.minutes.hour == 7 and self.minutes.minute == 0:
logging.warning(
Fore.GREEN
+ f"{self.minutes} - The supermarket has opened its doors!\n"
+ Style.RESET_ALL
)
self.state = "open"
else:
self.state = "open"
return None
def add_new_customers(self):
"""generate new customers at their initial location based on the fluxes illustrated in the EDA."""
if (
self.minutes.hour >= 22
or self.minutes.hour <= 6
and self.minutes.minute <= 59
):
pass
else:
tm = str(self.minutes)[-8:]
try:
cust_no = int(proba.entrance_number.loc[tm])
except:
cust_no = 0
for cust in range(cust_no):
c = Customer(f.name())
logging.warning(
Fore.YELLOW
+ f"{self.minutes} - {c.name} has entered the supermarket."
+ Style.RESET_ALL
)
self.customers.append(c)
return None
def next_minute(self):
"""increase the time of the supermarket by one minute,
propagates all customers to the next state.
"""
self.minutes = self.minutes + dt.timedelta(minutes=1)
if self.minutes.hour in [i for i in range(0, 24)] and self.minutes.minute == 0:
logging.warning(self)
for cust in self.customers:
cust.next_state()
logging.warning(f"{self.minutes} – {cust}")
return None
def remove_exiting_customers(self):
"""removes every customer that is not active any more."""
for cust in self.customers:
if cust.is_active() == False:
logging.warning(
Fore.BLUE
+ f"{self.minutes} - {cust.name} has left the supermarket."
+ Style.RESET_ALL
)
self.customers.remove(cust)
return None
def record_customers(self):
"""append the state of different customers to a log DataFrame."""
df = pd.DataFrame(columns=["time", "customer", "location"])
for cust in self.customers:
if cust.state == "checkout":
final_st = "checkout and leave"
else:
final_st = cust.state
row = pd.DataFrame(
data=[str(self.minutes)[-8:], cust.name, final_st],
index=["time", "customer", "location"],
).transpose()
df = pd.concat([df, row], ignore_index=True)
return df
if __name__ == "__main__":
# output DataFrame
    record = pd.DataFrame(columns=["time", "customer", "location"])
r"""
Composition Statistics (:mod:`skbio.stats.composition`)
=======================================================
.. currentmodule:: skbio.stats.composition
This module provides functions for compositional data analysis.
Many 'omics datasets are inherently compositional - meaning that they
are best interpreted as proportions or percentages rather than
absolute counts.
Formally, :math:`x` is a composition if :math:`\sum_{i=0}^D x_{i} = c`
and :math:`x_{i} > 0`, :math:`1 \leq i \leq D` and :math:`c` is a real
valued constant and there are :math:`D` components for each
composition. In this module :math:`c=1`. Compositional data can be
analyzed using Aitchison geometry. [1]_
However, in this framework, standard real Euclidean operations such as
addition and multiplication no longer apply. Only operations such as
perturbation and power can be used to manipulate this data.
This module allows two styles of manipulation of compositional data.
Compositional data can be analyzed using perturbation and power
operations, which can be useful for simulation studies. The
alternative strategy is to transform compositional data into the real
space. Right now, the centre log ratio transform (clr) and
the isometric log ratio transform (ilr) [2]_ can be used to accomplish
this. This transform can be useful for performing standard statistical
tools such as parametric hypothesis testing, regressions and more.
The major caveat of using this framework is dealing with zeros. In
the Aitchison geometry, only compositions with nonzero components can
be considered. The multiplicative replacement technique [3]_ can be
used to substitute these zeros with small pseudocounts without
introducing major distortions to the data.
Functions
---------
.. autosummary::
:toctree:
closure
multiplicative_replacement
perturb
perturb_inv
power
inner
clr
clr_inv
ilr
ilr_inv
alr
alr_inv
centralize
ancom
sbp_basis
References
----------
.. [1] <NAME>, <NAME>, R. Tolosana-Delgado (2015),
Modeling and Analysis of Compositional Data, Wiley, Chichester, UK
.. [2] <NAME>., "Isometric Logratio Transformations for
Compositional Data Analysis" Mathematical Geology, 35.3 (2003)
.. [3] <NAME>, "Dealing With Zeros and Missing Values in
Compositional Data Sets Using Nonparametric Imputation",
Mathematical Geology, 35.3 (2003)
Examples
--------
>>> import numpy as np
Consider a very simple environment with only 3 species. The species
in the environment are equally distributed and their proportions are
equivalent:
>>> otus = np.array([1./3, 1./3., 1./3])
Suppose that an antibiotic kills off half of the population for the
first two species, but doesn't harm the third species. Then the
perturbation vector would be as follows
>>> antibiotic = np.array([1./2, 1./2, 1])
And the resulting perturbation would be
>>> perturb(otus, antibiotic)
array([ 0.25, 0.25, 0.5 ])
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import numpy as np
import pandas as pd
import scipy.stats
import skbio.util
from skbio.util._decorator import experimental
@experimental(as_of="0.4.0")
def closure(mat):
"""
Performs closure to ensure that all elements add up to 1.
Parameters
----------
mat : array_like
a matrix of proportions where
rows = compositions
columns = components
Returns
-------
array_like, np.float64
A matrix of proportions where all of the values
are nonzero and each composition (row) adds up to 1
Raises
------
ValueError
Raises an error if any values are negative.
ValueError
Raises an error if the matrix has more than 2 dimension.
ValueError
Raises an error if there is a row that has all zeros.
Examples
--------
>>> import numpy as np
>>> from skbio.stats.composition import closure
>>> X = np.array([[2, 2, 6], [4, 4, 2]])
>>> closure(X)
array([[ 0.2, 0.2, 0.6],
[ 0.4, 0.4, 0.2]])
"""
mat = np.atleast_2d(mat)
if np.any(mat < 0):
raise ValueError("Cannot have negative proportions")
if mat.ndim > 2:
raise ValueError("Input matrix can only have two dimensions or less")
if np.all(mat == 0, axis=1).sum() > 0:
raise ValueError("Input matrix cannot have rows with all zeros")
mat = mat / mat.sum(axis=1, keepdims=True)
return mat.squeeze()
@experimental(as_of="0.4.0")
def multiplicative_replacement(mat, delta=None):
r"""Replace all zeros with small non-zero values
It uses the multiplicative replacement strategy [1]_ ,
replacing zeros with a small positive :math:`\delta`
and ensuring that the compositions still add up to 1.
Parameters
----------
mat: array_like
a matrix of proportions where
rows = compositions and
columns = components
delta: float, optional
a small number to be used to replace zeros
If delta is not specified, then the default delta is
:math:`\delta = \frac{1}{N^2}` where :math:`N`
is the number of components
Returns
-------
numpy.ndarray, np.float64
A matrix of proportions where all of the values
are nonzero and each composition (row) adds up to 1
Raises
------
ValueError
Raises an error if negative proportions are created due to a large
`delta`.
Notes
-----
This method will result in negative proportions if a large delta is chosen.
References
----------
.. [1] <NAME>. "Dealing With Zeros and Missing Values in
Compositional Data Sets Using Nonparametric Imputation"
Examples
--------
>>> import numpy as np
>>> from skbio.stats.composition import multiplicative_replacement
>>> X = np.array([[.2,.4,.4, 0],[0,.5,.5,0]])
>>> multiplicative_replacement(X)
array([[ 0.1875, 0.375 , 0.375 , 0.0625],
[ 0.0625, 0.4375, 0.4375, 0.0625]])
"""
mat = closure(mat)
z_mat = (mat == 0)
num_feats = mat.shape[-1]
tot = z_mat.sum(axis=-1, keepdims=True)
if delta is None:
delta = (1. / num_feats)**2
zcnts = 1 - tot * delta
    if np.any(zcnts < 0):
raise ValueError('The multiplicative replacement created negative '
'proportions. Consider using a smaller `delta`.')
mat = np.where(z_mat, delta, zcnts * mat)
return mat.squeeze()
@experimental(as_of="0.4.0")
def perturb(x, y):
r"""
Performs the perturbation operation.
This operation is defined as
.. math::
x \oplus y = C[x_1 y_1, \ldots, x_D y_D]
:math:`C[x]` is the closure operation defined as
.. math::
C[x] = \left[\frac{x_1}{\sum_{i=1}^{D} x_i},\ldots,
\frac{x_D}{\sum_{i=1}^{D} x_i} \right]
for some :math:`D` dimensional real vector :math:`x` and
:math:`D` is the number of components for every composition.
Parameters
----------
x : array_like, float
a matrix of proportions where
rows = compositions and
columns = components
y : array_like, float
a matrix of proportions where
rows = compositions and
columns = components
Returns
-------
numpy.ndarray, np.float64
A matrix of proportions where all of the values
are nonzero and each composition (row) adds up to 1
Examples
--------
>>> import numpy as np
>>> from skbio.stats.composition import perturb
>>> x = np.array([.1,.3,.4, .2])
>>> y = np.array([1./6,1./6,1./3,1./3])
>>> perturb(x,y)
array([ 0.0625, 0.1875, 0.5 , 0.25 ])
"""
x, y = closure(x), closure(y)
return closure(x * y)
@experimental(as_of="0.4.0")
def perturb_inv(x, y):
r"""
Performs the inverse perturbation operation.
This operation is defined as
.. math::
x \ominus y = C[x_1 y_1^{-1}, \ldots, x_D y_D^{-1}]
:math:`C[x]` is the closure operation defined as
.. math::
C[x] = \left[\frac{x_1}{\sum_{i=1}^{D} x_i},\ldots,
\frac{x_D}{\sum_{i=1}^{D} x_i} \right]
for some :math:`D` dimensional real vector :math:`x` and
:math:`D` is the number of components for every composition.
Parameters
----------
x : array_like
a matrix of proportions where
rows = compositions and
columns = components
y : array_like
a matrix of proportions where
rows = compositions and
columns = components
Returns
-------
numpy.ndarray, np.float64
A matrix of proportions where all of the values
are nonzero and each composition (row) adds up to 1
Examples
--------
>>> import numpy as np
>>> from skbio.stats.composition import perturb_inv
>>> x = np.array([.1,.3,.4, .2])
>>> y = np.array([1./6,1./6,1./3,1./3])
>>> perturb_inv(x,y)
array([ 0.14285714, 0.42857143, 0.28571429, 0.14285714])
"""
x, y = closure(x), closure(y)
return closure(x / y)
@experimental(as_of="0.4.0")
def power(x, a):
r"""
Performs the power operation.
This operation is defined as follows
.. math::
        x \odot a = C[x_1^a, \ldots, x_D^a]
:math:`C[x]` is the closure operation defined as
.. math::
C[x] = \left[\frac{x_1}{\sum_{i=1}^{D} x_i},\ldots,
\frac{x_D}{\sum_{i=1}^{D} x_i} \right]
for some :math:`D` dimensional real vector :math:`x` and
:math:`D` is the number of components for every composition.
Parameters
----------
x : array_like, float
a matrix of proportions where
rows = compositions and
columns = components
a : float
a scalar float
Returns
-------
numpy.ndarray, np.float64
A matrix of proportions where all of the values
are nonzero and each composition (row) adds up to 1
Examples
--------
>>> import numpy as np
>>> from skbio.stats.composition import power
>>> x = np.array([.1,.3,.4, .2])
>>> power(x, .1)
array([ 0.23059566, 0.25737316, 0.26488486, 0.24714631])
"""
x = closure(x)
return closure(x**a).squeeze()
@experimental(as_of="0.4.0")
def inner(x, y):
r"""
Calculates the Aitchson inner product.
This inner product is defined as follows
.. math::
\langle x, y \rangle_a =
\frac{1}{2D} \sum\limits_{i=1}^{D} \sum\limits_{j=1}^{D}
\ln\left(\frac{x_i}{x_j}\right) \ln\left(\frac{y_i}{y_j}\right)
Parameters
----------
x : array_like
a matrix of proportions where
rows = compositions and
columns = components
y : array_like
a matrix of proportions where
rows = compositions and
columns = components
Returns
-------
numpy.ndarray
inner product result
Examples
--------
>>> import numpy as np
>>> from skbio.stats.composition import inner
>>> x = np.array([.1, .3, .4, .2])
>>> y = np.array([.2, .4, .2, .2])
>>> inner(x, y) # doctest: +ELLIPSIS
0.2107852473...
"""
x = closure(x)
y = closure(y)
a, b = clr(x), clr(y)
return a.dot(b.T)
@experimental(as_of="0.4.0")
def clr(mat):
r"""
Performs centre log ratio transformation.
This function transforms compositions from Aitchison geometry to
the real space. The :math:`clr` transform is both an isometry and an
isomorphism defined on the following spaces
:math:`clr: S^D \rightarrow U`
where :math:`U=
\{x :\sum\limits_{i=1}^D x = 0 \; \forall x \in \mathbb{R}^D\}`
It is defined for a composition :math:`x` as follows:
.. math::
clr(x) = \ln\left[\frac{x_1}{g_m(x)}, \ldots, \frac{x_D}{g_m(x)}\right]
where :math:`g_m(x) = (\prod\limits_{i=1}^{D} x_i)^{1/D}` is the geometric
mean of :math:`x`.
Parameters
----------
mat : array_like, float
a matrix of proportions where
rows = compositions and
columns = components
Returns
-------
numpy.ndarray
clr transformed matrix
Examples
--------
>>> import numpy as np
>>> from skbio.stats.composition import clr
>>> x = np.array([.1, .3, .4, .2])
>>> clr(x)
array([-0.79451346, 0.30409883, 0.5917809 , -0.10136628])
"""
mat = closure(mat)
lmat = np.log(mat)
gm = lmat.mean(axis=-1, keepdims=True)
return (lmat - gm).squeeze()
@experimental(as_of="0.4.0")
def clr_inv(mat):
r"""
Performs inverse centre log ratio transformation.
This function transforms compositions from the real space to
Aitchison geometry. The :math:`clr^{-1}` transform is both an isometry,
and an isomorphism defined on the following spaces
:math:`clr^{-1}: U \rightarrow S^D`
where :math:`U=
\{x :\sum\limits_{i=1}^D x = 0 \; \forall x \in \mathbb{R}^D\}`
This transformation is defined as follows
.. math::
clr^{-1}(x) = C[\exp( x_1, \ldots, x_D)]
Parameters
----------
mat : array_like, float
a matrix of real values where
rows = transformed compositions and
columns = components
Returns
-------
numpy.ndarray
inverse clr transformed matrix
Examples
--------
>>> import numpy as np
>>> from skbio.stats.composition import clr_inv
>>> x = np.array([.1, .3, .4, .2])
>>> clr_inv(x)
array([ 0.21383822, 0.26118259, 0.28865141, 0.23632778])
"""
return closure(np.exp(mat))
@experimental(as_of="0.4.0")
def ilr(mat, basis=None, check=True):
r"""
Performs isometric log ratio transformation.
This function transforms compositions from Aitchison simplex to
    the real space. The :math:`ilr` transform is both an isometry,
and an isomorphism defined on the following spaces
:math:`ilr: S^D \rightarrow \mathbb{R}^{D-1}`
The ilr transformation is defined as follows
.. math::
ilr(x) =
[\langle x, e_1 \rangle_a, \ldots, \langle x, e_{D-1} \rangle_a]
where :math:`[e_1,\ldots,e_{D-1}]` is an orthonormal basis in the simplex.
    If an orthonormal basis isn't specified, the J. J. Egozcue orthonormal
basis derived from Gram-Schmidt orthogonalization will be used by
default.
Parameters
----------
mat: numpy.ndarray
a matrix of proportions where
rows = compositions and
columns = components
basis: numpy.ndarray, float, optional
orthonormal basis for Aitchison simplex
defaults to J.J.Egozcue orthonormal basis.
check: bool
Specifies if the basis is orthonormal.
Examples
--------
>>> import numpy as np
>>> from skbio.stats.composition import ilr
>>> x = np.array([.1, .3, .4, .2])
>>> ilr(x)
array([-0.7768362 , -0.68339802, 0.11704769])
Notes
-----
If the `basis` parameter is specified, it is expected to be a basis in the
Aitchison simplex. If there are `D-1` elements specified in `mat`, then
    the dimensions of the basis need to be `D-1 x D`, where rows represent
basis vectors, and the columns represent proportions.
"""
mat = closure(mat)
if basis is None:
basis = clr_inv(_gram_schmidt_basis(mat.shape[-1]))
else:
if len(basis.shape) != 2:
raise ValueError("Basis needs to be a 2D matrix, "
"not a %dD matrix." %
(len(basis.shape)))
if check:
_check_orthogonality(basis)
return inner(mat, basis)
@experimental(as_of="0.4.0")
def ilr_inv(mat, basis=None, check=True):
r"""
Performs inverse isometric log ratio transform.
This function transforms compositions from the real space to
Aitchison geometry. The :math:`ilr^{-1}` transform is both an isometry,
and an isomorphism defined on the following spaces
:math:`ilr^{-1}: \mathbb{R}^{D-1} \rightarrow S^D`
The inverse ilr transformation is defined as follows
.. math::
ilr^{-1}(x) = \bigoplus\limits_{i=1}^{D-1} x \odot e_i
where :math:`[e_1,\ldots, e_{D-1}]` is an orthonormal basis in the simplex.
If an orthonormal basis isn't specified, the J. J. Egozcue orthonormal
basis derived from Gram-Schmidt orthogonalization will be used by
default.
Parameters
----------
mat: numpy.ndarray, float
a matrix of transformed proportions where
rows = compositions and
columns = components
basis: numpy.ndarray, float, optional
orthonormal basis for Aitchison simplex
defaults to J.J.Egozcue orthonormal basis
check: bool
Specifies if the basis is orthonormal.
Examples
--------
>>> import numpy as np
    >>> from skbio.stats.composition import ilr_inv
>>> x = np.array([.1, .3, .6,])
>>> ilr_inv(x)
array([ 0.34180297, 0.29672718, 0.22054469, 0.14092516])
Notes
-----
If the `basis` parameter is specified, it is expected to be a basis in the
Aitchison simplex. If there are `D-1` elements specified in `mat`, then
    the dimensions of the basis need to be `D-1 x D`, where rows represent
basis vectors, and the columns represent proportions.
"""
if basis is None:
basis = _gram_schmidt_basis(mat.shape[-1] + 1)
else:
if len(basis.shape) != 2:
raise ValueError("Basis needs to be a 2D matrix, "
"not a %dD matrix." %
(len(basis.shape)))
if check:
_check_orthogonality(basis)
# this is necessary, since the clr function
# performs np.squeeze()
basis = np.atleast_2d(clr(basis))
return clr_inv(np.dot(mat, basis))
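# Illustrative round-trip check (an added note, not part of the original
# module): with the default Gram-Schmidt basis, ilr_inv undoes ilr, mapping
# the D-1 real coordinates back to the closed D-part composition.
#
# >>> x = np.array([.1, .3, .4, .2])
# >>> np.allclose(ilr_inv(ilr(x)), closure(x))
# True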
@experimental(as_of="0.5.5")
def alr(mat, denominator_idx=0):
r"""
Performs additive log ratio transformation.
This function transforms compositions from a D-part Aitchison simplex to
    a non-isometric real space of D-1 dimensions. The argument
    `denominator_idx` defines the index of the column used as the common
    denominator. The :math:`alr` transformed data are amenable to multivariate
analysis as long as statistics don't involve distances.
:math:`alr: S^D \rightarrow \mathbb{R}^{D-1}`
The alr transformation is defined as follows
.. math::
alr(x) = \left[ \ln \frac{x_1}{x_D}, \ldots,
\ln \frac{x_{D-1}}{x_D} \right]
where :math:`D` is the index of the part used as common denominator.
Parameters
----------
mat: numpy.ndarray
a matrix of proportions where
rows = compositions and
columns = components
denominator_idx: int
the index of the column (2D-matrix) or position (vector) of
`mat` which should be used as the reference composition. By default
`denominator_idx=0` to specify the first column or position.
Returns
-------
numpy.ndarray
alr-transformed data projected in a non-isometric real space
of D-1 dimensions for a D-parts composition
Examples
--------
>>> import numpy as np
>>> from skbio.stats.composition import alr
>>> x = np.array([.1, .3, .4, .2])
>>> alr(x)
array([ 1.09861229, 1.38629436, 0.69314718])
"""
mat = closure(mat)
if mat.ndim == 2:
mat_t = mat.T
numerator_idx = list(range(0, mat_t.shape[0]))
del numerator_idx[denominator_idx]
lr = np.log(mat_t[numerator_idx, :]/mat_t[denominator_idx, :]).T
elif mat.ndim == 1:
numerator_idx = list(range(0, mat.shape[0]))
del numerator_idx[denominator_idx]
lr = np.log(mat[numerator_idx]/mat[denominator_idx])
else:
raise ValueError("mat must be either 1D or 2D")
return lr
@experimental(as_of="0.5.5")
def alr_inv(mat, denominator_idx=0):
r"""
Performs inverse additive log ratio transform.
This function transforms compositions from the non-isometric real space of
alrs to Aitchison geometry.
:math:`alr^{-1}: \mathbb{R}^{D-1} \rightarrow S^D`
The inverse alr transformation is defined as follows
.. math::
alr^{-1}(x) = C[exp([y_1, y_2, ..., y_{D-1}, 0])]
where :math:`C[x]` is the closure operation defined as
.. math::
C[x] = \left[\frac{x_1}{\sum_{i=1}^{D} x_i},\ldots,
\frac{x_D}{\sum_{i=1}^{D} x_i} \right]
for some :math:`D` dimensional real vector :math:`x` and
:math:`D` is the number of components for every composition.
Parameters
----------
mat: numpy.ndarray
a matrix of alr-transformed data
denominator_idx: int
the index of the column (2D-composition) or position (1D-composition) of
the output where the common denominator should be placed. By default
`denominator_idx=0` to specify the first column or position.
Returns
-------
numpy.ndarray
Inverse alr transformed matrix or vector where rows sum to 1.
Examples
--------
>>> import numpy as np
>>> from skbio.stats.composition import alr, alr_inv
>>> x = np.array([.1, .3, .4, .2])
>>> alr_inv(alr(x))
array([ 0.1, 0.3, 0.4, 0.2])
"""
mat = np.array(mat)
if mat.ndim == 2:
mat_idx = np.insert(mat, denominator_idx,
np.repeat(0, mat.shape[0]), axis=1)
comp = np.zeros(mat_idx.shape)
comp[:, denominator_idx] = 1 / (np.exp(mat).sum(axis=1) + 1)
numerator_idx = list(range(0, comp.shape[1]))
del numerator_idx[denominator_idx]
for i in numerator_idx:
comp[:, i] = comp[:, denominator_idx] * np.exp(mat_idx[:, i])
elif mat.ndim == 1:
mat_idx = np.insert(mat, denominator_idx, 0, axis=0)
comp = np.zeros(mat_idx.shape)
comp[denominator_idx] = 1 / (np.exp(mat).sum(axis=0) + 1)
numerator_idx = list(range(0, comp.shape[0]))
del numerator_idx[denominator_idx]
for i in numerator_idx:
comp[i] = comp[denominator_idx] * np.exp(mat_idx[i])
else:
raise ValueError("mat must be either 1D or 2D")
return comp
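# Illustrative round-trip check (an added note, not part of the original
# module): alr_inv undoes alr as long as the same denominator_idx is used in
# both calls, here the second component instead of the default first one.
#
# >>> x = np.array([.1, .3, .4, .2])
# >>> np.allclose(alr_inv(alr(x, denominator_idx=1), denominator_idx=1), x)
# True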
@experimental(as_of="0.4.0")
def centralize(mat):
r"""Center data around its geometric average.
Parameters
----------
mat : array_like, float
a matrix of proportions where
rows = compositions and
columns = components
Returns
-------
numpy.ndarray
centered composition matrix
Examples
--------
>>> import numpy as np
>>> from skbio.stats.composition import centralize
>>> X = np.array([[.1,.3,.4, .2],[.2,.2,.2,.4]])
>>> centralize(X)
array([[ 0.17445763, 0.30216948, 0.34891526, 0.17445763],
[ 0.32495488, 0.18761279, 0.16247744, 0.32495488]])
"""
mat = closure(mat)
cen = scipy.stats.gmean(mat, axis=0)
return perturb_inv(mat, cen)
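# Illustrative property check (an added note, not part of the original
# module): after centralize, the column-wise geometric mean composition is
# the neutral (uniform) element of the simplex.
#
# >>> X = np.array([[.1, .3, .4, .2], [.2, .2, .2, .4]])
# >>> np.allclose(closure(scipy.stats.gmean(centralize(X), axis=0)), 0.25)
# True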
@experimental(as_of="0.4.1")
def ancom(table, grouping,
alpha=0.05,
tau=0.02,
theta=0.1,
multiple_comparisons_correction='holm-bonferroni',
significance_test=None,
percentiles=(0.0, 25.0, 50.0, 75.0, 100.0)):
r""" Performs a differential abundance test using ANCOM.
This is done by calculating pairwise log ratios between all features
and performing a significance test to determine if there is a significant
difference in feature ratios with respect to the variable of interest.
In an experiment with only two treatments, this tests the following
hypothesis for feature :math:`i`
.. math::
H_{0i}: \mathbb{E}[\ln(u_i^{(1)})] = \mathbb{E}[\ln(u_i^{(2)})]
where :math:`u_i^{(1)}` is the mean abundance for feature :math:`i` in the
first group and :math:`u_i^{(2)}` is the mean abundance for feature
:math:`i` in the second group.
Parameters
----------
table : pd.DataFrame
A 2D matrix of strictly positive values (i.e. counts or proportions)
where the rows correspond to samples and the columns correspond to
features.
grouping : pd.Series
Vector indicating the assignment of samples to groups. For example,
these could be strings or integers denoting which group a sample
belongs to. It must be the same length as the samples in `table`.
The index must be the same on `table` and `grouping` but need not be
in the same order.
alpha : float, optional
Significance level for each of the statistical tests.
        This can be anywhere between 0 and 1 exclusive.
tau : float, optional
A constant used to determine an appropriate cutoff.
A value close to zero indicates a conservative cutoff.
        This can be anywhere between 0 and 1 exclusive.
theta : float, optional
Lower bound for the proportion for the W-statistic.
If all W-statistics are lower than theta, then no features
will be detected to be differentially significant.
        This can be anywhere between 0 and 1 exclusive.
multiple_comparisons_correction : {None, 'holm-bonferroni'}, optional
The multiple comparison correction procedure to run. If None,
then no multiple comparison correction procedure will be run.
        If 'holm-bonferroni' is specified, then the Holm-Bonferroni
procedure [1]_ will be run.
significance_test : function, optional
A statistical significance function to test for significance between
classes. This function must be able to accept at least two 1D
array_like arguments of floats and returns a test statistic and a
p-value. By default ``scipy.stats.f_oneway`` is used.
percentiles : iterable of floats, optional
Percentile abundances to return for each feature in each group. By
default, will return the minimum, 25th percentile, median, 75th
percentile, and maximum abundances for each feature in each group.
Returns
-------
pd.DataFrame
A table of features, their W-statistics and whether the null hypothesis
is rejected.
`"W"` is the W-statistic, or number of features that a single feature
is tested to be significantly different against.
`"Reject null hypothesis"` indicates if feature is differentially
abundant across groups (`True`) or not (`False`).
pd.DataFrame
A table of features and their percentile abundances in each group. If
``percentiles`` is empty, this will be an empty ``pd.DataFrame``. The
rows in this object will be features, and the columns will be a
multi-index where the first index is the percentile, and the second
index is the group.
See Also
--------
multiplicative_replacement
scipy.stats.ttest_ind
scipy.stats.f_oneway
scipy.stats.wilcoxon
scipy.stats.kruskal
Notes
-----
The developers of this method recommend the following significance tests
([2]_, Supplementary File 1, top of page 11): if there are 2 groups, use
the standard parametric t-test (``scipy.stats.ttest_ind``) or
non-parametric Wilcoxon rank sum test (``scipy.stats.wilcoxon``).
If there are more than 2 groups, use parametric one-way ANOVA
(``scipy.stats.f_oneway``) or nonparametric Kruskal-Wallis
(``scipy.stats.kruskal``). Because one-way ANOVA is equivalent
to the standard t-test when the number of groups is two, we default to
``scipy.stats.f_oneway`` here, which can be used when there are two or
more groups. Users should refer to the documentation of these tests in
SciPy to understand the assumptions made by each test.
This method cannot handle any zero counts as input, since the logarithm
of zero cannot be computed. While this is an unsolved problem, many
studies, including [2]_, have shown promising results by adding
pseudocounts to all values in the matrix. In [2]_, a pseudocount of 0.001
was used, though the authors note that a pseudocount of 1.0 may also be
useful. Zero counts can also be addressed using the
``multiplicative_replacement`` method.
References
----------
.. [1] <NAME>. "A simple sequentially rejective multiple test procedure".
Scandinavian Journal of Statistics (1979), 6.
.. [2] Mandal et al. "Analysis of composition of microbiomes: a novel
method for studying microbial composition", Microbial Ecology in Health
& Disease, (2015), 26.
Examples
--------
First import all of the necessary modules:
>>> from skbio.stats.composition import ancom
>>> import pandas as pd
Now let's load in a DataFrame with 6 samples and 7 features (e.g.,
these may be bacterial OTUs):
>>> table = pd.DataFrame([[12, 11, 10, 10, 10, 10, 10],
... [9, 11, 12, 10, 10, 10, 10],
... [1, 11, 10, 11, 10, 5, 9],
... [22, 21, 9, 10, 10, 10, 10],
... [20, 22, 10, 10, 13, 10, 10],
... [23, 21, 14, 10, 10, 10, 10]],
... index=['s1', 's2', 's3', 's4', 's5', 's6'],
... columns=['b1', 'b2', 'b3', 'b4', 'b5', 'b6',
... 'b7'])
Then create a grouping vector. In this example, there is a treatment group
and a placebo group.
>>> grouping = pd.Series(['treatment', 'treatment', 'treatment',
... 'placebo', 'placebo', 'placebo'],
... index=['s1', 's2', 's3', 's4', 's5', 's6'])
Now run ``ancom`` to determine if there are any features that are
significantly different in abundance between the treatment and the placebo
groups. The first DataFrame that is returned contains the ANCOM test
results, and the second contains the percentile abundance data for each
feature in each group.
>>> ancom_df, percentile_df = ancom(table, grouping)
>>> ancom_df['W']
b1 0
b2 4
b3 0
b4 1
b5 1
b6 0
b7 1
Name: W, dtype: int64
The W-statistic is the number of features that a single feature is tested
to be significantly different against. In this scenario, `b2` was detected
to have significantly different abundances compared to four of the other
features. To summarize the results from the W-statistic, let's take a look
at the results from the hypothesis test. The `Reject null hypothesis`
column in the table indicates whether the null hypothesis was rejected,
and that a feature was therefore observed to be differentially abundant
across the groups.
>>> ancom_df['Reject null hypothesis']
b1 False
b2 True
b3 False
b4 False
b5 False
b6 False
b7 False
Name: Reject null hypothesis, dtype: bool
From this we can conclude that only `b2` was significantly different in
abundance between the treatment and the placebo. We still don't know, for
example, in which group `b2` was more abundant. We therefore may next be
interested in comparing the abundance of `b2` across the two groups.
We can do that using the second DataFrame that was returned. Here we
compare the median (50th percentile) abundance of `b2` in the treatment and
placebo groups:
>>> percentile_df[50.0].loc['b2']
Group
placebo 21.0
treatment 11.0
Name: b2, dtype: float64
We can also look at a full five-number summary for ``b2`` in the treatment
and placebo groups:
>>> percentile_df.loc['b2'] # doctest: +NORMALIZE_WHITESPACE
Percentile Group
0.0 placebo 21.0
25.0 placebo 21.0
50.0 placebo 21.0
75.0 placebo 21.5
100.0 placebo 22.0
0.0 treatment 11.0
25.0 treatment 11.0
50.0 treatment 11.0
75.0 treatment 11.0
100.0 treatment 11.0
Name: b2, dtype: float64
Taken together, these data tell us that `b2` is present in significantly
higher abundance in the placebo group samples than in the treatment group
samples.
"""
if not isinstance(table, pd.DataFrame):
raise TypeError('`table` must be a `pd.DataFrame`, '
'not %r.' % type(table).__name__)
if not isinstance(grouping, pd.Series):
raise TypeError('`grouping` must be a `pd.Series`,'
' not %r.' % type(grouping).__name__)
if np.any(table <= 0):
raise ValueError('Cannot handle zeros or negative values in `table`. '
'Use pseudocounts or ``multiplicative_replacement``.'
)
if not 0 < alpha < 1:
raise ValueError('`alpha`=%f is not within 0 and 1.' % alpha)
if not 0 < tau < 1:
raise ValueError('`tau`=%f is not within 0 and 1.' % tau)
if not 0 < theta < 1:
raise ValueError('`theta`=%f is not within 0 and 1.' % theta)
if multiple_comparisons_correction is not None:
if multiple_comparisons_correction != 'holm-bonferroni':
raise ValueError('%r is not an available option for '
'`multiple_comparisons_correction`.'
% multiple_comparisons_correction)
if (grouping.isnull()).any():
raise ValueError('Cannot handle missing values in `grouping`.')
if (table.isnull()).any().any():
raise ValueError('Cannot handle missing values in `table`.')
percentiles = list(percentiles)
for percentile in percentiles:
if not 0.0 <= percentile <= 100.0:
raise ValueError('Percentiles must be in the range [0, 100], %r '
'was provided.' % percentile)
duplicates = skbio.util.find_duplicates(percentiles)
if duplicates:
formatted_duplicates = ', '.join(repr(e) for e in duplicates)
raise ValueError('Percentile values must be unique. The following'
' value(s) were duplicated: %s.' %
formatted_duplicates)
groups = np.unique(grouping)
num_groups = len(groups)
if num_groups == len(grouping):
raise ValueError(
"All values in `grouping` are unique. This method cannot "
"operate on a grouping vector with only unique values (e.g., "
"there are no 'within' variance because each group of samples "
"contains only a single sample).")
if num_groups == 1:
raise ValueError(
"All values the `grouping` are the same. This method cannot "
"operate on a grouping vector with only a single group of samples"
"(e.g., there are no 'between' variance because there is only a "
"single group).")
if significance_test is None:
significance_test = scipy.stats.f_oneway
table_index_len = len(table.index)
grouping_index_len = len(grouping.index)
mat, cats = table.align(grouping, axis=0, join='inner')
if (len(mat) != table_index_len or len(cats) != grouping_index_len):
raise ValueError('`table` index and `grouping` '
'index must be consistent.')
n_feat = mat.shape[1]
_logratio_mat = _log_compare(mat.values, cats.values, significance_test)
logratio_mat = _logratio_mat + _logratio_mat.T
# Multiple comparisons
if multiple_comparisons_correction == 'holm-bonferroni':
logratio_mat = np.apply_along_axis(_holm_bonferroni,
1, logratio_mat)
np.fill_diagonal(logratio_mat, 1)
W = (logratio_mat < alpha).sum(axis=1)
c_start = W.max() / n_feat
if c_start < theta:
reject = np.zeros_like(W, dtype=bool)
else:
# Select appropriate cutoff
cutoff = c_start - np.linspace(0.05, 0.25, 5)
prop_cut = np.array([(W > n_feat*cut).mean() for cut in cutoff])
dels = np.abs(prop_cut - np.roll(prop_cut, -1))
dels[-1] = 0
if (dels[0] < tau) and (dels[1] < tau) and (dels[2] < tau):
nu = cutoff[1]
elif (dels[0] >= tau) and (dels[1] < tau) and (dels[2] < tau):
nu = cutoff[2]
elif (dels[1] >= tau) and (dels[2] < tau) and (dels[3] < tau):
nu = cutoff[3]
else:
nu = cutoff[4]
reject = (W >= nu*n_feat)
feat_ids = mat.columns
ancom_df = pd.DataFrame(
{'W': pd.Series(W, index=feat_ids),
         'Reject null hypothesis': pd.Series(reject, index=feat_ids)})
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import arviz as az
def get_color(key):
""" return color for class key"""
color_dict = {0: "blue", 1: "red", 2: "green", 3: "orange", 4: "black"}
return color_dict[key]
def get_color_mean(key):
""" return mean color for class key"""
color_dict = {0: "yellow", 1: "black", 2: "green", 3: "orange", 4: "white"}
return color_dict[key]
def plot_data(cdata, nrows=50):
""" plot the dataframe """
# header data = x-values
x_val = cdata.get_x_val()
# plot rows (== observations) in a single figure
plt.figure(figsize=(12, 6))
ax = plt.axes()
ax.xaxis.set_major_locator(plt.MaxNLocator(10))
ax.set(xlabel='Wavenumber ($cm^{-1}$)')
# used to map label codes to colors
label_codes = pd.Categorical(cdata.data[cdata.label_column]).codes
# list of class labels
clabels = cdata.get_class_labels()
for i in range(len(clabels)):
print(str(clabels[i]) + ": " + get_color(i))
for i in range(nrows):
y_val = cdata.data.values[i]
plt.plot(x_val, y_val[:cdata.non_data_columns], '-', color=get_color(label_codes[i]))
plt.savefig('data_plot.png')
def plot_mean_vs_ppc(cdata, ppc_class_lst):
""" plot data mean vs. posterior samples """
# header data = x-values
x_val = cdata.get_x_val()
plt.figure(figsize=(12, 8))
ax = plt.axes()
ax.set(xlabel='Wavenumber ($cm^{-1}$)')
# plot a sample from the posterior (for each class)
for i in range(1):
for z in range(len(ppc_class_lst)):
plt.plot(x_val, ppc_class_lst[z][i, 0, :], '-', color=get_color(z), alpha=.6)
# list of class labels
class_labels = cdata.get_class_labels()
# plot the posterior mean
for z in range(len(ppc_class_lst)):
cls_label = str(class_labels[z]) + " ppc mean"
plt.plot(x_val, ppc_class_lst[z][:, 0].mean(axis=0), '-', color=get_color(z), alpha=.6, label=cls_label)
# plot mean data for classes (raw data)
df = [ cdata.data.loc[cdata.data[cdata.label_column] == class_labels[k]]
for k in range(len(class_labels)) ]
for z in range(len(df)):
cls_label = str(class_labels[z]) + " real mean"
plt.plot(x_val, df[z].iloc[:,:cdata.non_data_columns].mean(), '--', color=get_color_mean(z),
label=cls_label, linewidth=1)
# plot 94% HPD interval
for z in range(len(ppc_class_lst)):
col = "C" + str(z+1)
az.plot_hpd(x_val, ppc_class_lst[z], smooth=False, color=col)
plt.legend(loc='best')
def plot_real_vs_ppc(cdata, ppc_class_lst, nrows=10):
""" plot real data vs. posterior samples """
# header data = x-values
x_val = cdata.get_x_val()
plt.figure(figsize=(12, 8))
ax = plt.axes()
ax.set(xlabel='Wavenumber ($cm^{-1}$)')
# plot some samples from the posterior
for i in range(5):
for z in range(len(ppc_class_lst)):
plt.plot(x_val, ppc_class_lst[z][i, 0, :], 'o-', color="gray", alpha=.3)
# list of class labels
class_labels = cdata.get_class_labels()
# plot raw data for classes
df = [ cdata.data.loc[cdata.data[cdata.label_column] == class_labels[i]].sample(frac=1)
for i in range(len(class_labels)) ]
for i in range(nrows):
for z in range(len(df)):
plt.plot(x_val, df[z].values[i,:cdata.non_data_columns], '--', color=get_color(z), linewidth=1)
def append_predictions(cdata, trace, test_data, display=True):
""" appends predicted labels to the test dataframe """
# check model predictions on test dataset
a = trace['alpha'].mean()
b = trace['beta'].mean(axis=0)
xt_n = test_data.columns[:cdata.non_data_columns]
xt_s = test_data[xt_n].values
xt_s = (xt_s - xt_s.mean(axis=0)) / xt_s.std(axis=0)
mu_t = a + (b * xt_s).sum(axis=1)
yt_p = 1 / (1 + np.exp(-mu_t))
pt_y = np.zeros(len(xt_s))
lp_t = []
class_labels = cdata.get_class_labels()
for i in range(len(xt_s)):
if yt_p[i] < 0.5:
pt_y[i] = 0
lp_t.append(class_labels[0])
else:
pt_y[i] = 1
lp_t.append(class_labels[1])
#test_data = test_data.assign(pred=pd.Series(pt_y))
test_data = test_data.assign(p_label=pd.Series(lp_t))
if display:
print(test_data.iloc[:, (cdata.non_data_columns-1):])
return test_data
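# The decision rule used above, restated for clarity (same quantities as in
# append_predictions; this comment is an added explanation, not original
# code): the posterior-mean intercept and coefficients define a logistic
# score on the standardized features, thresholded at 0.5.
#
# p(strong | x) = 1 / (1 + exp(-(alpha + beta . x_standardized)))
# predicted label = class_labels[1] if p >= 0.5 else class_labels[0]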
def append_predictions_ppc(cdata, trace, display=True):
""" appends predicted labels to the dataframe """
# check model predictions on test dataset
a = trace['alpha'].mean()
b = trace['beta'].mean(axis=0)
xt_n = cdata.data.columns[:cdata.non_data_columns]
xt_s = cdata.data[xt_n].values
xt_s = (xt_s - xt_s.mean(axis=0)) / xt_s.std(axis=0)
mu_t = a + (b * xt_s).sum(axis=1)
yt_p = 1 / (1 + np.exp(-mu_t))
pt_y = np.zeros(len(xt_s))
lp_t = []
class_labels = cdata.get_class_labels()
for i in range(len(xt_s)):
if yt_p[i] < 0.5:
pt_y[i] = 0
lp_t.append(class_labels[0])
else:
pt_y[i] = 1
lp_t.append(class_labels[1])
#cdata.data = cdata.data.assign(pred=pd.Series(pt_y))
cdata.data = cdata.data.assign(p_label=pd.Series(lp_t))
if display:
print (cdata.data.iloc[:,(cdata.non_data_columns-1):])
def get_score(data, label_column, predicted_column):
""" calculates the logreg score for a single column """
yt = pd.Categorical(data[label_column]).codes
cor = 0; err = 0
for i in range(len(yt)):
if data[label_column][i] == data[predicted_column][i]:
cor += 1
else:
err += 1
tot = len(yt)
score = f'{cor / len(yt) * 100:.1f}'
return tot, cor, err, score
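# Illustrative usage sketch (the column names 'label' and 'p_label' are
# assumptions matching append_predictions above, not taken from a specific
# call in this file):
#
# tot, cor, err, score = get_score(test_data, 'label', 'p_label')
# print(f"{cor}/{tot} correct ({score}%)")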
def logistic_score(data, label_column, predicted_column, kfold=False):
""" calculates and prints the logistic score """
if kfold:
print('n tot cor err score (%)')
print('----------------------------')
ttot = 0; tcor = 0; terr = 0
for i in range(len(data)):
tot, cor, err, score = get_score(data[i], label_column, predicted_column)
print(str(i+1) + " " + str(tot) + " " + str(cor) + " " + str(err) + " " + score)
ttot += tot; tcor += cor; terr += err
print('----------------------------')
print(" " + str(ttot) + " " + str(tcor) + " " + str(terr) + " " + f'{tcor / ttot * 100:.1f}')
else:
tot, cor, err, score = get_score(data, label_column, predicted_column)
print("total : " + str(tot))
print("correct: " + str(cor))
print("error : " + str(err))
print("score : " + score + "%")
def logistic_score_ppc(cdata, predicted_column):
""" calculates and prints the logistic regression score """
    yt = pd.Categorical(cdata.data[cdata.label_column]).codes
#!/usr/bin/python3
import datetime
import pandas as pd
from pandas import DataFrame
import re
import json
import numpy as np
import requests
import lxml.etree
from dev_global.env import TIME_FMT
from libmysql_utils.mysql8 import (mysqlBase, mysqlHeader, Json2Sql)
from requests.models import HTTPError
from venus.form import formStockManager
from venus.cninfo import spiderBase
from mars.utils import read_json
__version__ = '1.0.10'
class StockBase(mysqlBase):
"""
param header: mysqlHeader
"""
def __init__(self, header: mysqlHeader) -> None:
# if not isinstance(header, mysqlHeader):
# raise HeaderException("Error due to incorrect header.")
super(StockBase, self).__init__(header)
# date format: YYYY-mm-dd
self._Today = datetime.date.today().strftime(TIME_FMT)
# date format: YYYYmmdd
self._today = datetime.date.today().strftime('%Y%m%d')
# self.TAB_STOCK_MANAGER = "stock_manager"
self.j2sql = Json2Sql(header)
@property
def Today(self) -> str:
"""
Format: 1983-01-22
"""
self._Today = datetime.date.today().strftime(TIME_FMT)
return self._Today
@property
def today(self) -> str:
"""
Format: 19830122
"""
self._today = datetime.date.today().strftime('%Y%m%d')
return self._today
def get_all_stock_list(self) -> list:
"""
Return stock code --> list.
"""
query_stock_code = self.session.query(formStockManager.stock_code).filter_by(flag='t').all()
df = pd.DataFrame.from_dict(query_stock_code)
stock_list = df['stock_code'].tolist()
# should test if stock list is null
return stock_list
def get_all_index_list(self):
"""
Return stock code --> list.
"""
query_stock_code = self.session.query(formStockManager.stock_code).filter_by(flag='i').all()
df = pd.DataFrame.from_dict(query_stock_code)
stock_list = df['stock_code'].tolist()
return stock_list
def get_all_security_list(self):
"""
Return stock code --> list
"""
# Return all kinds of securities in form stock list.
# Result : List type data.
query_stock_code = self.session.query(formStockManager.stock_code).all()
df = pd.DataFrame.from_dict(query_stock_code)
stock_list = df['stock_code'].tolist()
return stock_list
@staticmethod
def get_html_object(url: str, HttpHeader=None):
"""
        result is an lxml etree.HTML object
"""
        response = requests.get(url, headers=HttpHeader, timeout=3)
if response.status_code == 200:
# setting encoding
response.encoding = response.apparent_encoding
html = lxml.etree.HTML(response.text)
else:
html = None
raise HTTPError(f"Status code: {response.status_code} for {url}")
return html
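    # Illustrative usage sketch (URL and header values are placeholders, not
    # from the original file): a custom User-Agent can be passed through
    # HttpHeader when the target site rejects the default requests header.
    #
    # html = StockBase.get_html_object(
    #     "http://example.com/quote",
    #     HttpHeader={"User-Agent": "Mozilla/5.0"})
    # titles = html.xpath('//title/text()')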
@staticmethod
def get_excel_object(url: str) -> pd.DataFrame:
df = pd.read_excel(url)
return df
@staticmethod
def set_date_as_index(df):
        df['date'] = pd.to_datetime(df['date'], format=TIME_FMT)
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 9 20:13:44 2020
@author: Adam
"""
#%% Heatmap generator "Barcode"
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import re
def join_cols(row):
return ''.join(list(row))
def find_favor(seq):
t = []
for m in re.finditer(seq, DNA):
t += [m.start()]
return t
DNA = np.loadtxt('./data/DNA.txt', str)
DNA = ''.join(DNA)
print('DNA Length = {} '.format(len(DNA)) )
start_idxs = []
for m in re.finditer('GTC', DNA):
start_idxs += [m.start()]
start_idxs = np.array(start_idxs)
df = pd.DataFrame()
df['loc'] = np.arange(len(DNA))
df['start_ind'] = 0
df.loc[start_idxs,'start_ind'] = 1
favor = pd.read_csv('./data/favor_seqs.csv')
gtc_loc = list(favor.iloc[0,:])[0].find('GTC')
red_idxs = []
for detsize in range(3,4):
dets = favor['seq'].str[ gtc_loc-detsize:gtc_loc + 3 + detsize]
dets = list(np.unique(dets))
detslocs = list(map(find_favor, dets))
detslocs = [x for x in detslocs if len(x) > 1]
for tlocs in detslocs:
mean_dist = np.mean(np.diff(tlocs))
median_dist = np.median(np.diff(tlocs))
if(mean_dist > 1000 and mean_dist < 6000
or
median_dist > 1000 and median_dist < 6000):
red_idxs += [tlocs]
red_idxs = [item for sublist in red_idxs for item in sublist]
plt.figure(figsize=(16,4))
plt.bar(start_idxs, [0.3]*len(start_idxs), width=64, color='black', alpha=0.8)
plt.bar(red_idxs, [1]*len(red_idxs), width=64, color='red')
plt.ylim([0,1])
plt.xlim([0,len(DNA)])
plt.xlabel('DNA nucleotide index')
plt.yticks([])
plt.xticks([])
plt.title('\"Intresting\" Sequences')
plt.legend(['GTC Locations','Intresting Frequency Locations'], facecolor=(1,1,1,1), framealpha=0.98 )
plt.savefig('./out/favor_seqs_k_3.png')
plt.show()
#%% Prim VS Primon when POLY is saturated
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
print('\n=====================================================\n')
def mms(t):
if(np.max(t) - np.min(t) > 0):
t = (t - np.min(t))/(np.max(t) - np.min(t))
else:
t = (t)/(np.max(t))
return t
def ms(t):
return t/np.max(t)
def nucs2seq(row):
row = list(row)
t = ''.join(row)
return t
# Heatmap for favorite seqs vs all gtc containing seqs
df = pd.read_csv('./data/chip_B.csv')
df_favor = pd.read_csv('./data/favor_seqs.csv')
df['seq'] = list(map( nucs2seq, np.array(df.iloc[:,:-4]) ))
tcols = df.columns
tcols = list(tcols[:-4]) + ['poly','prim','primo','seq']
df.columns = tcols
df['primo-prim'] = df['primo'] - df['prim']
labels = ['poly','primo','prim','primo-prim']
df = df.sort_values('poly').reset_index(drop=True)
sm = 100
plt.figure(figsize=(12,8))
for i, lab in enumerate(labels):
plt.subplot(4,1,i+1)
if(i != 3):
df = df.sort_values(lab).reset_index(drop=True)
y = df[lab].copy()
if(i != 3):
y = mms( y )**0.5
y = y.rolling(sm).mean().drop(np.arange(sm)).reset_index(drop=True)
y = pd.Series(y)
plt.plot(np.arange(len(y)),y, alpha=0.8)
plt.title(lab + ' sorted by self')
plt.ylabel(' ln(score)' )
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=1)
#%% Collect favorite sequences
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
print('\n=====================================================\n')
labels = ['poly','primo','prim']
def mms(t):
if(np.max(t) - np.min(t) > 0):
t = (t - np.min(t))/(np.max(t) - np.min(t))
else:
t = (t)/(np.max(t))
return t
def nucs2seq(row):
row = list(row)
t = ''.join(row)
return t
# Heatmap for favorite seqs vs all gtc containing seqs
df = pd.read_csv('./data/chip_B.csv')
df_favor = pd.read_csv('./data/favor_seqs.csv')
df['seq'] = list(map( nucs2seq, np.array(df.iloc[:,:-3]) ))
# keep favorite sequences (1000~6000 reps)
df_test = pd.read_csv('./data/validation.csv')
df.index = df['seq']
df = df.loc[df_favor['seq'],:]
df = df.dropna(axis=0).reset_index(drop=True)
df.columns = list(df.columns[:-4]) + ['poly', 'prim', 'primo', 'seq']
# keep non test set sequences
toDrop = df_test['seq']
df.index = df['seq']
df = df.drop(toDrop, axis=0, errors='ignore')
df = df.reset_index(drop=True)
print("let's unite the data by seq and watch the mean and std of each sequence")
dfm = pd.DataFrame()
dfm['primo'] = mms(df.groupby('seq').median()['primo'])
dfm['primo_std'] = mms(df.groupby('seq').std()['primo'])#/mms( df.groupby('seq').mean()['primo'] )
dfm['prim'] = mms(df.groupby('seq').median()['prim'])
dfm['prim_std'] = mms(df.groupby('seq').std()['prim'])#/mms( df.groupby('seq').mean()['poly'] )
dfm['poly'] = mms(df.groupby('seq').median()['poly'])
dfm['poly_std'] = mms(df.groupby('seq').std()['poly'])#/mms( df.groupby('seq').mean()['poly'] )
dfm['seq'] = dfm.index
dfm = dfm.reset_index(drop=True)
T1 = np.percentile(dfm['primo'], 95)
T2 = np.percentile(dfm['primo_std'], 90)
T3 = np.percentile(dfm['prim'], 95)
T4 = np.percentile(dfm['prim_std'], 90)
T5 = np.percentile(dfm['poly'], 95)
T6 = np.percentile(dfm['poly_std'], 90)
print('length of dfm before outlier cleaning = {}'.format(len(dfm)) )
dfm = dfm.drop(np.where(dfm['primo'] > T1 )[0]).reset_index(drop=True)
dfm = dfm.drop(np.where(dfm['primo_std'] > T2 )[0]).reset_index(drop=True)
dfm = dfm.drop(np.where(dfm['prim'] > T3 )[0]).reset_index(drop=True)
dfm = dfm.drop(np.where(dfm['prim_std'] > T4 )[0]).reset_index(drop=True)
dfm = dfm.drop(np.where(dfm['poly'] > T5 )[0]).reset_index(drop=True)
dfm = dfm.drop(np.where(dfm['poly_std'] > T6 )[0]).reset_index(drop=True)
print('length of dfm after outlier cleaning = {}'.format(len(dfm)) )
nucs = np.array(list(map(list, dfm['seq']))).copy()
nucs = pd.DataFrame(nucs.copy())
nucs = nucs.add_suffix('_nuc')
nucs = nucs.reset_index(drop=True)
dfm = pd.concat([dfm, nucs], axis=1)
dfm = dfm.reset_index(drop=True)
toKeep = [x for x in dfm.columns if 'std' not in x]
dfm = dfm.loc[:,toKeep]
for lab in labels:
dfm.loc[:,lab] = mms(dfm.loc[:,lab])
for lab in labels:
dfm.loc[:,lab] = mms(dfm.loc[:,lab]**0.5)
dfm.to_csv('data/chip_B_favor.csv', index=False)
#%% Heatmap of ABS Correlation
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
def mms(t):
if(np.max(t) - np.min(t) > 0):
t = (t - np.min(t))/(np.max(t) - np.min(t))
else:
t = (t)/(np.max(t))
return t
def count_letters(df_nucs, rep_dict):
X = df_nucs.copy()
X = X.replace(rep_dict)
X = np.array(X)
X = np.sum(X,1)
return X
df = pd.read_csv('data/chip_B_favor.csv')
cols = df.columns
cols = [x for x in cols if 'nuc' in x]
df_nucs = df.loc[:,cols].copy()
df_labels = df.loc[:,['primo','prim','poly']]
df_res = pd.DataFrame()
# count appereances of each individual letter
for letter in ['A','C','G','T']:
rep_dict = {'A':0,'C':0,'G':0,'T':0}
rep_dict[letter] = 1
df_res['{}_count'.format(letter) ] = count_letters(df_nucs, rep_dict)
gtc_ind_start = ''.join( list(df_nucs.iloc[0,:]) ).find('GTC') - 5
gtc_ind_end = gtc_ind_start + 5 + 3 + 5
# extract purine and pyrimidine densities
# A,<NAME>
# C,T Pyrimidines
""" =================== Left Side Count =============================== """
rep_dict = {'A':1,'C':0,'G':1,'T':0}
df_res['Left_Pur_count'] = count_letters(df_nucs.iloc[:,:gtc_ind_start], rep_dict)
rep_dict = {'A':0,'C':1,'G':0,'T':1}
df_res['Left_Pry_count'] = count_letters(df_nucs.iloc[:,:gtc_ind_start], rep_dict)
""" =================== Center / Determinant Count ===================== """
rep_dict = {'A':1,'C':0,'G':1,'T':0}
df_res['Center_Pur_count'] = count_letters(df_nucs.iloc[:,gtc_ind_start:gtc_ind_end], rep_dict)
rep_dict = {'A':0,'C':1,'G':0,'T':1}
df_res['Center_Pry_count'] = count_letters(df_nucs.iloc[:,gtc_ind_start:gtc_ind_end], rep_dict)
""" =================== Right Side Count =============================== """
rep_dict = {'A':1,'C':0,'G':1,'T':0}
df_res['Right_Pur_count'] = count_letters(df_nucs.iloc[:,gtc_ind_end:], rep_dict)
rep_dict = {'A':0,'C':1,'G':0,'T':1}
df_res['Right_Pry_count'] = count_letters(df_nucs.iloc[:,gtc_ind_end:], rep_dict)
df_res = pd.concat([df_res, df_labels], axis=1)
plt.figure(figsize=(12,8))
df_corr = (df_res.corr().abs())
sns.heatmap(df_corr, cmap="bwr")
plt.title('Absolute Correlation')
plt.show()
plt.figure(figsize=(12,8))
df_corr = df_corr.loc[['primo','prim','poly'],['primo','prim','poly']]
sns.heatmap(df_corr, cmap="bwr")
plt.title('Absolute Correlation')
plt.show()
#%% K mers spectrum
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
from itertools import product
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
from scipy.stats import entropy
NMERS = [1,2,3]
df = pd.read_csv('./data/chip_B_favor.csv')
labels = ['primo','prim','poly']
np.random.seed(42)
df.index = df['seq']
m2 = 'CCACCCCAAAAAACCCCGTCAAAACCCCAAAAACCA'
df.loc[m2,'primo']
im = plt.imread(r'C:\Users\Ben\Desktop/Picture1.png')
x = list(range(1,14))
y = [1,
0,
0.4,
0.6,
0.47,
0.13,
0.2,
0.3,
0.5,
0.46,
0.5,
0.67,
0.8]
x= np.array(x)
y= np.array(y)
plt.imshow(im)
plt.scatter(x,y, c='red')
#for col in labels:
#df = df.drop(np.where(df[col] > np.percentile(df[col],95))[0],axis=0).reset_index(drop=True)
#df = df.drop(np.where(df[col] < np.percentile(df[col],5))[0],axis=0).reset_index(drop=True)
def mms(t):
t = (t - np.min(t))/(np.max(t) - np.min(t))
return t
for col in labels:
df[col] = mms(df[col])
df[col] = np.round(df[col]*2)
df[col] = df[col].replace({0:'0weak',1:'1medium',2:'2strong'})
plt.figure(figsize=(18,16))
for i, N in enumerate(NMERS):
letters = ['A','C','G','T']
exec('combs_list = list(product(' + 'letters,'*N + '))')
combs_list = list(map(''.join,combs_list))
df_mer = pd.DataFrame(np.zeros([len(df), len(combs_list)]))
df_mer.columns = combs_list
mers = df['seq'].apply(lambda seq: [ seq[i:i+N] for i in range(2, len(seq)-1, 1) ])
mers = (np.array(list(mers)).reshape([len(mers),len(mers[0])]))
mers = pd.DataFrame(mers)
    # count mers
for comb in combs_list:
comb_sum = np.sum(mers == comb,axis=1)
df_mer.loc[:,comb] = comb_sum
df_mer = np.sum(df_mer)
df_mer = df_mer/np.sum(df_mer)
df_mer = df_mer[(df_mer >= 0.01 )]
plt.subplot(len(NMERS),1,i+1)
plt.scatter(np.arange(len(df_mer)), df_mer, color=(['blue','red','green'])[i] )
plt.xticks(np.arange(len(df_mer)), df_mer.index, rotation=90)
#plt.legend([' Variance: {}'.format( np.var(df_mer)) ])
plt.title('{}-Mer'.format(N) )
plt.ylim([0, 0.3])
plt.ylabel('mer frequency')
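# Optional cross-check (a sketch added here, not part of the original
# analysis): the same k-mer frequencies can be accumulated with
# collections.Counter, which is a convenient way to validate the column-wise
# counting loop above.
#
# from collections import Counter
# def kmer_freqs(seqs, N):
#     counts = Counter()
#     for seq in seqs:
#         counts.update(seq[i:i+N] for i in range(2, len(seq)-1, 1))
#     freq = pd.Series(counts, dtype=float)
#     return freq / freq.sum()
#
# print(kmer_freqs(df['seq'], 3).sort_values(ascending=False).head())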
#%% K-MEANS and Hierarchical clustering
"""
Dendrogram
Plot By TSNE
"""
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
NLIST = [5]
labels = ['poly','prim','primo']
labels = ['primo']
ShowTextOnDendogram = True
showKM = True
showHC = False
def mms(t):
t = (t - np.min(t))/(np.max(t) - np.min(t))
return t
def OHE(df):
cols = []
for i in range(36):
for letter in ['A','C','G','T']:
cols += [ str(i+1) + '_nuc_' + letter]
tdf = pd.get_dummies(df)
toAdd = np.setdiff1d(cols, tdf.columns)
for col in toAdd:
tdf[col] = 0
for col in cols:
tdf[col] = tdf[col].astype(int)
tdf = tdf.loc[:,cols]
return tdf
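# Added note (a sketch, not from the original script): OHE assumes the
# per-position nucleotide columns are named '1_nuc' ... '36_nuc' so that
# pd.get_dummies produces names matching the fixed '<pos>_nuc_<letter>'
# layout; the output always has 36 * 4 = 144 columns in a stable order.
#
# >>> toy = pd.DataFrame([list('ACGT' * 9)],
# ...                    columns=['{}_nuc'.format(i + 1) for i in range(36)])
# >>> OHE(toy).shape
# (1, 144)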
df = pd.read_csv('./data/chip_B_favor.csv')
df = pd.concat([OHE(df.drop(labels,axis=1)), df.loc[:,labels]], axis=1)
df_backup = df.copy()
# =============================================================================
# Hierarchical Clustering
# =============================================================================
from scipy.cluster import hierarchy
if(showHC):
#WORKS FINE
X = df_backup.drop(labels,axis=1).copy()
X = X.iloc[:,:].reset_index(drop=True)
Z = hierarchy.linkage(X, method='ward')
Z = pd.DataFrame(Z)
botline = Z.iloc[np.argmax(np.diff(Z.iloc[:,-2])),-2] * 1.05
topline = Z.iloc[np.argmax(np.diff(Z.iloc[:,-2])) + 1, -2] * 0.95
fig = plt.figure(figsize=(4, 6))
dn = hierarchy.dendrogram(Z, p=7, truncate_mode='level', color_threshold=40, distance_sort=True)
plt.hlines([botline, topline], xmin=0, xmax=len(Z), ls='--', alpha = 0.9 )
plt.ylabel('Ward Distance')
disticks = np.unique(np.sqrt(Z.iloc[:,-2]).astype(int))
#plt.yticks( disticks**2 , disticks)
plt.xticks([])
plt.xlabel('')
Z = hierarchy.linkage(X, method='ward')
X[labels] = df_backup[labels].copy()
thr = 40
dists = [ 20, 40, 80, 120]
fntsze = 22
thr = 40
for i, thr in enumerate(dists):
Xg = X.copy()
Xg['bin'] = hierarchy.fcluster(Z, thr, criterion='distance', depth=5, R=None, monocrit=None)
Xres = Xg.groupby('bin').sum()
Xres[labels] = Xg.groupby('bin').median()[labels]
xcount = Xg.copy()
xcount['count'] = 1
xcount = xcount.groupby('bin').sum()['count']
xcnew = [xcount.iloc[0]/2]
for j in xcount.index[1:]:
xcnew += [np.sum(xcount[:j-1]) + xcount[j]/2]
xcount = pd.Series( xcnew )
xcount.index = xcount.index + 1
#plt.subplot(4,1, i+1 )
#plt.scatter(Xres.index, Xres[labels])
toKeep = [x for x in X.drop(labels, axis=1).columns if '36' not in x]
Xres = (Xres.loc[:,toKeep])
Xres.columns = [x[-1] for x in Xres.columns]
Xres = Xres.T
Xres = Xres.groupby(Xres.index).sum()
for col in Xres.columns:
Xres[col] = Xres[col] / np.sum(Xres[col])
Xres = Xres.T
row_idx = 1
for row_idx in Xres.index:
row = Xres.loc[row_idx,:]
print(
xcount.iloc[row_idx-1]
)
accumsize = 0
for dx, lett in enumerate(row.index):
x_rng = plt.gca().get_xlim()[1]
# =============================================================================
# # ADDING TEXT TO DENDOGRAM
# =============================================================================
if(ShowTextOnDendogram == True):
plt.text(x= xcount.iloc[row_idx-1]*x_rng/len(Xg) + accumsize,
y=thr, horizontalalignment='left',
s=lett, fontsize=np.max([fntsze*row[lett], 6]) ,
weight='normal', fontname='arial')
accumsize += np.max([fntsze*row[lett], 8]) + 36
#% TODO MAKE THIS PRETTY
from sklearn.metrics import silhouette_score
res_ss = []
xvec = [5]
for i in xvec:
X = df.copy().drop(['bin'], axis=1, errors='ignore')
X = X.drop(labels, axis=1)
tmp_ss = []
for j in range(1):
km = KMeans(i, random_state=j )
y = km.fit_predict(X)
ss = silhouette_score( X, y )
tmp_ss += [ss]
print('sil score => mean: {} | std: {}'.format(np.mean(tmp_ss), np.std(tmp_ss)) )
res_ss += [np.mean(tmp_ss)]
plt.figure()
plt.scatter(xvec,res_ss)
plt.xlabel('K-Value')
plt.ylabel('Sil Score')
plt.show()
if(showKM):
col = 'primo'
plt.figure(figsize=(6,4))
for i, Nbins in enumerate(NLIST):
df = df_backup.copy()
km = KMeans(Nbins, random_state=42 )
df['bin'] = km.fit_predict(df.drop(labels,axis=1))
cc = np.array(km.cluster_centers_).reshape(km.cluster_centers_.shape[0],
km.cluster_centers_.shape[1]//4,
4)
cc = np.array(pd.DataFrame(np.argmax(cc,axis=2)).replace({0:'A',1:'C',2:'G',3:'T'}))
centers = [''.join(l) for l in cc]
tdf = df.loc[:,['bin',col]]
#rep_d = {0:'A',1:'B',2:'C',3:'D',4:'E'}
rep_d = {0:2,1:3,2:0,3:1,4:4}
df['bin'] = df['bin'].replace(rep_d)
centers = list(np.array(centers)[list(rep_d.values())])
print('Mean Words:')
print(centers)
#rep_d = {'A':2,'B':3,'C':0,'D':1,'E':4}
#df['bin'] = df['bin'].replace(rep_d)
plt.subplot(len(NLIST),1,i+1)
sns.violinplot(x="bin", y=col, data=df, palette="Blues", cut=0)
plt.ylim([-0.2, 1.2])
plt.ylabel('Primase \nBinding Scores', fontsize=12)
plt.title('Scores Distribution by Cluster', fontsize=12)
"""
for tx, tcent in zip(np.arange(np.max(tdf['bin'])+1) , centers):
chunks, chunk_size = len(tcent), len(tcent)//6
stlist = [ tcent[i:i+chunk_size] for i in range(0, chunks, chunk_size) ]
tcent = '\n'.join(stlist)
t = plt.text(x=tx-0.5, y=0, s=tcent, fontsize=10, color='red', fontweight='normal', backgroundcolor='white')
t.set_bbox(dict(facecolor='white', alpha=0.7, edgecolor='white'))
plt.xlim([-1, Nbins-1 + 0.5])
"""
#plt.xticks( np.arange(np.max(tdf['bin'])+1)
#,centers , rotation=-90, fontsize=12)
plt.yticks( [0,0.25,0.5,0.75,1], fontsize=12 )
plt.tight_layout()
plt.savefig('./out/kmeans/forpaper_B_centroids_' + str(Nbins) + 'bins')
plt.show()
#plt.close()
#%% PCA
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
from itertools import product
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
NMERS = [3]
df = pd.read_csv('./data/chip_B_favor.csv')
#labels = ['primo','prim','poly']
labels = ['primo']
np.random.seed(42)
def mms(t):
t = (t - np.min(t))/(np.max(t) - np.min(t))
return t
"""
for col in labels:
df[col] = mms(df[col])
df[col] = np.round(df[col]*2)
df[col] = df[col].replace({0:'0weak',1:'1medium',2:'2strong'})
"""
for N in NMERS:
letters = ['A','C','G','T']
exec('combs_list = list(product(' + 'letters,'*N + '))')
combs_list = list(map(''.join,combs_list))
df_mer = pd.DataFrame(np.zeros([len(df), len(combs_list)]))
df_mer.columns = combs_list
mers = df['seq'].apply(lambda seq: [ seq[i:i+N] for i in range(2, len(seq)-1, 1) ])
mers = (np.array(list(mers)).reshape([len(mers),len(mers[0])]))
mers = pd.DataFrame(mers)
    # count mers
for comb in combs_list:
comb_sum = np.sum(mers == comb,axis=1)
df_mer.loc[:,comb] = comb_sum
pca = PCA(n_components=np.min([16,len(df_mer.columns)]), svd_solver='auto', random_state=42)
df_mer = pd.DataFrame(pca.fit_transform(df_mer.dropna(axis=1)))
df_mer = df_mer.add_prefix('pc')
#MMS -1 1
for col in df_mer.columns:
df_mer[col] = mms(df_mer[col])
for col in labels:
df_mer[col] = df[col]
np.cumsum(pca.explained_variance_ratio_)
1/0
# 3D scatter
for lab in labels:
fig = plt.figure(figsize=(14,10))
ax = fig.add_subplot(111, projection='3d')
x = df_mer['pc0']
y = df_mer['pc1']
z = df_mer['pc2']
clrs = mms( (df_mer[lab]) )
ax.scatter3D(2*x + 0.05*np.random.randn(len(x)) ,
2*y + 0.05*np.random.randn(len(y)) ,
2*z + 0.05*np.random.randn(len(z)) ,
alpha=0.6, c=clrs, cmap='bwr')
plt.xlabel('pc0')
plt.ylabel('pc1')
ax.set_zlabel('pc2')
plt.title('{}: {}-mer projection'.format(lab,N) )
plt.show()
""" PUT A COMMENT TO SEE 3D Projection """
#plt.close()
fig = plt.figure(figsize=(14,10))
x = df_mer['pc0']
y = df_mer['pc1']
plt.scatter( x-0.5, #+ 0.05*np.random.randn(len(x)) ,
y-0.5, #+ 0.05*np.random.randn(len(y)) ,
alpha=0.6, c=clrs, cmap='bwr' )
plt.xlabel('pc0')
plt.ylabel('pc1')
plt.title('{}: {}-mer projection'.format(lab,N) )
plt.savefig('./out/pca/{}_{}mer'.format(lab,N) )
plt.show()
""" PUT A COMMENT TO SEE 2D Projection """
#plt.close()
#%% Dynamic clustering and prediction
"""
This technique involves all of our research,
by using PCA we learn the existence of 5 clusters,
by using kmeans we classify each sequence to its cluster,
by using regressors such as lasso we train a model for each cluster
and predict labels with high resolution.
we can compare results with or without dynamic clustering.
"""
"""
Dendrogram
Plot By TSNE
"""
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
import pickle
from time import perf_counter as clock, sleep
[plt.close() for x in plt.get_fignums()]
N = 3
with_clustering = True
stime = clock()
#labels = ['poly','prim','primo']
labels = ['primo']
def OHE(df):
cols = []
for i in range(36):
for letter in ['A','C','G','T']:
cols += [ str(i+1) + '_nuc_' + letter]
tdf = pd.get_dummies(df)
toAdd = np.setdiff1d(cols, tdf.columns)
for col in toAdd:
tdf[col] = 0
for col in cols:
tdf[col] = tdf[col].astype(int)
tdf = tdf.loc[:,cols]
return tdf
df = pd.read_csv('./data/chip_B_favor.csv')
df = pd.concat([OHE(df.drop(labels,axis=1)), df.loc[:,labels]], axis=1)
# apply KMEANS
km = KMeans(5, random_state=42, n_init=20 )
bins_pred = km.fit_predict(df.drop(labels,axis=1))
pickle.dump(km, open('./out/regressors/models/km.sav' , 'wb') )
t = km.cluster_centers_
cc = np.array(km.cluster_centers_).reshape(km.cluster_centers_.shape[0],
km.cluster_centers_.shape[1]//4, 4)
cc = np.array(pd.DataFrame(np.argmax(cc,axis=2)).replace({0:'A',1:'C',2:'G',3:'T'}))
centers = [''.join(l) for l in cc]
df = pd.read_csv('./data/chip_B_favor.csv')
df['bin'] = bins_pred
"""
# Hard To Predict (HTP) Generator
htpgen = pd.DataFrame(np.random.randint(0,4,[5000, 36])).replace({0:'A',1:'C',2:'G',3:'T'})
htpgen = htpgen.add_suffix('_nuc')
htpgen = OHE(htpgen)
htpgen['bin'] = km.predict(htpgen)
# Easy To Predict (HTP) Generator
etpgen = pd.DataFrame(np.random.randint(0,4,[5000, 36])).replace({0:'A',1:'C',2:'G',3:'T'})
etpgen = etpgen.add_suffix('_nuc')
etpgen = OHE(etpgen)
etpgen['bin'] = km.predict(etpgen)
t = np.array(htpgen.iloc[:,-1])
1/0
"""
from itertools import product
from sklearn.metrics import mean_absolute_error as mae
from sklearn.model_selection import cross_validate
#from sklearn.linear_model import LassoLarsIC
from sklearn.linear_model import Lasso
from sklearn.ensemble import RandomForestRegressor
from sklearn.neighbors import KNeighborsRegressor
import xgboost as xgb
test_df = pd.read_csv('./data/validation.csv').loc[:,['seq','toKeep','label']]
test_df = test_df.iloc[np.where(test_df['toKeep'] > 0)[0],:].reset_index(drop=True)
test_df = test_df.loc[:,['seq','label']]
splitted = pd.DataFrame(np.zeros([len(test_df),36]))
splitted = splitted.add_suffix('_nuc')
for i,seq in enumerate(test_df['seq']):
splitted.iloc[i,:] = list(seq)
def mms(t):
t = (t - np.min(t))/(np.max(t) - np.min(t))
return t
for col in labels:
df[col] = mms(df[col])
splitted = OHE(splitted)
splitted['bin'] = km.predict(splitted)
test_df['bin'] = splitted['bin']
letters = ['A','C','G','T']
exec('combs_list = list(product(' + 'letters,'*N + '))')
combs_list = list(map(''.join,combs_list))
#Train preparation
df_mer = pd.DataFrame(np.zeros([len(df), len(combs_list)]))
df_mer.columns = combs_list
mers = df['seq'].apply(lambda seq: [ seq[i:i+N] for i in range(2, len(seq)-1, 1) ])
mers = (np.array(list(mers)).reshape([len(mers),len(mers[0])]))
mers = pd.DataFrame(mers)
df_mer['seq'] = df['seq']
#forFUN
ACOUNT = [ x.count('A') for x in df['seq'] ]
CCOUNT = [ x.count('C') for x in df['seq'] ]
GCOUNT = [ x.count('G') for x in df['seq'] ]
TCOUNT = [ x.count('T') for x in df['seq'] ]
#count mers
for comb in combs_list:
comb_sum = np.sum(mers == comb,axis=1)
df_mer.loc[:,comb] = comb_sum
X = df_mer.copy()
X['bin'] = df['bin']
#plt.plot( (X.sum()[:-2]).sort_values() )
#X.iloc[:,:-2] = X.iloc[:,:-2]/list(np.sum(X.iloc[:,:-2]))
train = X.copy()
y = df[labels]
"""
Drek = pd.concat([train.drop('seq',axis=1), pd.DataFrame(y)], axis=1)
Drek.iloc[:,:-1] /= Drek.iloc[:,:-1].max()
Drek = Drek.drop('GTC',axis=1, errors='ignore')
Drek = Drek.corr('spearman').abs()
plt.figure(figsize=(12,12))
sns.heatmap(Drek, cmap='bwr')
plt.show()
1/0
"""
#Test preparation
df_mer = pd.DataFrame(np.zeros([len(test_df), len(combs_list)]))
df_mer.columns = combs_list
mers = test_df['seq'].apply(lambda seq: [ seq[i:i+N] for i in range(2, len(seq)-1, 1) ])
mers = (np.array(list(mers)).reshape([len(mers),len(mers[0])]))
mers = pd.DataFrame(mers)
#count mers
for comb in combs_list:
comb_sum = np.sum(mers == comb,axis=1)
df_mer.loc[:,comb] = comb_sum
test = df_mer.copy()
test['bin'] = test_df['bin']
y_test = test_df['label']
X_test = test.copy().reset_index(drop=True)
y_test = y_test.copy().reset_index(drop=True)
p_test = np.zeros(len(y_test))
X_train = train.copy().reset_index(drop=True)
if( with_clustering == False):
X_train['bin'] = 0
y_train = y.copy().reset_index(drop=True)
mean_mae_per_lab = []
df_results = pd.DataFrame()
res_label = []
res_tbin = []
res_mae = []
res_fi = []
res_bias = []
bin_weights = []
tstr = ''
for lab in labels:
mean_mae_per_bin = []
print("\n==============================")
print('label = {}'.format(lab) )
ber = pd.DataFrame(np.zeros([5,len(np.unique(X_train['bin']))]))
ber = ber.add_prefix('bin_')
for tbin in np.unique(X_train['bin']):
"""
drek = X_train.copy()
drek['primo'] = y_train.copy()
drek = drek.sort_values(['bin','primo']).reset_index(drop=True)
xax = []
for i in range(5):
xax += list(range(sum(drek['bin'] == i)))
drek['xax'] = xax
plt.figure(figsize=(8,8))
sns.lineplot( x='xax' ,y='primo', hue='bin', data=drek )
"""
test_strong = pd.DataFrame()
test_weak = pd.DataFrame()
yv = (y_train.loc[:,lab].iloc[np.where(X_train['bin'] == tbin)[0]])
Xv = X_train.iloc[np.where(X_train['bin'] == tbin)[0]].copy().drop(['bin','seq'],axis=1)
#plt.figaspect(1)
#h_0 = np.histogram(yv, bins=len(yv))
#cdf_0 = np.cumsum(np.sort( h_0[0]/len(yv)))
#plt.plot( [0] + list(h_0[1][1:]), [0] + list(cdf_0) )
#plt.plot( [0,1],[0,1] )
#tb = pd.concat([Xv, yv], axis=1)
#plt.plot( np.sort( 1/np.sort(h_0[0]) *yv) )
"""
Drek = pd.concat([Xv, pd.DataFrame(yv)], axis=1)
Drek.iloc[:,:-1] /= Drek.iloc[:,:-1].max()
Drek = Drek.drop('GTC',axis=1)
Drek = Drek.corr().abs()
plt.figure()
sns.heatmap(Drek, cmap='bwr')
plt.show()
continue
"""
print(len(Xv))
tst_idxs = np.where(X_test['bin'] == tbin)[0]
tst_idxs = np.array(list(tst_idxs))
if( len(tst_idxs) != 0 ):
yt = y_test.iloc[tst_idxs].copy()
#initiate Test Set
test_strong = X_test.iloc[yt[yt==1].index].drop('bin',axis=1)
test_weak = X_test.iloc[yt[yt==0].index].drop('bin',axis=1)
#reg = LassoLarsIC('bic', max_iter=200, fit_intercept=False, positive=True)
#reg = LassoLarsIC('bic', max_iter=200, normalize=False, fit_intercept=False, positive=True)
#reg = xgb.XGBRegressor(objective ='reg:linear', colsample_bytree = 0.3, learning_rate = 0.1,
# max_depth = 8, alpha = 10, n_estimators = 10)
# Regression Fitting
from copy import deepcopy
regs = []
tmp_preds = []
for rs in range(5):
""" We are going to test several regressors:
KNN, RBF-SVM, Linear-SVM, RF, XGBOOST
"""
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
import xgboost as xgb
#reg = RandomForestRegressor(max_depth = 8, random_state=rs)
#reg = LassoLarsIC('aic', max_iter=200, normalize=False, fit_intercept=False, positive=True)
#reg = KNeighborsRegressor(n_neighbors=2)
#reg = Lasso(alpha=0.00025, normalize=False, fit_intercept=True, positive=False)
reg = Lasso(alpha=0.00025, normalize=True, fit_intercept=False, positive=True) # This is the model we actually use
#reg = KNeighborsRegressor(15)
#reg = SVR(kernel='rbf')
#reg = SVR(kernel='linear')
#reg = RandomForestRegressor()
#reg = xgb.XGBRegressor()
idxs_pool = list(Xv.index)
train_idxs = np.random.choice( idxs_pool, size=4*len(idxs_pool)//5, replace=False )
train_idxs = np.sort(train_idxs)
tX = Xv.loc[train_idxs,:].copy()
ty = yv.loc[train_idxs].copy()
pX = Xv.drop(labels=train_idxs).copy()
py = yv.drop(labels=train_idxs).copy()
tX = tX
pX = pX
reg.fit(tX, ty)
pred = reg.predict(pX)
from sklearn.metrics import mean_absolute_error
print('K-Fold Iter: {}, MAE: {:2.3f}'.format(rs, mean_absolute_error(py,pred)) )
tmp_preds += [pred.copy()]
regs += [deepcopy(reg)]
ber.iloc[rs,tbin] = mean_absolute_error(py,pred)
#plt.plot( np.arange(len(py)), pd.Series(np.abs(py - pred)).expanding().mean() )
from sklearn.metrics import mean_squared_error
print('RMSE: {:2.3f}'.format( np.sqrt(mean_squared_error(py,pred)) ) )
print('BER: {:2.3f}'.format(np.mean(ber.iloc[:,tbin])) )
print('==================\nTotal BER: {:2.3f}'.format(np.mean(np.mean(ber))) )
reg = regs[np.argmin(np.array(ber.iloc[:,tbin]))]
pred = tmp_preds[np.argmin(np.array(ber.iloc[:,tbin]))]
if(with_clustering == False):
plt.scatter(py,pred, alpha=0.8, s=4, zorder=2 )
plt.plot([0,1],[0,1])
plt.xlabel('True')
plt.ylabel('Prediction')
plt.gca().set_aspect('equal')
"""
else:
plt.scatter(py,pred, alpha=0.8, s=4, zorder=2 )
if(tbin == 4):
plt.plot([0,1],[0,1], color='black', ls='--', zorder = 1, alpha=0.8)
plt.xlim([-0.05,1.05])
plt.ylim([-0.05,1.05])
plt.gca().set_aspect('equal')
plt.legend( ['y=x'] + list(centers), fontsize='x-small')
plt.xlabel('true')
plt.ylabel('pred')
"""
"""
res = cross_validate(reg, Xv , y=yv, groups=None,
scoring='neg_mean_absolute_error', cv=5, n_jobs=5, verbose=0,
fit_params=None, return_estimator=True)
best_estimator = res['estimator'][np.argmax(res['test_score'])]
"""
best_estimator = reg
ber['test_score'] = -ber.iloc[:,tbin].copy()
res = ber.copy()
mean_estimator_mae = -np.mean(res['test_score'])
mean_estimator_std = np.std(res['test_score'])
print('\033[1m cv mean: {:2.3f} | cv std: {:2.3f} \033[0m'.format(mean_estimator_mae, mean_estimator_std) )
# Save best model and collect resutls
pickle.dump(best_estimator, open('./out/regressors/models/{}_{}.sav'.format(lab, tbin) , 'wb') )
tmp_err = np.min(-res['test_score'])
#mean_mae_per_bin += [ tmp_err*len(np.where(X_train['bin'] == tbin)[0])/len(X_train)]
mean_mae_per_bin += [ tmp_err ]
#print(lab + ' => bin: ' + str(tbin) + ' | MAE: {:2.3f}'.format(tmp_err) )
tstr = tstr + lab + ' => bin: ' + str(tbin) + ' | MAE: {:2.3f}\n'.format(tmp_err)
if(len(test_strong) > 0):
p_test[test_strong.index] = list(best_estimator.predict(test_strong))
if(len(test_weak) > 0):
p_test[test_weak.index] = list(best_estimator.predict(test_weak))
res_label += [lab]
res_tbin += [tbin]
res_mae += [ np.round(mean_mae_per_bin[-1], 3)]
if( 'Lasso' in str(reg.__repr__)[:60]):
res_fi += [
list(zip(np.array(best_estimator.coef_), Xv.columns)) + [(best_estimator.intercept_,'Bias')]
]
else:
res_fi += [[0]]
mean_mae_per_bin[-1] = mean_mae_per_bin[-1]#*len(np.where(X_train['bin'] == tbin)[0])/len(X_train)
bin_weights += [len(np.where(X_train['bin'] == tbin)[0])/len(X_train)]
mean_mae_per_lab += [np.sum(mean_mae_per_bin) ]
print("=================\nMean Label MAE = {:2.3f} | STD MAE = {:2.3f}".format( np.mean(mean_mae_per_bin), np.std(mean_mae_per_bin) ) )
strong_pred = p_test[y_test == 1]
weak_pred = p_test[y_test == 0]
plt.figure(figsize=(8,4))
[freqs,bns] = np.histogram(y_train.loc[:,lab], bins=10, weights=[1/len(y_train)]*len(y_train) )
plt.barh(y=bns[:-1] + 0.05, width=freqs*10, height=0.1, alpha=0.4, zorder=1)
plt.xlim([-1, len(strong_pred)+1])
sns.distplot(y, hist=False, color='black', bins=len(y), kde_kws={'cut':3})
sns.distplot(weak_pred, hist=False, color='blue')
t = sns.distplot(strong_pred, hist=False, color='red')
plt.close()
def isclose(a, b, abs_tol=0.001):
return abs(a-b) <= abs_tol
colors = ['black', 'blue', 'red']
labs = ['Train', 'Test - Weak', 'Test - Strong']
plt.figure()
for cc, unnor in enumerate(t.get_lines()):
newy = (unnor.get_ydata())/np.sum(unnor.get_ydata())
plt.plot(unnor.get_xdata(), newy, color=colors[cc], label=labs[cc])
if(cc == 1):
tnewy = []
newx = unnor.get_xdata()
for twp in weak_pred:
cands = (np.where([ isclose(tx, twp, 0.005) for tx in newx])[0])
tnewy.append(cands[len(cands)//2])
plt.scatter(weak_pred, newy[tnewy], color=colors[cc], label=None)
if(cc == 2):
tnewy = []
newx = unnor.get_xdata()
for twp in strong_pred:
cands = (np.where([ isclose(tx, twp, 0.005) for tx in newx])[0])
tnewy.append(cands[len(cands)//2])
plt.scatter(strong_pred, newy[tnewy], color=colors[cc], label=None)
plt.ylim([0,0.04])
plt.xlim([0,1])
plt.title('Binding Scores Approximated Distributions', fontsize=14)
plt.legend()
plt.xlabel('Binding Score', fontsize=12)
plt.ylabel('$Probability(Score)$', fontsize=12)
1/0
"""
1/0
def d2r(d):
return d*3.14/180
[freqs,bns] = np.histogram(y_train.loc[:,lab], bins=64, weights=[1/len(y_train)]*len(y_train) )
sns.distplot( y_train.loc[:,lab], bins=8, hist=True,norm_hist=True, kde=False )
plt.scatter(strong_pred, strong_pred)
ax = plt.subplot(111, projection='polar')
[freqs,bns] = np.histogram(y_train.loc[:,lab], bins=64, weights=[1/len(y_train)]*len(y_train) )
sns.distplot(y_train.loc[:,lab]*d2r(360), bins=8, hist=True, norm_hist=True, kde=False , ax=ax)
ax.set_xlabel('$P(V>v)$')
#tfr = 1-freqs.cumsum()
#tfr
plt.xticks( [d2r(x) for x in np.arange(0,360,45)], ['A{}'.format(x) for x in np.arange(0,360,45)] )
#plt.scatter( freqs[(10*strong_pred).astype(int)]*(360), strong_pred )
#.plt.scatter( freqs[(10*strong_pred).astype(int)]*(360), strong_pred )
plt.scatter( strong_pred*d2r(360), strong_pred/2 )
plt.scatter( weak_pred*d2r(360), weak_pred/2, zorder=10 )
#ax.bar( bns[1:]*360,freqs , width=0.2, alpha=0.4 )
spr = (np.round(strong_pred*100)//10).astype(int)
wpr = (np.round(weak_pred*100)//10).astype(int)
fr = np.round(freqs*100)
frcs = 1-freqs.cumsum()
frcs = np.concatenate( [[1], frcs[1:-1], [0]] )
plt.plot(frcs)
plt.scatter( fr[spr], strong_pred )
plt.scatter( fr[wpr], weak_pred )
ax = plt.subplot(111, projection='polar')
ax.plot([d2r(x) for x in np.linspace(0,360,36)], [np.mean(strong_pred)]*36, lw=8, alpha=0.2, color='red')
ax.plot([d2r(x) for x in np.linspace(0,360,36)], [np.mean(weak_pred)]*36, lw=8, alpha=0.2, color='blue')
thetas = [d2r(x) for x in np.linspace(0,360,8+1)[:-1]]
#ax.plot(thetas, strong_pred, 'r^', color='red')
#ax.plot(thetas, weak_pred, 'rv', color='blue')
ax.plot(thetas + [0], list(strong_pred) + [strong_pred[0]], '', color='red')
ax.plot(thetas + [0], list(weak_pred) + [weak_pred[0]], '', color='blue')
ax.set_rlabel_position(0)
#ax.set_rticks( [0,1],[2,'b'])#['']*len(np.arange(0,1.2,0.2)))
#ax.set_thetagrids([90,270])
#ax.set_rgrids()
#ax.set_yticks([])
#ax.set_ylim([0,1.1])
ax.set_xticks([])
_ = [ax.plot([d2r(x) for x in np.linspace(0,360,36)], [v]*36, alpha=0.1, color='black') for v in np.arange(0,1,0.1)]
ax = plt.subplot(111, projection='polar')
#ax.plot([d2r(x) for x in np.linspace(0,360,36)], [np.mean(strong_pred)]*36, lw=8, alpha=0.2, color='red')
#ax.plot([d2r(x) for x in np.linspace(0,360,36)], [np.mean(weak_pred)]*36, lw=8, alpha=0.2, color='blue')
thetas = [d2r(x) for x in np.linspace(0,360,8+1)[:-1]]
#ax.plot(thetas, strong_pred, 'r^', color='red')
#ax.plot(thetas, weak_pred, 'rv', color='blue')
ax.plot(thetas + [0], list(strong_pred) + [strong_pred[0]], '', color='red')
ax.plot(thetas + [0], list(weak_pred) + [weak_pred[0]], '', color='blue')
ax.set_rlabel_position(0)
ax.set_rticks( [0,1],[2,'b'])#['']*len(np.arange(0,1.2,0.2)))
#ax.set_thetagrids([90,270])
#ax.set_rgrids()
ax.set_yticks([])
ax.set_xticks(np.arange(10), 1-np.cumsum(freqs))
#ax.set_ylim([0,1.1])
ax.set_xticks([])
[ax.plot([d2r(x) for x in np.linspace(0,360,36)], [v]*36, alpha=0.1, color='black') for v in np.arange(0,1,0.1)]
tmp_df_for_show = pd.DataFrame()
tmp_df_for_show['theta'] = list(thetas)*2
tmp_df_for_show['val'] = np.round(list(strong_pred) + list(weak_pred),3)
tmp_df_for_show['set'] = [1]*8 + [0]*8
#sns.FacetGrid(data=tmp_df_for_show, col="theta", hue="set", height="val")
g = sns.FacetGrid(tmp_df_for_show,subplot_kws=dict(projection='polar'), height=4.5,
sharex=False, sharey=False, despine=False)
g.map(sns.scatterplot,data=tmp_df_for_show, x='theta', y='val', hue='set')
#ax.bar(bns[:-1], freqs)
"""
plt.xticks([])
plt.savefig('./out/regressors/{}_{}_{}'.format(N, y_test.name, 'LassoIC') )
plt.show()
#plt.close()
print(tstr)
etime = clock()
print('Runtime: {:5.2f} [Seconds]'.format(etime-stime) )
df_results['label'] = res_label
df_results['tbin'] = res_tbin
df_results['fi'] = res_fi
df_results['mae'] = res_mae
#df_results['w_mae'] = np.array([ [mean_mae_per_lab[0]]*5, [mean_mae_per_lab[1]]*5, [mean_mae_per_lab[2]]*5]).reshape(-1)
df_results['w_mae'] = np.multiply(mean_mae_per_bin,bin_weights )
df_results.to_csv('./out/regressors/weighted_lasso.csv',index=False)
cv_res = pd.DataFrame({'MAE':np.mean(ber), 'STD':np.std(ber)})
print(centers)
#print(cv_res)
#print( 'Final WMAE = {:2.3f}'.format( np.sum(cv_res.iloc[:-1,0]*bin_weights) ) )
print( 'Final WMAE = {:2.3f}'.format( np.sum(df_results['w_mae']) ) )
1/0
lofi = []
for tbin in range(len(res_fi)):
ltz = np.where(np.array(res_fi)[tbin][:,0].astype(float) != 0)[0]
ifs = np.array(res_fi)[tbin][ltz,:]
ifs = [ [x[1], x[0]] for x in list(map(list, ifs))]
ifs = [ [x[0], np.round(float(x[1]),4) ] for x in ifs]
ifs = list(np.array(ifs)[np.argsort( np.abs(np.array(ifs)[:,1].astype(float)) )[-1::-1]])
ifs = list(map(list, ifs))
lofi += [ifs]
toPrint = list((dict(ifs).items()))[:5]
print(tbin, ' => ', toPrint)
df_results['fi'] = lofi
df_results.to_csv('./out/regressors/light_weighted_lasso.csv',index=False)
#%
fi_per_bin = df_results['fi'].copy()
exec('combs_list = list(product(' + 'letters,'*N + '))')
combs_list = list(map(''.join,combs_list))
df_fi = pd.DataFrame(np.zeros([5,len(combs_list)]))
df_fi.columns = combs_list
for i in range(len(fi_per_bin)):
tf = fi_per_bin[i]
df_fi.loc[i, list(np.array(tf)[:,0])] = (np.array(tf)[:,1]).astype(float)
zero_importance = list(df_fi.columns[np.where(df_fi.sum() == 0)])
zero_importance.remove('GTC')
sorted_imp = (df_fi.replace({0:np.nan}).median().sort_values())
sorted_imp = sorted_imp.fillna(0)
sorted_imp = sorted_imp[sorted_imp > 0]
sorted_imp = sorted_imp.sort_values()
sorted_imp = sorted_imp[-10:]
plt.figure()
plt.subplot(2,1,1)
sns.scatterplot(x=sorted_imp.index, y=sorted_imp)
plt.xticks(sorted_imp.index, rotation=60)
plt.title('Kmers Median Coefficients')
plt.ylim([-0.01, 0.2])
plt.subplot(2,1,2)
sns.scatterplot(x=zero_importance, y=[0]*len(zero_importance))
plt.xticks(zero_importance, rotation=60)
plt.title('Kmers Non Important')
plt.ylim([-0.01, 0.2])
plt.tight_layout()
plt.show()
#%% IGNORE 20.4.20
1/0
#PLOTTER - Dynamic clustering and prediction
"""
This technique involves all of our research:
using PCA we learn the existence of 5 clusters,
using k-means we classify each sequence into its cluster,
and using regressors such as Lasso we train a model for each cluster
and predict labels with high resolution.
We can compare results with or without dynamic clustering.
"""
"""
Dendrogram
Plot By TSNE
"""
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
from itertools import product
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
import pickle
def OHE(df):
cols = []
for i in range(36):
for letter in ['A','C','G','T']:
cols += [ str(i+1) + '_nuc_' + letter]
tdf = pd.get_dummies(df)
toAdd = np.setdiff1d(cols, tdf.columns)
for col in toAdd:
tdf[col] = 0
for col in cols:
tdf[col] = tdf[col].astype(int)
tdf = tdf.loc[:,cols]
return tdf
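# Hedged usage sketch for OHE(): the real input comes from chip_B_favor.csv, whose exact
# column names are an assumption here ('1_nuc'..'36_nuc', one nucleotide letter each).
_demo_nucs = pd.DataFrame([list('ACGT' * 9)], columns=[f'{i+1}_nuc' for i in range(36)])
_demo_ohe = OHE(_demo_nucs)
print(_demo_ohe.shape)  # expected (1, 144): 36 positions x 4 indicator columns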
for ClusterSize in [0,1]:
for KMER in [1,2,3,4]:
print('\n========================================================')
print('========================================================')
print( ['Without Clustering','With Clustering'][ClusterSize] )
print( '{}-Mer'.format(KMER) )
N = KMER
#labels = ['poly','prim','primo']
labels = ['primo']
with_clustering = True
df = pd.read_csv('./data/chip_B_favor.csv')
df = pd.concat([OHE(df.drop(labels,axis=1)), df.loc[:,labels]], axis=1)
# apply KMEANS
km = KMeans(5, random_state=42 )
bins_pred = km.fit_predict(df.drop(labels,axis=1))
pickle.dump(km, open('./out/regressors/models/km.sav' , 'wb') )
df = pd.read_csv('./data/chip_B_favor.csv')
df['bin'] = ClusterSize*bins_pred
from sklearn.metrics import mean_absolute_error as mae
from sklearn.model_selection import cross_validate
from sklearn.linear_model import LassoLarsIC
test_df = pd.read_csv('./data/validation.csv').loc[:,['seq','toKeep','label']]
test_df = test_df.iloc[np.where(test_df['toKeep'] > 0)[0],:].reset_index(drop=True)
test_df = test_df.loc[:,['seq','label']]
splitted = pd.DataFrame(np.zeros([len(test_df),36]))
splitted = splitted.add_suffix('_nuc')
for i,seq in enumerate(test_df['seq']):
splitted.iloc[i,:] = list(seq)
def mms(t):
t = (t - np.min(t))/(np.max(t) - np.min(t))
return t
for col in labels:
df[col] = mms(df[col])
splitted = OHE(splitted)
splitted['bin'] = km.predict(splitted)
test_df['bin'] = splitted['bin']
letters = ['A','C','G','T']
exec('combs_list = list(product(' + 'letters,'*N + '))')
combs_list = list(map(''.join,combs_list))
#Train preparation
df_mer = pd.DataFrame(np.zeros([len(df), len(combs_list)]))
df_mer.columns = combs_list
mers = df['seq'].apply(lambda seq: [ seq[i:i+N] for i in range(2, len(seq)-1, 1) ])
mers = (np.array(list(mers)).reshape([len(mers),len(mers[0])]))
mers = pd.DataFrame(mers)
df_mer['seq'] = df['seq']
#count mers
for comb in combs_list:
comb_sum = np.sum(mers == comb,axis=1)
df_mer.loc[:,comb] = comb_sum
X = df_mer.copy()
X['bin'] = df['bin']
train = X.copy()
y = df[labels]
#Test preparation
df_mer = pd.DataFrame(np.zeros([len(test_df), len(combs_list)]))
df_mer.columns = combs_list
mers = test_df['seq'].apply(lambda seq: [ seq[i:i+N] for i in range(2, len(seq)-1, N) ])
mers = (np.array(list(mers)).reshape([len(mers),len(mers[0])]))
mers = pd.DataFrame(mers)
#count mers
for comb in combs_list:
comb_sum = np.sum(mers == comb,axis=1)
df_mer.loc[:,comb] = comb_sum
test = df_mer.copy()
test['bin'] = test_df['bin']
y_test = test_df['label']
X_test = test.copy().reset_index(drop=True)
y_test = y_test.copy().reset_index(drop=True)
p_test = np.zeros(len(y_test))
X_train = train.copy().reset_index(drop=True)
if( with_clustering == False):
X_train['bin'] = 0
y_train = y.copy().reset_index(drop=True)
mean_mae_per_lab = []
df_results = pd.DataFrame()
res_label = []
res_tbin = []
res_mae = []
res_fi = []
res_bias = []
bin_weights = []
for lab in labels:
mean_mae_per_bin = []
for tbin in np.unique(X_train['bin']):
test_strong = pd.DataFrame()
test_weak = pd.DataFrame()
yv = (y_train.loc[:,lab].iloc[np.where(X_train['bin'] == tbin)[0]])
Xv = X_train.iloc[np.where(X_train['bin'] == tbin)[0]].copy().drop(['bin','seq'],axis=1)
tst_idxs = np.where(X_test['bin'] == tbin)[0]
tst_idxs = np.array(list(tst_idxs))
if( len(tst_idxs) != 0 ):
yt = y_test.iloc[tst_idxs].copy()
#initiate Test Set
test_strong = X_test.iloc[yt[yt==1].index].drop('bin',axis=1)
test_weak = X_test.iloc[yt[yt==0].index].drop('bin',axis=1)
"""
# drop zero cols
keepCols = np.where(np.sum(Xv) > 0)[0]
Xv = Xv.iloc[:,keepCols]
test_strong = test_strong.iloc[:,keepCols]
test_weak = test_weak.iloc[:,keepCols]
"""
#reg = LassoLarsIC('bic', fit_intercept=False, positive=True)
reg = LassoLarsIC('bic')
# LassoIC Regression Fitting
res = cross_validate(reg, Xv , y=yv, groups=None,
scoring='neg_mean_absolute_error', cv=5, n_jobs=6, verbose=0,
fit_params=None, return_estimator=True)
best_estimator = res['estimator'][np.argmax(res['test_score'])]
# Save best model and collect results
pickle.dump(best_estimator, open('./out/regressors/models/{}_{}.sav'.format(lab, tbin) , 'wb') )
tmp_err = np.min(-res['test_score'])
#mean_mae_per_bin += [ tmp_err*len(np.where(X_train['bin'] == tbin)[0])/len(X_train)]
mean_mae_per_bin += [ tmp_err ]
print( str(tbin) + ' ' + lab + ' lasso -> ', tmp_err )
if(len(test_strong) > 0):
p_test[test_strong.index] = list(best_estimator.predict(test_strong))
if(len(test_weak) > 0):
p_test[test_weak.index] = list(best_estimator.predict(test_weak))
res_label += [lab]
res_tbin += [tbin]
res_mae += [ np.round(mean_mae_per_bin[-1], 3)]
res_fi += [
list(zip(np.array(best_estimator.coef_), Xv.columns)) + [(np.round(best_estimator.intercept_, 3), 'Bias')]
]
mean_mae_per_bin[-1] = mean_mae_per_bin[-1]#*len(np.where(X_train['bin'] == tbin)[0])/len(X_train)
bin_weights += [len(np.where(X_train['bin'] == tbin)[0])/len(X_train)]
mean_mae_per_lab += [ np.sum(np.multiply(mean_mae_per_bin,bin_weights)) ]
print("Mean MAE = {}".format(mean_mae_per_lab[-1]) )
strong_pred = p_test[y_test == 1]
weak_pred = p_test[y_test == 0]
plt.figure(figsize=(8,4))
[freqs,bns] = np.histogram(y_train.loc[:,lab], bins=10, weights=[1/len(y_train)]*len(y_train) )
plt.barh(y=bns[:-1] + 0.05, width=freqs*10, height=0.1, alpha=0.4, zorder=1)
plt.xlim([-1, len(strong_pred)+1])
plt.scatter( x=np.arange(len(strong_pred)), y=strong_pred, color='red' , zorder=2)
plt.scatter( x=np.arange(len(weak_pred)), y=weak_pred , color='blue', zorder=3)
plt.legend(['Allegedly Strong Bonding', 'Allegedly Weak Bonding'])
plt.xlabel('Sample Index')
plt.title('Lasso - {0} distribution\nModel MAE = {1:2.3f}'.format(lab, (np.min(-res['test_score'])) ),
fontsize=16, fontname='Arial')
yticks = freqs
yticks = np.round(yticks,2)
yticks = list((yticks*100).astype(int).astype(str))
yticks = [ x + '%' for x in yticks]
plt.yticks( bns+0.05 , yticks)
plt.ylabel("Bin Probability",fontsize=12)
ax = plt.gca().twinx()
ax.yaxis.tick_right()
plt.yticks(np.arange(0,1.1,0.1))
ax.set_ylabel("Relative Bonding Strength",fontsize=12)
plt.xticks([])
#plt.savefig('./out/regressors/{}_{}_{}'.format(N, y_test.name, 'LassoIC') )
plt.show()
plt.close()
df_results['label'] = res_label
df_results['tbin'] = res_tbin
df_results['fi'] = res_fi
df_results['mae'] = res_mae
#df_results['w_mae'] = np.array([ [mean_mae_per_lab[0]]*5, [mean_mae_per_lab[1]]*5, [mean_mae_per_lab[2]]*5]).reshape(-1)
df_results['w_mae'] = np.multiply(mean_mae_per_bin,bin_weights )
lofi = []
for tbin in range(len(res_fi)):
ltz = np.where(np.array(res_fi)[tbin][:,0].astype(float) != 0)[0]
ifs = np.array(res_fi)[tbin][ltz,:]
ifs = [ [x[1], x[0]] for x in list(map(list, ifs))]
ifs = [ [x[0], np.round(float(x[1]),4) ] for x in ifs]
ifs = list(np.array(ifs)[np.argsort(np.array(ifs)[:,1])[-1::-1]])
ifs = list(map(list, ifs))
lofi += [ifs]
#print(tbin, '\n', dict(ifs), '\n')
df_results['fi'] = lofi
#df_results.to_csv('./out/regressors/light_weighted_lasso.csv',index=False)
print('========================================================')
print('========================================================\n')
#%% Exp sequences Generator - VERY HEAVY - DO NOT RUN UNLESS U HAVE TIME
df_results.index = df_results['label']
df_gen = X_train.loc[:,['seq','bin']].reset_index(drop=True)
df_gen['primo'] = y_train['primo'].copy()
#df_gen = df_gen.groupby('bin').mean().sort_values('primo').reset_index()
# REF seqs
seq_max = X_train.iloc[np.where(y_train['primo'] == 1)[0],:]['seq']
seq_min = X_train.iloc[np.where(y_train['primo'] == 0)[0],:]['seq']
seq_max = list(seq_max)[0]
seq_min = list(seq_min)[0]
"""
For Each Bin:
choose min seq
find a similar seq that is not in the training set
predict its bin and score
choose max seq
find a similar seq that is not in the training set
predict its bin and score
"""
exp_bins = ['max','min']
exp_seqs = [seq_max, seq_min]
exp_pred = [1,0]
N = 1
for tbin in np.unique(df_gen['bin']):
mdl = pickle.load(open('./out/regressors/models/primo_{}.sav'.format(tbin), 'rb') )
tdf = df_gen.iloc[np.where(df_gen['bin'] == tbin)[0],:]
tdf = tdf.iloc[np.where(tdf['primo'] > 0)[0],:]
tdf = tdf.iloc[np.where(tdf['primo'] < 1)[0],:]
#sort
tdf = tdf.sort_values('primo').reset_index()
# =============== MIN SEQ HANDLE =====================
tminseq = tdf.iloc[0,:]['seq']
cands_seqs = []
cands_scre = []
#find similar seq
letters = ['A','C','G','T']
newseq = tminseq
for i in range(len(newseq)):
for j in range(4):
if(i >= tminseq.find('GTC') and i < tminseq.find('GTC')+3):
continue
else:
newseq = tminseq[:i] + letters[j] + tminseq[i+1:]
seqexsits = [ x for x in tdf['seq'] if newseq == x ]
if( len(seqexsits) > 0):
continue
else:
df_newseq = pd.DataFrame(list(newseq))
df_newseq = df_newseq.T.add_suffix('_nuc')
df_newseq = OHE(df_newseq)
pbin = km.predict(df_newseq)[0]
if(pbin != tbin):
continue
else:
df_newseq = pd.DataFrame()
df_newseq['seq'] = pd.Series(newseq)
#Test preparation
df_mer = pd.DataFrame(np.zeros([0, len(combs_list)]))
df_mer.columns = combs_list
mers = df_newseq['seq'].apply(lambda seq: [ seq[i:i+N] for i in range(2, len(seq)-1, 1) ])
mers = (np.array(list(mers)).reshape([len(mers),len(mers[0])]))
mers = pd.DataFrame(mers)
#count mers
for comb in combs_list:
comb_sum = np.sum(mers == comb,axis=1)
df_mer.loc[:,comb] = comb_sum
df_newseq = df_mer.copy()
cands_seqs += [newseq]
cands_scre += [mdl.predict(df_newseq)[0]]
if(i % 4 == 0):
print(i)
df_cands = pd.DataFrame({'seq':cands_seqs,'primo':cands_scre})
df_cands = df_cands.sort_values('primo').reset_index()
exp_seqs += [ df_cands.iloc[0,:]['seq'] ]
exp_bins += [ str(tbin) ]
exp_pred += [ df_cands.iloc[0,:]['primo'] ]
# =============== MAX SEQ HANDLE =====================
tmaxseq = tdf.iloc[-1,:]['seq']
cands_seqs = []
cands_scre = []
#find similar seq
letters = ['A','C','G','T']
newseq = tmaxseq
for i in range(len(newseq)):
for j in range(4):
if(i >= tmaxseq.find('GTC') and i < tmaxseq.find('GTC')+3):
continue
else:
newseq = tmaxseq[:i] + letters[j] + tmaxseq[i+1:]
seqexsits = [ x for x in tdf['seq'] if newseq == x ]
if( len(seqexsits) > 0):
continue
else:
df_newseq = pd.DataFrame(list(newseq))
df_newseq = df_newseq.T.add_suffix('_nuc')
df_newseq = OHE(df_newseq)
pbin = km.predict(df_newseq)[0]
if(pbin != tbin):
continue
else:
df_newseq = pd.DataFrame()
df_newseq['seq'] = pd.Series(newseq)
#Test preparation
df_mer = pd.DataFrame(np.zeros([0, len(combs_list)]))
df_mer.columns = combs_list
mers = df_newseq['seq'].apply(lambda seq: [ seq[i:i+N] for i in range(2, len(seq)-1, N) ])
mers = (np.array(list(mers)).reshape([len(mers),len(mers[0])]))
mers = pd.DataFrame(mers)
#count mers
for comb in combs_list:
comb_sum = np.sum(mers == comb,axis=1)
df_mer.loc[:,comb] = comb_sum
df_newseq = df_mer.copy()
cands_seqs += [newseq]
cands_scre += [mdl.predict(df_newseq)[0]]
if(i % 4 == 0):
print(i)
df_cands = pd.DataFrame({'seq':cands_seqs,'primo':cands_scre})
df_cands = df_cands.sort_values('primo').reset_index()
exp_seqs += [ df_cands.iloc[-1,:]['seq'] ]
exp_bins += [ str(tbin) ]
exp_pred += [ df_cands.iloc[-1,:]['primo'] ]
df_exp = pd.DataFrame({'bin':exp_bins,
'seq':exp_seqs,
'pred':exp_pred})
df_exp.to_csv('./out/exp_seqs2.csv', index=False)
1/0
"""
Here we can analyze the Feature Importance of each regressor
"""
fi = [np.array(x)[:,0] for x in df_results['fi']]
t = pd.DataFrame(fi).astype(float)
t.columns = Xv.columns
t = np.sum(t)
t = pd.Series(t).sort_values()
t = t[t>0]
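# Hedged follow-up sketch (assumes `t`, `plt` and `sns` from the lines above): visualize
# the summed non-zero Lasso coefficients to see which k-mers carry weight across bins.
plt.figure(figsize=(8, 3))
sns.barplot(x=t.index, y=t.values, color='steelblue')
plt.xticks(rotation=60)
plt.ylabel('Summed coefficient')
plt.title('Aggregated Lasso feature importance')
plt.tight_layout()
plt.show()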
#%% Generate words for trial
"""
This section is meant for generating sequences on which
we will apply a physical test.
In order to generate a proper experiment we need a few elements:
1 - 2 reference seqs which we can normalize
results according to.
2 - 5 strong easy-to-predict seqs
3 - 5 weak easy-to-predict seqs
4 - 5 strong hard-to-predict seqs
5 - 5 weak hard-to-predict seqs
total seqs = 22
"""
#%% Exp sequences Generator - VERY HEAVY - DO NOT RUN UNLESS U HAVE TIME
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
df = pd.read_csv('./out/exp_seqs.csv')
# =============================================================================
#
# =============================================================================
N = 3
splitted = pd.DataFrame(np.zeros([len(df),36]))
splitted = splitted.add_suffix('_nuc')
for i,seq in enumerate(df['seq']):
splitted.iloc[i,:] = list(seq)
def mms(t):
t = (t - np.min(t))/(np.max(t) - np.min(t))
return t
letters = ['A','C','G','T']
exec('combs_list = list(product(' + 'letters,'*N + '))')
combs_list = list(map(''.join,combs_list))
#Train preparation
df_mer = pd.DataFrame(np.zeros([len(df), len(combs_list)]))
df_mer.columns = combs_list
mers = df['seq'].apply(lambda seq: [ seq[i:i+N] for i in range(2, len(seq)-1, 1) ])
mers = (np.array(list(mers)).reshape([len(mers),len(mers[0])]))
mers = pd.DataFrame(mers)
df_mer['seq'] = df['seq']
#count mers
for comb in combs_list:
comb_sum = np.sum(mers == comb,axis=1)
df_mer.loc[:,comb] = comb_sum
X = df_mer.copy()
X['bin'] = df['bin']
# =============================================================================
#
# =============================================================================
exp_bins = ['max','min']
exp_seqs = [seq_max, seq_min]
exp_pred = [1,0]
for i in range(len(X)):
tseq = X.iloc[i,-2]
tbin = X.iloc[i,-1]
tfeats = pd.DataFrame(X.iloc[i, :-2]).T
tpred = -1
if(tbin == 'max' or tbin == 'min'):
continue
mdl = pickle.load(open('./out/regressors/models/primo_{}.sav'.format(tbin), 'rb') )
exp_bins += [tbin]
exp_pred += list(mdl.predict(tfeats))
exp_seqs += [tseq]
df['pred2'] = exp_pred
#%% Exp sequences Generator - V2
df_results.index = df_results['label']
df_gen = X_train.loc[:,['seq','bin']].reset_index(drop=True)
df_gen['primo'] = y_train['primo'].copy()
#df_gen = df_gen.groupby('bin').mean().sort_values('primo').reset_index()
# REF seqs
seq_max = X_train.iloc[np.where(y_train['primo'] == 1)[0],:]['seq']
seq_min = X_train.iloc[np.where(y_train['primo'] == 0)[0],:]['seq']
seq_max = list(seq_max)[0]
seq_min = list(seq_min)[0]
"""
For Each Bin:
choose min seq
find a similar seq that is not in the training set
predict its bin and score
choose max seq
find a similar seq that is not in the training set
predict its bin and score
"""
exp_bins = ['max','min']
exp_seqs = [seq_max, seq_min]
exp_pred = [1,0]
N = 3
for tbin in [2,4]:
print('Processing Bin ', tbin)
mdl = pickle.load(open('./out/regressors/models/primo_{}.sav'.format(tbin), 'rb') )
tdf = df_gen.iloc[np.where(df_gen['bin'] == tbin)[0],:]
tdf = tdf.iloc[np.where(tdf['primo'] > 0)[0],:]
tdf = tdf.iloc[np.where(tdf['primo'] < 1)[0],:]
#sort
tdf = tdf.sort_values('primo').reset_index()
"""
plt.figure()
plt.hist(tdf['primo'], bins=64)
plt.title(str(tbin))
plt.xlim([0,1])
continue
"""
tmin = tdf.iloc[1*len(tdf)//10,:]
tmean = tdf.iloc[len(tdf)//2,:]
tmax = tdf.iloc[9*len(tdf)//10,:]
print('tmin : ', tmin['seq'], ': {:2.2f}'.format( tmin['primo']) )
print('tmean: ', tmean['seq'], ': {:2.2f}'.format( tmean['primo']) )
print('tmax : ', tmax['seq'], ': {:2.2f}'.format( tmax['primo']) )
# =============== SEQ HANDLE =====================
for tseq in [tmin, tmean, tmax]:
cands_seqs = []
cands_scre = []
tminseq = str(tseq['seq'])
print(tminseq)
#find similar seq
letters = ['A','C','G','T']
newseq = tminseq
for i in range(len(newseq)):
for j in range(4):
if(i >= tminseq.find('GTC') and i < tminseq.find('GTC')+3):
continue
else:
newseq = tminseq[:i] + letters[j] + tminseq[i+1:]
seqexsits = [ x for x in tdf['seq'] if newseq == x ]
if( len(seqexsits) > 0):
continue
else:
df_newseq = pd.DataFrame(list(newseq))
df_newseq = df_newseq.T.add_suffix('_nuc')
df_newseq = OHE(df_newseq)
pbin = km.predict(df_newseq)[0]
if(pbin != tbin):
continue
else:
df_newseq = pd.DataFrame()
df_newseq['seq'] = pd.Series(newseq)
#Test preparation
df_mer = pd.DataFrame(np.zeros([0, len(combs_list)]))
df_mer.columns = combs_list
mers = df_newseq['seq'].apply(lambda seq: [ seq[i:i+N] for i in range(2, len(seq)-1, 1) ])
mers = (np.array(list(mers)).reshape([len(mers),len(mers[0])]))
mers = pd.DataFrame(mers)
#count mers
for comb in combs_list:
comb_sum = np.sum(mers == comb,axis=1)
df_mer.loc[:,comb] = comb_sum
df_newseq = df_mer.copy()
cands_seqs += [newseq]
cands_scre += [mdl.predict(df_newseq)[0]]
if(i % 4 == 0):
print(i)
df_cands = pd.DataFrame({'seq':cands_seqs,'primo':cands_scre})
df_cands = df_cands.sort_values('primo').reset_index()
# min
exp_seqs += [ df_cands.iloc[0,:]['seq'] ]
exp_bins += [ str(tbin) ]
exp_pred += [ df_cands.iloc[0,:]['primo'] ]
#mean
exp_seqs += [ df_cands.iloc[len(df_cands)//2,:]['seq'] ]
exp_bins += [ str(tbin) ]
exp_pred += [ df_cands.iloc[len(df_cands)//2,:]['primo'] ]
#max
exp_seqs += [ df_cands.iloc[-1,:]['seq'] ]
exp_bins += [ str(tbin) ]
exp_pred += [ df_cands.iloc[-1,:]['primo'] ]
df_exp = pd.DataFrame({'bin':exp_bins,
'seq':exp_seqs,
'pred':exp_pred})
df_exp.to_csv('./out/exp_seqs2.csv', index=False)
#%% COMPARE VALIDATION PREDICTION TO EXPERIMENTAL MEASURMENTS
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
dfe = pd.read_csv('./data/validation_corr.csv').iloc[:,:-1].dropna(axis=0)
dfe.columns = ['ind','group','seq','primo','pmol']
dfe = dfe.sort_values('seq').reset_index(drop=True)
plt.plot(dfe['primo'])
#plt.plot(dfp['binding'])
dfp = test_df.copy()
dfp['p'] = p_test
dfe.index = dfe['seq']
dfe = dfe.loc[dfp['seq'], :].reset_index(drop=True)
ndf = pd.DataFrame()
ndf['seq'] = test_df['seq']
ndf['ampiric_score'] = dfe['primo']
ndf['predicted_score'] = dfp['p']
ndf['corr'] = ndf.corr().iloc[0,1]
ndf.to_csv('./out/VAL_CORR96.csv',index=False)
#%% Fixed Correlation Figure before and after Kmers
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
def mms(t):
if(np.max(t) - np.min(t) > 0):
t = (t - np.min(t))/(np.max(t) - np.min(t))
else:
t = (t)/(np.max(t))
return t
def count_letters(df_nucs, rep_dict):
X = df_nucs.copy()
X = X.replace(rep_dict)
X = np.array(X)
X = np.sum(X,1)
return X
def nucs2seq(row):
row = list(row)
t = ''.join(row)
return t
def OHE(df):
cols = []
for i in range(36):
for letter in ['A','C','G','T']:
cols += [ str(i+1) + '_nuc_' + letter]
tdf = pd.get_dummies(df)
toAdd = np.setdiff1d(cols, tdf.columns)
for col in toAdd:
tdf[col] = 0
for col in cols:
tdf[col] = tdf[col].astype(int)
tdf = tdf.loc[:,cols]
return tdf
df = pd.read_csv('data/chip_B_favor.csv')  # api: pandas.read_csv
"""
.. module:: linregress
:platform: Unix
:synopsis: Contains methods for doing linear regression.
.. moduleauthor:: <NAME> <<EMAIL>>
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from disaggregator import GreenButtonDatasetAdapter as gbda
import pandas as pd
import numpy as np
import json
import matplotlib.pyplot as plt
def run_regressions(trace_series,temps_series,cal_hdd_temp_range=range(50,60),
cal_cdd_temp_range=range(60,75),plot=False):
'''
Takes in a series from a trace and a temperature series and runs linear regressions
over a range of cooling and heating setpoints. For each linear regression, temperature
values above the setpoint are used, with temps below the cooling setpoint (and above
the heating setpoint) set to the setpoint. This is to make the linear regression
similar to those conducted for cooling and heating degree days. This method outputs
a dictionary containing the best slopes and intercepts, as well as their corresponding
setpoint temperatures and adjusted r2 values.
'''
results_dict = {}
df_trace = pd.DataFrame(trace_series,columns=['kwh'])
df_trace = df_trace.sort_index()
best_r2_adj_cool = float("-inf")
best_r2_adj_heat = float("-inf")
best_cdd_temp = 0
best_hdd_temp = 0
slope_cdd = None
slope_hdd = None
intercept_hdd = None
intercept_cdd = None
results_cdd = None
results_hdd = None
df_all_best_cool = None
df_all_best_heat = None
df_temps=pd.DataFrame(temps_series,columns=['temp'])
for cdd_setpoint in cal_cdd_temp_range:
df_temps_dropped=df_temps.drop(df_temps[df_temps['temp']<=cdd_setpoint].index)
df_all = pd.merge(df_trace,df_temps_dropped,left_index=True,right_index=True)
if(len(df_all) > 1):
results = pd.ols(y=df_all['kwh'], x = df_all['temp'])
r2_adj = results.r2_adj
if(r2_adj > best_r2_adj_cool):
best_cdd_temp = cdd_setpoint
best_r2_adj_cool = r2_adj
slope_cdd = results.beta[0]
intercept_cdd = results.beta[1]
results_cdd=results
df_all_best_cool = df_all
if plot and df_all_best_cool is not None and len(df_all_best_cool) > 1:
df_plot=df_all_best_cool.drop(df_all_best_cool[df_all_best_cool['temp']==best_cdd_temp].index)
plt.plot(df_plot['temp'],df_plot['kwh'],'.r')
x = np.arange(best_cdd_temp,100,.2)
y = x * slope_cdd + intercept_cdd
plt.plot(x,y,'k')
for hdd_setpoint in cal_hdd_temp_range:
df_temps_dropped=df_temps.drop(df_temps[df_temps['temp']>=hdd_setpoint].index)
df_all = pd.merge(df_trace,df_temps_dropped,left_index=True,right_index=True)
if(len(df_all) > 1):
results = pd.ols(y=df_all['kwh'], x=df_all['temp'])  # api: pandas.ols
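# Note: pd.ols() was removed in later pandas releases. Below is a hedged, minimal
# stand-in for the single-variable fit used above, based on scipy.stats.linregress
# (a sketch, not the project's actual replacement; scipy is assumed to be available).
from scipy import stats

def ols_slope_intercept_r2adj(y, x):
    """Least-squares fit y = slope*x + intercept, plus an adjusted R^2 for one predictor."""
    slope, intercept, r_value, _p, _stderr = stats.linregress(x, y)
    n = len(x)
    r2 = r_value ** 2
    r2_adj = 1 - (1 - r2) * (n - 1) / (n - 2) if n > 2 else float('nan')
    return slope, intercept, r2_adj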
from __future__ import division
import pytest
import numpy as np
from datetime import timedelta
from pandas import (
Interval, IntervalIndex, Index, isna, notna, interval_range, Timestamp,
Timedelta, compat, date_range, timedelta_range, DateOffset)
from pandas.compat import lzip
from pandas.tseries.offsets import Day
from pandas._libs.interval import IntervalTree
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
@pytest.fixture(scope='class', params=['left', 'right', 'both', 'neither'])
def closed(request):
return request.param
@pytest.fixture(scope='class', params=[None, 'foo'])
def name(request):
return request.param
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self, closed='right'):
return IntervalIndex.from_breaks(range(11), closed=closed)
def create_index_with_nan(self, closed='right'):
mask = [True, False] + [True] * 8
return IntervalIndex.from_arrays(
np.where(mask, np.arange(10), np.nan),
np.where(mask, np.arange(1, 11), np.nan), closed=closed)
def test_constructors(self, closed, name):
left, right = Index([0, 1, 2, 3]), Index([1, 2, 3, 4])
ivs = [Interval(l, r, closed=closed) for l, r in lzip(left, right)]
expected = IntervalIndex._simple_new(
left=left, right=right, closed=closed, name=name)
result = IntervalIndex(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_arrays(
left.values, right.values, closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
lzip(left, right), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = Index(ivs, name=name)
assert isinstance(result, IntervalIndex)
tm.assert_index_equal(result, expected)
# idempotent
tm.assert_index_equal(Index(expected), expected)
tm.assert_index_equal(IntervalIndex(expected), expected)
result = IntervalIndex.from_intervals(expected)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(
expected.values, name=expected.name)
tm.assert_index_equal(result, expected)
left, right = expected.left, expected.right
result = IntervalIndex.from_arrays(
left, right, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)  # api: pandas.util.testing.assert_index_equal
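# Standalone sketch of the constructor equivalence exercised by test_constructors above,
# restricted to constructors that still exist in current pandas (from_intervals was
# deprecated and later removed).
import pandas as pd

idx_breaks = pd.IntervalIndex.from_breaks([0, 1, 2, 3, 4], closed='right')
idx_arrays = pd.IntervalIndex.from_arrays([0, 1, 2, 3], [1, 2, 3, 4], closed='right')
idx_tuples = pd.IntervalIndex.from_tuples([(0, 1), (1, 2), (2, 3), (3, 4)], closed='right')
assert idx_breaks.equals(idx_arrays) and idx_breaks.equals(idx_tuples)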
# -*- coding: utf-8 -*-
"""Structures data in ML-friendly ways."""
import re
import copy
import datetime as dt
import random
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from avaml import Error, setenvironment as se, _NONE, CSV_VERSION, REGIONS, merge, REGION_ELEV
from avaml.aggregatedata.download import _get_varsom_obs, _get_weather_obs, _get_regobs_obs, REG_ENG, PROBLEMS
from avaml.aggregatedata.time_parameters import to_time_parameters
from varsomdata import getforecastapi as gf
from varsomdata import getmisc as gm
__author__ = 'arwi'
LABEL_PROBLEM_PRIMARY = {
"ext_attr": [
"avalanche_problem_type_id",
"avalanche_problem_type_name",
"avalanche_type_id",
"avalanche_type_name",
"avalanche_ext_id",
"avalanche_ext_name"
],
"values": {
_NONE: [0, "", 0, "", 0, ""],
"new-loose": [3, "Nysnø (løssnøskred)", 20, "Løssnøskred", 10, "Tørre løssnøskred"],
"wet-loose": [5, "Våt snø (løssnøskred)", 20, "Løssnøskred", 15, "Våte løssnøskred"],
"new-slab": [7, "Nysnø (flakskred)", 10, "Flakskred", 20, "Tørre flakskred"],
"drift-slab": [10, "Fokksnø (flakskred)", 10, "Flakskred", 20, "Tørre flakskred"],
"pwl-slab": [30, "Vedvarende svakt lag (flakskred)", 10, "Flakskred", 20, "Tørre flakskred"],
"wet-slab": [45, "Våt snø (flakskred)", 10, "Flakskred", 25, "Våte flakskred"],
"glide": [50, "Glideskred", 10, "Flakskred", 25, "Våte flakskred"]
}
}
LABEL_PROBLEM = {
"cause": {
"ext_attr": ["aval_cause_id", "aval_cause_name"],
"values": {
"0": [0, ""],
"new-snow": [10, "Nedføyket svakt lag med nysnø"],
"hoar": [11, "Nedsnødd eller nedføyket overflaterim"],
"facet": [13, "Nedsnødd eller nedføyket kantkornet snø"],
"crust": [14, "Dårlig binding mellom glatt skare og overliggende snø"],
"snowdrift": [15, "Dårlig binding mellom lag i fokksnøen"],
"ground-facet": [16, "Kantkornet snø ved bakken"],
"crust-above-facet": [18, "Kantkornet snø over skarelag"],
"crust-below-facet": [19, "Kantkornet snø under skarelag"],
"ground-water": [20, "Vann ved bakken/smelting fra bakken"],
"water-layers": [22, "Opphopning av vann i/over lag i snødekket"],
"loose": [24, "Ubunden snø"]
}
},
"dsize": {
"ext_attr": ["destructive_size_ext_id", "destructive_size_ext_name"],
"values": {
'0': [0, "Ikke gitt"],
'1': [1, "1 - Små"],
'2': [2, "2 - Middels"],
'3': [3, "3 - Store"],
'4': [4, "4 - Svært store"],
'5': [5, "5 - Ekstremt store"]
}
},
"prob": {
"ext_attr": ["aval_probability_id", "aval_probability_name"],
"values": {
'0': [0, "Ikke gitt"],
'2': [2, "Lite sannsynlig"],
'3': [3, "Mulig"],
'5': [5, "Sannsynlig"],
}
},
"trig": {
"ext_attr": ["aval_trigger_simple_id", "aval_trigger_simple_name"],
"values": {
'0': [0, "Ikke gitt"],
'10': [10, "Stor tilleggsbelastning"],
'21': [21, "Liten tilleggsbelastning"],
'22': [22, "Naturlig utløst"]
}
},
"dist": {
"ext_attr": ["aval_distribution_id", "aval_distribution_name"],
"values": {
'0': [0, "Ikke gitt"],
'1': [1, "Få bratte heng"],
'2': [2, "Noen bratte heng"],
'3': [3, "Mange bratte heng"],
'4': [4, "De fleste bratte heng"]
}
},
"lev_fill": {
"ext_attr": ["exposed_height_fill"],
"values": {
'0': [0],
'1': [1],
'2': [2],
'3': [3],
'4': [4],
}
}
}
LABEL_PROBLEM_MULTI = {
"aspect": {
"ext_attr": "valid_expositions",
}
}
LABEL_PROBLEM_REAL = {
"lev_max": {
"ext_attr": "exposed_height_1",
},
"lev_min": {
"ext_attr": "exposed_height_2",
}
}
LABEL_GLOBAL = {
"danger_level": {
"ext_attr": ["danger_level", "danger_level_name"],
"values": {
'1': [1, "1 liten"],
'2': [2, "2 Moderat"],
'3': [3, "3 Betydelig"],
'4': [4, "4 Stor"],
'5': [5, "5 Meget stor"]
}
},
"emergency_warning": {
"ext_attr": ["emergency_warning"],
"values": {
"Ikke gitt": ["Ikke gitt"],
"Naturlig utløste skred": ["Naturlig utløste skred"],
}
}
}
COMPETENCE = [0, 110, 115, 120, 130, 150]
class ForecastDataset:
def __init__(self, regobs_types, seasons=('2017-18', '2018-19', '2019-20'), max_file_age=23):
"""
Object contains aggregated data used to generate labeled datasets.
:param regobs_types: Tuple/list of string names for RegObs observation types to fetch.
:param seasons: Tuple/list of string representations of avalanche seasons to fetch.
"""
self.seasons = sorted(list(set(seasons)))
self.date = None
self.regobs_types = regobs_types
self.weather = {}
self.regobs = {}
self.varsom = {}
self.labels = {}
self.use_label = True
for season in seasons:
varsom, labels = _get_varsom_obs(year=season, max_file_age=max_file_age)
self.varsom = merge(self.varsom, varsom)
self.labels = merge(self.labels, labels)
regobs = _get_regobs_obs(season, regobs_types, max_file_age=max_file_age)
self.regobs = merge(self.regobs, regobs)
weather = _get_weather_obs(season, max_file_age=max_file_age)
self.weather = merge(self.weather, weather)
@staticmethod
def date(regobs_types, date: dt.date, days, use_label=True):
"""
Create a dataset containing just a given day's data.
:param regobs_types: Tuple/list of string names for RegObs observation types to fetch.
:param date: Date to fetch and create dataset for.
:param days: How many days to fetch before date. This will be max for .label()'s days parameter.
"""
self = ForecastDataset(regobs_types, [])
self.date = date
self.use_label = use_label
self.regobs = _get_regobs_obs(None, regobs_types, date=date, days=days)
self.varsom, labels = _get_varsom_obs(None, date=date, days=days-1 if days > 0 else 1)
self.weather = _get_weather_obs(None, date=date, days=days-2 if days > 2 else 1)
self.labels = {}
for label_keys, label in labels.items():
if label_keys not in self.labels:
self.labels[label_keys] = {}
for (label_date, label_region), label_data in label.items():
if label_date == date.isoformat():
subkey = (label_date, label_region)
self.labels[label_keys][subkey] = label_data
return self
def label(self, days, with_varsom=True):
"""Creates a LabeledData containing relevant label and features formatted either in a flat structure or as
a time series.
:param days: How far back in time values should data be included.
If 0, only weather data for the forecast day is evaluated.
If 1, day 0 is used for weather, 1 for Varsom.
If 2, day 0 is used for weather, 1 for Varsom, 2 for RegObs.
If 3, days 0-1 is used for weather, 1-2 for Varsom, 2-3 for RegObs.
If 5, days 0-3 is used for weather, 1-4 for Varsom, 2-5 for RegObs.
The reason for this is to make sure that each kind of data contains
the same number of data points, if we want to use some time series
frameworks that are picky about such things.
:param with_varsom: Whether to include previous avalanche bulletins into the indata.
:return: LabeledData
"""
table = {}
row_weight = {}
df = None
df_weight = None
df_label = pd.DataFrame(self.labels, dtype="U")
days_w = {0: 1, 1: 1, 2: 1}.get(days, days - 1)
days_v = {0: 1, 1: 2, 2: 2}.get(days, days)
days_r = days + 1
varsom_index = pd.DataFrame(self.varsom).index
weather_index = pd.DataFrame(self.weather).index
if len(df_label.index) == 0 and self.use_label:
raise NoBulletinWithinRangeError()
if self.date and not self.use_label:
season = gm.get_season_from_date(self.date)
regions = gm.get_forecast_regions(year=season, get_b_regions=True)
date_region = [(self.date.isoformat(), region) for region in regions]
else:
date_region = df_label.index
for monotonic_idx, entry_idx in enumerate(date_region):
date, region_id = dt.date.fromisoformat(entry_idx[0]), entry_idx[1]
def prev_key(day_dist):
return (date - dt.timedelta(days=day_dist)).isoformat(), region_id
# Just check that we can use this entry.
try:
if with_varsom:
for n in range(1, days_v):
if prev_key(n) not in varsom_index:
raise KeyError()
for n in range(0, days_w):
if prev_key(n) not in weather_index:
raise KeyError()
add_row = True
# We don't check for RegObs as it is more of a nice-to-have kind of data
except KeyError:
add_row = False
if add_row:
row = {}
for region in REGIONS:
row[(f"region_id_{region}", "0")] = float(region == region_id)
if with_varsom:
for column in self.varsom.keys():
for n in range(1, days_v):
# We try/except an extra time since single dates may run without a forecast.
row[(column, str(n))] = self.varsom[column][prev_key(n)]
for column in self.weather.keys():
for n in range(0, days_w):
try:
row[(column, str(n))] = self.weather[column][prev_key(n)]
except KeyError:
row[(column, str(n))] = 0
for column in self.regobs.keys():
for n in range(2, days_r):
try:
row[(column, str(n))] = self.regobs[column][prev_key(n)]
except KeyError:
row[(column, str(n))] = 0
try:
weight_sum = self.regobs['accuracy'][prev_key(0)]
if weight_sum < 0:
row_weight[entry_idx] = 1 / 2
elif weight_sum == 0:
row_weight[entry_idx] = 1
elif weight_sum > 0:
row_weight[entry_idx] = 2
except KeyError:
row_weight[entry_idx] = 1
# Some restructuring to make DataFrame parse the dict correctly
for key in row.keys():
if key not in table:
table[key] = {}
table[key][entry_idx] = row[key]
# Build DataFrame iteratively to preserve system memory (floats in dicts are apparently expensive).
if (monotonic_idx > 0 and monotonic_idx % 1000 == 0) or monotonic_idx == len(date_region) - 1:
df_new = pd.DataFrame(table, dtype=np.float32).fillna(0)
df_weight_new = pd.Series(row_weight)
df = df_new if df is None else pd.concat([df, df_new])
df_weight = df_weight_new if df is None else pd.concat([df_weight, df_weight_new])  # api: pandas.concat
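# The table-building pattern above in miniature: dict keys that are (column, days-back)
# tuples become a two-level column MultiIndex when handed to the DataFrame constructor.
# Standalone sketch with made-up feature names and values.
import pandas as pd

_rows = {
    ("wind_speed", "0"): {("2020-01-03", 3003): 4.2, ("2020-01-04", 3003): 6.1},
    ("wind_speed", "1"): {("2020-01-03", 3003): 3.9, ("2020-01-04", 3003): 4.2},
    ("danger_level", "1"): {("2020-01-03", 3003): 2.0, ("2020-01-04", 3003): 3.0},
}
_df_sketch = pd.DataFrame(_rows)   # columns: MultiIndex of (feature, days-back)
_weights_sketch = pd.Series({("2020-01-03", 3003): 1, ("2020-01-04", 3003): 2})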
"""
Created on Mon Apr 12 09:17:14 2021
Developed for UIF to more easily handle the growing number of alumni they have,
and to track interactions with said alumni.
Final Project for CCAC DAT-281
@author: BKG
"""
import os
import sys
import sqlite3
from sqlite3 import Error
import pandas as pd
import PySimpleGUI as sg
def main():
"""
The main menu
Present the user with a GUI and 4 buttons to choose from.
Based on what the user clicks on, executes other functions or closes
Returns
-------
None.
"""
os.chdir(os.path.dirname(sys.argv[0]))
sg.theme('DarkBlue3')
layout = [[sg.Text('Please select an action that you would like to perform:',
size=(25,3),
font=('Arial', 15))],
[sg.Button('Import new alumni to the database',
key='alum',
size=(30,1))],
[sg.Button('Import new interaction with alumni',
key='interaction',
size=(30,1))],
[sg.Text('_' * 100, size=(32, 1))],
[sg.Button('Export list of alumni with ID numbers',
key='export_ID',
size=(30,1))],
[sg.Button('Export list of next alumni to contact',
key='contact',
size=(30,1))],
[sg.Text('_' * 100, size=(32, 1))],
[sg.Button('Close the program',
key='close',
size=(30,1))]]
window = sg.Window('UIF: Alumni Database', layout)
while True:
event = window.read()
if event[0] == 'alum':
window.close()
main_alum()
elif event[0] == 'interaction':
window.close()
main_interaction()
elif event[0] == 'export_ID':
window.close()
main_export_id()
elif event[0] == 'contact':
window.close()
main_contact()
elif event[0] in ('close', sg.WIN_CLOSED):
break
window.close()
def main_alum():
location = select_file()
if location is not None:
new_alumni_gui(location)
else:
main()
def main_interaction():
location = select_file()
if location is not None:
new_interaction_gui(location)
else:
main()
def main_export_id():
location = select_folder()
if location is not None:
export_alumni_name_list(location)
else:
main()
def main_contact():
location = select_folder()
if location is not None:
export_alumni_contact_list(location)
else:
main()
def select_file():
layout = [[sg.Text('Folder Location')],
[sg.Input(), sg.FileBrowse()],
[sg.OK(), sg.Cancel()] ]
window = sg.Window('UIF: Alumni Database', layout)
values = window.read()
window.close()
if values[1][0] != '':
return values[1][0]
return None
def select_folder():
layout = [[sg.Text('Folder Location')],
[sg.Input(), sg.FolderBrowse()],
[sg.OK(), sg.Cancel()] ]
window = sg.Window('UIF: Alumni Database', layout)
values = window.read()
window.close()
if values[1][0] != '':
return values[1][0]
return None
def all_good():
layout = [[sg.Text('Everything completed without errors.',
font=('Arial', 15))],
[sg.Button('Exit the program', key='close')]]
window = sg.Window('UIF: Alumni Database', layout)
while True:
event = window.read()
if event[0] in ('close', sg.WIN_CLOSED):
break
window.close()
def export_alumni_name_list(path):
"""
Opens a connection to the database.
Queries the database.
Output is put into a dataframe.
Dataframe is written to .csv file.
Returns
-------
None.
"""
connection = _db_connection()
query = ''' SELECT ID_number, first_name, last_name,
graduation_year, CORE_student
FROM Basic_Info
ORDER BY last_name ASC
'''
output = pd.read_sql(query, con=connection)
connection.close()
col_names = ['ID Number',
'First Name',
'Last Name',
'Graduation Year',
'CORE?']
output.columns = col_names
file_name = 'Master Alumni List.csv'
# path = select_folder()
os.chdir(path)
output.to_csv(file_name, index=False, encoding='utf-8')
all_good()
def export_alumni_contact_list(path):
query_read = '''SELECT c.ID_number, c.first_name, c.last_name,
c.CORE_student, c.last_date, b.phone_num, b.email
FROM Last_Contact c
INNER JOIN Basic_Info b
ON c.ID_number = b.ID_number
WHERE last_date < DATE('now', '-90 days')
ORDER BY c.CORE_student DESC, c.last_date ASC
'''
connection = _db_connection()
contact = pd.read_sql(query_read, con=connection)  # api: pandas.read_sql
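# The functions above call _db_connection(), which is not shown in this fragment.
# A plausible minimal implementation is sketched below; the database file name is an
# assumption and the real module may differ.
def _db_connection(db_file='alumni.sqlite'):
    """Open a SQLite connection, or report the error and return None."""
    try:
        return sqlite3.connect(db_file)
    except Error as err:
        sg.popup_error(f'Could not open database: {err}')
        return None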
"""Accessors to Pandas DataFrame interpreting metadata in the column index.
Two versions: 'ms' assumes labeled data, 'ums' assumes unlabeled data"""
from collections import namedtuple
import numpy as np
import pandas as pd
from pandas_flavor import register_dataframe_accessor
from .utils import _is_string
def create_multiindex_with_labels(df, labels=["no label"], level_name="label"):
cols = df.columns
n = len(cols)
metanames = cols.names
if not labels:
labels = ["no label"]
elif _is_string(labels):
labels = [labels]
else:
labels = list(labels)
nr = n // len(labels)
newstrs = []
for s in labels:
newstrs.extend([s] * nr)
if len(metanames) > 1:
tcols = [list(c) for c in cols.to_flat_index()]
else:
tcols = [[c] for c in cols]
newcols = [tuple([ns] + c) for (ns, c) in zip(newstrs, tcols)]
return pd.MultiIndex.from_tuples(newcols, names=[level_name] + metanames)
@register_dataframe_accessor("cdl")
class CDLAccessor(object):
"""An accessor to Pandas DataFrame to interpret content as column organized, labeled data.
This interpretation assumes that the **column** index stores the essential
metadata, namely, sample names and group labels. This index is
usually hierarchical and other levels are optional. Accessor 'ums' for unlabeled data,
where level 0 is assumed to hold sample names, is also available in this module.
Interpretation is based on the following conventions:
- For the accessor to work, the column index must have at least two levels.
- Level 1 is interpreted as sample names by the accessor.
Default name for this level is 'sample' and default values are 'Sample {i}'.
.samples is a property to access this level.
- Level 0 is interpreted as labels. .labels is a property to access labels.
- More levels are possible, if they are read from data sources or added by Pandas index manipulation.
The (row) index is interpreted as "features", often labels of spectral entities. Examples are
m/z values, formulae or any format-specific labeling scheme. It may be hierarchical.
"""
def __init__(self, df):
self._validate(df)
self._df = df
@staticmethod
def _validate(df):
"""Require a pandas DataFrame with at least two levels in column MultiIndex to work."""
if not isinstance(df, pd.DataFrame):
raise AttributeError("'cdl' must be used with a Pandas DataFrame")
if len(df.columns.names) < 2:
raise AttributeError(
"Must have at least label and sample metadata on columns"
)
def _get_zip_labels_samples(self):
self._df.columns = self._df.columns.remove_unused_levels()
return zip(
self._df.columns.get_level_values(0), self._df.columns.get_level_values(1)
)
@property
def unique_labels(self):
"""Get the different data labels (with no repetitions)."""
return tuple(pd.unique(self.labels))
@property
def labels(self):
"""iterate over labels of each DataFrame column."""
self._df.columns = self._df.columns.remove_unused_levels()
return self._df.columns.get_level_values(0)
@labels.setter
def labels(self, value):
"""Setter for data labels."""
self._rebuild_col_level(value, 0)
@property
def label_count(self):
"""Get the number of labels."""
# 'no label' still counts as one (global) label
return len(self.unique_labels)
@property
def unique_samples(self):
"""Get the different sample names (with no repetitions in case the number of levels > 2)."""
return tuple(pd.unique(self.samples))
@property
def samples(self):
"""iterate over sample names of each DataFrame column."""
self._df.columns = self._df.columns.remove_unused_levels()
return self._df.columns.get_level_values(1)
@samples.setter
def samples(self, value):
"""Setter for sample names."""
self._rebuild_col_level(value, 1)
@property
def sample_count(self):
"""Get the number of samples."""
return len(self.unique_samples)
def _rebuild_col_level(self, value, level):
cols = self._df.columns.remove_unused_levels()
n = len(cols)
metanames = cols.names
# handle value
if value is None or len(value) == 0:
if level == 0:
value = ["no label"]
elif level == 1:
value = [f"Sample {i}" for i in range(1, n + 1)]
else:
value = [f"Info {i}" for i in range(1, n + 1)]
elif _is_string(value):
value = [value]
else:
value = list(value)
nr = n // len(value)
newstrs = []
for s in value:
newstrs.extend([s] * nr)
cols = [list(c) for c in cols]
for i, s in enumerate(newstrs):
cols[i][level] = s
newcols = [tuple(c) for c in cols]
self._df.columns = pd.MultiIndex.from_tuples(newcols, names=metanames)
@property
def feature_count(self):
"""Get the number of features."""
return len(self._df.index)
@property
def iter_labels_samples(self):
"""iterate over pairs of (label, sample name) for each DataFrame column."""
self._df.columns = self._df.columns.remove_unused_levels()
return self._get_zip_labels_samples()
@property
def no_labels(self):
"""True if there is only one (global) label 'no label'."""
return self.label_count == 1 and self.labels[0] == "no label"
def info(self, all_data=False):
"""A dicionary of global counts or a DataFrame with info for each sample"""
if all_data:
return dict(
samples=self.sample_count,
labels=self.label_count,
features=self.feature_count,
)
ls_table = [(s, l) for (l, s) in self._get_zip_labels_samples()]
ls_table.append((self.sample_count, self.label_count))
indx_strs = [str(i) for i in range(self.sample_count)] + ["global"]
return pd.DataFrame(ls_table, columns=["sample", "label"], index=indx_strs)
def label_of(self, sample):
"""Get label from sample name"""
for lbl, s in self._get_zip_labels_samples():
if s == sample:
return lbl
raise KeyError(f"No label found for '{sample}'")
def samples_of(self, label):
"""Get a list of sample names from label"""
snames = [s for lbl, s in self._get_zip_labels_samples() if lbl == label]
return snames
def _get_subset_data_indexer(self, sample=None, label=None, no_drop_na=False):
if sample is None and label is None:
return list(self._df.columns)
if sample is not None:
if _is_string(sample):
samples = [sample]
else:
samples = list(sample)
indexer = []
for s in samples:
if s not in self.samples:
raise KeyError(f"'{s}' is not a sample name")
lbl = self.label_of(s)
indexer.append((lbl, s))
if len(indexer) == 1:
indexer = indexer[0]
return indexer
elif sample is None and label is not None:
if _is_string(label):
labels = [label]
else:
labels = list(label)
indexer = []
for s in labels:
if s not in self.labels:
raise KeyError(f"'{s}' is not a label")
indexer.append(s)
#indexer = (indexer,)
return indexer
else:
raise KeyError("Sample name or label not found")
def _get_subset_data(self, sample=None, label=None, no_drop_na=False):
if sample is None and label is None:
df = self._df
else:
col_indexer = self.subset_iloc(sample=sample, label=label)
df = self._df.iloc[:, col_indexer]
# col_indexer = self._get_subset_data_indexer(sample=sample, label=label)
# df = self._df.loc[:, col_indexer]
df = df.copy() if no_drop_na else df.dropna(how="all")
if isinstance(df, pd.DataFrame):
df.columns = df.columns.remove_unused_levels()
return df
def take(self, **kwargs):
"""Retrieves subset of data by sample name or label."""
return self._get_subset_data(**kwargs)
def subset(self, **kwargs):
"""Alias for take()."""
return self.take(**kwargs)
def features(self, **kwargs):
"""Get the row index (features) indexing data by sample name or label"""
df = self._get_subset_data(**kwargs)
return df.index
def subset_where(self, sample=None, label=None):
"""return a boolean DataFrame with the location of subset."""
df = pd.DataFrame(False, index=self._df.index, columns=self._df.columns)  # api: pandas.DataFrame
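# Hedged usage sketch for the 'cdl' accessor defined above (synthetic data; the
# 'ctrl'/'treat' labels are made up). Guarded so it only runs when executed directly.
if __name__ == "__main__":
    _data = pd.DataFrame(
        np.random.rand(4, 6),
        index=[f"feat{i}" for i in range(4)],
        columns=pd.Index([f"S{i}" for i in range(1, 7)], name="sample"),
    )
    _data.columns = create_multiindex_with_labels(_data, labels=["ctrl", "treat"])
    print(_data.cdl.unique_labels)   # ('ctrl', 'treat')
    print(_data.cdl.unique_samples)  # ('S1', ..., 'S6')
    print(_data.cdl.info())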
from distutils.version import LooseVersion
from warnings import catch_warnings
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
import pandas as pd
from pandas import (
DataFrame,
HDFStore,
Index,
MultiIndex,
Series,
_testing as tm,
bdate_range,
concat,
date_range,
isna,
read_hdf,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
tables,
)
from pandas.io.pytables import Term
pytestmark = pytest.mark.single
def test_select_columns_in_where(setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_select_with_dups(setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_select(setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select("df", [("columns=['A', 'B']")])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# all a data columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["C", "D"])
expected = df[df.A > 0].reindex(columns=["C", "D"])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(setup_path):
with ensure_clean_store(setup_path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(
{
"ts": bdate_range("2012-01-01", periods=300),
"A": np.random.randn(300),
}
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A"])
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=["A", "B"])
df["object"] = "foo"
df.loc[4:5, "object"] = "bar"
df["boolv"] = df["A"] > 0
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa
for v in [True, "true", 1]:
result = store.select("df", f"boolv == {v}", columns=["A", "boolv"])
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa
for v in [False, "false", 0]:
result = store.select("df", f"boolv == {v}", columns=["A", "boolv"])
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame({"A": np.random.rand(20), "B": np.random.rand(20)})
_maybe_remove(store, "df_int")
store.append("df_int", df)
result = store.select("df_int", "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(
{
"A": np.random.rand(20),
"B": np.random.rand(20),
"index": np.arange(20, dtype="f8"),
}
)
_maybe_remove(store, "df_float")
store.append("df_float", df)
result = store.select("df_float", "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(setup_path) as store:
# floats w/o NaN
df = DataFrame({"cols": range(11), "values": range(11)}, dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
store.append("df1", df, data_columns=True)
result = store.select("df1", where="values>2.0")
expected = df[df["values"] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df["values"] > 2.0]
store.append("df2", df, data_columns=True, index=False)
result = store.select("df2", where="values>2.0")
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
# not in first position float with NaN ok too
df = DataFrame({"cols": range(11), "values": range(11)}, dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df["values"] > 2.0]
store.append("df4", df, data_columns=True)
result = store.select("df4", where="values>2.0")
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
expected = df[df["A"] > 0]
store.append("df", df, data_columns=True)
np_zero = np.float64(0) # noqa
result = store.select("df", where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"ts": bdate_range("2012-01-01", periods=300),
"A": np.random.randn(300),
"B": range(300),
"users": ["a"] * 50
+ ["b"] * 50
+ ["c"] * 100
+ [f"a{i:03d}" for i in range(100)],
}
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A", "B", "users"])
# regular select
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select("df", "ts>=Timestamp('2012-02-01') & users=['a','b','c']")
expected = df[
(df.ts >= Timestamp("2012-02-01")) & df.users.isin(["a", "b", "c"])
]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = ["a", "b", "c"] + [f"a{i:03d}" for i in range(60)]
result = store.select("df", "ts>=Timestamp('2012-02-01') and users=selector")
expected = df[(df.ts >= Timestamp("2012-02-01")) & df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
result = store.select("df", "B=selector")
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select("df", "ts=selector")
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
def test_select_iterator(setup_path):
# single table
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, "df")
store.append("df", df)
expected = store.select("df")
results = list(store.select("df", iterator=True))
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=100))
assert len(results) == 5
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=150))
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df_non_table")
msg = "can only use an iterator or chunksize on a table"
with pytest.raises(TypeError, match=msg):
read_hdf(path, "df_non_table", chunksize=100)
with pytest.raises(TypeError, match=msg):
read_hdf(path, "df_non_table", iterator=True)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df", format="table")
results = list(read_hdf(path, "df", chunksize=100))
result = concat(results)
assert len(results) == 5
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path, "df"))
# multiple
with ensure_clean_store(setup_path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append("df1", df1, data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(columns="{}_2".format)
df2["foo"] = "bar"
store.append("df2", df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(["df1", "df2"], selector="df1")
results = list(
store.select_as_multiple(["df1", "df2"], selector="df1", chunksize=150)
)
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_complete_8014(setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# no iterator
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/o iteration and no where clause works
result = store.select("df")
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, begin
# of range, works
where = f"index >= '{beg_dt}'"
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, end
# of range, works
where = f"index <= '{end_dt}'"
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, inclusive range,
# works
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# with iterator, full range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/iterator and no where clause works
results = list(store.select("df", chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, begin of range
where = f"index >= '{beg_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, end of range
where = f"index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, inclusive range
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_non_complete_8014(setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# with iterator, non complete range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[1]
end_dt = expected.index[-2]
# select w/iterator and where clause, single term, begin of range
where = f"index >= '{beg_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = f"index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[(expected.index >= beg_dt) & (expected.index <= end_dt)]
tm.assert_frame_equal(rexpected, result)
# with iterator, empty where
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
end_dt = expected.index[-1]
# select w/iterator and where clause, single term, begin of range
where = f"index > '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
assert 0 == len(results)
def test_select_iterator_many_empty_frames(setup_path):
# GH 8014
# using iterator and where clause can return many empty
# frames.
chunksize = 10_000
# with iterator, range limited to the first chunk
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100000, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[chunksize - 1]
# select w/iterator and where clause, single term, begin of range
where = f"index >= '{beg_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = f"index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
assert len(results) == 1
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
# should be 1, is 10
assert len(results) == 1
result = concat(results)
rexpected = expected[(expected.index >= beg_dt) & (expected.index <= end_dt)]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause which selects
# *nothing*.
#
# To be consistent with Python idiom I suggest this should
# return [] e.g. `for e in []: print True` never prints
# True.
where = f"index <= '{beg_dt}' & index >= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
# should be []
assert len(results) == 0
def test_frame_select(setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
date = df.index[len(df) // 2]
crit1 = Term("index>=date")
assert crit1.env.scope["date"] == date
crit2 = "columns=['A', 'D']"
crit3 = "columns=A"
result = store.select("frame", [crit1, crit2])
expected = df.loc[date:, ["A", "D"]]
tm.assert_frame_equal(result, expected)
result = store.select("frame", [crit3])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# invalid terms
df = tm.makeTimeDataFrame()
store.append("df_time", df)
msg = "could not convert string to Timestamp"
with pytest.raises(ValueError, match=msg):
store.select("df_time", "index>0")
# can't select if not written as table
# store['frame'] = df
# with pytest.raises(ValueError):
# store.select('frame', [crit1, crit2])
def test_frame_select_complex(setup_path):
# select via complex criteria
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[df.index[0:4], "string"] = "bar"
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table", data_columns=["string"])
# empty
result = store.select("df", 'index>df.index[3] & string="bar"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select("df", 'index>df.index[3] & string="foo"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "foo")]
tm.assert_frame_equal(result, expected)
# or
result = store.select("df", 'index>df.index[3] | string="bar"')
expected = df.loc[(df.index > df.index[3]) | (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select(
"df", '(index>df.index[3] & index<=df.index[6]) | string="bar"'
)
expected = df.loc[
((df.index > df.index[3]) & (df.index <= df.index[6]))
| (df.string == "bar")
]
tm.assert_frame_equal(result, expected)
# invert
result = store.select("df", 'string!="bar"')
expected = df.loc[df.string != "bar"]
tm.assert_frame_equal(result, expected)
# invert not implemented in numexpr :(
msg = "cannot use an invert condition when passing to numexpr"
with pytest.raises(NotImplementedError, match=msg):
store.select("df", '~(string="bar")')
# invert ok for filters
result = store.select("df", "~(columns=['A','B'])")
expected = df.loc[:, df.columns.difference(["A", "B"])]
tm.assert_frame_equal(result, expected)
# in
result = store.select("df", "index>df.index[3] & columns in ['A','B']")
expected = df.loc[df.index > df.index[3]].reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_frame_select_complex2(setup_path):
with ensure_clean_path(["parms.hdf", "hist.hdf"]) as paths:
pp, hh = paths
# use non-trivial selection criteria
parms = DataFrame({"A": [1, 1, 2, 2, 3]})
parms.to_hdf(pp, "df", mode="w", format="table", data_columns=["A"])
selection = read_hdf(pp, "df", where="A=[2,3]")
hist = DataFrame(
np.random.randn(25, 1),
columns=["data"],
index=MultiIndex.from_tuples(
[(i, j) for i in range(5) for j in range(5)], names=["l1", "l2"]
),
)
hist.to_hdf(hh, "df", mode="w", format="table")
expected = read_hdf(hh, "df", where="l1=[2, 3, 4]")
# scope with list like
l = selection.index.tolist() # noqa
store = HDFStore(hh)
result = store.select("df", where="l1=l")
tm.assert_frame_equal(result, expected)
store.close()
result = read_hdf(hh, "df", where="l1=l")
tm.assert_frame_equal(result, expected)
# index
index = selection.index # noqa
result = read_hdf(hh, "df", where="l1=index")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=selection.index")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=selection.index.tolist()")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=list(selection.index)")
tm.assert_frame_equal(result, expected)
# scope with index
store = HDFStore(hh)
result = store.select("df", where="l1=index")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=selection.index")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=selection.index.tolist()")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=list(selection.index)")
tm.assert_frame_equal(result, expected)
store.close()
def test_invalid_filtering(setup_path):
# can't use more than one filter (atm)
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
msg = "unable to collapse Joint Filters"
# not implemented
with pytest.raises(NotImplementedError, match=msg):
store.select("df", "columns=['A'] | columns=['B']")
# in theory we could deal with this
with pytest.raises(NotImplementedError, match=msg):
store.select("df", "columns=['A','B'] & columns=['C']")
def test_string_select(setup_path):
# GH 2973
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
# test string ==/!=
df["x"] = "none"
df.loc[df.index[2:7], "x"] = ""
store.append("df", df, data_columns=["x"])
result = store.select("df", "x=none")
expected = df[df.x == "none"]
tm.assert_frame_equal(result, expected)
result = store.select("df", "x!=none")
expected = df[df.x != "none"]
tm.assert_frame_equal(result, expected)
df2 = df.copy()
df2.loc[df2.x == "", "x"] = np.nan
store.append("df2", df2, data_columns=["x"])
result = store.select("df2", "x!=none")
expected = df2[isna(df2.x)]
tm.assert_frame_equal(result, expected)
# int ==/!=
df["int"] = 1
df.loc[df.index[2:7], "int"] = 2
store.append("df3", df, data_columns=["int"])
result = store.select("df3", "int=2")
expected = df[df.int == 2]
tm.assert_frame_equal(result, expected)
result = store.select("df3", "int!=2")
expected = df[df.int != 2]
tm.assert_frame_equal(result, expected)
def test_select_as_multiple(setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df2["foo"] = "bar"
with ensure_clean_store(setup_path) as store:
msg = "keys must be a list/tuple"
# no tables stored
with pytest.raises(TypeError, match=msg):
store.select_as_multiple(None, where=["A>0", "B>0"], selector="df1")
store.append("df1", df1, data_columns=["A", "B"])
store.append("df2", df2)
# exceptions
with pytest.raises(TypeError, match=msg):
store.select_as_multiple(None, where=["A>0", "B>0"], selector="df1")
with pytest.raises(TypeError, match=msg):
store.select_as_multiple([None], where=["A>0", "B>0"], selector="df1")
msg = "'No object named df3 in the file'"
with pytest.raises(KeyError, match=msg):
store.select_as_multiple(
["df1", "df3"], where=["A>0", "B>0"], selector="df1"
)
with pytest.raises(KeyError, match=msg):
store.select_as_multiple(["df3"], where=["A>0", "B>0"], selector="df1")
with pytest.raises(KeyError, match="'No object named df4 in the file'"):
store.select_as_multiple(
["df1", "df2"], where=["A>0", "B>0"], selector="df4"
)
# default select
result = store.select("df1", ["A>0", "B>0"])
expected = store.select_as_multiple(
["df1"], where=["A>0", "B>0"], selector="df1"
)
tm.assert_frame_equal(result, expected)
expected = store.select_as_multiple("df1", where=["A>0", "B>0"], selector="df1")
tm.assert_frame_equal(result, expected)
# multiple
result = store.select_as_multiple(
["df1", "df2"], where=["A>0", "B>0"], selector="df1"
)
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected, check_freq=False)
# FIXME: 2021-01-20 this is failing with freq None vs 4B on some builds
# multiple (diff selector)
result = store.select_as_multiple(
["df1", "df2"], where="index>df2.index[4]", selector="df2"
)
expected = concat([df1, df2], axis=1)
expected = expected[5:]
tm.assert_frame_equal(result, expected)
# test exception for diff rows
store.append("df3", tm.makeTimeDataFrame(nper=50))
msg = "all tables must have exactly the same nrows!"
with pytest.raises(ValueError, match=msg):
store.select_as_multiple(
["df1", "df3"], where=["A>0", "B>0"], selector="df1"
)
@pytest.mark.skipif(
LooseVersion(tables.__version__) < LooseVersion("3.1.0"),
reason=("tables version does not support fix for nan selection bug: GH 4858"),
)
def test_nan_selection_bug_4858(setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame({"cols": range(6), "values": range(6)}, dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[0] = np.nan
expected = DataFrame(
{"cols": ["13.0", "14.0", "15.0"], "values": [3.0, 4.0, 5.0]},
index=[3, 4, 5],
)
# write w/o the index on that particular column
store.append("df", df, data_columns=True, index=["cols"])
result = store.select("df", where="values>2.0")
tm.assert_frame_equal(result, expected)
def test_query_with_nested_special_character(setup_path):
df = DataFrame(
{
"a": ["a", "a", "c", "b", "test & test", "c", "b", "e"],
"b": [1, 2, 3, 4, 5, 6, 7, 8],
}
)
expected = df[df.a == "test & test"]
with ensure_clean_store(setup_path) as store:
store.append("test", df, format="table", data_columns=True)
result = store.select("test", 'a = "test & test"')
tm.assert_frame_equal(expected, result)
def test_query_long_float_literal(setup_path):
# GH 14241
df = DataFrame({"A": [1000000000.0009, 1000000000.0011, 1000000000.0015]})
with ensure_clean_store(setup_path) as store:
store.append("test", df, format="table", data_columns=True)
cutoff = 1000000000.0006
result = store.select("test", f"A < {cutoff:.4f}")
assert result.empty
cutoff = 1000000000.0010
result = store.select("test", f"A > {cutoff:.4f}")
expected = df.loc[[1, 2], :]
tm.assert_frame_equal(expected, result)
exact = 1000000000.0011
result = store.select("test", f"A == {exact:.4f}")
expected = df.loc[[1], :]
tm.assert_frame_equal(expected, result)
def test_query_compare_column_type(setup_path):
# GH 15492
df = DataFrame(
{
"date": ["2014-01-01", "2014-01-02"],
"real_date": date_range("2014-01-01", periods=2),
"float": [1.1, 1.2],
"int": [1, 2],
},
columns=["date", "real_date", "float", "int"],
)
with ensure_clean_store(setup_path) as store:
store.append("test", df, format="table", data_columns=True)
ts = Timestamp("2014-01-01") # noqa
result = store.select("test", where="real_date > ts")
expected = df.loc[[1], :]
tm.assert_frame_equal(expected, result)
for op in ["<", ">", "=="]:
# non strings to string column always fail
for v in [2.1, True, Timestamp("2014-01-01"), pd.Timedelta(1, "s")]:
query = f"date {op} v"
msg = f"Cannot compare {v} of type {type(v)} to string column"
with pytest.raises(TypeError, match=msg):
store.select("test", where=query)
# strings to other columns must be convertible to type
v = "a"
for col in ["int", "float", "real_date"]:
query = f"{col} {op} v"
msg = "could not convert string to "
with pytest.raises(ValueError, match=msg):
store.select("test", where=query)
for v, col in zip(
["1", "1.1", "2014-01-01"], ["int", "float", "real_date"]
):
query = f"{col} {op} v"
result = store.select("test", where=query)
if op == "==":
expected = df.loc[[0], :]
elif op == ">":
expected = df.loc[[1], :]
else:
expected = df.loc[[], :]
tm.assert_frame_equal(expected, result)
@pytest.mark.parametrize("where", ["", (), (None,), [], [None]])
def test_select_empty_where(where):
# GH26610
    df = DataFrame([1, 2, 3])
#!/usr/bin/python
import sys
import os
import getopt
import pyliftover
import csv
import pandas as pd
def main():
params = parseArgs()
if params.liftover:
lo = pyliftover.LiftOver(params.liftover)
if params.table:
tab=pd.read_csv(params.table, sep="\t")
print("Read table:")
print(tab)
def convert(row):
name="chr"+row[params.chrom]
ret=lo.convert_coordinate(name, row[params.bp])
return(int(ret[0][1]))
tab[params.ocol] = tab.apply(convert,axis = 1)
print("Writing the output table:")
print(tab)
tab.to_csv(params.oname, sep="\t", index=False)
if params.marey:
marey=make_marey(tab, params.chrom, params.ocol)
print("Created the following Marey Map input:")
print(marey)
mout=params.oname+"_mmap.txt"
marey.to_csv(mout, sep=" ", quoting=csv.QUOTE_NONNUMERIC, index=False)
else:
params.display_help("Error: No table provided")
else:
params.display_help("Error: No liftover file provided")
# Writes a spoof Marey map input file from a table of:
# chr \t bp \t cM \t liftover.bp
def make_marey(table, chrom, bp):
    ret = pd.DataFrame()
import numpy as np
import pandas as pd
from utils.combine import merge_by_index
from utils.euclidean import euclidean_distance
def merge_xy_means(data):
grouped = data \
.groupby(['run_id', 'trial_index'], as_index=False) \
.agg(x_mean=('x', 'mean'),
y_mean=('y', 'mean'))
data = merge_by_index(data, grouped, 'x_mean', 'y_mean')
return data
def distance_from_xy_mean_square(data):
data = merge_xy_means(data)
data['distance_from_xy_mean_square'] = np.power(
euclidean_distance(data['x'], data['x_mean'],
data['y'], data['y_mean']),
2)
data['distance_from_xy_mean_square_px'] = np.power(euclidean_distance(
(data['x'] * data['window_width']),
(data['x_mean'] * data['window_width']),
(data['y'] * data['window_height']),
(data['y_mean'] * data['window_height'])), 2)
missing_values = data.loc[
pd.isna(data['distance_from_xy_mean_square']),
['x', 'y', 'x_pos', 'y_pos', 'distance_from_xy_mean_square']]
summary = data[['distance_from_xy_mean_square',
'distance_from_xy_mean_square_px']].describe()
print(f"""Squared distance from the average: \n"""
f"""{round(summary, 2)} \n""")
if len(missing_values) > 0:
print(f"""! Attention: Missing values: \n"""
f"""{missing_values} \n \n """)
else:
print(" - No missing values found. \n")
return data
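# Worked note (hypothetical numbers, for orientation only): if a trial's gaze samples
# lie 0.1 and 0.3 normalized units from the trial mean, the squared distances are
# 0.01 and 0.09, their mean is 0.05, and the precision computed further below is
# sqrt(0.05) ~= 0.224, i.e. an RMS deviation from the per-trial mean gaze position.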
def aggregate_precision_from_et_data(data_trial, data_et):
grouped = data_et.groupby(['run_id', 'trial_index'], as_index=False)[[
'distance_from_xy_mean_square',
'distance_from_xy_mean_square_px']].mean()
grouped = grouped.assign(
precision=np.sqrt(grouped['distance_from_xy_mean_square']),
precision_px=np.sqrt(grouped['distance_from_xy_mean_square_px']))
data_trial = merge_by_index(data_trial, grouped,
'precision', 'precision_px')
missing_values = data_trial.loc[
pd.notna(data_trial['x_pos']) &
        pd.isna(data_trial['precision'])]
#%%
import pandas as pd
import numpy as np
data = pd.read_csv("../data/30daychart-parttime.csv")
dw = pd.melt(
data, id_vars=["sex", "n_child", "geo"], var_name="year", value_name="percent"
)
dw["percent"] = dw["percent"].astype(float)
dw["year"] = dw["year"].astype(int)
# per sex/child/year count/avg/std for non na
aggr = (
dw.groupby(["sex", "n_child", "year"])
.agg(mean=("percent", "mean"), std=("percent", "std"), count=("percent", "count"))
.reset_index()
)
aggr.head()
aggr["ci_95"] = 1.96 * aggr["std"] / np.sqrt(aggr["count"])
aggr["low"] = aggr["mean"] - aggr["ci_95"]
aggr["high"] = aggr["mean"] + aggr["ci_95"]
#%%
aggr.head()
#%%
from scipy.stats import linregress
trended = pd.DataFrame()
for s in ["F", "M"]:
for c in [0, 1, 2]:
        series = aggr[(aggr.sex == s) & (aggr.n_child == c)].copy()
x = series["year"]
y = series["mean"]
slope, intercept, r_value, p_value, std_err = linregress(x, y)
print("slope: %f, intercept: %f" % (slope, intercept))
print("R-squared: %f" % r_value ** 2)
series["trend"] = intercept + slope * series["year"]
        trended = pd.concat([trended, series])
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
@version:
@author: zzh
@file: factor_earning_expectation.py
@time: 2019-9-19
"""
import pandas as pd
class FactorEarningExpectation():
"""
    Earnings expectations (consensus forecasts)
"""
def __init__(self):
__str__ = 'factor_earning_expectation'
        self.name = 'Earnings forecast'
        self.factor_type1 = 'Earnings forecast'
        self.factor_type2 = 'Earnings forecast'
        self.description = 'Per-stock earnings forecast factors'
@staticmethod
def NPFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['net_profit_fy1']):
"""
        :name: Consensus forecast net profit (FY1)
        :desc: Consensus net profit forecast for the first forecast year
        :unit: CNY
:view_dimension: 10000
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'net_profit_fy1': 'NPFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['net_profit_fy2']):
"""
        :name: Consensus forecast net profit (FY2)
        :desc: Consensus net profit forecast for the second forecast year
        :unit: CNY
:view_dimension: 10000
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'net_profit_fy2': 'NPFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['eps_fy1']):
"""
        :name: Consensus forecast EPS (FY1)
        :desc: Mean consensus EPS forecast for the first forecast year
        :unit: CNY
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'eps_fy1': 'EPSFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['eps_fy2']):
"""
        :name: Consensus forecast EPS (FY2)
        :desc: Mean consensus EPS forecast for the second forecast year
        :unit: CNY
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'eps_fy2': 'EPSFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def OptIncFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['operating_revenue_fy1']):
"""
        :name: Consensus forecast operating revenue (FY1)
        :desc: Mean consensus operating revenue forecast for the first forecast year
        :unit: CNY
:view_dimension: 10000
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'operating_revenue_fy1': 'OptIncFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def OptIncFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['operating_revenue_fy2']):
"""
        :name: Consensus forecast operating revenue (FY2)
        :desc: Mean consensus operating revenue forecast for the second forecast year
        :unit: CNY
:view_dimension: 10000
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'operating_revenue_fy2': 'OptIncFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPEFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['pe_fy1']):
"""
        :name: Consensus forecast price-to-earnings ratio (PE) (FY1)
        :desc: Mean consensus PE forecast for the first forecast year
        :unit: times
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'pe_fy1': 'CEPEFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPEFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['pe_fy2']):
"""
        :name: Consensus forecast price-to-earnings ratio (PE) (FY2)
        :desc: Mean consensus PE forecast for the second forecast year
        :unit: times
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'pe_fy2': 'CEPEFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPBFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['pb_fy1']):
"""
        :name: Consensus forecast price-to-book ratio (PB) (FY1)
        :desc: Mean consensus PB forecast for the first forecast year
        :unit: times
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'pb_fy1': 'CEPBFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPBFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['pb_fy2']):
"""
        :name: Consensus forecast price-to-book ratio (PB) (FY2)
        :desc: Mean consensus PB forecast for the second forecast year
        :unit: times
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'pb_fy2': 'CEPBFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPEGFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['peg_fy1']):
"""
        :name: Price/earnings-to-growth ratio (PEG) (FY1)
        :desc: PEG ratio for the first forecast year
:unit:
:view_dimension: 0.01
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'peg_fy1': 'CEPEGFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPEGFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['peg_fy2']):
"""
        :name: Price/earnings-to-growth ratio (PEG) (FY2)
        :desc: PEG ratio for the second forecast year
:unit:
:view_dimension: 0.01
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'peg_fy2': 'CEPEGFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def _change_rate(tp_earning, trade_date, pre_trade_date, colunm, factor_name):
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, colunm]
earning_expect_pre = tp_earning[tp_earning['publish_date'] == pre_trade_date].loc[:, colunm]
earning_expect = pd.merge(earning_expect, earning_expect_pre, on='security_code', how='left')
earning_expect[factor_name] = (earning_expect[colunm + '_x'] - earning_expect[colunm + '_y']) / \
earning_expect[colunm + '_y']
earning_expect.drop(columns=[colunm + '_x', colunm + '_y'], inplace=True)
return earning_expect
@staticmethod
def _change_value(tp_earning, trade_date, pre_trade_date, colunm, factor_name):
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, colunm]
earning_expect_pre = tp_earning[tp_earning['publish_date'] == pre_trade_date].loc[:, colunm]
earning_expect = pd.merge(earning_expect, earning_expect_pre, on='security_code', how='left')
earning_expect[factor_name] = (earning_expect[colunm + '_x'] - earning_expect[colunm + '_y'])
earning_expect.drop(columns=[colunm + '_x', colunm + '_y'], inplace=True)
return earning_expect
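    # Note on the two helpers above (illustrative figures only): for a forecast value
    # of 110 on trade_date and 100 on pre_trade_date, _change_rate stores
    # (110 - 100) / 100 = 0.10 while _change_value stores 110 - 100 = 10. The *RT
    # factors below use the rate variant and the *Chg factors use the value variant.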
@staticmethod
def NPFY11WRT(tp_earning, factor_earning_expect, trade_date):
"""
        :name: Change rate of consensus forecast net profit (FY1), one week
        :desc: One-week change rate of the consensus net profit forecast for the first forecast year
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 2:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[1],
'net_profit_fy1',
'NPFY11WRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY11MRT(tp_earning, factor_earning_expect, trade_date):
"""
        :name: Change rate of consensus forecast net profit (FY1), one month
        :desc: One-month change rate of the consensus net profit forecast for the first forecast year
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 3:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[2],
'net_profit_fy1',
'NPFY11MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY13MRT(tp_earning, factor_earning_expect, trade_date):
"""
        :name: Change rate of consensus forecast net profit (FY1), three months
        :desc: Three-month change rate of the consensus net profit forecast for the first forecast year
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 4:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[3],
'net_profit_fy1',
'NPFY13MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY16MRT(tp_earning, factor_earning_expect, trade_date):
"""
        :name: Change rate of consensus forecast net profit (FY1), six months
        :desc: Six-month change rate of the consensus net profit forecast for the first forecast year
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 5:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[4],
'net_profit_fy1',
'NPFY16MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY11WChg(tp_earning, factor_earning_expect, trade_date):
"""
        :name: Change of consensus forecast EPS (FY1), one week
        :desc: One-week change of the consensus EPS forecast for the first forecast year
        :unit: CNY
:view_dimension: 1
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 2:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[1],
'eps_fy1',
'EPSFY11WChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY11MChg(tp_earning, factor_earning_expect, trade_date):
"""
        :name: Change of consensus forecast EPS (FY1), one month
        :desc: One-month change of the consensus EPS forecast for the first forecast year
        :unit: CNY
:view_dimension: 1
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 3:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[2],
'eps_fy1',
'EPSFY11MChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY13MChg(tp_earning, factor_earning_expect, trade_date):
"""
        :name: Change of consensus forecast EPS (FY1), three months
        :desc: Three-month change of the consensus EPS forecast for the first forecast year
        :unit: CNY
:view_dimension: 1
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 4:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[3],
'eps_fy1',
'EPSFY13MChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY16MChg(tp_earning, factor_earning_expect, trade_date):
"""
        :name: Change of consensus forecast EPS (FY1), six months
        :desc: Six-month change of the consensus EPS forecast for the first forecast year
        :unit: CNY
:view_dimension: 1
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 5:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[4],
'eps_fy1',
'EPSFY16MChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY11WRT(tp_earning, factor_earning_expect, trade_date):
"""
        :name: Change rate of consensus forecast EPS (FY1), one week
        :desc: One-week change rate of the consensus EPS forecast for the first forecast year
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 2:
            earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[1],
                                                                    'eps_fy1',
                                                                    'EPSFY11WRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY11MRT(tp_earning, factor_earning_expect, trade_date):
"""
        :name: Change rate of consensus forecast EPS (FY1), one month
        :desc: One-month change rate of the consensus EPS forecast for the first forecast year
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 3:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[2],
'eps_fy1',
'EPSFY11MRT')
            factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
        return factor_earning_expect
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from functools import partial
from typing import Any, Callable, Dict, Tuple, Union
import numpy as np
import pandas as pd
from flask_babel import gettext as _
from pandas import DataFrame, NamedAgg, Timestamp
from superset.exceptions import InvalidPostProcessingError
NUMPY_FUNCTIONS = {
"average": np.average,
"argmin": np.argmin,
"argmax": np.argmax,
"count": np.ma.count,
"count_nonzero": np.count_nonzero,
"cumsum": np.cumsum,
"cumprod": np.cumprod,
"max": np.max,
"mean": np.mean,
"median": np.median,
"nansum": np.nansum,
"nanmin": np.nanmin,
"nanmax": np.nanmax,
"nanmean": np.nanmean,
"nanmedian": np.nanmedian,
"nanpercentile": np.nanpercentile,
"min": np.min,
"percentile": np.percentile,
"prod": np.prod,
"product": np.product,
"std": np.std,
"sum": np.sum,
"var": np.var,
}
DENYLIST_ROLLING_FUNCTIONS = (
"count",
"corr",
"cov",
"kurt",
"max",
"mean",
"median",
"min",
"std",
"skew",
"sum",
"var",
"quantile",
)
ALLOWLIST_CUMULATIVE_FUNCTIONS = (
"cummax",
"cummin",
"cumprod",
"cumsum",
)
PROPHET_TIME_GRAIN_MAP = {
"PT1S": "S",
"PT1M": "min",
"PT5M": "5min",
"PT10M": "10min",
"PT15M": "15min",
"PT30M": "30min",
"PT1H": "H",
"P1D": "D",
"P1W": "W",
"P1M": "M",
"P3M": "Q",
"P1Y": "A",
"1969-12-28T00:00:00Z/P1W": "W-SUN",
"1969-12-29T00:00:00Z/P1W": "W-MON",
"P1W/1970-01-03T00:00:00Z": "W-SAT",
"P1W/1970-01-04T00:00:00Z": "W-SUN",
}
RESAMPLE_METHOD = ("asfreq", "bfill", "ffill", "linear", "median", "mean", "sum")
FLAT_COLUMN_SEPARATOR = ", "
def _flatten_column_after_pivot(
column: Union[float, Timestamp, str, Tuple[str, ...]],
aggregates: Dict[str, Dict[str, Any]],
) -> str:
"""
Function for flattening column names into a single string. This step is necessary
to be able to properly serialize a DataFrame. If the column is a string, return
element unchanged. For multi-element columns, join column elements with a comma,
with the exception of pivots made with a single aggregate, in which case the
aggregate column name is omitted.
:param column: single element from `DataFrame.columns`
:param aggregates: aggregates
:return:
"""
if not isinstance(column, tuple):
column = (column,)
if len(aggregates) == 1 and len(column) > 1:
# drop aggregate for single aggregate pivots with multiple groupings
# from column name (aggregates always come first in column name)
column = column[1:]
return FLAT_COLUMN_SEPARATOR.join([str(col) for col in column])
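# Example of the flattening above (hypothetical pivot output): with a single
# aggregate {"SUM(num)": {...}}, the pivoted column ("SUM(num)", "UK") flattens to
# "UK" because the aggregate name is dropped; with two aggregates the same column
# would flatten to "SUM(num), UK".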
def _is_multi_index_on_columns(df: DataFrame) -> bool:
return isinstance(df.columns, pd.MultiIndex)
def validate_column_args(*argnames: str) -> Callable[..., Any]:
def wrapper(func: Callable[..., Any]) -> Callable[..., Any]:
def wrapped(df: DataFrame, **options: Any) -> Any:
if _is_multi_index_on_columns(df):
# MultiIndex column validate first level
columns = df.columns.get_level_values(0)
else:
columns = df.columns.tolist()
for name in argnames:
if name in options and not all(
elem in columns for elem in options.get(name) or []
):
raise InvalidPostProcessingError(
_("Referenced columns not available in DataFrame.")
)
return func(df, **options)
return wrapped
return wrapper
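# Sketch of how the decorator above is typically applied (operation name and keyword
# arguments are illustrative, not taken from this file): the decorated post-processing
# function names which keyword arguments carry column references, and the wrapper
# raises InvalidPostProcessingError if any referenced column is missing.
#
#     @validate_column_args("columns", "groupby")
#     def some_operation(df: DataFrame, groupby=None, columns=None) -> DataFrame:
#         ...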
def _get_aggregate_funcs(
df: DataFrame,
aggregates: Dict[str, Dict[str, Any]],
) -> Dict[str, NamedAgg]:
"""
Converts a set of aggregate config objects into functions that pandas can use as
aggregators. Currently only numpy aggregators are supported.
:param df: DataFrame on which to perform aggregate operation.
:param aggregates: Mapping from column name to aggregate config.
:return: Mapping from metric name to function that takes a single input argument.
"""
agg_funcs: Dict[str, NamedAgg] = {}
for name, agg_obj in aggregates.items():
column = agg_obj.get("column", name)
if column not in df:
raise InvalidPostProcessingError(
_(
"Column referenced by aggregate is undefined: %(column)s",
column=column,
)
)
if "operator" not in agg_obj:
raise InvalidPostProcessingError(
_(
"Operator undefined for aggregator: %(name)s",
name=name,
)
)
operator = agg_obj["operator"]
if callable(operator):
aggfunc = operator
else:
func = NUMPY_FUNCTIONS.get(operator)
if not func:
raise InvalidPostProcessingError(
_(
"Invalid numpy function: %(operator)s",
operator=operator,
)
)
options = agg_obj.get("options", {})
aggfunc = partial(func, **options)
        agg_funcs[name] = NamedAgg(column=column, aggfunc=aggfunc)
    return agg_funcs
#----------------------------------------------------------
#importing Neccessary libraries
import pandas as pd
import os.path
from os import path
from datetime import date
#----------------------------------------------------------
#Important functions
def enter_record():
n='y'
while n=='y':
s=str(date.today())
k=int(input('enter cost '))
data=[s,k]
df=pd.DataFrame({ "date" : data[0],"value":data[1]},index=[0])
if path.exists('Expense_Record.csv'):
df.to_csv('Expense_Record.csv',mode='a',header=False,index=False)
else:
df.to_csv('Expense_Record.csv',mode='a',header=['Date','Value'],index=False)
        n = input('Want to add more records? y/n ')
def display_record():
if path.exists('Expense_Record.csv'):
        print(pd.read_csv('Expense_Record.csv'))
from collections import OrderedDict
from itertools import groupby
import matplotlib.pyplot as plt
import pandas as pd
import random
import re
SMALL_SIZE = 16
MEDIUM_SIZE = 18
BIGGER_SIZE = 18
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
_nsre = re.compile('([0-9]+)')
def natural_sort_key(s):
return [int(text) if text.isdigit() else text.lower()
for text in re.split(_nsre, s)]
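# Example: sorted(["h10", "h2", "h1"], key=natural_sort_key) gives ["h1", "h2", "h10"]
# rather than the plain lexicographic order ["h1", "h10", "h2"].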
def color():
r = lambda: random.randint(0,255)
return '#%02X%02X%02X' % (r(),r(),r())
def color_code():
dc = {}
dc['wind_on'] = ['#669aaa', '#518696', '#326776']
dc['wind_off'] = ['#215968', '#104c5a', '#0e3947', '#00303d']
dc['pv'] = ['#ffffbb', '#ffff97', '#fffb4e', '#ffeb3b']
dc['bio'] = ['#c2f08e', '#aee571', '#95cb59']
dc['ror'] = ['#00378f', '#002171', '#00125e']
dc['rsvr'] = ['#6782e4', '#5472d3', '#2e56b4']
dc['nuc'] = ['#ff8180', '#e4696a', '#c34b4f']
dc['lig'] = ['#bb8874', '#a67561', '#895a47']
dc['hc'] = ['#8c7f76', '#74655c', '#5e5048']
dc['oil'] = ['#565753', '#4b4b47', '#3a3b38']
dc['other'] = [ '#a2b0b8','#cbdae3', '#b7c7cf']
dc['CCGT'] = ['#ff814b' , '#f95827']
dc['OCGT'] = ['#c62200', '#aa0000', '#880000']
dc['CU'] = ['#821a96']
dc['Li-ion'] = ['#659bfc']
dc['P2G2P'] = ['#f32f5e']
dc['PHS'] = ['#6adaad']
return dc
def get_symb(symbol, dimtojoin=None, factor=1, addstr = ''):
df = symbol.df.copy()
if dimtojoin is None:
df.loc[:,'t'] = df['symbol'].astype(str) + addstr
df.loc[:,'value'] = df['value']*factor
else:
df.loc[:,'t'] = df['symbol'].astype(str) +'-'+ df[dimtojoin].astype(str) + addstr
df.loc[:,'value'] = df['value']*factor
return df[['id','n','h','t','value']]
def get_symb_zeroUP(symbol, dimtojoin='tech', factor=1, addstr = ''):
df = symbol.df.copy()
df.loc[:,'t'] = df['symbol'].astype(str) +'-'+ df[dimtojoin].astype(str) + addstr
df.loc[df.value >= 0.0, 'value'] = 0.0
df.loc[df.value < 0.0,'value'] = df.loc[df.value < 0.0, 'value']*factor
return df[['id','n','h','t','value']]
def get_symb_zeroLO(symbol, dimtojoin='tech', factor=1, addstr = ''):
df = symbol.df.copy()
df.loc[:,'t'] = df['symbol'].astype(str) +'-'+ df[dimtojoin].astype(str) + addstr
df.loc[df.value <= 0.0, 'value'] = 0.0
df.loc[df.value > 0.0,'value'] = df.loc[df.value > 0.0, 'value']*factor
return df[['id','n','h','t','value']]
def get_changed(symbol, factor=1, addstr=''):
df = symbol.df.copy()
df.loc[:,'value'] = df['value']*factor
head = df['symbol'].unique()[0]
if addstr:
head = head + addstr
df = df.rename(columns={'value': head})
return df[['id','n','h',head]]
def change_header(symbol, name):
df = symbol.df.copy()
df = df.rename(columns={'value': name})
return df[['id','n','h',name]]
def get_rldc(symbols_dc):
STO_IN = symbols_dc['STO_IN']
G_L = symbols_dc['G_L']
STO_OUT = symbols_dc['STO_OUT']
CU = symbols_dc['agg_techCU']
con1a_bal = symbols_dc['con1a_bal']
G_INFES = symbols_dc['G_INFES']
RLDC = symbols_dc['RLDC']
RSVR_OUT = symbols_dc['RSVR_OUT']
F = symbols_dc['Fn'] # dims l,h,n
if 'ev_endogenous' in symbols_dc['features']:
EV_CHARGE = symbols_dc['EV_CHARGE'].dimreduc('ev')
EV_CHARGE.name = 'EV_CHARGE'
EV_DISCHARGE = symbols_dc['EV_DISCHARGE'].dimreduc('ev')
EV_DISCHARGE.name = 'EV_DISCHARGE'
else:
pass
symbs_list = [get_symb(STO_IN,'sto',-1,'neg'),
get_symb(STO_IN,'sto',1),
get_symb(G_L,'tech'),
get_symb(RSVR_OUT,'rsvr'),
get_symb(STO_OUT,'sto'),
get_symb_zeroUP(F,'l',1,'neg'),
get_symb_zeroUP(F,'l',-1,'pos'),
get_symb_zeroLO(F,'l',1)]
if 'ev_endogenous' in symbols_dc['features']:
symbs_list.append(get_symb(EV_DISCHARGE))
df = pd.concat(symbs_list)
df = df.set_index(['id','n','h','t']).unstack('t').fillna(0.0)
df = df.loc[:,(df != 0).any(axis=0)]
df = df['value']
df = df.join(get_changed(CU,-1,'neg').set_index(['id','n','h']))
df = df.join(get_changed(CU,1).set_index(['id','n','h']))
try:
df = df.join(get_changed(EV_CHARGE,-1,'neg').set_index(['id','n','h']))
df = df.join(get_changed(EV_CHARGE,1).set_index(['id','n','h']))
except:
pass
df = df.join(change_header(con1a_bal,'shadow').set_index(['id','n','h']))
df = df.join(change_header(G_INFES, 'infes').set_index(['id','n','h']))
df = df.join(change_header(RLDC, 'RLDC').set_index(['id','n','h']))
df = df.reset_index().sort_values(by= ['id', 'n', 'RLDC'],axis=0, ascending=False)
lt = []
for ix, gr in df.groupby(['id','n']):
dt = gr.copy()
dt.loc[:,'hr'] = [i for i in range(1,len(gr)+1)]
lt.append(dt)
    data = pd.concat(lt)
    return data
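# Minimal usage sketch for get_rldc (assumes a `symbols_dc` dictionary shaped like the
# one consumed above; variable names are illustrative only):
#
#     rldc = get_rldc(symbols_dc)
#     one_case = rldc[(rldc["id"] == some_id) & (rldc["n"] == some_node)]
#     one_case.plot(x="hr", y="RLDC")  # residual load duration curve over sorted hours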
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for Period dtype
import operator
import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import Period, PeriodIndex, Series, period_range
from pandas.core import ops
from pandas.core.arrays import TimedeltaArray
import pandas.util.testing as tm
from pandas.tseries.frequencies import to_offset
# ------------------------------------------------------------------
# Comparisons
class TestPeriodArrayLikeComparisons:
# Comparison tests for PeriodDtype vectors fully parametrized over
# DataFrame/Series/PeriodIndex/PeriodArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, box_with_array):
# GH#26689 make sure we unbox zero-dimensional arrays
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000", periods=4)
other = np.array(pi.to_numpy()[0])
pi = tm.box_expected(pi, box_with_array)
result = pi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
class TestPeriodIndexComparisons:
# TODO: parameterize over boxes
@pytest.mark.parametrize("other", ["2017", 2017])
def test_eq(self, other):
idx = PeriodIndex(["2017", "2017", "2018"], freq="D")
expected = np.array([True, True, False])
result = idx == other
tm.assert_numpy_array_equal(result, expected)
def test_pi_cmp_period(self):
idx = period_range("2007-01", periods=20, freq="M")
result = idx < idx[10]
exp = idx.values < idx.values[10]
tm.assert_numpy_array_equal(result, exp)
# TODO: moved from test_datetime64; de-duplicate with version below
def test_parr_cmp_period_scalar2(self, box_with_array):
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000-01-01", periods=10, freq="D")
val = Period("2000-01-04", freq="D")
expected = [x > val for x in pi]
ser = tm.box_expected(pi, box_with_array)
expected = tm.box_expected(expected, xbox)
result = ser > val
tm.assert_equal(result, expected)
val = pi[5]
result = ser > val
expected = [x > val for x in pi]
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_period_scalar(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
per = Period("2011-02", freq=freq)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == per, exp)
tm.assert_equal(per == base, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != per, exp)
tm.assert_equal(per != base, exp)
exp = np.array([False, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > per, exp)
tm.assert_equal(per < base, exp)
exp = np.array([True, False, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < per, exp)
tm.assert_equal(per > base, exp)
exp = np.array([False, True, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= per, exp)
tm.assert_equal(per <= base, exp)
exp = np.array([True, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= per, exp)
tm.assert_equal(per >= base, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
# TODO: could also box idx?
idx = PeriodIndex(["2011-02", "2011-01", "2011-03", "2011-05"], freq=freq)
exp = np.array([False, False, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == idx, exp)
exp = np.array([True, True, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != idx, exp)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > idx, exp)
exp = np.array([True, False, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < idx, exp)
exp = np.array([False, True, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= idx, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= idx, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi_mismatched_freq_raises(self, freq, box_with_array):
# GH#13200
# different base freq
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
msg = "Input has different freq=A-DEC from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="A")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="A") >= base
# TODO: Could parametrize over boxes for idx?
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="A")
rev_msg = (
r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=A-DEC\)"
)
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
# Different frequency
msg = "Input has different freq=4M from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="4M")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="4M") >= base
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="4M")
rev_msg = r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=4M\)"
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
result = idx1 > Period("2011-02", freq=freq)
exp = np.array([False, False, False, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("2011-02", freq=freq) < idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 == Period("NaT", freq=freq)
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) == idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 != Period("NaT", freq=freq)
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) != idx1
tm.assert_numpy_array_equal(result, exp)
idx2 = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq=freq)
result = idx1 < idx2
exp = np.array([True, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx2
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx2
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx1
exp = np.array([True, True, False, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx1
exp = np.array([False, False, True, False])
tm.assert_numpy_array_equal(result, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat_mismatched_freq_raises(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
diff = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq="4M")
msg = "Input has different freq=4M from Period(Array|Index)"
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 > diff
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 == diff
# TODO: De-duplicate with test_pi_cmp_nat
@pytest.mark.parametrize("dtype", [object, None])
def test_comp_nat(self, dtype):
left = pd.PeriodIndex(
[pd.Period("2011-01-01"), pd.NaT, pd.Period("2011-01-03")]
)
right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period("2011-01-03")])
if dtype is not None:
left = left.astype(dtype)
right = right.astype(dtype)
result = left == right
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = left != right
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == right, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(left != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != left, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > left, expected)
class TestPeriodSeriesComparisons:
def test_cmp_series_period_series_mixed_freq(self):
# GH#13200
base = Series(
[
Period("2011", freq="A"),
Period("2011-02", freq="M"),
Period("2013", freq="A"),
Period("2011-04", freq="M"),
]
)
ser = Series(
[
Period("2012", freq="A"),
Period("2011-01", freq="M"),
Period("2013", freq="A"),
Period("2011-05", freq="M"),
]
)
exp = Series([False, False, True, False])
tm.assert_series_equal(base == ser, exp)
exp = Series([True, True, False, True])
tm.assert_series_equal(base != ser, exp)
exp = Series([False, True, False, False])
tm.assert_series_equal(base > ser, exp)
exp = Series([True, False, False, True])
tm.assert_series_equal(base < ser, exp)
exp = Series([False, True, True, False])
tm.assert_series_equal(base >= ser, exp)
exp = Series([True, False, True, True])
tm.assert_series_equal(base <= ser, exp)
class TestPeriodIndexSeriesComparisonConsistency:
""" Test PeriodIndex and Period Series Ops consistency """
# TODO: needs parametrization+de-duplication
def _check(self, values, func, expected):
# Test PeriodIndex and Period Series Ops consistency
idx = pd.PeriodIndex(values)
result = func(idx)
# check that we don't pass an unwanted type to tm.assert_equal
assert isinstance(expected, (pd.Index, np.ndarray))
tm.assert_equal(result, expected)
s = pd.Series(values)
result = func(s)
exp = pd.Series(expected, name=values.name)
tm.assert_series_equal(result, exp)
def test_pi_comp_period(self):
idx = PeriodIndex(
["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"
)
f = lambda x: x == pd.Period("2011-03", freq="M")
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period("2011-03", freq="M")
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") != x
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, True, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > pd.Period("2011-03", freq="M")
exp = np.array([False, False, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, True, True, False], dtype=np.bool)
self._check(idx, f, exp)
def test_pi_comp_period_nat(self):
idx = PeriodIndex(
["2011-01", "NaT", "2011-03", "2011-04"], freq="M", name="idx"
)
f = lambda x: x == pd.Period("2011-03", freq="M")
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") == x
self._check(idx, f, exp)
f = lambda x: x == pd.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period("2011-03", freq="M")
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") != x
self._check(idx, f, exp)
f = lambda x: x != pd.NaT
exp = np.array([True, True, True, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT != x
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x < pd.Period("2011-03", freq="M")
exp = np.array([True, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > pd.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT >= x
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
# ------------------------------------------------------------------
# Arithmetic
class TestPeriodFrameArithmetic:
def test_ops_frame_period(self):
# GH#13043
df = pd.DataFrame(
{
"A": [pd.Period("2015-01", freq="M"), pd.Period("2015-02", freq="M")],
"B": [pd.Period("2014-01", freq="M"), pd.Period("2014-02", freq="M")],
}
)
assert df["A"].dtype == "Period[M]"
assert df["B"].dtype == "Period[M]"
p = pd.Period("2015-03", freq="M")
off = p.freq
# dtype will be object because of original dtype
exp = pd.DataFrame(
{
"A": np.array([2 * off, 1 * off], dtype=object),
"B": np.array([14 * off, 13 * off], dtype=object),
}
)
tm.assert_frame_equal(p - df, exp)
tm.assert_frame_equal(df - p, -1 * exp)
df2 = pd.DataFrame(
{
"A": [pd.Period("2015-05", freq="M"), pd.Period("2015-06", freq="M")],
"B": [pd.Period("2015-05", freq="M"), pd.Period("2015-06", freq="M")],
}
)
assert df2["A"].dtype == "Period[M]"
assert df2["B"].dtype == "Period[M]"
exp = pd.DataFrame(
{
"A": np.array([4 * off, 4 * off], dtype=object),
"B": np.array([16 * off, 16 * off], dtype=object),
}
)
tm.assert_frame_equal(df2 - df, exp)
tm.assert_frame_equal(df - df2, -1 * exp)
class TestPeriodIndexArithmetic:
# ---------------------------------------------------------------
# __add__/__sub__ with PeriodIndex
# PeriodIndex + other is defined for integers and timedelta-like others
# PeriodIndex - other is defined for integers, timedelta-like others,
# and PeriodIndex (with matching freq)
def test_parr_add_iadd_parr_raises(self, box_with_array):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="D", periods=5)
# TODO: parametrize over boxes for other?
rng = tm.box_expected(rng, box_with_array)
# An earlier implementation of PeriodIndex addition performed
# a set operation (union). This has since been changed to
# raise a TypeError. See GH#14164 and GH#13077 for historical
# reference.
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
rng += other
def test_pi_sub_isub_pi(self):
# GH#20049
# For historical reference see GH#14164, GH#13077.
# PeriodIndex subtraction originally performed set difference,
# then changed to raise TypeError before being implemented in GH#20049
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="D", periods=5)
off = rng.freq
expected = pd.Index([-5 * off] * 5)
result = rng - other
tm.assert_index_equal(result, expected)
rng -= other
tm.assert_index_equal(rng, expected)
def test_pi_sub_pi_with_nat(self):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = rng[1:].insert(0, pd.NaT)
assert other[1:].equals(rng[1:])
result = rng - other
off = rng.freq
expected = pd.Index([pd.NaT, 0 * off, 0 * off, 0 * off, 0 * off])
tm.assert_index_equal(result, expected)
def test_parr_sub_pi_mismatched_freq(self, box_with_array):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="H", periods=5)
# TODO: parametrize over boxes for other?
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(IncompatibleFrequency):
rng - other
@pytest.mark.parametrize("n", [1, 2, 3, 4])
def test_sub_n_gt_1_ticks(self, tick_classes, n):
# GH 23878
p1_d = "19910905"
p2_d = "19920406"
p1 = pd.PeriodIndex([p1_d], freq=tick_classes(n))
p2 = pd.PeriodIndex([p2_d], freq=tick_classes(n))
expected = pd.PeriodIndex([p2_d], freq=p2.freq.base) - pd.PeriodIndex(
[p1_d], freq=p1.freq.base
)
tm.assert_index_equal((p2 - p1), expected)
@pytest.mark.parametrize("n", [1, 2, 3, 4])
@pytest.mark.parametrize(
"offset, kwd_name",
[
(pd.offsets.YearEnd, "month"),
(pd.offsets.QuarterEnd, "startingMonth"),
(pd.offsets.MonthEnd, None),
(pd.offsets.Week, "weekday"),
],
)
def test_sub_n_gt_1_offsets(self, offset, kwd_name, n):
# GH 23878
kwds = {kwd_name: 3} if kwd_name is not None else {}
p1_d = "19910905"
p2_d = "19920406"
freq = offset(n, normalize=False, **kwds)
p1 = pd.PeriodIndex([p1_d], freq=freq)
p2 = pd.PeriodIndex([p2_d], freq=freq)
result = p2 - p1
expected = pd.PeriodIndex([p2_d], freq=freq.base) - pd.PeriodIndex(
[p1_d], freq=freq.base
)
tm.assert_index_equal(result, expected)
# -------------------------------------------------------------
# Invalid Operations
@pytest.mark.parametrize("other", [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize("op", [operator.add, ops.radd, operator.sub, ops.rsub])
def test_parr_add_sub_float_raises(self, op, other, box_with_array):
dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], freq="D")
pi = dti.to_period("D")
pi = tm.box_expected(pi, box_with_array)
with pytest.raises(TypeError):
op(pi, other)
@pytest.mark.parametrize(
"other",
[
# datetime scalars
pd.Timestamp.now(),
pd.Timestamp.now().to_pydatetime(),
pd.Timestamp.now().to_datetime64(),
# datetime-like arrays
pd.date_range("2016-01-01", periods=3, freq="H"),
pd.date_range("2016-01-01", periods=3, tz="Europe/Brussels"),
pd.date_range("2016-01-01", periods=3, freq="S")._data,
pd.date_range("2016-01-01", periods=3, tz="Asia/Tokyo")._data,
# Miscellaneous invalid types
],
)
def test_parr_add_sub_invalid(self, other, box_with_array):
# GH#23215
rng = pd.period_range("1/1/2000", freq="D", periods=3)
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
other + rng
with pytest.raises(TypeError):
rng - other
with pytest.raises(TypeError):
other - rng
# -----------------------------------------------------------------
# __add__/__sub__ with ndarray[datetime64] and ndarray[timedelta64]
def test_pi_add_sub_td64_array_non_tick_raises(self):
rng = pd.period_range("1/1/2000", freq="Q", periods=3)
tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
with pytest.raises(IncompatibleFrequency):
rng + tdarr
with pytest.raises(IncompatibleFrequency):
tdarr + rng
with pytest.raises(IncompatibleFrequency):
rng - tdarr
with pytest.raises(TypeError):
tdarr - rng
def test_pi_add_sub_td64_array_tick(self):
# PeriodIndex + Timedelta-like is allowed only with
# tick-like frequencies
rng = pd.period_range("1/1/2000", freq="90D", periods=3)
tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = pd.period_range("12/31/1999", freq="90D", periods=3)
result = rng + tdi
tm.assert_index_equal(result, expected)
result = rng + tdarr
tm.assert_index_equal(result, expected)
result = tdi + rng
tm.assert_index_equal(result, expected)
result = tdarr + rng
tm.assert_index_equal(result, expected)
expected = pd.period_range("1/2/2000", freq="90D", periods=3)
result = rng - tdi
tm.assert_index_equal(result, expected)
result = rng - tdarr
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
tdarr - rng
with pytest.raises(TypeError):
tdi - rng
# -----------------------------------------------------------------
# operations with array/Index of DateOffset objects
@pytest.mark.parametrize("box", [np.array, pd.Index])
def test_pi_add_offset_array(self, box):
# GH#18849
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("2016Q2")])
offs = box(
[
pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12),
]
)
expected = pd.PeriodIndex([pd.Period("2015Q2"), pd.Period("2015Q4")])
with tm.assert_produces_warning(PerformanceWarning):
res = pi + offs
tm.assert_index_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = offs + pi
tm.assert_index_equal(res2, expected)
unanchored = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
# addition/subtraction ops with incompatible offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
pi + unanchored
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
unanchored + pi
@pytest.mark.parametrize("box", [np.array, pd.Index])
def test_pi_sub_offset_array(self, box):
# GH#18824
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("2016Q2")])
other = box(
[
pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12),
]
)
expected = PeriodIndex([pi[n] - other[n] for n in range(len(pi))])
with tm.assert_produces_warning(PerformanceWarning):
res = pi - other
tm.assert_index_equal(res, expected)
anchored = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
pi - anchored
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
anchored - pi
def test_pi_add_iadd_int(self, one):
# Variants of `one` for #19012
rng = pd.period_range("2000-01-01 09:00", freq="H", periods=10)
result = rng + one
expected = pd.period_range("2000-01-01 10:00", freq="H", periods=10)
tm.assert_index_equal(result, expected)
rng += one
tm.assert_index_equal(rng, expected)
def test_pi_sub_isub_int(self, one):
"""
PeriodIndex.__sub__ and __isub__ with several representations of
the integer 1, e.g. int, np.int64, np.uint8, ...
"""
rng = pd.period_range("2000-01-01 09:00", freq="H", periods=10)
result = rng - one
expected = pd.period_range("2000-01-01 08:00", freq="H", periods=10)
tm.assert_index_equal(result, expected)
rng -= one
tm.assert_index_equal(rng, expected)
@pytest.mark.parametrize("five", [5, np.array(5, dtype=np.int64)])
def test_pi_sub_intlike(self, five):
rng = period_range("2007-01", periods=50)
result = rng - five
exp = rng + (-five)
tm.assert_index_equal(result, exp)
def test_pi_sub_isub_offset(self):
# offset
# DateOffset
rng = pd.period_range("2014", "2024", freq="A")
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range("2009", "2019", freq="A")
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
rng = pd.period_range("2014-01", "2016-12", freq="M")
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range("2013-08", "2016-07", freq="M")
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
def test_pi_add_offset_n_gt1(self, box_transpose_fail):
# GH#23215
# add offset to PeriodIndex with freq.n > 1
box, transpose = box_transpose_fail
per = pd.Period("2016-01", freq="2M")
pi = pd.PeriodIndex([per])
expected = pd.PeriodIndex(["2016-03"], freq="2M")
pi = tm.box_expected(pi, box, transpose=transpose)
expected = tm.box_expected(expected, box, transpose=transpose)
result = pi + per.freq
tm.assert_equal(result, expected)
result = per.freq + pi
tm.assert_equal(result, expected)
def test_pi_add_offset_n_gt1_not_divisible(self, box_with_array):
# GH#23215
# PeriodIndex with freq.n > 1 add offset with offset.n % freq.n != 0
pi = pd.PeriodIndex(["2016-01"], freq="2M")
expected = pd.PeriodIndex(["2016-04"], freq="2M")
# FIXME: with transposing these tests fail
pi = tm.box_expected(pi, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
result = pi + to_offset("3M")
tm.assert_equal(result, expected)
result = to_offset("3M") + pi
tm.assert_equal(result, expected)
# ---------------------------------------------------------------
# __add__/__sub__ with integer arrays
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_pi_add_intarray(self, int_holder, op):
# GH#19959
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("NaT")])
other = int_holder([4, -1])
result = op(pi, other)
expected = pd.PeriodIndex([pd.Period("2016Q1"), pd.Period("NaT")])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_pi_sub_intarray(self, int_holder):
# GH#19959
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("NaT")])
other = int_holder([4, -1])
result = pi - other
expected = pd.PeriodIndex([pd.Period("2014Q1"), pd.Period("NaT")])
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
other - pi
# ---------------------------------------------------------------
# Timedelta-like (timedelta, timedelta64, Timedelta, Tick)
# TODO: Some of these are misnomers because of non-Tick DateOffsets
def test_pi_add_timedeltalike_minute_gt1(self, three_days):
# GH#23031 adding a time-delta-like offset to a PeriodArray that has
# minute frequency with n != 1. A more general case is tested below
# in test_pi_add_timedeltalike_tick_gt1, but here we write out the
# expected result more explicitly.
other = three_days
rng = pd.period_range("2014-05-01", periods=3, freq="2D")
expected = pd.PeriodIndex(["2014-05-04", "2014-05-06", "2014-05-08"], freq="2D")
result = rng + other
tm.assert_index_equal(result, expected)
result = other + rng
tm.assert_index_equal(result, expected)
# subtraction
expected = pd.PeriodIndex(["2014-04-28", "2014-04-30", "2014-05-02"], freq="2D")
result = rng - other
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
other - rng
@pytest.mark.parametrize("freqstr", ["5ns", "5us", "5ms", "5s", "5T", "5h", "5d"])
def test_pi_add_timedeltalike_tick_gt1(self, three_days, freqstr):
# GH#23031 adding a time-delta-like offset to a PeriodArray that has
# tick-like frequency with n != 1
other = three_days
rng = pd.period_range("2014-05-01", periods=6, freq=freqstr)
expected = pd.period_range(rng[0] + other, periods=6, freq=freqstr)
result = rng + other
tm.assert_index_equal(result, expected)
result = other + rng
tm.assert_index_equal(result, expected)
# subtraction
expected = pd.period_range(rng[0] - other, periods=6, freq=freqstr)
result = rng - other
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
other - rng
def test_pi_add_iadd_timedeltalike_daily(self, three_days):
# Tick
other = three_days
rng = pd.period_range("2014-05-01", "2014-05-15", freq="D")
expected = pd.period_range("2014-05-04", "2014-05-18", freq="D")
result = rng + other
tm.assert_index_equal(result, expected)
rng += other
tm.assert_index_equal(rng, expected)
def test_pi_sub_isub_timedeltalike_daily(self, three_days):
# Tick-like 3 Days
other = three_days
rng = pd.period_range("2014-05-01", "2014-05-15", freq="D")
expected = pd.period_range("2014-04-28", "2014-05-12", freq="D")
result = rng - other
tm.assert_index_equal(result, expected)
rng -= other
tm.assert_index_equal(rng, expected)
def test_pi_add_sub_timedeltalike_freq_mismatch_daily(self, not_daily):
other = not_daily
rng = pd.period_range("2014-05-01", "2014-05-15", freq="D")
msg = "Input has different freq(=.+)? from Period.*?\\(freq=D\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
rng + other
with pytest.raises(IncompatibleFrequency, match=msg):
rng += other
with pytest.raises(IncompatibleFrequency, match=msg):
rng - other
with pytest.raises(IncompatibleFrequency, match=msg):
rng -= other
def test_pi_add_iadd_timedeltalike_hourly(self, two_hours):
other = two_hours
rng = pd.period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="H")
expected = pd.period_range("2014-01-01 12:00", "2014-01-05 12:00", freq="H")
result = rng + other
tm.assert_index_equal(result, expected)
rng += other
tm.assert_index_equal(rng, expected)
def test_pi_add_timedeltalike_mismatched_freq_hourly(self, not_hourly):
other = not_hourly
rng = pd.period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="H")
msg = "Input has different freq(=.+)? from Period.*?\\(freq=H\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
rng + other
with pytest.raises(IncompatibleFrequency, match=msg):
rng += other
def test_pi_sub_isub_timedeltalike_hourly(self, two_hours):
other = two_hours
rng = pd.period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="H")
expected = pd.period_range("2014-01-01 08:00", "2014-01-05 08:00", freq="H")
result = rng - other
tm.assert_index_equal(result, expected)
rng -= other
tm.assert_index_equal(rng, expected)
def test_add_iadd_timedeltalike_annual(self):
# offset
# DateOffset
rng = pd.period_range("2014", "2024", freq="A")
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range("2019", "2029", freq="A")
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
def test_pi_add_sub_timedeltalike_freq_mismatch_annual(self, mismatched_freq):
other = mismatched_freq
rng = pd.period_range("2014", "2024", freq="A")
msg = "Input has different freq(=.+)? from Period.*?\\(freq=A-DEC\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
rng + other
with pytest.raises(IncompatibleFrequency, match=msg):
rng += other
with pytest.raises(IncompatibleFrequency, match=msg):
rng - other
with pytest.raises(IncompatibleFrequency, match=msg):
rng -= other
def test_pi_add_iadd_timedeltalike_M(self):
rng = pd.period_range("2014-01", "2016-12", freq="M")
expected = pd.period_range("2014-06", "2017-05", freq="M")
result = rng + pd.offsets.MonthEnd(5)
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
def test_pi_add_sub_timedeltalike_freq_mismatch_monthly(self, mismatched_freq):
other = mismatched_freq
rng = pd.period_range("2014-01", "2016-12", freq="M")
msg = "Input has different freq(=.+)? from Period.*?\\(freq=M\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
rng + other
with pytest.raises(IncompatibleFrequency, match=msg):
rng += other
with pytest.raises(IncompatibleFrequency, match=msg):
rng - other
with pytest.raises(IncompatibleFrequency, match=msg):
rng -= other
def test_parr_add_sub_td64_nat(self, box_transpose_fail):
# GH#23320 special handling for timedelta64("NaT")
box, transpose = box_transpose_fail
pi = pd.period_range("1994-04-01", periods=9, freq="19D")
other = np.timedelta64("NaT")
expected = pd.PeriodIndex(["NaT"] * 9, freq="19D")
obj = tm.box_expected(pi, box, transpose=transpose)
expected = tm.box_expected(expected, box, transpose=transpose)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
other - obj
@pytest.mark.parametrize(
"other",
[
np.array(["NaT"] * 9, dtype="m8[ns]"),
TimedeltaArray._from_sequence(["NaT"] * 9),
],
)
def test_parr_add_sub_tdt64_nat_array(self, box_df_fail, other):
        # FIXME: DataFrame fails because when operating column-wise
# timedelta64 entries become NaT and are treated like datetimes
box = box_df_fail
pi = pd.period_range("1994-04-01", periods=9, freq="19D")
expected = pd.PeriodIndex(["NaT"] * 9, freq="19D")
obj = tm.box_expected(pi, box)
expected = tm.box_expected(expected, box)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
other - obj
# ---------------------------------------------------------------
# Unsorted
def test_parr_add_sub_index(self):
# Check that PeriodArray defers to Index on arithmetic ops
pi = pd.period_range("2000-12-31", periods=3)
parr = pi.array
result = parr - pi
expected = pi - pi
tm.assert_index_equal(result, expected)
class TestPeriodSeriesArithmetic:
def test_ops_series_timedelta(self):
# GH#13043
ser = pd.Series(
[pd.Period("2015-01-01", freq="D"), pd.Period("2015-01-02", freq="D")],
name="xxx",
)
assert ser.dtype == "Period[D]"
expected = pd.Series(
[pd.Period("2015-01-02", freq="D"), pd.Period("2015-01-03", freq="D")],
name="xxx",
)
result = ser + pd.Timedelta("1 days")
tm.assert_series_equal(result, expected)
result = pd.Timedelta("1 days") + ser
tm.assert_series_equal(result, expected)
result = ser + pd.tseries.offsets.Day()
tm.assert_series_equal(result, expected)
result = pd.tseries.offsets.Day() + ser
tm.assert_series_equal(result, expected)
def test_ops_series_period(self):
# GH#13043
ser = pd.Series(
[pd.Period("2015-01-01", freq="D"), pd.Period("2015-01-02", freq="D")],
name="xxx",
)
assert ser.dtype == "Period[D]"
per = pd.Period("2015-01-10", freq="D")
off = per.freq
# dtype will be object because of original dtype
expected = pd.Series([9 * off, 8 * off], name="xxx", dtype=object)
tm.assert_series_equal(per - ser, expected)
tm.assert_series_equal(ser - per, -1 * expected)
s2 = pd.Series(
[pd.Period("2015-01-05", freq="D"), pd.Period("2015-01-04", freq="D")],
name="xxx",
)
assert s2.dtype == "Period[D]"
expected = pd.Series([4 * off, 2 * off], name="xxx", dtype=object)
tm.assert_series_equal(s2 - ser, expected)
tm.assert_series_equal(ser - s2, -1 * expected)
class TestPeriodIndexSeriesMethods:
""" Test PeriodIndex and Period Series Ops consistency """
def _check(self, values, func, expected):
idx = pd.PeriodIndex(values)
result = func(idx)
tm.assert_equal(result, expected)
ser = pd.Series(values)
result = func(ser)
exp = pd.Series(expected, name=values.name)
tm.assert_series_equal(result, exp)
def test_pi_ops(self):
idx = PeriodIndex(
["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"
)
expected = PeriodIndex(
["2011-03", "2011-04", "2011-05", "2011-06"], freq="M", name="idx"
)
self._check(idx, lambda x: x + 2, expected)
self._check(idx, lambda x: 2 + x, expected)
self._check(idx + 2, lambda x: x - 2, idx)
result = idx - Period("2011-01", freq="M")
off = idx.freq
exp = | pd.Index([0 * off, 1 * off, 2 * off, 3 * off], name="idx") | pandas.Index |
import pandas as pd
import os
import random
from tqdm import tqdm
def read_csv_file(filename):
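    # Minimal helper: read a CSV with pandas, inferring gzip compression from a ".gz" suffix.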
compression = None
if filename.endswith(".gz"):
compression = 'gzip'
return | pd.read_csv(filename, sep=",", index_col=None, header=0, compression=compression) | pandas.read_csv |
# Common Python library imports
import difflib
from concurrent.futures import ThreadPoolExecutor as TPE
from multiprocessing import cpu_count
# Pip package imports
import pandas as pd
from loguru import logger
# Internal package imports
from miner.core import IHandler, Converter
from miner.footballdata.scrapper import FootballDataRequest
__all__ = ["FootballDataHandler", "get_default_converter"]
def get_default_converter():
try:
from miner.footballdata.converters import SqlConverter
logger.debug("Class \'SqlConverter\' selected.")
return SqlConverter
except ImportError as err:
logger.warning(err)
try:
import pandas as pd
# TODO: Return with the fallback pandas converter
except ImportError as err:
logger.warning(err)
logger.debug("Class \'Converter\' selected.")
logger.warning("No [db_conn, pandas] packages are found. Falling back to the default Converter. Please makes sure if this is the expected behaviour")
return Converter
class FootballDataHandler(IHandler):
class SqlQuery(object):
def __init__(self, *args, **kwargs):
pass
def get_matches_where_odds_are_null(self, start_date, end_date):
try:
from db_conn.connection.postgresql import ConnectionPool
from db_conn.query.sc_soccer.select import get_matches_where_odds_are_null
except ImportError as err:
logger.error(err)
return pd.DataFrame()
else:
pool = ConnectionPool()
return pool.sql_query(get_matches_where_odds_are_null(start_date, end_date))
name = "Football-Data Scrapper"
slug = "football-data-scrapper"
version = "v0_1"
default_config = {
'years': ["19/20", "18/19", "17/18", "16/17", "15/16", "14/15", "13/14", "11/12", "10/11", "09/10", "08/09", "07/08", "06/07", "05/06"],
'alias': {
'Wolverhampton': 'Wolves',
'PSG': 'Paris SG',
'Bremen': '<NAME>',
'Fortuna': '<NAME>',
'1. FC Köln': 'FC Koln',
'Mainz 05': 'Mainz',
'Athletic': '<NAME>',
'Real Sociedad': 'Sociedad',
'ACR Messina': 'Messina',
'<NAME>': 'Siena',
'<NAME>.': '<NAME>',
'Deportivo La Coruña': 'La Coruna',
},
'multithreading': False,
'num_of_threads': cpu_count()
}
def __init__(self, *args, **kwargs):
kwargs['config'] = { **FootballDataHandler.default_config, **kwargs.get('config', {}) }
kwargs['converter'] = kwargs.get('converter', get_default_converter())
m_kwargs = {**{
'name': FootballDataHandler.name,
'slug': FootballDataHandler.slug,
'version': FootballDataHandler.version
}, **kwargs}
super(FootballDataHandler, self).__init__(*args, **m_kwargs)
self._query_executor = kwargs.get('query', FootballDataHandler.SqlQuery())
# Create the singleton Sofa requester
self._req = FootballDataRequest()
def _get_close_match(self, name, fd_name_list):
return difflib.get_close_matches(name, fd_name_list, cutoff=0.8)
#if len(result) == 0:
#pass
#logger.error("Team name: %s not found in list: %s" % (name, fd_name_list))
#return result
def _match_name(self, grp_data, football_df, key_var, curr_date):
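        # Fuzzy-match the scraped team names against Football-Data's HomeTeam/AwayTeam columns
        # for a single day: try the home name (or its configured alias) first, and if that is
        # missing or ambiguous fall back to the away name, combining both to disambiguate.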
selected_match = pd.DataFrame()
fd_home = football_df['HomeTeam'].to_list()
fd_away = football_df['AwayTeam'].to_list()
name_h = grp_data[key_var % 'home']
if name_h not in self._get_config('alias').keys():
result_h = self._get_close_match(name_h, fd_home)
else:
result_h = [self._get_config('alias')[name_h]]
# 0 or more than 1 result.
if len(result_h) != 1:
# Match the away team names
name_a = grp_data[key_var % 'away']
if name_a not in self._get_config('alias').keys():
result_a = self._get_close_match(name_a, fd_away)
else:
result_a = [self._get_config('alias')[name_a]]
            # If neither the home nor the away name produced a match, log a warning and
            # fall through so an empty selection is returned.
            if len(result_h) == 0 and len(result_a) == 0:
                logger.warning("At date: %s no matched name for: '%s' with the possibilities: %s. All possibilities that day [Home]: %s | [Away]: %s" % (
                    curr_date, [name_h, name_a], (result_h + result_a), fd_home, fd_away))
elif len(result_a) == 1:
selected_match = football_df[(football_df['AwayTeam'] == result_a[0])]
else:
                # Select the rows where the home team is a partial match, filtering exactly on the away team.
selected_match = football_df[((football_df['AwayTeam'] == result_a[0]) & (
football_df['HomeTeam'].isin(result_h)))]
else:
# Filter with home team only
selected_match = football_df[football_df['HomeTeam'] == result_h[0]]
return selected_match
def _team_name_matcher(self, data, football_df, q, curr_date):
for row_index, grp_data in data.iterrows():
try:
selected_match = self._match_name(grp_data, football_df, "%s_team_short", curr_date)
if len(selected_match) == 0:
selected_match = self._match_name(grp_data, football_df, "%s_team", curr_date)
if len(selected_match) == 0:
continue
q.update_match_statistic(grp_data['id'], selected_match)
q.update_match_odds(grp_data['id'], selected_match)
except Exception as err:
logger.error(err)
def _fetch_date(self, curr_date, *args, **kwargs):
pass
def _process(self, input_tuple):
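        # input_tuple is (tournament, season, dataframe of matches with missing odds),
        # as assembled in _do_fetch below.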
q = self._converter()
tr, season, df = input_tuple
football_df = self._req.parse_odds(tr, season)
# Convert Date object
df['date'] = pd.to_datetime(df['date'], format='%Y-%m-%d')
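        # Football-Data CSVs are inconsistent about the year format, so fall back
        # from %d/%m/%Y to %d/%m/%y when parsing fails.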
try:
football_df['Date'] = pd.to_datetime(football_df['Date'], format='%d/%m/%Y')
except Exception:
football_df['Date'] = pd.to_datetime(football_df['Date'], format='%d/%m/%y')
#football_df['Date'] = football_df['Date'].dt.date
group_df = df.groupby(pd.Grouper(key='date', freq='1D'), group_keys=False)
for curr_date, group_data in group_df:
# Filter for dates
            filtered_df = football_df[football_df['Date'] == curr_date]
            self._team_name_matcher(group_data, filtered_df, q, curr_date)
return q.get()
def _do_fetch(self, start_date, end_date, *args, **kwargs):
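        # Fetch the matches whose odds are still NULL, split them per (tournament, season),
        # then enrich each group (on a thread pool when the 'multithreading' config flag is set).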
# Get the database query as dataframe
query_df = self._query_executor.get_matches_where_odds_are_null(start_date, end_date)
seasons = query_df['season'].unique()
tournaments = query_df['tournament'].unique()
if self._get_config('multithreading'):
# Multithreading
param_list = []
            # Build one work item per (tournament, season) pair
for tr in tournaments:
# Filter for the tournament
filtered_df = query_df[(query_df['tournament'] == tr)]
for season in seasons:
# Filter for the season
season_filtered_df = filtered_df[ (filtered_df['season'] == season) ]
param_list.append( tuple( [tr, season, season_filtered_df]) )
# player_id_gen = split_into(player_ids, cpu_count() * 5)
with TPE(max_workers=self._get_config('num_of_threads')) as worker_pool:
res_list = worker_pool.map(lambda x: self._process(x), param_list)
return | pd.concat(res_list) | pandas.concat |
# Debit card data compilation
import pandas as pd
cols_list = ['UNI_PT_KEY', 'CIF', 'CARD_CLASS_CODE', 'CARD_NUM', 'PRODUCT',
'PRIMARY_ACCOUNT', 'CARD_SEGMENT', 'CARD_BIN', 'CARD_RANGE', 'EMBLEM_ID',
'ACCOUNT_OPEN_DATE', 'CARD_ISSUE_DATE', 'CARD_EXPIRY_DATE', 'CARD_ACTIVATION_DATE',
'FIRST_TRN_DATE', 'CARD_ACT_FLAG','IS_CARD_WITH_TOKEN']
debit = pd.read_csv("debitcards.csv", usecols=cols_list, dtype=str, sep=";", error_bad_lines=False, low_memory=False)
a = debit["CARD_NUM"].nunique()
b = debit["UNI_PT_KEY"].nunique()
c = debit["CIF"].nunique()
print("# of UNI_PT_KEY = " +str(b))
print("# of CARD_NUM = " + str(a))
print("# of CIF = " + str(c))
#other products
other_products = pd.read_csv("other_metrics.csv", sep=";", dtype=str)
other_products["OTHER_PRODUCTS"] = 1
dc_other_products = debit.merge(other_products, how="left", on="UNI_PT_KEY")
dc_other_products["OTHER_PRODUCTS"] = dc_other_products["OTHER_PRODUCTS"].fillna(0).astype(int).astype(str)
print("matched records = " + str(dc_other_products["OTHER_PRODUCTS"].astype(int).sum()))
#mobile banking
mobile_banking = pd.read_csv("mobile_banking.csv", sep=";", dtype=str)
mobile_banking["MOBILE_BANKING"] = 1
mobile_banking = pd.DataFrame(mobile_banking)
dc_mobile_banking = dc_other_products.merge(mobile_banking, how="left", on="UNI_PT_KEY")
dc_mobile_banking["MOBILE_BANKING"] = dc_mobile_banking["MOBILE_BANKING"].fillna(0).astype(int).astype(str)
print("matched records = " + str(dc_mobile_banking["MOBILE_BANKING"].astype(int).sum()))
#internet banking
internet_banking = pd.read_csv("internet_banking.csv", sep=";", dtype=str)
internet_banking["INTERNET_BANKING"] = 1
dc_internet_banking = dc_mobile_banking.merge(internet_banking, how="left", on="UNI_PT_KEY")
dc_internet_banking["INTERNET_BANKING"] = dc_internet_banking["INTERNET_BANKING"].fillna(0).astype(int).astype(str)
print("matched records = " + str(dc_internet_banking["INTERNET_BANKING"].astype(int).sum()))
#branch delivery
branch_delivery = pd.read_csv("branch_delivery.csv", sep=";", dtype=str)
branch_delivery["BRANCH_DELIVERY"] = 1
dc_branch_delivery = dc_internet_banking.merge(branch_delivery, how="left", on="CARD_NUM")
dc_branch_delivery["BRANCH_DELIVERY"] = dc_branch_delivery["BRANCH_DELIVERY"].fillna(0).astype(int).astype(str)
print("matched records = " + str(dc_branch_delivery["BRANCH_DELIVERY"].astype(int).sum()))
#staff
staff = pd.read_csv("staff_flag.csv", sep=";", dtype=str)
staff["STAFF_FLAG"] = 1
dc_staff_flag = dc_branch_delivery.merge(staff, how="left", on="UNI_PT_KEY")
dc_staff_flag["STAFF_FLAG"] = dc_staff_flag["STAFF_FLAG"].fillna(0).astype(int).astype(str)
print("matched records = " + str(dc_staff_flag["STAFF_FLAG"].astype(int).sum()))
#email phone
email_phone = pd.read_csv("contact_email_phone.csv", sep=";", dtype=str, error_bad_lines=False, low_memory=False)
dc_email_phone = dc_staff_flag.merge(email_phone, how="left", on ="UNI_PT_KEY")
#contact address
contact_address = pd.read_csv("customer_address.csv", sep=";", dtype=str)
dc_contact_address = dc_email_phone.merge(contact_address, how="left", on="CARD_NUM")
# owner vs holder
owner_vs_holder = pd.read_csv("card_ownervsholder_dc.csv", sep=";").applymap(str)
dc_owner_flag = dc_contact_address.merge(owner_vs_holder, how="left", on="CARD_NUM")
dc_owner_flag["OWNER_FLAG"] = dc_owner_flag["OWNER_FLAG"].fillna(0).astype(int).astype(str)
print("matched records = " + str(dc_owner_flag["OWNER_FLAG"].astype(int).sum()))
# current balance (run the SQL script again and compare)
current_balance = pd.read_csv("debit_current_balance.csv", sep=";", low_memory=False, error_bad_lines=False)
current_balance["SRC_ID"] = current_balance["SRC_ID"].astype(int).astype(str)
current_balance["CA_BAL"] = current_balance["CA_BAL"].apply(lambda x: x.replace(",", ".") if isinstance(x, str) else x).astype(str)
current_balance.drop_duplicates(subset="SRC_ID", keep="first", inplace=True)
dc_current_balance = dc_owner_flag.merge(current_balance, how="left", left_on="PRIMARY_ACCOUNT", right_on="SRC_ID")
dc_current_balance.drop("SRC_ID", axis=1, inplace=True)
del(current_balance, dc_owner_flag, owner_vs_holder, contact_address, email_phone, staff, branch_delivery, internet_banking,
mobile_banking, other_products, dc_contact_address, dc_email_phone, dc_staff_flag, dc_branch_delivery, dc_internet_banking, dc_mobile_banking, dc_other_products,debit)
# insurance
cols_list = ["CARD_NUM", "INSURANCE_FLAG"]
insurance_flag = | pd.read_csv("16_dc_insurance.csv", sep=";", usecols=cols_list) | pandas.read_csv |
import numpy as np
from datetime import timedelta
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
from pandas import to_timedelta
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas import (Series, Timedelta, DataFrame, Timestamp, TimedeltaIndex,
timedelta_range, date_range, DatetimeIndex, Int64Index,
_np_version_under1p10, Float64Index, Index, tslib)
from pandas.tests.test_base import Ops
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
        # don't allow division by NaT (maybe we could in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
# GH 9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
self.assertNotIn('foo', ts.__dict__.keys())
self.assertRaises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S'
]:
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.timedelta_range('1', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_repeat(self):
index = pd.timedelta_range('1 days', periods=2, freq='D')
exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = TimedeltaIndex(['1 days', 'NaT', '3 days'])
exp = TimedeltaIndex(['1 days', '1 days', '1 days',
'NaT', 'NaT', 'NaT',
'3 days', '3 days', '3 days'])
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_nat(self):
self.assertIs(pd.TimedeltaIndex._na_value, pd.NaT)
self.assertIs(pd.TimedeltaIndex([])._na_value, pd.NaT)
idx = pd.TimedeltaIndex(['1 days', '2 days'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.asobject.equals(idx2.asobject))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
def test_ops(self):
td = Timedelta(10, unit='d')
self.assertEqual(-td, Timedelta(-10, unit='d'))
self.assertEqual(+td, Timedelta(10, unit='d'))
self.assertEqual(td - td, Timedelta(0, unit='ns'))
self.assertTrue((td - pd.NaT) is pd.NaT)
self.assertEqual(td + td, Timedelta(20, unit='d'))
self.assertTrue((td + pd.NaT) is pd.NaT)
self.assertEqual(td * 2, Timedelta(20, unit='d'))
self.assertTrue((td * pd.NaT) is pd.NaT)
self.assertEqual(td / 2, Timedelta(5, unit='d'))
self.assertEqual(abs(td), td)
self.assertEqual(abs(-td), td)
self.assertEqual(td / td, 1)
self.assertTrue((td / pd.NaT) is np.nan)
# invert
self.assertEqual(-td, Timedelta('-10d'))
self.assertEqual(td * -1, Timedelta('-10d'))
self.assertEqual(-1 * td, Timedelta('-10d'))
self.assertEqual(abs(-td), Timedelta('10d'))
# invalid
self.assertRaises(TypeError, lambda: Timedelta(11, unit='d') // 2)
# invalid multiply with another timedelta
self.assertRaises(TypeError, lambda: td * td)
# can't operate with integers
self.assertRaises(TypeError, lambda: td + 2)
self.assertRaises(TypeError, lambda: td - 2)
def test_ops_offsets(self):
td = Timedelta(10, unit='d')
self.assertEqual(Timedelta(241, unit='h'), td + pd.offsets.Hour(1))
self.assertEqual(Timedelta(241, unit='h'), pd.offsets.Hour(1) + td)
self.assertEqual(240, td / pd.offsets.Hour(1))
self.assertEqual(1 / 240.0, pd.offsets.Hour(1) / td)
self.assertEqual(Timedelta(239, unit='h'), td - pd.offsets.Hour(1))
self.assertEqual(Timedelta(-239, unit='h'), pd.offsets.Hour(1) - td)
def test_ops_ndarray(self):
td = Timedelta('1 day')
# timedelta, timedelta
other = pd.to_timedelta(['1 day']).values
expected = pd.to_timedelta(['2 days']).values
self.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other + td, expected)
self.assertRaises(TypeError, lambda: td + np.array([1]))
self.assertRaises(TypeError, lambda: np.array([1]) + td)
expected = pd.to_timedelta(['0 days']).values
self.assert_numpy_array_equal(td - other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(-other + td, expected)
self.assertRaises(TypeError, lambda: td - np.array([1]))
self.assertRaises(TypeError, lambda: np.array([1]) - td)
expected = pd.to_timedelta(['2 days']).values
self.assert_numpy_array_equal(td * np.array([2]), expected)
self.assert_numpy_array_equal(np.array([2]) * td, expected)
self.assertRaises(TypeError, lambda: td * other)
self.assertRaises(TypeError, lambda: other * td)
self.assert_numpy_array_equal(td / other,
np.array([1], dtype=np.float64))
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other / td,
np.array([1], dtype=np.float64))
# timedelta, datetime
other = pd.to_datetime(['2000-01-01']).values
expected = pd.to_datetime(['2000-01-02']).values
self.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other + td, expected)
expected = pd.to_datetime(['1999-12-31']).values
self.assert_numpy_array_equal(-td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other - td, expected)
def test_ops_series(self):
# regression test for GH8813
td = Timedelta('1 day')
other = pd.Series([1, 2])
expected = pd.Series(pd.to_timedelta(['1 day', '2 days']))
tm.assert_series_equal(expected, td * other)
tm.assert_series_equal(expected, other * td)
def test_ops_series_object(self):
# GH 13043
s = pd.Series([pd.Timestamp('2015-01-01', tz='US/Eastern'),
pd.Timestamp('2015-01-01', tz='Asia/Tokyo')],
name='xxx')
self.assertEqual(s.dtype, object)
exp = pd.Series([pd.Timestamp('2015-01-02', tz='US/Eastern'),
pd.Timestamp('2015-01-02', tz='Asia/Tokyo')],
name='xxx')
tm.assert_series_equal(s + pd.Timedelta('1 days'), exp)
tm.assert_series_equal(pd.Timedelta('1 days') + s, exp)
# object series & object series
s2 = pd.Series([pd.Timestamp('2015-01-03', tz='US/Eastern'),
pd.Timestamp('2015-01-05', tz='Asia/Tokyo')],
name='xxx')
self.assertEqual(s2.dtype, object)
exp = pd.Series([pd.Timedelta('2 days'), pd.Timedelta('4 days')],
name='xxx')
tm.assert_series_equal(s2 - s, exp)
tm.assert_series_equal(s - s2, -exp)
s = pd.Series([pd.Timedelta('01:00:00'), pd.Timedelta('02:00:00')],
name='xxx', dtype=object)
self.assertEqual(s.dtype, object)
exp = pd.Series([pd.Timedelta('01:30:00'), pd.Timedelta('02:30:00')],
name='xxx')
tm.assert_series_equal(s + pd.Timedelta('00:30:00'), exp)
tm.assert_series_equal(pd.Timedelta('00:30:00') + s, exp)
def test_ops_notimplemented(self):
class Other:
pass
other = Other()
td = Timedelta('1 day')
self.assertTrue(td.__add__(other) is NotImplemented)
self.assertTrue(td.__sub__(other) is NotImplemented)
self.assertTrue(td.__truediv__(other) is NotImplemented)
self.assertTrue(td.__mul__(other) is NotImplemented)
self.assertTrue(td.__floordiv__(td) is NotImplemented)
def test_ops_error_str(self):
# GH 13624
tdi = TimedeltaIndex(['1 day', '2 days'])
for l, r in [(tdi, 'a'), ('a', tdi)]:
with tm.assertRaises(TypeError):
l + r
with tm.assertRaises(TypeError):
l > r
with tm.assertRaises(TypeError):
l == r
with tm.assertRaises(TypeError):
l != r
def test_timedelta_ops(self):
# GH4984
# make sure ops return Timedelta
s = Series([Timestamp('20130101') + timedelta(seconds=i * i)
for i in range(10)])
td = s.diff()
result = td.mean()
expected = to_timedelta(timedelta(seconds=9))
self.assertEqual(result, expected)
result = td.to_frame().mean()
self.assertEqual(result[0], expected)
result = td.quantile(.1)
expected = Timedelta(np.timedelta64(2600, 'ms'))
self.assertEqual(result, expected)
result = td.median()
expected = to_timedelta('00:00:09')
self.assertEqual(result, expected)
result = td.to_frame().median()
self.assertEqual(result[0], expected)
# GH 6462
# consistency in returned values for sum
result = td.sum()
expected = to_timedelta('00:01:21')
self.assertEqual(result, expected)
result = td.to_frame().sum()
self.assertEqual(result[0], expected)
# std
result = td.std()
expected = to_timedelta(Series(td.dropna().values).std())
self.assertEqual(result, expected)
result = td.to_frame().std()
self.assertEqual(result[0], expected)
# invalid ops
for op in ['skew', 'kurt', 'sem', 'prod']:
self.assertRaises(TypeError, getattr(td, op))
# GH 10040
# make sure NaT is properly handled by median()
s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07')])
self.assertEqual(s.diff().median(), timedelta(days=4))
s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07'),
Timestamp('2015-02-15')])
self.assertEqual(s.diff().median(), timedelta(days=6))
def test_timedelta_ops_scalar(self):
# GH 6808
base = pd.to_datetime('20130101 09:01:12.123456')
expected_add = pd.to_datetime('20130101 09:01:22.123456')
expected_sub = pd.to_datetime('20130101 09:01:02.123456')
for offset in [pd.to_timedelta(10, unit='s'), timedelta(seconds=10),
np.timedelta64(10, 's'),
np.timedelta64(10000000000, 'ns'),
pd.offsets.Second(10)]:
result = base + offset
self.assertEqual(result, expected_add)
result = base - offset
self.assertEqual(result, expected_sub)
base = pd.to_datetime('20130102 09:01:12.123456')
expected_add = pd.to_datetime('20130103 09:01:22.123456')
expected_sub = pd.to_datetime('20130101 09:01:02.123456')
for offset in [pd.to_timedelta('1 day, 00:00:10'),
| pd.to_timedelta('1 days, 00:00:10') | pandas.to_timedelta |
"""
Iris dataset example.
This example demonstrates the following:
1. Simple "vanilla" example - how to set up a basic config on an easy dataset.
2. "overkill" example - how to write a custom loader, transformer, and augmentation
function.
"""
import numpy as np
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from barrage import BarrageModel
from barrage.api import RecordLoader, RecordMode, RecordTransformer
from barrage.utils import io_utils
def get_data():
"""Load iris dataset."""
dataset = load_iris()
X, y = dataset.data, dataset.target
X_train, X_val, y_train, y_val = train_test_split(
X, y, test_size=0.2, random_state=42
)
records_train = pd.DataFrame(X_train, columns=["i1", "i2", "i3", "i4"])
records_train["label"] = y_train
records_val = | pd.DataFrame(X_val, columns=["i1", "i2", "i3", "i4"]) | pandas.DataFrame |
import numpy as np
import pdb
import gzip
import matplotlib
import matplotlib.pyplot as plt
import cPickle as pkl
import operator
import scipy.io as sio
import os.path
import pandas as pd
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.dummy import DummyClassifier
np.random.seed(23254)
def parse(path):
g = gzip.open(path, 'r')
for l in g:
yield eval(l)
def getuserCache(df):
userCache = {}
for uid in sorted(df.uid.unique().tolist()):
items = sorted(df.loc[df.uid == uid]['iid'].values.tolist())
userCache[uid] = items
return userCache
def getitemCache(df):
itemCache = {}
for iid in sorted(df.iid.unique().tolist()):
users = sorted(df.loc[df.iid == iid]['uid'].values.tolist())
itemCache[iid] = users
return itemCache
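# For a toy ratings frame with (uid, iid) rows (1, 10), (1, 12), (2, 10),
# getuserCache gives {1: [10, 12], 2: [10]} and getitemCache gives
# {10: [1, 2], 12: [1]} (illustrative values, not from the real dataset).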
def readData(dataset):
totalFile = | pd.read_csv('data/'+dataset+'/ratings.dat',sep="\t",usecols=[0,1],names=['uid','iid'],header=0) | pandas.read_csv |
import ibis
from pandas import read_csv
from pandas.core.frame import DataFrame
import pytest
from sql_to_ibis import register_temp_table, remove_temp_table
from sql_to_ibis.tests.utils import (
DATA_PATH,
MULTI_LOOKUP,
MULTI_MAIN,
MULTI_PROMOTION,
MULTI_PROMOTION_NO_OVERLAP,
MULTI_RELATIONSHIP,
get_all_join_columns_handle_duplicates,
)
scope_fixture = pytest.fixture(scope="session")
@scope_fixture
def pandas_client():
return ibis.pandas.PandasClient({})
@scope_fixture
def digimon_mon_list(pandas_client):
frame = read_csv(DATA_PATH / "DigiDB_digimonlist.csv")
frame["mon_attribute"] = frame["Attribute"]
return ibis.pandas.from_dataframe(
frame,
"DIGIMON_MON_LIST",
pandas_client,
)
@scope_fixture
def digimon_move_list(pandas_client):
frame = read_csv(DATA_PATH / "DigiDB_movelist.csv")
frame["move_attribute"] = frame["Attribute"]
return ibis.pandas.from_dataframe(frame, "DIGIMON_MOVE_LIST", pandas_client)
@scope_fixture
def forest_fires(pandas_client):
return ibis.pandas.from_dataframe(
read_csv(DATA_PATH / "forestfires.csv"), "FOREST_FIRES", pandas_client
)
@scope_fixture
def avocado(pandas_client):
return ibis.pandas.from_dataframe(
read_csv(DATA_PATH / "avocado.csv"), "AVOCADO", pandas_client
)
@scope_fixture
def time_data(pandas_client):
return ibis.pandas.from_dataframe(
read_csv(DATA_PATH / "time_data.csv"), "TIME_DATA", pandas_client
)
@scope_fixture
def multitable_join_main_table(pandas_client):
return ibis.pandas.from_dataframe(
DataFrame(
{
"id": [0, 1, 2, 3, 4],
"lookup_id": [1, 5, 8, 9, 10],
"relationship_id": [0, 1, 2, 2, 1],
"promotion_id": [0, 1, 2, 1, 0],
}
),
MULTI_MAIN,
pandas_client,
)
@scope_fixture
def multitable_join_lookup_table(pandas_client):
return ibis.pandas.from_dataframe(
DataFrame(
{
"id": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
"lookup_value": [0, 3, 20, 10, 40, 20, 10, 10, 10, 10],
}
),
MULTI_LOOKUP,
pandas_client,
)
@scope_fixture
def multitable_join_relationship_table(pandas_client):
return ibis.pandas.from_dataframe(
DataFrame({"id": [0, 1, 2], "relation": ["rel1", "rel2", "rel3"]}),
MULTI_RELATIONSHIP,
pandas_client,
)
@scope_fixture
def multitable_join_promotion_table(pandas_client):
return ibis.pandas.from_dataframe(
| DataFrame({"id": [0, 1, 2], "promotion": ["none", "special", "extra special"]}) | pandas.core.frame.DataFrame |
"""
Tasks
-------
Search and transform jsonable structures, specifically to make it 'easy' to make tabular/csv output for other consumers.
Example
~~~~~~~~~~~~~
*give me a list of all the fields called 'id' in this stupid, gnarly
thing*
>>> Q('id',gnarly_data)
['id1','id2','id3']
Observations:
---------------------
1) 'simple data structures' exist and are common. They are tedious
to search.
2) The DOM is another nested / treeish structure, and jQuery selector is
a good tool for that.
3a) R, Numpy, Excel and other analysis tools want 'tabular' data. These
analyses are valuable and worth doing.
3b) Dot/Graphviz, NetworkX, and some other analyses *like* treeish/dicty
things, and those analyses are also worth doing!
3c) Some analyses are best done using 'one-off' and custom code in C, Python,
or another 'real' programming language.
4) Arbitrary transforms are tedious and error prone. SQL is one solution,
XSLT is another,
5) the XPATH/XML/XSLT family is.... not universally loved :) They are
very complete, and the completeness can make simple cases... gross.
6) For really complicated data structures, we can write one-off code. Getting
80% of the way is mostly okay. There will always have to be programmers
in the loop.
7) Re-inventing SQL is probably a failure mode. So is reinventing XPATH, XSLT
and the like. Be wary of mission creep! Re-use when possible (e.g., can
we put the thing into a DOM using
8) If the interface is good, people can improve performance later.
Simplifying
---------------
1) Assuming 'jsonable' structures
2) keys are strings or stringlike. Python allows any hashable to be a key.
for now, we pretend that doesn't happen.
3) assumes most dicts are 'well behaved'. DAG, no cycles!
4) assume that if people want really specialized transforms, they can do it
themselves.
"""
from __future__ import print_function
from collections import namedtuple
import csv
import itertools
from itertools import product
from operator import attrgetter as aget, itemgetter as iget
import operator
import sys
from pandas.compat import map, u, callable, Counter
import pandas.compat as compat
## note 'url' appears multiple places and not all extensions have same struct
ex1 = {
'name': 'Gregg',
'extensions': [
{'id':'hello',
'url':'url1'},
{'id':'gbye',
'url':'url2',
'more': dict(url='url3')},
]
}
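# A minimal sketch (not this module's actual Q implementation) of the kind of
# recursive key search described in the docstring above; the name
# find_all_keys is made up for illustration.
def find_all_keys(obj, key):
    """Collect every value stored under `key` anywhere inside a jsonable obj."""
    hits = []
    if isinstance(obj, dict):
        for k, v in obj.items():
            if k == key:
                hits.append(v)
            hits.extend(find_all_keys(v, key))
    elif isinstance(obj, (list, tuple)):
        for item in obj:
            hits.extend(find_all_keys(item, key))
    return hits
# e.g. find_all_keys(ex1, 'id') -> ['hello', 'gbye']
# find_all_keys(ex1, 'url') -> ['url1', 'url2', 'url3']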
## much longer example
ex2 = {u('metadata'): {u('accessibilities'): [{u('name'): u('accessibility.tabfocus'),
u('value'): 7},
{u('name'): u('accessibility.mouse_focuses_formcontrol'), u('value'): False},
{u('name'): u('accessibility.browsewithcaret'), u('value'): False},
{u('name'): u('accessibility.win32.force_disabled'), u('value'): False},
{u('name'): u('accessibility.typeaheadfind.startlinksonly'), u('value'): False},
{u('name'): u('accessibility.usebrailledisplay'), u('value'): u('')},
{u('name'): u('accessibility.typeaheadfind.timeout'), u('value'): 5000},
{u('name'): u('accessibility.typeaheadfind.enabletimeout'), u('value'): True},
{u('name'): u('accessibility.tabfocus_applies_to_xul'), u('value'): False},
{u('name'): u('accessibility.typeaheadfind.flashBar'), u('value'): 1},
{u('name'): u('accessibility.typeaheadfind.autostart'), u('value'): True},
{u('name'): u('accessibility.blockautorefresh'), u('value'): False},
{u('name'): u('accessibility.browsewithcaret_shortcut.enabled'),
u('value'): True},
{u('name'): u('accessibility.typeaheadfind.enablesound'), u('value'): True},
{u('name'): u('accessibility.typeaheadfind.prefillwithselection'),
u('value'): True},
{u('name'): u('accessibility.typeaheadfind.soundURL'), u('value'): u('beep')},
{u('name'): u('accessibility.typeaheadfind'), u('value'): False},
{u('name'): u('accessibility.typeaheadfind.casesensitive'), u('value'): 0},
{u('name'): u('accessibility.warn_on_browsewithcaret'), u('value'): True},
{u('name'): u('accessibility.usetexttospeech'), u('value'): u('')},
{u('name'): u('accessibility.accesskeycausesactivation'), u('value'): True},
{u('name'): u('accessibility.typeaheadfind.linksonly'), u('value'): False},
{u('name'): u('isInstantiated'), u('value'): True}],
u('extensions'): [{u('id'): u('216ee7f7f4a5b8175374cd62150664efe2433a31'),
u('isEnabled'): True},
{u('id'): u('1aa53d3b720800c43c4ced5740a6e82bb0b3813e'), u('isEnabled'): False},
{u('id'): u('01ecfac5a7bd8c9e27b7c5499e71c2d285084b37'), u('isEnabled'): True},
{u('id'): u('1c01f5b22371b70b312ace94785f7b0b87c3dfb2'), u('isEnabled'): True},
{u('id'): u('fb723781a2385055f7d024788b75e959ad8ea8c3'), | u('isEnabled') | pandas.compat.u |
from context import dero
import dero.data.ff.create.sort as ff_sort
import pandas as pd
from pandas.util.testing import assert_frame_equal
from pandas import Timestamp
from numpy import nan
import numpy
import datetime
class DataFrameTest:
df_3_fac = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01, .51, 1000),
(10516, 'a', '1/2/2000', 1.02, .52, 2000),
(10516, 'a', '1/3/2000', 1.03, .53, 3000),
(10516, 'a', '1/4/2000', 1.04, .54, 4000),
(10516, 'b', '1/1/2000', 1.05, 1.55, 50000),
(10516, 'b', '1/2/2000', 1.06, 1.56, 60000),
(10516, 'b', '1/3/2000', 1.07, 1.57, 70000),
(10516, 'b', '1/4/2000', 1.08, 1.58, 80000),
(10517, 'a', '1/1/2000', 1.09, .59, 9000),
(10517, 'a', '1/2/2000', 1.10, .60, 10000),
(10517, 'a', '1/3/2000', 1.11, .61, 11000),
(10517, 'a', '1/4/2000', 1.12, .62, 12000),
(10517, 'b', '1/1/2000', 1.13, .63, 13000),
(10517, 'b', '1/2/2000', 1.14, .64, 14000),
(10517, 'b', '1/3/2000', 1.15, .65, 15000),
(10517, 'b', '1/4/2000', 1.16, .66, 16000),
(10518, 'a', '1/1/2000', 1.17, .67, 17000),
(10518, 'a', '1/2/2000', 1.18, .68, 18000),
(10518, 'a', '1/3/2000', 1.19, .69, 19000),
(10518, 'a', '1/4/2000', 1.20, .70, 20000),
(10518, 'b', '1/1/2000', 1.21, .71, 21000),
(10518, 'b', '1/2/2000', 1.22, .72, 22000),
(10518, 'b', '1/3/2000', 1.23, .73, 23000),
(10518, 'b', '1/4/2000', 1.24, .74, 24000),
], columns=['PERMNO', 'byvar', 'Date', 'RET', 'be/me', 'me'])
df_3_fac['Date'] = pd.to_datetime(df_3_fac['Date'])
class TestCalculateFFFactors(DataFrameTest):
def test_create_portfolios(self):
expect_df = pd.DataFrame(data=[
(10516, 'a', Timestamp('2000-01-01 00:00:00'), 1.01, 0.51, 1000, 1, Timestamp('2000-01-01 00:00:00'), 1,
Timestamp('2000-01-01 00:00:00')),
(10516, 'a', Timestamp('2000-01-02 00:00:00'), 1.02, 0.52, 2000, 1, Timestamp('2000-01-01 00:00:00'), 1,
Timestamp('2000-01-01 00:00:00')),
(10516, 'a', Timestamp('2000-01-03 00:00:00'), 1.03, 0.53, 3000, 1, Timestamp('2000-01-01 00:00:00'), 1,
Timestamp('2000-01-01 00:00:00')),
(10516, 'a', Timestamp('2000-01-04 00:00:00'), 1.04, 0.54, 4000, 1, Timestamp('2000-01-01 00:00:00'), 1,
Timestamp('2000-01-01 00:00:00')),
(10516, 'b', Timestamp('2000-01-01 00:00:00'), 1.05, 1.55, 50000, 3, Timestamp('2000-01-01 00:00:00'), 2,
Timestamp('2000-01-01 00:00:00')),
(10516, 'b', Timestamp('2000-01-02 00:00:00'), 1.06, 1.56, 60000, 3, Timestamp('2000-01-01 00:00:00'), 2,
Timestamp('2000-01-01 00:00:00')),
(10516, 'b', Timestamp('2000-01-03 00:00:00'), 1.07, 1.57, 70000, 3, | Timestamp('2000-01-01 00:00:00') | pandas.Timestamp |
import os
import sys
import csv
import json
import scipy
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
import statsmodels.api as sm
from utils import pickle_to_file
import data_generator
def _get_last_datapoint(df):
return df.Value.values[-1]
def _get_mean(df):
if df.Value.size == 1:
return df.Value.values[0]
elif np.unique(df.Time).size == 1:
return df.Value.mean()
mean_time = df.Time.mean()
# lin_fit = scipy.stats.linregress(df.Time - mean_time, df.Value)
x_init = df.Time - mean_time
x = sm.add_constant(x_init)
lin_fit = sm.OLS(df.Value, x, missing = 'drop').fit()
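    # Because the regressor is centered at mean_time, the fitted intercept
    # (params[0]) is the model's prediction at the average observation time,
    # i.e. a trend-adjusted "mean" of Value that is insensitive to when the
    # measurements happened to be taken.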
return lin_fit.params[0]
def _get_max(df):
return df.Value.max()
def _get_min(df):
return df.Value.min()
def _get_sum(df):
return df.Value.sum()
def _get_identity(x):
return x.Value.values[0]
def _get_slope(df):
if df.Value.size == 1 or np.unique(df.Time).size == 1:
return 0
return scipy.stats.linregress(df.Time / 50., df.Value)[0]
LAST = _get_last_datapoint
MIN = _get_min
MAX = _get_max
WEIGHTED_MEAN = _get_mean
SUM = _get_sum
IDENTITY = _get_identity
SLOPE = _get_slope
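# Toy illustration (values invented, not from the PhysioNet files) of how the
# extractors behave on a per-variable frame with Time/Value columns:
#   toy = pd.DataFrame({"Time": [0, 10, 20], "Value": [1.0, 2.0, 3.0]})
#   LAST(toy) -> 3.0, SUM(toy) -> 6.0, MIN(toy) -> 1.0
#   SLOPE(toy) -> 5.0  (slope of Value regressed on Time / 50)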
# FEATURES = {
# # Based on the paper
# "GCS": [SLOPE, LAST, WEIGHTED_MEAN, MAX, MIN],
# "HCO3": [MIN, MAX, LAST, WEIGHTED_MEAN],
# "BUN": [MIN, MAX, LAST, WEIGHTED_MEAN],
# "Urine": [SUM],
# "Age": [IDENTITY],
# "SysABP": [WEIGHTED_MEAN, LAST, MIN, MAX],
# "WBC": [LAST, WEIGHTED_MEAN, MIN, MAX],
# "Temp": [WEIGHTED_MEAN, LAST, MIN, MAX],
# "Glucose": [MAX, MIN, WEIGHTED_MEAN],
# "Na": [WEIGHTED_MEAN, MAX, MIN],
# "Lactate": [LAST, WEIGHTED_MEAN, MIN, MAX],
# # Based on SAPS II or SAPS I (https://archive.physionet.org/challenge/2012/saps_score.m)
# "HR": [MIN, MAX, WEIGHTED_MEAN],
# "K": [MIN, MAX, WEIGHTED_MEAN],
# "ICUType": [IDENTITY],
# "HCT": [WEIGHTED_MEAN, MIN, MAX],
# "RespRate": [WEIGHTED_MEAN, MIN, MAX],
# "MechVent": [MAX],
# # Based on most common measurements
# # "Creatinine": [WEIGHTED_MEAN, MIN, MAX],
# # "Platelets": [WEIGHTED_MEAN, MIN, MAX],
# # "Mg": [WEIGHTED_MEAN, MIN, MAX],
# # Baseline measurements, general descriptors
# "Gender": [IDENTITY],
# "Weight": [IDENTITY],
# "Height": [IDENTITY],
# }
# META_FEATURE_GROUPS = {
# "GCS": ["GCS"],
# "Metabolic": ["HCO3", "BUN", "Na", "K", "Glucose"],
# "SysABP": ["SysABP"],
# "CBC": ["WBC", "HCT"],
# "Temp": ["Temp"],
# "Lactate": ["Lactate"],
# "HR": ["HR"],
# "Respiration": ["RespRate", "MechVent", "O2"],
# "Urine": ["Urine"],
# "General Desc": ["Gender", "Height", "Weight", "Age", "ICUType"],
# }
META_FEATURE_GROUPS = {}
# Rather than get multiple features per group, only want one
# Choose them to cut down on missing data, if possible
FEATURES = {
# Based on the paper
"GCS": [MIN, WEIGHTED_MEAN, MAX],
"HCO3": [MIN, WEIGHTED_MEAN, MAX],
"BUN": [MIN, WEIGHTED_MEAN, MAX],
"Urine": [MIN, WEIGHTED_MEAN, MAX],
"Age": [IDENTITY],
"SysABP": [MIN, WEIGHTED_MEAN, MAX],
"WBC": [MIN, WEIGHTED_MEAN, MAX],
"Temp": [MIN, WEIGHTED_MEAN, MAX],
"Glucose": [MIN, WEIGHTED_MEAN, MAX],
"Na": [MIN, WEIGHTED_MEAN, MAX],
"Lactate": [MIN, WEIGHTED_MEAN, MAX],
"HR": [MIN, WEIGHTED_MEAN, MAX],
"K": [MIN, WEIGHTED_MEAN, MAX],
"ICUType": [IDENTITY],
"HCT": [MIN, WEIGHTED_MEAN, MAX],
"RespRate": [MIN, WEIGHTED_MEAN, MAX],
"MechVent": [MIN, WEIGHTED_MEAN, MAX], # this is a flag if they were on mechanical ventilation
# Baseline measurements, general descriptors
"Gender": [IDENTITY],
"Weight": [IDENTITY],
"Height": [IDENTITY],
}
NORMAL_RANGES = {
"GCS": [15, 15],
"HCO3": [20, 30],
"BUN": [8, 28],
"Urine": [2000, 4000],
"SysABP": [100, 199],
"WBC": [1, 19.9],
"Temp": [36, 38.9],
"Glucose": [62, 125],
"Na": [135, 145],
"Lactate": [0.5, 1],
"HR": [70, 119],
"K": [3.6, 5.2],
"HCT": [36, 45],
"RespRate": [12, 20],
"MechVent": [0, 0],
"O2": [200, 250],
}
MAX_PROCESS = 5000
def _process_feature_groups(col_names):
"""
@return List of feature indices that correspond to a single group that we want to measure importance of
    Dictionary mapping each of these groups to their name and whether or not they are "individual" variable groups or "meta"-groups
Dictionary mapping variable to normal ranges and the indices of features extracted from that variable
"""
feature_groups = {}
for feature_idx, col_name in enumerate(col_names):
measure, processing_func_name = col_name.split(":")
measure = measure.strip()
if measure not in feature_groups:
feature_groups[measure] = {processing_func_name: feature_idx}
else:
feature_groups[measure][processing_func_name] = feature_idx
print(len(feature_groups))
# Create nan fill config
nan_fill_config = {}
for measure, feature_dict in feature_groups.items():
if measure in NORMAL_RANGES:
nan_fill_config[measure] = {
"indices": list(feature_dict.values()),
"range": NORMAL_RANGES[measure]}
# Create dictionary mapping variable importance group idx to the group name and some bool flags
feature_group_list = []
measure_names = {}
vi_idx = 0
for measure, feature_dict in feature_groups.items():
measure_names[vi_idx] = {
"name": "%s" % measure,
"is_ind": 1,
"is_group": int(measure in META_FEATURE_GROUPS)}
print(measure, feature_dict.values())
feature_group_list.append(",".join([str(i) for i in feature_dict.values()]))
vi_idx += 1
# Also process the meta groups
for group_name, group_members in META_FEATURE_GROUPS.items():
if len(group_members) > 1:
measure_names[vi_idx] = {
"name": group_name,
"is_ind": 0,
"is_group": 1}
feature_idx_list = []
for measure_name in group_members:
feature_idx_list += list(feature_groups[measure_name].values())
print(group_name, feature_idx_list)
feature_group_list.append(",".join([str(i) for i in feature_idx_list]))
vi_idx += 1
assert len(feature_group_list) == len(measure_names)
return feature_group_list, measure_names, nan_fill_config
def main(args=sys.argv[1:]):
train_size = float(args[0])
seed = int(args[1])
icu_data_dir = args[2]
# Read the y data
outcomes = pd.read_csv(icu_data_dir + "Outcomes-a.txt")
subject_outcomes = outcomes[["RecordID", "In-hospital_death"]]
# Create a dictionary of features for each subject
# Using a dictionary because some of the features don't appear in all subjects...
value_range = {} # this is just for printing out ranges of the values
file_folder = icu_data_dir + "set-a/"
all_subject_features = {}
for idx, filename in enumerate(os.listdir(file_folder)[:MAX_PROCESS]):
df = | pd.read_csv("%s%s" % (file_folder, filename)) | pandas.read_csv |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os, errno
import glob
import requests
import datetime
import tarfile
from math import cos, asin, sqrt
import codecs
from lxml import html
import bs4
#fix from to rows issue
def writeData(data):
print(data)
# data
# data.to_csv(r'c:\data\pandas.txt', header=None, index=None, sep=' ', mode='a')
def fix_FileFormat():
baseDirectory = r'C:\Users\Isaac\source\repos\Python-Play\Datasets\FAOData'
BLOCKSIZE = 1048576 # or some other, desired size in bytes
path = r'{}\*.csv'.format(baseDirectory)
for fname in glob.glob(path):
with codecs.open(fname, "r", "ANSI") as sourceFile:
with codecs.open(fname.split(".csv")[0] + "_fix" + ".csv", "w", "utf-8") as targetFile:
while True:
contents = sourceFile.read(BLOCKSIZE)
if not contents:
break
targetFile.write(contents)
def tidy_FAOData():
baseDirectory = r'C:\Users\Isaac\source\repos\Python-Play\Datasets\FAOData'
path = r'{}\*Production_CropsProcessed_E_All_Data_(Normalized)_fix.csv'.format(baseDirectory)
chunksize = 10 ** 2
print(path)
for fname in glob.glob(path):
row_count = 0
last_count = 1
current_Indexes = []
data = | pd.DataFrame() | pandas.DataFrame |
# coding: utf-8
# # Rule size sensitivity benchmark
# In[17]:
PY_IDS_DURATION_ITERATIONS = 10
# # Guide to use lvhimabindu/interpretable_decision_sets
#
# * git pull https://github.com/lvhimabindu/interpretable_decision_sets interpretable_decision_sets_lakkaraju
# * locate your python *site_packages* directory
# * copy *interpretable_decision_sets_lakkaraju* into *site_packages*
# * correct errors in code to allow it to run (wrong identation etc.)
# # Interpretable Decision Sets - setup
# In[18]:
import interpretable_decision_sets_lakkaraju.IDS_smooth_local as sls_lakk
from interpretable_decision_sets_lakkaraju.IDS_smooth_local import run_apriori, createrules, smooth_local_search, func_evaluation
# In[19]:
import pandas as pd
import numpy as np
import time
# ## Simple example
# In[20]:
df = pd.read_csv('../../data/titanic_train.tab',' ', header=None, names=['Passenger_Cat', 'Age_Cat', 'Gender'])
df1 = pd.read_csv('../../data/titanic_train.Y', ' ', header=None, names=['Died', 'Survived'])
Y = list(df1['Died'].values)
itemsets = run_apriori(df, 0.1)
list_of_rules = createrules(itemsets, list(set(Y)))
# In[21]:
support_levels = [1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]
support_levels = list(reversed(support_levels))
rule_counts_quantiles = [ int(support_levels[idx] * len(list_of_rules)) for idx in range(len(support_levels)) ]
# In[22]:
rule_counts_quantiles
# In[ ]:
#%%capture
benchmark_data = [
]
for rule_count in rule_counts_quantiles:
current_rules = list_of_rules[:rule_count]
time1 = time.time()
    lambda_array = [1.0]*7 # use separate hyperparameter search routine
s1 = smooth_local_search(current_rules, df, Y, lambda_array, 0.33, 0.33)
s2 = smooth_local_search(current_rules, df, Y, lambda_array, 0.33, -1.0)
f1 = func_evaluation(s1, current_rules, df, Y, lambda_array)
f2 = func_evaluation(s2, current_rules, df, Y, lambda_array)
soln_set = None
if f1 > f2:
soln_set = s1
else:
soln_set = s2
time2 = time.time()
duration = time2 - time1
rule_count = rule_count
benchmark_data.append(dict(
duration=duration,
rule_count=rule_count
))
# In[27]:
benchmark_dataframe_lakkaraju = pd.DataFrame(benchmark_data)
# In[28]:
benchmark_dataframe_lakkaraju.to_csv("./results/titanic_rule_size_benchmark_lakkaraju.csv", index=False)
# # PyIDS
# ## PyIDS setup
# In[35]:
import time
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import random
from sklearn.metrics import accuracy_score, auc, roc_auc_score
from pyids.ids_rule import IDSRule
from pyids.ids_ruleset import IDSRuleSet
from pyids.ids_objective_function import ObjectiveFunctionParameters, IDSObjectiveFunction
from pyids.ids_optimizer import RSOptimizer, SLSOptimizer
from pyids.ids_cacher import IDSCacher
from pyids.ids_classifier import IDS, mine_CARs
from pyarc.qcba import *
from pyarc.algorithms import createCARs, top_rules
from pyarc import TransactionDB
df = | pd.read_csv("../../data/titanic.csv") | pandas.read_csv |
import pandas as pd
from matplotlib import pyplot as plt
import numpy as np
import warnings
np.random.seed(1000)
import string
def get_car_prices_year():
# getting the data
interesting_columns = ['price', 'year']
cars_df = pd.read_csv('data/USA_cars_datasets.csv')[interesting_columns]
year = cars_df['year']
car_prices = cars_df['price']
return car_prices, year
def check_currency_change(prices, year):
print("The correlation is the same regardless of the units used, as it is normalized with variance. \
\nThe covariance, however, is affected by units used. In USD the covariance was:")
print(prices.cov(year))
eur_prices = prices * 0.85
print("But in EUR the covariance is:")
print(eur_prices.cov(year))
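# A small self-contained check of the claim above (toy numbers, not the CSV):
# rescaling one series rescales the covariance by the same factor but leaves
# the Pearson correlation untouched.
def _demo_unit_invariance():
    prices = pd.Series([10000.0, 20000.0, 30000.0, 40000.0])
    years = pd.Series([2010, 2012, 2014, 2016])
    assert abs(prices.corr(years) - (prices * 0.85).corr(years)) < 1e-9
    assert abs((prices * 0.85).cov(years) - 0.85 * prices.cov(years)) < 1e-6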
def get_car_prices_mileage():
# getting the data
interesting_columns = ['price', 'mileage']
cars_df = pd.read_csv('data/USA_cars_datasets.csv')[interesting_columns]
# getting data without outliers
mileage = cars_df['mileage']
car_prices_normal = cars_df['price']
# adding an outlier
car_prices_with_outliers = car_prices_normal.copy()
car_prices_with_outliers.loc[528] = 200000
return mileage, car_prices_normal, car_prices_with_outliers
def plot_car_prices_and_mileage():
warnings.filterwarnings('ignore')
mileage, car_prices_normal, car_prices_with_outliers = get_car_prices_mileage()
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(16, 4))
pd.concat([mileage, car_prices_normal], axis=1).plot(kind='scatter',
y='mileage',
x='price',
ax=axes[0],
title='Mileage and Price of cars')
pd.concat([mileage, car_prices_with_outliers], axis=1).plot(kind='scatter',
y='mileage',
x='price',
ax=axes[1],
title='Mileage and Price of cars (with an outlier)')
warnings.filterwarnings('always')
plt.show()
def plot_correlation_bargraph(p_norm, p_out, s_norm, s_out):
results = pd.Series({
'Pearson without outlier': p_norm,
'Pearson with outlier': p_out,
'Spearman without outlier': s_norm,
'Spearman with outlier': s_out,
})
print('Pearson without outlier :', p_norm)
print('Pearson with outlier :', p_out)
print('Spearman without outlier:', s_norm)
print('Spearman with outlier :', s_out)
results.plot(kind='barh')
plt.show()
def get_house_prices_and_rooms():
# getting the data
interesting_columns = ['house_price', 'number_of_rooms']
houses_df = | pd.read_csv('data/HousingData.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 19 15:22:38 2018
@author: Administrator
"""
# In[1]:
#dual thrust is an opening range breakout strategy
#it is very similar to London Breakout
#please check London Breakout if u have any questions
# https://github.com/tattooday/quant-trading/blob/master/London%20Breakout%20backtest.py
#Initially we set up upper and lower thresholds based on previous days open, close, high and low
#When the market opens and the price exceeds thresholds, we would take long/short positions prior to upper/lower thresholds
#However, there is no stop long/short position in this strategy
#We clear all positions at the end of the day
#rules of dual thrust can be found in the following link
# https://www.quantconnect.com/tutorials/dual-thrust-trading-algorithm/
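#an illustrative sketch (not part of the original strategy code) of the
#threshold arithmetic used in signal_generation below: the trigger lines sit
#param*range above and (1-param)*range below the price observed at the open
def dual_thrust_thresholds(open_price, day_range, param=0.5):
    sigup = open_price + param * day_range
    siglo = open_price - (1 - param) * day_range
    return sigup, siglo
#e.g. dual_thrust_thresholds(1.30, 0.02) -> (1.31, 1.29)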
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# In[2]:
os.chdir('D:/')
# In[3]:
#data frequency convertion from minute to intra daily
#as we are doing backtesting, we have already got all the datasets we need
#we can create a table to store all open, close, high and low prices
#and calculate the range before we get to signal generation
#otherwise, we would have to put this part inside the loop
#it would greatly increase the time complexity
#however, in real time trading, we do not have futures price
#we have to store all past information in sql db
#we have to calculate the range from db before the market opens
def min2day(df,column,year,month,rg):
#lets create a dictionary
#we use keys to classify different info we need
memo={'date':[],'open':[],'close':[],'high':[],'low':[]}
#no matter which month
#the maximum we can get is 31 days
#thus, we only need to run a traversal on 31 days
#nevertheless, not everyday is a workday
#assuming our raw data doesnt contain weekend prices
#we use try function to make sure we get the info of workdays without errors
#note that i put date at the end of the loop
#the date appendix doesnt depend on our raw data
#it only relies on the range function above
#we could accidentally append weekend date if we put it at the beginning of try function
#not until the program cant find price in raw data will the program stop
#by that time, we have already appended weekend date
#we wanna make sure the length of all lists in dictionary are the same
#so that we can construct a structured table in the next step
for i in range(1,32):
try:
temp=df['%s-%s-%s 3:00:00'%(year,month,i):'%s-%s-%s 12:00:00'%(year,month,i)][column]
memo['open'].append(temp[0])
memo['close'].append(temp[-1])
memo['high'].append(max(temp))
memo['low'].append(min(temp))
memo['date'].append('%s-%s-%s'%(year,month,i))
except Exception:
pass
intraday=pd.DataFrame(memo)
intraday.set_index(pd.to_datetime(intraday['date']),inplace=True)
#preparation
intraday['range1']=intraday['high'].rolling(rg).max()-intraday['close'].rolling(rg).min()
intraday['range2']=intraday['close'].rolling(rg).max()-intraday['low'].rolling(rg).min()
intraday['range']=np.where(intraday['range1']>intraday['range2'],intraday['range1'],intraday['range2'])
return intraday
#signal generation
#even replace assignment with pandas.at
#it still takes a while for us to get the result
#any optimization suggestion besides using numpy array?
def signal_generation(df,intraday,param,column,rg):
#as the lags of days have been set to 5
#we should start our backtesting after 4 workdays of current month
#cumsum is to control the holding of underlying asset
#sigup and siglo are the variables to store the upper/lower threshold
#upper and lower are for the purpose of tracking sigup and siglo
signals=df[df.index>=intraday['date'].iloc[rg-1]]
signals['signals']=0
signals['cumsum']=0
signals['upper']=0.0
signals['lower']=0.0
sigup=float(0)
siglo=float(0)
#for traversal on time series
#the tricky part is the slicing
#we have to either use [i:i] or pd.Series
#first we set up thresholds at the beginning of london market
#which is est 3am
#if the price exceeds either threshold
#we will take long/short positions
for i in signals.index:
#note that intraday and dataframe have different frequencies
#obviously different metrics for indexes
#we use variable date for index convertion
date='%s-%s-%s'%(i.year,i.month,i.day)
#market opening
#set up thresholds
if (i.hour==3 and i.minute==0):
sigup=float(param*intraday['range'][date]+pd.Series(signals[column])[i])
siglo=float(-(1-param)*intraday['range'][date]+pd.Series(signals[column])[i])
#thresholds got breached
#signals generating
if (sigup!=0 and pd.Series(signals[column])[i]>sigup):
signals.at[i,'signals']=1
if (siglo!=0 and pd.Series(signals[column])[i]<siglo):
signals.at[i,'signals']=-1
#check if signal has been generated
#if so, use cumsum to verify that we only generate one signal for each situation
if pd.Series(signals['signals'])[i]!=0:
signals['cumsum']=signals['signals'].cumsum()
if (pd.Series(signals['cumsum'])[i]>1 or pd.Series(signals['cumsum'])[i]<-1):
signals.at[i,'signals']=0
#if the price goes from below the lower threshold to above the upper threshold during the day
#we reverse our positions from short to long
if (pd.Series(signals['cumsum'])[i]==0):
if (pd.Series(signals[column])[i]>sigup):
signals.at[i,'signals']=2
if (pd.Series(signals[column])[i]<siglo):
signals.at[i,'signals']=-2
#by the end of london market, which is est 12pm
#we clear all opening positions
#the whole part is very similar to London Breakout strategy
if i.hour==12 and i.minute==0:
sigup,siglo=float(0),float(0)
signals['cumsum']=signals['signals'].cumsum()
signals.at[i,'signals']=-signals['cumsum'][i:i]
#keep track of trigger levels
signals.at[i,'upper']=sigup
signals.at[i,'lower']=siglo
return signals
#plotting the positions
def plot(signals,intraday,column):
#we have to do a lil bit slicing to make sure we can see the plot clearly
#the only reason i go to -3 is that day we execute a trade
#give one hour before and after market trading hour for as x axis
date=pd.to_datetime(intraday['date']).iloc[-3]
signew=signals['%s-%s-%s 02:00:00'%(date.year,date.month,date.day):'%s-%s-%s 13:00:00'%(date.year,date.month,date.day)]
fig=plt.figure(figsize=(10,5))
ax=fig.add_subplot(111)
#mostly the same as other py files
#the only difference is to create an interval for signal generation
ax.plot(signew.index,signew[column],label=column)
ax.fill_between(signew.loc[signew['upper']!=0].index,signew['upper'][signew['upper']!=0],signew['lower'][signew['upper']!=0],alpha=0.2,color='#355c7d')
ax.plot(signew.loc[signew['signals']==1].index,signew[column][signew['signals']==1],lw=0,marker='^',markersize=10,c='g',label='LONG')
ax.plot(signew.loc[signew['signals']==-1].index,signew[column][signew['signals']==-1],lw=0,marker='v',markersize=10,c='r',label='SHORT')
#change legend text color
lgd=plt.legend(loc='best').get_texts()
for text in lgd:
text.set_color('#6C5B7B')
#add some captions
plt.text('%s-%s-%s 03:00:00'%(date.year,date.month,date.day),signew['upper']['%s-%s-%s 03:00:00'%(date.year,date.month,date.day)],'Upper Bound',color='#C06C84')
plt.text('%s-%s-%s 03:00:00'%(date.year,date.month,date.day),signew['lower']['%s-%s-%s 03:00:00'%(date.year,date.month,date.day)],'Lower Bound',color='#C06C84')
plt.ylabel(column)
plt.xlabel('Date')
plt.title('Dual Thrust')
plt.grid(True)
plt.show()
# In[4]:
def main():
#similar to London Breakout
#my raw data comes from the same website
# http://www.histdata.com/download-free-forex-data/?/excel/1-minute-bar-quotes
#just take the mid price of whatever currency pair you want
df=pd.read_csv('gbpusd.csv')
df.set_index( | pd.to_datetime(df['date']) | pandas.to_datetime |
import asyncio
import re
import time
import warnings
from multiprocessing import Pool
import aiohttp
import logbook
import numpy as np
import pandas as pd
from ..mongodb import get_db
from ..scripts.trading_calendar import is_trading_day
from ..setting.constants import MARKET_START, MAX_WORKER, QUOTE_COLS
from ..utils import batch_loop, data_root, make_logger
from ..utils.db_utils import to_dict
db_name = 'quotes'
logger = make_logger('实时报价')
warnings.filterwarnings('ignore')
QUOTE_PATTERN = re.compile('"(.*)"')
CODE_PATTERN = re.compile(r'hq_str_s[zh](\d{6})')
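# The two patterns above assume response lines of roughly this shape
# (field values are illustrative only):
#   var hq_str_sz000001="field0,field1,...,fieldN";
# CODE_PATTERN pulls the 6-digit stock code out of the variable name and
# QUOTE_PATTERN grabs the comma-separated payload between the quotes.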
def _convert_to_numeric(s, exclude=()):
    if pd.api.types.is_string_dtype(s):
        if exclude and s.name not in exclude:
            return pd.to_numeric(s, errors='coerce')
        return s
    # non-string columns pass through unchanged
    return s
def _to_dataframe(content):
"""解析网页数据,返回DataFrame对象"""
res = [x.split(',') for x in re.findall(QUOTE_PATTERN, content)]
codes = [x for x in re.findall(CODE_PATTERN, content)]
df = pd.DataFrame(res).iloc[:, :32]
df.columns = QUOTE_COLS[1:]
df.insert(0, '股票代码', codes)
df.dropna(inplace=True)
return df
def _add_prefix(stock_code):
pre = stock_code[0]
if pre == '6':
return 'sh{}'.format(stock_code)
else:
return 'sz{}'.format(stock_code)
async def fetch(codes):
url_fmt = 'http://hq.sinajs.cn/list={}'
url = url_fmt.format(','.join(map(_add_prefix, codes)))
async with aiohttp.request('GET', url) as r:
data = await r.text()
return data
async def to_dataframe(codes):
"""解析网页数据,返回DataFrame对象"""
content = await fetch(codes)
df = _to_dataframe(content)
df = df.apply(_convert_to_numeric, exclude=('股票代码', '股票简称', '日期', '时间'))
df = df[df.成交额 > 0]
if len(df) > 0:
df['时间'] = pd.t | o_datetime(df.日期 + ' ' + df.时间) | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""car_damage_detection.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1xINwPZHsF1euVtX3NVdY0fnrCxo3hbHM
"""
from google.colab import drive
drive.mount('/gdrive')
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.layers import Flatten, Dense, Dropout
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import Callback, EarlyStopping
from sklearn.metrics import classification_report, confusion_matrix
data = pd.read_csv('/gdrive/My Drive/Colab Notebooks/car_damage_detection/damage_detection')
data.head()
data.isnull().sum()
counts = data['class'].value_counts()
sns.barplot(x=counts.index, y=counts)
plt.xlabel('Class')
plt.ylabel('Count')
plt.xticks(rotation=40);
path = '/gdrive/My Drive/Colab Notebooks/car_damage_detection/'
def edit_path_img(x):
return path + x
data["image"] = data["image"].apply(edit_path_img)
data.head()
datafig, axes = plt.subplots(nrows=4, ncols=2, figsize=(10, 8),
subplot_kw={'xticks': [], 'yticks': []})
for i, ax in enumerate(axes.flat):
ax.imshow(plt.imread(data.image[i]))
ax.set_title(data['class'][i])
plt.tight_layout()
plt.show()
train_df, test_df = train_test_split(data, test_size=0.2)
train_datagen = ImageDataGenerator(
preprocessing_function=preprocess_input,
validation_split=0.2
)
test_datagen = ImageDataGenerator(
preprocessing_function=preprocess_input
)
train_gen = train_datagen.flow_from_dataframe(
dataframe=train_df,
x_col='image',
y_col='class',
target_size=(224, 224),
color_mode='rgb',
batch_size=32,
shuffle=True,
    seed=0,
    subset='training'
)
val_gen = train_datagen.flow_from_dataframe(
dataframe=train_df,
x_col='image',
y_col='class',
target_size=(224, 224),
batch_size=32,
shuffle=True,
    seed=0,
    subset='validation'
)
test_gen = test_datagen.flow_from_dataframe(
dataframe=test_df,
x_col='image',
y_col='class',
target_size=(224, 224),
color_mode='rgb',
class_mode='categorical',
batch_size=32,
shuffle=False
)
pretrained_model = MobileNetV2(
input_shape=(224, 224, 3),
include_top=False,
weights='imagenet',
pooling='avg'
)
pretrained_model.trainable = False
inputs = pretrained_model.input
x = Dense(120, activation='relu')(pretrained_model.output)
x = Dense(120, activation='relu')(x)
outputs = Dense(8, activation='softmax')(x)
model = Model(inputs=inputs, outputs=outputs)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
my_callbacks = [EarlyStopping(monitor='val_accuracy',
min_delta=0,
patience=2,
mode='auto')]
history = model.fit(train_gen, validation_data=val_gen, epochs=50, callbacks=my_callbacks)
| pd.DataFrame(history.history) | pandas.DataFrame |
import pandas as pd
import numpy as np
import streamlit as st
import plotly.express as px
import folium
import base64
import xlsxwriter
from xlsxwriter import Workbook
from geopy.distance import great_circle
from io import BytesIO
from collections import Counter
from PIL import Image
from streamlit_folium import folium_static
from folium.plugins import MarkerCluster
(pd.set_option('display.float_format', lambda x: '%.3f' % x))
st.set_page_config(layout='wide')
image=Image.open('images/HR.png')
st.sidebar.image(image,use_column_width=True,caption='House Rocket Company')
menu = st.sidebar.radio('Selecione uma das opções de página do Projeto:',
('Data Overview','Insights','Business Solution'))
st.sidebar.write('Para mais informações sobre o projeto, acesse: '"[GitHub](https://github.com/RaulBelarmino/house-rocket-insights/)")
def get_data(path):
data = pd.read_csv(path)
return data
def get_data_clean():
data = pd.read_csv('datasets/data_clean.csv')
return data
def get_data_solution():
data = pd.read_csv('datasets/kc_houses_solution.csv')
return data
def data_overview(data):
st.markdown(
"<h1 style='text-align: center; color: #565656; background: #FADBD8'> Data Overview </h1>",
unsafe_allow_html=True)
st.write(data.head(100))
# Overview map
df1 = data.copy()
# Base map
density_map = folium.Map(location=[df1['lat'].mean(), df1['long'].mean()],
default_zoom_start=15)
make_cluster = MarkerCluster().add_to(density_map)
for name, row in df1.iterrows():
folium.Marker([row['lat'], row['long']],
popup='Price R${0} on: {1}. Sqft: {2} \n\nId: {3} '
'Bedrooms: {4} Bathrooms: {5} '
'Year Built: {6}'.format(row['price'],
row['date'],
row['sqft_lot'],
row['id'],
row['bedrooms'],
row['bathrooms'],
row['yr_built'])).add_to(make_cluster)
folium_static(density_map, width=865, height=400)
# descriptive statistics
df = data.copy()
df['id'] = df.astype(str)
c1, c2 = st.beta_columns((1, 1))
# central tendency metrics
attributes_num = df.select_dtypes(include=['int64', 'float64'])
df_mean = pd.DataFrame(attributes_num.apply(np.mean))
df_median = pd.DataFrame(attributes_num.apply(np.median))
# measures of dispersion
df_min = pd.DataFrame(attributes_num.apply(np.min))
df_max = pd.DataFrame(attributes_num.apply(np.max))
df_std = pd.DataFrame(attributes_num.apply(np.std))
statics = pd.concat([df_mean, df_median, df_min, df_max, df_std], axis=1).reset_index()
statics.columns = ['attributes', 'mean', 'median', 'min', 'max', 'std']
statics = statics.iloc[
[True, True, True, True, True, True, True, True, True, True, True, True, True, True, False, False, False,
True, True], :]
c1.header('Statistcs Descriptive')
c1.dataframe(statics, height=1000)
# Average Metrics
df['sqm_living'] = df['sqft_living'] / 10.764
df['sqm_lot'] = df['sqft_lot'] / 10.764
df['price_sqm'] = df['price'] / df['sqm_living']
df1 = df[['id', 'zipcode']].groupby('zipcode').count().reset_index()
df2 = df[['price', 'zipcode']].groupby('zipcode').mean().reset_index()
df3 = df[['sqm_living', 'zipcode']].groupby('zipcode').mean().reset_index()
df4 = df[['price_sqm', 'zipcode']].groupby('zipcode').mean().reset_index()
# Merge
m1 = pd.merge(df1, df2, on='zipcode', how='inner')
m2 = pd.merge(m1, df3, on='zipcode', how='inner')
m3 = pd.merge(m2, df4, on='zipcode', how='inner')
m3.columns = ['Zipcode', 'Total Houses', 'Price', 'M² Living', 'Price m²']
c2.header('Average Metrics')
c2.dataframe(m3, height=430)
return None
def hypotheses(df_clean):
st.markdown(
"<h1 style='text-align: center; color: #565656; background: #F1948A'> Hipóteses de Negócio </h1>",
unsafe_allow_html=True)
df2 = df_clean.copy()
st.write('Apresentação das hipóteses de negócio')
if st.checkbox('Top Insights'):
st.markdown('**Insights mais relevantes para o projeto:**')
st.markdown(
'**H4:** A mediana de preço de imóveis com 2 andares ou mais, com vista para água é 20% mais alta, que imóveis com 1 andar e com vista para água')
st.markdown(
'**Verdadeiro**, imóveis com 2 andares ou mais são em média 94% mais valorizados do que imóveis single-story(casa térrea), com vista para água.')
st.markdown('**H6:** Imóveis em más condições são 30%, mais baratos que imóveis com boas condições')
st.markdown('**Falso**, há diferença é de aproximadamente 60% em média.')
st.markdown('**H10:** Imóveis de 3 quartos e 2 banheiros devem ser 10% mais caros que imóveis de 3 quartos e 1 banheiro.')
st.markdown('**Falso**, os imóveis com 3 quartos e 2 banheiro são mais caros, porém aproximadamente 29%.')
c1, c2 = st.beta_columns(2)
c3, c4 = st.beta_columns(2)
c5, c6 = st.beta_columns(2)
c7, c8 = st.beta_columns(2)
c9, c10 = st.beta_columns(2)
# H1
c1.subheader('H1: Imóveis que possuem vista para água deveriam ser mais caros na média.')
h1 = df2[['waterfront','price']].groupby('waterfront').median().reset_index()
h1['waterfront'] = h1['waterfront'].apply(lambda x: 'yes' if x == 1 else 'no')
fig = px.bar(h1, x='waterfront', y='price', color='waterfront')
fig.update_layout(height=430, margin={'l': 0, 'b': 0, 'r': 0, 't': 0})
c1.plotly_chart(fig, use_container_width=True)
# H2
c2.subheader('H2: Imóveis com vista para água deveriam ser 50% mais caros, na média, que imóveis próximos ao lago sem vista.')
h2 = df2.copy()
h2['lat_long'] = h2[['lat', 'long']].apply(lambda x: str(x['lat']) + ',' + str(x['long']), axis=1)
# lat and long of 4 spots of lake washington
lake_tuple1 = 47.508853, -122.219156
lake_tuple2 = 47.593199, -122.228501
lake_tuple3 = 47.667237, -122.232624
lake_tuple4 = 47.744864, -122.269727
# distance from Lake in km
h2['dist_fromlake1'] = h2['lat_long'].apply(lambda x: great_circle(lake_tuple1, x).km)
h2['dist_fromlake2'] = h2['lat_long'].apply(lambda x: great_circle(lake_tuple2, x).km)
h2['dist_fromlake3'] = h2['lat_long'].apply(lambda x: great_circle(lake_tuple3, x).km)
h2['dist_fromlake4'] = h2['lat_long'].apply(lambda x: great_circle(lake_tuple4, x).km)
h2 = h2[(h2['dist_fromlake1'] < 5) | (h2['dist_fromlake2'] < 5) | (h2['dist_fromlake3'] < 5) | (h2['dist_fromlake4'] < 5)]
h2['built'] = h2['yr_built'].apply(lambda x: '<1955' if x <= 1955 else '>1955')
h2 = h2[['waterfront','price']].groupby('waterfront').mean().reset_index()
h2['waterfront'] = h2['waterfront'].apply(lambda x: 'yes' if x == 1 else 'no')
fig = px.bar(h2, x='waterfront', y='price', color='waterfront')
fig.update_layout(height=430, margin={'l': 0, 'b': 0, 'r': 0, 't': 0})
c2.plotly_chart(fig, use_container_width=True)
# H3
c3.subheader('H3: Imóveis com data de construção menor que 1955 sem renovação deveriam ser mais baratos, na média.')
h3 = df2.copy()
h3 = h3[['built','renovated','price']].groupby(['built','renovated']).mean().reset_index()
fig = px.bar(h3, x='built', y='price', color='renovated', barmode='group')
fig.update_layout(height=430, margin={'l': 0, 'b': 0, 'r': 0, 't': 0})
c3.plotly_chart(fig, use_container_width=True)
# H4
    c4.subheader('H4. A mediana de preço de imóveis com 2 andares ou mais, com vista para água deveria ser mais alta que imóveis com 1 andar e com vista para água.')
h4 = df2.copy()
h4 = h4[h4['waterfront'] == 1]
h4 = h4[['floors_type','waterfront','price']].groupby(['floors_type','waterfront']).median().reset_index()
fig = px.bar(h4, x='floors_type', y='price', color='floors_type')
fig.update_layout(height=430, margin={'l': 0, 'b': 0, 'r': 0, 't': 0})
c4.plotly_chart(fig, use_container_width=True)
# H5
c5.subheader('H5. Imóveis renovados são 20% mais caros.')
h5 = df2.copy()
h5 = h5[['renovated','price']].groupby('renovated').median().reset_index()
fig = px.bar(h5, x='renovated', y='price', color='renovated')
fig.update_layout(height=430, margin={'l': 0, 'b': 0, 'r': 0, 't': 0})
c5.plotly_chart(fig, use_container_width=True)
# H6
c6.subheader('H6. Imóveis em más condições devem ser mais baratos que imóveis com boas condições.')
h6 = df2.copy()
h6 = h6[['condition_type','price']].groupby('condition_type').median().reset_index()
fig = px.bar(h6, x='condition_type', y='price', color='condition_type')
fig.update_layout(height=430, margin={'l': 0, 'b': 0, 'r': 0, 't': 0})
c6.plotly_chart(fig, use_container_width=True)
# H7
    c7.subheader('H7. Quanto maior o número de banheiros, maior deveria ser o preço médio do imóvel.')
h7 = df2.copy()
h7 = h7[['bathrooms', 'price']].groupby('bathrooms').mean().reset_index()
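    # Fractional change in mean price between consecutive bathroom counts, shown as text labels on the bars.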
pct = h7[['price']].pct_change().fillna(0).reset_index()
pct.columns = ['bathrooms', 'price']
p = round(pct['price'], 2)
fig = px.bar(h7, x='bathrooms', y='price', text=p, color_discrete_sequence=px.colors.qualitative.Plotly)
fig.update_layout(height=430, margin={'l': 0, 'b': 0, 'r': 0, 't': 0})
c7.plotly_chart(fig, use_container_width=True)
# H8
c8.subheader('H8. Quanto maior o atributo grade do imóvel, a média de preço deve ser maior.')
h8 = df2.copy()
h8 = h8[['grade', 'price']].groupby('grade').mean().reset_index()
fig = px.bar(h8, x='grade', y='price', color='grade')
fig.update_layout(height=430, margin={'l': 0, 'b': 0, 'r': 0, 't': 0})
c8.plotly_chart(fig, use_container_width=True)
# H9
c9.subheader('H9. Imóveis de 3 quartos e 2 banheiros devem ser 10% mais caros que imóveis de 3 quartos e 1 banheiro.')
h9 = df2.copy()
h9 = h9[(h9['bedrooms'] == 3) & (h9['bathrooms'] <= 2) ]
h9 = h9[h9['bathrooms'] != 0]
h9 = h9[['bathrooms', 'price']].groupby('bathrooms').mean().reset_index()
fig = px.bar(h9, x='bathrooms', y='price', color='bathrooms')
fig.update_layout(height=430, margin={'l': 0, 'b': 0, 'r': 0, 't': 0})
c9.plotly_chart(fig, use_container_width=True)
# H10
c10.subheader('H10. Imóveis com porão deveriam ser mais caros que imóveis sem porão.')
h10 = df2.copy()
h10 = h10[['basement','price']].groupby('basement').mean().reset_index()
fig = px.bar(h10, x='basement', y='price', color='basement')
fig.update_layout(height=430, margin={'l': 0, 'b': 0, 'r': 0, 't': 0})
c10.plotly_chart(fig, use_container_width=True)
return None
def solution(data_solution):
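    """Show the recommended purchases on a map, the sale-price table, and the Top 20 suggestion filters."""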
st.markdown(
"<h1 style='text-align: center; color: #FFFFFF; background: #FB6A6A'> Business Solution </h1>",
unsafe_allow_html=True)
data = data_solution.copy()
st.write('O objetivo final desse projeto era responder a duas questões principais:')
st.write('1. Quais casas o CEO da House Rocket deveria comprar e por qual preço de compra?')
    st.write('Após a criação de novas features responsáveis por apresentar os melhores imóveis '
             'para revenda (como a mediana do preço do imóvel por zipcode e a mediana do preço '
             'da região + season) e a filtragem dos imóveis em boas condições, foram encontrados '
             '5172 imóveis com potencial para venda.')
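    # Flag each property with a marker color: green for recommended purchases ('buy'), red otherwise.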
data['marker_color'] = ''
for i, row in data.iterrows():
if (row['status'] == 'buy'):
data.loc[i, 'marker_color'] = 'green'
else:
data.loc[i, 'marker_color'] = 'red'
if st.checkbox('Mostrar mapa dos imóveis para compra'):
mapa = folium.Map(width=600, height=350,
                          location=[data['lat'].mean(), data['long'].mean()],
default_zoom_start=30)
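        # One folium FeatureGroup per marker color (green = buy, red = don't buy) so the layers can be toggled via LayerControl.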
features = {}
        for color in pd.unique(data['marker_color']):
            features[color] = folium.FeatureGroup(name=color)
for index, row in data.iterrows():
circ = folium.Circle([row['lat'], row['long']],
radius=150, color=row['marker_color'], fill_color=row['marker_color'],
fill_opacity=1, popup='Compra: {0}, Preço: {1}'.format(row['status'],
row['price']))
circ.add_to(features[row['marker_color']])
        for color in pd.unique(data["marker_color"]):
            features[color].add_to(mapa)
folium.LayerControl().add_to(mapa)
folium_static(mapa)
st.write('2. Uma vez a casa em posse da empresa, qual o melhor momento para vendê-las e qual seria o preço da venda?')
st.write('Com os imóveis aptos para compra selecionados, e com as medianas de preço das estações do ano por região '
             'descobertas, foi calculado o valor de venda. Se o preço do imóvel for menor que a mediana da season'
             ' + região, o acréscimo é de 30% sobre o valor da compra; caso contrário, o acréscimo é de 10%.')
st.header('Tabela com os melhores imóveis para negócio')
df = data[['id', 'zipcode', 'season', 'price', 'price_median', 'status', 'sell_price', 'profit', 'best_season']]
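    # Keep only the properties flagged 'buy' that have a recommended selling season, sorted by id.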
report = df[(df['best_season'] != 'no_season') & (df['status'] == 'buy')].sort_values('id', ascending=True).reset_index()
report = report.drop('index', axis=1)
st.write(report)
get_df = report.copy()
st.markdown(get_table_download_link(get_df), unsafe_allow_html=True)
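    # Count how often each season appears as the best selling window among the recommended purchases;
    # the join/split on ',' separates entries that list more than one season.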
season = data[(data['best_season'] != 'no_season') & (data['status'] == 'buy')].copy()
season = season['best_season'].tolist()
season = ','.join(season)
season = season.split(',')
season_count = Counter(season)
season_count = pd.DataFrame(([season_count]))
season_count = season_count.melt().sort_values('value', ascending=False)
st.header('Qual o melhor momento para venda?')
    st.write('Referente à tabela anterior, o gráfico representa a recorrência das estações como melhor período para venda.')
fig = px.bar(season_count, x='variable', y='value', color='variable')
fig.update_layout(height=200, margin={'l': 0, 'b': 0, 'r': 0, 't': 0})
st.plotly_chart(fig, use_container_width=True)
st.write('Também foi realizado um filtro para sugerir a compra dos Top 20 imóveis, por lucratividade, '
'por baixo investimento e um Bônus de imóveis para reforma com maior ganho.')
filter = st.radio('Selecione o filtro para sugestão:', ('Filtro 1: Lucratividade','Filtro 2: Baixo investimento','Bônus: Imóveis para Reforma com maior ganho'))
if filter == 'Filtro 1: Lucratividade':
report = report[(report['best_season'] != 'no_season') & (report['status'] == 'buy')].sort_values('profit', ascending=False)
        sample = report.iloc[0:20,:].copy()
dic = {"Investimento Inicial": sample['price'].sum(), '<NAME>': sample['profit'].sum()}
capital = pd.Series(dic).to_frame('Valor USD')
st.table(capital)
st.header('Tabela com Top 20 por Lucratividade')
st.write(sample)
if filter == 'Filtro 2: Baixo investimento':
report = report[(report['best_season'] != 'no_season') & (report['status'] == 'buy')].sort_values('price', ascending=True)
        sample2 = report.iloc[0:20,:].copy()
dic2 = {"Investimento Inicial": sample2['price'].sum(), '<NAME>': sample2['profit'].sum()}
capital2 = pd.Series(dic2).to_frame('Valor USD')
st.table(capital2)
st.header('Baixo investimento')
st.write(sample2)
if filter == 'Bônus: Imóveis para Reforma com maior ganho':
report3 = data[(data['best_season'] != 'no_season') & (data['status'] == 'dont buy')]
report3 = report3[report3['condition'] < 3].sort_values('profit',ascending=False)
report3 = report3[['id', 'zipcode', 'season', 'price', 'price_median', 'condition', 'sell_price', 'profit', 'best_season']]
        sample3 = report3.iloc[0:20,:].copy().reset_index()
sample3 = sample3.drop('index', axis=1)
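        # Renovation cost estimate: 8% of the purchase price for condition 2, 10% otherwise (condition 1).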
for i in range(len(sample3)):
if sample3.loc[i, 'condition'] == 2:
sample3.loc[i, 'renovate_cost'] = sample3.loc[i, 'price'] * 0.08
else:
sample3.loc[i, 'renovate_cost'] = sample3.loc[i, 'price'] * 0.10
for i in range(len(sample3)):
sample3.loc[i, 'profit_adjusted'] = sample3.loc[i, 'profit'] - sample3.loc[i, 'renovate_cost']
sample3 = sample3[['id', 'zipcode', 'season', 'price', 'price_median', 'condition','renovate_cost','sell_price', 'profit_adjusted', 'best_season']]
dic3 = {"Investimento Inicial": sample3['price'].sum(),
"Investimento em reforma": sample3['renovate_cost'].sum(), '<NAME>': sample3['profit_adjusted'].sum()}
capital3 = pd.Series(dic3).to_frame('Valor USD')
st.table(capital3)
        st.markdown('**O valor da reforma foi calculado da seguinte forma:**')
        st.write('8% do valor da compra para imóveis em condição 2 e 10% para imóveis em condição 1.')
st.header('Bônus: Imóveis para Reforma com maior ganho')
st.write(sample3)
transform_profit(data_solution, report)
return None
def transform_profit(data_solution, report):
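    """Rebuild the three Top 20 suggestion samples and show a side-by-side comparison of their profits."""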
report1 = report[(report['best_season'] != 'no_season') & (report['status'] == 'buy')].sort_values('profit',
ascending=False)
    sample = report1.iloc[0:20, :].copy()
report2 = report[(report['best_season'] != 'no_season') & (report['status'] == 'buy')].sort_values('price',
ascending=True)
    sample2 = report2.iloc[0:20, :].copy()
report3 = data_solution[(data_solution['best_season'] != 'no_season') & (data_solution['status'] == 'dont buy')]
report3 = report3[report3['condition'] < 3].sort_values('profit', ascending=False)
report3 = report3[
['id', 'zipcode', 'season', 'price', 'price_median', 'condition', 'sell_price', 'profit', 'best_season']]
    sample3 = report3.iloc[0:20, :].copy().reset_index()
sample3 = sample3.drop('index', axis=1)
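    # Same renovation-cost estimate as in solution(): 8% of price for condition 2, 10% otherwise.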
for i in range(len(sample3)):
if sample3.loc[i, 'condition'] == 2:
sample3.loc[i, 'renovate_cost'] = sample3.loc[i, 'price'] * 0.08
else:
sample3.loc[i, 'renovate_cost'] = sample3.loc[i, 'price'] * 0.10
for i in range(len(sample3)):
sample3.loc[i, 'profit_adjusted'] = sample3.loc[i, 'profit'] - sample3.loc[i, 'renovate_cost']
sample3 = sample3[['id', 'zipcode', 'season', 'price', 'price_median', 'condition', 'renovate_cost', 'sell_price',
'profit_adjusted', 'best_season']]
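    # Reset to a common 0..n index so the three profit columns align when concatenated side by side below.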
sample.reset_index(drop=True, inplace=True)
sample2.reset_index(drop=True, inplace=True)
sample3.reset_index(drop=True, inplace=True)
if st.checkbox('Mostrar comparativo e observações'):
st.subheader('Tabela comparativa')
        profits = pd.concat([sample['profit'], sample2['profit'], sample3['profit_adjusted']], axis=1)