prompt
stringlengths 19
1.03M
| completion
stringlengths 4
2.12k
| api
stringlengths 8
90
|
---|---|---|
import json
import logging
import time
import sys
import os
import numpy as np
import pandas as pd
import pathlib
import sys
import math
script_version = 1.79
project_dir = pathlib.Path().absolute().parent
sys.path.append(str(project_dir))
from cf_common.CfClient import *
class RollingStats:
"""Creates rolling window statistics object
Inputs are sample window size and number of digits to round values too.
For example:
- transactions per second window size can be 2 or higher with 0 round digits
- time to first byte can have 1 round digit, best is to use the same window size
"""
def __init__(self, sample_window_size, round_digits):
# initiate list with sample size count of zeros
self.sample_size = sample_window_size
self.round_digits = round_digits
self.list = [0] * self.sample_size
self.current_value = 0
self.avg_val = 0
self.avg_val_last = 0
self.increase_avg = 0
self.variance = 0.000
self.avg_max_load_variance = 0.00
self.new_high = False
self.highest_value = 0
self.not_high_count = 0
self.stable = False
self.stable_count = 0
def update(self, new_value):
"""Updates Rolling List and returns current variance
:param new_value: new single value of for example TPS or TTFB
:return: variance
"""
self.current_value = new_value
if len(self.list) == self.sample_size:
self.list.pop(0)
self.list.append(self.current_value)
self.avg_val = sum(self.list) / len(self.list)
self.avg_val = round(self.avg_val, self.round_digits)
if self.round_digits == 0:
self.avg_val = int(self.avg_val)
max_var = max(self.list) - min(self.list)
self.variance = (max_var / self.avg_val) if self.avg_val != 0 else 0
self.variance = round(self.variance, 3)
# check if new value value is the new high for later use
self.check_if_highest()
return self.variance
def reset(self):
"""Resets rolling window back to all 0
Can be used after load increase on stat as current load that tracks if load is stable after increase
Don't use on reported rolling stat as it will have high increase after its set to all 0
:return: None
"""
self.list = [0] * self.sample_size
def check_if_stable(self, max_var_reference):
"""Checks if load is stable in current list
If its stable also check the increase since a load change was last completed.
:param max_var_reference: user/test configured reference value, e.g. 0.03 for 3%
:return: True if stable, False if not
"""
if self.variance <= max_var_reference:
self.stable = True
self.stable_count += 1
self.increase_since_last_load_change()
return True
else:
self.stable = False
self.stable_count = 0
return False
def increase_since_last_load_change(self):
"""Sets increase_avg, the increase since last load
This function can be called from check_if_stable. The set result can be used by a function to
determine by how much to increase the load. After load change call load_increase_complete to
set the value for the next round.
:return: None
"""
if self.avg_val_last != 0:
self.increase_avg = (
(self.avg_val - self.avg_val_last) / self.avg_val_last
) * 100
self.increase_avg = round(self.increase_avg, 2)
else:
self.avg_val_last = 1
def load_increase_complete(self):
"""set last load change value
Use in combination with increase_since_last_load_change
:return: None
"""
self.avg_val_last = self.avg_val
def check_if_highest(self):
"""Checks and sets highest value reference
Can be called by update function to track if the current update is the new highest value
:return: True if new high, False if not higher vs. previous
"""
if self.highest_value < self.avg_val:
self.highest_value = self.avg_val
self.new_high = True
self.not_high_count = 0
else:
self.new_high = False
self.not_high_count += 1
self.avg_max_load_variance = (
(self.avg_val / self.highest_value) if self.highest_value != 0 else 0
)
self.avg_max_load_variance = round(self.avg_max_load_variance, 2)
if self.new_high:
return True
if not self.new_high:
return False
class CfRunTest:
def __init__(self, cf, test_details, result_file, temp_file_dir):
log.info(f"script version: {script_version}")
self.cf = cf # CfClient instance
self.result_file = result_file
self.temp_dir = temp_file_dir
self.test_id = test_details["id"]
self.type_v2 = test_details["type"]
self.in_name = test_details["name"]
self.in_run = test_details["run"]
self.in_load_type = test_details["load_type"]
self.in_start_load = test_details["start_load"]
self.in_incr_low = int(test_details["incr_low"])
self.in_incr_med = int(test_details["incr_med"])
self.in_incr_high = int(test_details["incr_high"])
self.in_duration = int(test_details["duration"])
self.in_startup = int(test_details["startup"])
self.in_rampup = int(test_details["rampup"])
self.in_rampdown = int(test_details["rampdown"])
self.in_shutdown = int(test_details["shutdown"])
self.in_sustain_period = int(test_details["sustain_period"])
self.in_kpi_1 = test_details.get("kpi_1", "tps")
self.in_kpi_2 = test_details.get("kpi_2", "cps")
self.in_kpi_and_or = self.return_bool_true(test_details.get("kpi_and_or"), "AND")
self.in_threshold_low = float(test_details["low_threshold"])
self.in_threshold_med = float(test_details["med_threshold"])
self.in_threshold_high = float(test_details["high_threshold"])
self.in_sustain_period = int(test_details["sustain_period"])
self.variance_sample_size = int(test_details["variance_sample_size"])
self.in_max_variance = float(test_details["max_variance"])
self.in_ramp_low = int(test_details.get("ramp_low", 60))
self.in_ramp_med = int(test_details.get("ramp_med", 40))
self.in_ramp_high = int(test_details.get("ramp_high", 20))
self.in_ramp_seek = self.if_in_set_true(test_details, "ramp_seek",
{"true", "y", "yes"})
self.in_ramp_seek_kpi = test_details.get("ramp_kpi", "tps")
self.in_ramp_seek_value = int(test_details.get("ramp_value", 1))
self.in_ramp_step = int(test_details.get("ramp_step", 1))
if not self.in_ramp_seek:
self.ramp_seek_complete = True
else:
self.ramp_seek_complete = False
self.living_simusers_max_bool = self.check_if_number(
test_details.get("living_simusers_max", False))
self.living_simusers_max = self.return_int_if_present(
self.living_simusers_max_bool,
test_details.get("living_simusers_max", False))
self.in_goal_seek = False
self.first_steady_interval = True
self.in_goal_seek = test_details["goal_seek"]
if self.in_goal_seek.lower() in {"true", "y", "yes"}:
self.in_goal_seek = True
self.first_steady_interval = False
else:
self.in_goal_seek = False
self.test_config = self.get_test_config()
self.queue_id = self.test_config["config"]["queue"]["id"]
self.queue_info = self.get_queue(self.queue_id)
self.queue_capacity = int(self.queue_info["capacity"])
log.info(f"queue_capacity: {self.queue_capacity}")
self.core_count = self.core_count_lookup(self.queue_info)
log.info(f"core_count: {self.core_count}")
self.client_port_count = len(self.test_config["config"]["interfaces"]["client"])
log.info(f"client_port_count: {self.client_port_count}")
self.server_port_count = len(self.test_config["config"]["interfaces"]["server"])
log.info(f"server_port_count: {self.server_port_count}")
self.client_core_count = int(
self.core_count
/ (self.client_port_count + self.server_port_count)
* self.client_port_count
)
log.info(f"client_core_count: {self.client_core_count}")
self.in_capacity_adjust = self.check_capacity_adjust(
test_details["capacity_adj"],
self.in_load_type,
self.client_port_count,
self.client_core_count,
)
log.info(f"in_capacity_adjust: {self.in_capacity_adjust}")
self.load_constraints = {"enabled": False}
if not self.update_config_load():
report_error = f"unknown load_type with test type"
log.debug(report_error)
print(report_error)
self.test_config = self.get_test_config()
self.test_run = self.start_test_run()
if not self.test_started:
report_error = f"test did not start\n{json.dumps(self.test_run, indent=4)}"
log.debug(report_error)
print(report_error)
self.test_run_update = None
self.id = self.test_run.get("id")
self.queue_id = self.test_run.get("queueId")
self.score = self.test_run.get("score")
self.grade = self.test_run.get("grade")
self.run_id = self.test_run.get("runId")
self.status = self.test_run.get("status") # main run status 'running'
self.name = self.test_run.get("test", {}).get("name")
self.type_v1 = self.test_run.get("test", {}).get("type")
self.sub_status = self.test_run.get("subStatus")
self.created_at = self.test_run.get("createdAt")
self.updated_at = self.test_run.get("updatedAt")
self.started_at = self.test_run.get("startedAt")
self.finished_at = self.test_run.get("finishedAt")
self.progress = self.test_run.get("progress")
self.time_elapsed = self.test_run.get("timeElapsed")
self.time_remaining = self.test_run.get("timeRemaining")
self.run_link = (
"https://"
+ self.cf.controller_ip
+ "/#livecharts/"
+ self.type_v1
+ "/"
+ self.id
)
print(f"Live charts: {self.run_link}")
self.report_link = None
self.sub_status = None # subStatus - none while running or not started
self.progress = 0 # progress - 0-100
self.time_elapsed = 0 # timeElapsed - seconds
self.time_remaining = 0 # timeRemaining - seconds
self.started_at = None # startedAt
self.finished_at = None # finishedAt
self.c_rx_bandwidth = 0
self.c_rx_packet_count = 0
self.c_rx_packet_rate = 0
self.c_tx_bandwidth = 0
self.c_tx_packet_count = 0
self.c_tx_packet_rate = 0
self.c_http_aborted_txns = 0
self.c_http_aborted_txns_sec = 0
self.c_http_attempted_txns = 0
self.c_http_attempted_txns_sec = 0
self.c_http_successful_txns = 0
self.c_http_successful_txns_sec = 0
self.c_http_unsuccessful_txns = 0
self.c_http_unsuccessful_txns_sec = 0
self.c_loadspec_avg_idle = 0
self.c_loadspec_avg_cpu = 0
self.c_memory_main_size = 0
self.c_memory_main_used = 0
self.c_memory_packetmem_used = 0
self.c_memory_rcv_queue_length = 0
self.c_simusers_alive = 0
self.c_simusers_animating = 0
self.c_simusers_blocking = 0
self.c_simusers_sleeping = 0
self.c_tcp_avg_ttfb = 0
self.c_tcp_avg_tt_synack = 0
self.c_tcp_cumulative_attempted_conns = 0
self.c_tcp_cumulative_established_conns = 0
self.c_url_avg_response_time = 0
self.c_tcp_attempted_conn_rate = 0
self.c_tcp_established_conn_rate = 0
self.c_tcp_attempted_conns = 0
self.c_tcp_established_conns = 0
self.c_current_load = 0
self.c_desired_load = 0
self.c_total_bandwidth = 0
self.c_memory_percent_used = 0
self.c_current_desired_load_variance = 0.0
self.c_current_max_load_variance = 0.0
self.c_transaction_error_percentage = 0.0
self.s_rx_bandwidth = 0
self.s_rx_packet_count = 0
self.s_rx_packet_rate = 0
self.s_tx_bandwidth = 0
self.s_tx_packet_count = 0
self.s_tx_packet_rate = 0
self.s_memory_main_size = 0
self.s_memory_main_used = 0
self.s_memory_packetmem_used = 0
self.s_memory_rcv_queue_length = 0
self.s_memory_avg_cpu = 0
self.s_tcp_closed_error = 0
self.s_tcp_closed = 0
self.s_tcp_closed_reset = 0
self.s_memory_percent_used = 0
self.first_ramp_load_increase = True
self.first_goal_load_increase = True
self.max_load_reached = False
self.max_load = 0
self.stop = False # test loop control
self.phase = None # time phase of test: ramp up, steady ramp down
# rolling statistics
self.rolling_sample_size = self.variance_sample_size
self.max_var_reference = self.in_max_variance
self.rolling_tps = RollingStats(self.rolling_sample_size, 0)
self.rolling_ttfb = RollingStats(self.rolling_sample_size, 1)
self.rolling_current_load = RollingStats(self.rolling_sample_size, 0)
self.rolling_count_since_goal_seek = RollingStats(
self.rolling_sample_size, 1
) # round to 1 for > 0 avg
self.rolling_cps = RollingStats(self.rolling_sample_size, 0)
self.rolling_conns = RollingStats(self.rolling_sample_size, 0)
self.rolling_bw = RollingStats(self.rolling_sample_size, 0)
self.kpi_1 = self.rolling_tps
self.kpi_2 = self.rolling_cps
self.kpi_1_stable = True
self.kpi_2_stable = True
self.kpi_1_list = []
self.kpi_2_list = []
self.ramp_seek_kpi = self.rolling_tps
self.start_time = time.time()
self.timer = time.time() - self.start_time
self.time_to_run = 0
self.time_to_start = 0
self.time_to_activity = 0
self.time_to_stop_start = 0
self.time_to_stop = 0
self.test_started = False
# create entry in result file at the start of test
self.save_results()
@staticmethod
def if_in_set_true(dict_var, dict_key, in_set):
if dict_key in dict_var:
var = dict_var[dict_key]
if var.lower() in in_set:
return True
return False
@staticmethod
def check_if_number(in_value):
if isinstance(in_value, int) or isinstance(in_value, float):
return True
if isinstance(in_value, str):
if in_value.isdigit():
return True
return False
@staticmethod
def return_int_if_present(present, value):
if present:
return int(value)
def get_test_config(self):
try:
response = self.cf.get_test(
self.type_v2, self.test_id, self.temp_dir / "running_test_config.json"
)
log.debug(f"{json.dumps(response, indent=4)}")
except Exception as detailed_exception:
log.error(
f"Exception occurred when retrieving the test: "
f"\n<{detailed_exception}>"
)
return response
def get_queue(self, queue_id):
try:
response = self.cf.get_queue(queue_id)
log.debug(f"{json.dumps(response, indent=4)}")
except Exception as detailed_exception:
log.error(
f"Exception occurred when retrieving test queue informationn: "
f"\n<{detailed_exception}>"
)
return response
@staticmethod
def core_count_lookup(queue_info):
cores = 0
for cg in queue_info["computeGroups"]:
cores = cores + int(cg["cores"])
return cores
@staticmethod
def check_capacity_adjust(
cap_adjust, load_type, client_port_count, client_core_count
):
if cap_adjust.lower() == "auto":
if load_type.lower() in {"simusers", "simusers/second"}:
return client_core_count
else:
return client_port_count
else:
return int(cap_adjust)
def update_config_load(self):
load_type = self.in_load_type.lower()
test_type = self.test_type()
if test_type in {"tput", "emix"} and load_type == "simusers":
load_key = "bandwidth"
self.in_load_type = "SimUsers"
elif test_type in {"tput", "emix"} and load_type == "bandwidth":
load_key = "bandwidth"
self.in_load_type = "Bandwidth"
elif test_type == "tput" and load_type == "simusers/second":
load_key = "bandwidth"
self.in_load_type = "SimUsers/Second"
elif test_type == "cps" and load_type == "connections/second":
load_key = "connectionsPerSecond"
self.in_load_type = "Connections/Second"
elif test_type == "cps" and load_type == "simusers":
load_key = "connectionsPerSecond"
self.in_load_type = "SimUsers"
elif test_type == "cps" and load_type == "simusers/second":
load_key = "connectionsPerSecond"
self.in_load_type = "SimUsers/Second"
elif test_type == "conns" and load_type == "simusers":
load_key = "connections"
self.in_load_type = "SimUsers"
elif test_type == "conns" and load_type == "connections":
load_key = "connections"
self.in_load_type = "Connections"
else:
return False
self.in_start_load = int(self.in_start_load) * self.in_capacity_adjust
self.update_load_constraints()
load_update = {
"config": {
"loadSpecification": {
"duration": int(self.in_duration),
"startup": int(self.in_startup),
"rampup": int(self.in_rampup),
"rampdown": int(self.in_rampdown),
"shutdown": int(self.in_shutdown),
load_key: int(self.in_start_load),
"type": self.in_load_type,
"constraints": self.load_constraints,
# "constraints": {"enabled": False},
}
}
}
with open(self.temp_dir / "test_load_update.json", "w") as f:
json.dump(load_update, f, indent=4)
response = self.cf.update_test(
self.type_v2, self.test_id, self.temp_dir / "test_load_update.json"
)
log.info(f"{json.dumps(response, indent=4)}")
return True
def update_load_constraints(self):
living = {"enabled": False}
open_connections = {"enabled": False}
birth_rate = {"enabled": False}
connections_rate = {"enabled": False}
constraints = False
if self.living_simusers_max_bool:
constraints = True
living = {
"enabled": True,
"max": self.living_simusers_max
}
if constraints:
self.load_constraints = {
"enabled": True,
"living": living,
"openConnections": open_connections,
"birthRate": birth_rate,
"connectionsRate": connections_rate,
}
def test_type(self):
if self.type_v2 == "http_throughput":
test_type = "tput"
elif self.type_v2 == "http_connections_per_second":
test_type = "cps"
elif self.type_v2 == "open_connections":
test_type = "conns"
elif self.type_v2 == "emix":
test_type = "emix"
else:
test_type = "tput"
return test_type
def start_test_run(self):
try:
response = self.cf.start_test(self.test_id)
log.info(f"{json.dumps(response, indent=4)}")
self.test_started = True
except Exception as detailed_exception:
log.error(
f"Exception occurred when starting the test: "
f"\n<{detailed_exception}>"
)
self.test_started = False
return response
def update_test_run(self):
self.test_run_update = self.cf.get_test_run(self.id)
self.status = self.test_run_update.get("status") # main run status 'running'
self.sub_status = self.test_run_update.get("subStatus")
self.score = self.test_run_update.get("score")
self.grade = self.test_run_update.get("grade")
self.started_at = self.test_run_update.get("startedAt")
self.finished_at = self.test_run_update.get("finishedAt")
self.progress = self.test_run_update.get("progress")
self.time_elapsed = self.test_run_update.get("timeElapsed")
self.time_remaining = self.test_run_update.get("timeRemaining")
update_test_run_log = (
f"Status: {self.status} sub status: {self.sub_status} "
f" elapsed: {self.time_elapsed} remaining: {self.time_remaining}"
)
log.debug(update_test_run_log)
return True
def update_phase(self):
"""updates test phase based on elapsed time vs. loadspec configuration
If goal seeking is enabled and the test is in steady phase, the phase will be set to goalseek
:return: None
"""
phase = None
steady_duration = self.in_duration - (
self.in_startup + self.in_rampup + self.in_rampdown + self.in_shutdown
)
if 0 <= self.time_elapsed <= self.in_startup:
phase = "startup"
elif self.in_startup <= self.time_elapsed <= (self.in_startup + self.in_rampup):
phase = "rampup"
elif (
(self.in_startup + self.in_rampup)
<= self.time_elapsed
<= (self.in_duration - (self.in_rampdown + self.in_shutdown))
):
phase = "steady"
if self.first_steady_interval:
phase = "rampup"
self.first_steady_interval = False
elif (
(self.in_startup + self.in_rampup + steady_duration)
<= self.time_elapsed
<= (self.in_duration - self.in_shutdown)
):
phase = "rampdown"
elif (
(self.in_duration - self.in_shutdown)
<= self.time_elapsed
<= self.in_duration
):
phase = "shutdown"
elif self.in_duration <= self.time_elapsed:
phase = "finished"
log.info(f"test phase: {phase}")
self.phase = phase
# Override phase if ramp seek is enabled
if self.in_ramp_seek and self.phase == "steady" and not self.ramp_seek_complete:
self.phase = "rampseek"
log.info(f"ramp seek phase: {self.phase}")
# Override phase if goal seeking is enabled
elif self.in_goal_seek and self.phase == "steady":
self.phase = "goalseek"
log.info(f"goal seek phase: {self.phase}")
def update_run_stats(self):
get_run_stats = self.cf.fetch_test_run_statistics(self.id)
# log.debug(f'{get_run_stats}')
self.update_client_stats(get_run_stats)
self.update_server_stats(get_run_stats)
def update_client_stats(self, get_run_stats):
client_stats = {}
for i in get_run_stats["client"]:
if "type" in i and "subType" in i and "value" in i:
type = i["type"]
sub_type = i["subType"]
value = i["value"]
if not type in client_stats:
client_stats[type] = {}
client_stats[type][sub_type] = value
elif "type" in i and "value" in i:
type = i["type"]
value = i["value"]
client_stats[type] = value
self.assign_client_run_stats(client_stats)
def update_server_stats(self, get_run_stats):
server_stats = {}
for i in get_run_stats["server"]:
if "type" in i and "subType" in i and "value" in i:
type = i["type"]
sub_type = i["subType"]
value = i["value"]
if not type in server_stats:
server_stats[type] = {}
server_stats[type][sub_type] = value
elif "type" in i and "value" in i:
type = i["type"]
value = i["value"]
server_stats[type] = value
self.assign_server_run_stats(server_stats)
def assign_client_run_stats(self, client_stats):
self.c_rx_bandwidth = client_stats.get("driver", {}).get("rxBandwidth", 0)
self.c_rx_packet_count = client_stats.get("driver", {}).get("rxPacketCount", 0)
self.c_rx_packet_rate = client_stats.get("driver", {}).get("rxPacketRate", 0)
self.c_tx_bandwidth = client_stats.get("driver", {}).get("txBandwidth", 0)
self.c_tx_packet_count = client_stats.get("driver", {}).get("txPacketCount", 0)
self.c_tx_packet_rate = client_stats.get("driver", {}).get("txPacketRate", 0)
self.c_http_aborted_txns = client_stats.get("http", {}).get("abortedTxns", 0)
self.c_http_aborted_txns_sec = client_stats.get("http", {}).get(
"abortedTxnsPerSec", 0
)
self.c_http_attempted_txns = client_stats.get("sum", {}).get("attemptedTxns", 0)
self.c_http_attempted_txns_sec = client_stats.get("sum", {}).get(
"attemptedTxnsPerSec", 0
)
self.c_http_successful_txns = client_stats.get("sum", {}).get(
"successfulTxns", 0
)
self.c_http_successful_txns_sec = client_stats.get("sum", {}).get(
"successfulTxnsPerSec", 0
)
self.c_http_unsuccessful_txns = client_stats.get("sum", {}).get(
"unsuccessfulTxns", 0
)
self.c_http_unsuccessful_txns_sec = client_stats.get("sum", {}).get(
"unsuccessfulTxnsPerSec", 0
)
self.c_loadspec_avg_idle = client_stats.get("loadspec", {}).get(
"averageIdleTime", 0
)
self.c_loadspec_avg_cpu = round(
client_stats.get("loadspec", {}).get("cpuUtilized", 0), 1
)
self.c_memory_main_size = client_stats.get("memory", {}).get("mainPoolSize", 0)
self.c_memory_main_used = client_stats.get("memory", {}).get("mainPoolUsed", 0)
self.c_memory_packetmem_used = client_stats.get("memory", {}).get(
"packetMemoryUsed", 0
)
self.c_memory_rcv_queue_length = client_stats.get("memory", {}).get(
"rcvQueueLength", 0
)
self.c_simusers_alive = client_stats.get("simusers", {}).get("simUsersAlive", 0)
self.c_simusers_animating = client_stats.get("simusers", {}).get(
"simUsersAnimating", 0
)
self.c_simusers_blocking = client_stats.get("simusers", {}).get(
"simUsersBlocking", 0
)
self.c_simusers_sleeping = client_stats.get("simusers", {}).get(
"simUsersSleeping", 0
)
self.c_current_load = client_stats.get("sum", {}).get("currentLoadSpecCount", 0)
self.c_desired_load = client_stats.get("sum", {}).get("desiredLoadSpecCount", 0)
self.c_tcp_avg_ttfb = round(
client_stats.get("tcp", {}).get("averageTimeToFirstByte", 0), 1
)
self.c_tcp_avg_tt_synack = round(
client_stats.get("tcp", {}).get("averageTimeToSynAck", 0), 1
)
self.c_tcp_cumulative_attempted_conns = client_stats.get("tcp", {}).get(
"cummulativeAttemptedConns", 0
)
self.c_tcp_cumulative_established_conns = client_stats.get("tcp", {}).get(
"cummulativeEstablishedConns", 0
)
self.c_url_avg_response_time = round(
client_stats.get("url", {}).get("averageRespTimePerUrl", 0), 1
)
self.c_tcp_attempted_conn_rate = client_stats.get("sum", {}).get(
"attemptedConnRate", 0
)
self.c_tcp_established_conn_rate = client_stats.get("sum", {}).get(
"establishedConnRate", 0
)
self.c_tcp_attempted_conns = client_stats.get("sum", {}).get(
"attemptedConns", 0
)
self.c_tcp_established_conns = client_stats.get("sum", {}).get(
"currentEstablishedConns", 0
)
self.time_elapsed = client_stats.get("timeElapsed", 0)
self.time_remaining = client_stats.get("timeRemaining", 0)
self.c_total_bandwidth = self.c_rx_bandwidth + self.c_tx_bandwidth
if self.c_memory_main_size > 0 and self.c_memory_main_used > 0:
self.c_memory_percent_used = round(
self.c_memory_main_used / self.c_memory_main_size, 1
)
if self.c_current_load > 0 and self.c_desired_load > 0:
self.c_current_desired_load_variance = round(
self.c_current_load / self.c_desired_load, 2
)
if self.c_http_successful_txns > 0:
self.c_transaction_error_percentage = (
self.c_http_unsuccessful_txns + self.c_http_aborted_txns
) / self.c_http_successful_txns
return True
def assign_server_run_stats(self, server_stats):
self.s_rx_bandwidth = server_stats.get("driver", {}).get("rxBandwidth", 0)
self.s_rx_packet_count = server_stats.get("driver", {}).get("rxPacketCount", 0)
self.s_rx_packet_rate = server_stats.get("driver", {}).get("rxPacketRate", 0)
self.s_tx_bandwidth = server_stats.get("driver", {}).get("txBandwidth", 0)
self.s_tx_packet_count = server_stats.get("driver", {}).get("txPacketCount", 0)
self.s_tx_packet_rate = server_stats.get("driver", {}).get("txPacketRate", 0)
self.s_memory_main_size = server_stats.get("memory", {}).get("mainPoolSize", 0)
self.s_memory_main_used = server_stats.get("memory", {}).get("mainPoolUsed", 0)
self.s_memory_packetmem_used = server_stats.get("memory", {}).get(
"packetMemoryUsed", 0
)
self.s_memory_rcv_queue_length = server_stats.get("memory", {}).get(
"rcvQueueLength", 0
)
self.s_memory_avg_cpu = round(
server_stats.get("memory", {}).get("cpuUtilized", 0), 1
)
self.s_tcp_closed_error = server_stats.get("sum", {}).get("closedWithError", 0)
self.s_tcp_closed = server_stats.get("sum", {}).get("closedWithNoError", 0)
self.s_tcp_closed_reset = server_stats.get("sum", {}).get("closedWithReset", 0)
if self.s_memory_main_size > 0 and self.s_memory_main_used > 0:
self.s_memory_percent_used = round(
self.s_memory_main_used / self.s_memory_main_size, 1
)
return True
def print_test_status(self):
status = (
f"{self.timer}s -status: {self.status} -sub status: {self.sub_status} "
f"-progress: {self.progress} -seconds elapsed: {self.time_elapsed} "
f"-remaining: {self.time_remaining}"
)
print(status)
def print_test_stats(self):
stats = (
f"{self.time_elapsed}s {self.phase} -load: {self.c_current_load:,}/{self.c_desired_load:,} "
f"-current/desired var: {self.c_current_desired_load_variance} "
f"-current avg/max var: {self.rolling_tps.avg_max_load_variance} "
f"-seek ready: {self.rolling_count_since_goal_seek.stable}"
f"\n-tps: {self.c_http_successful_txns_sec:,} -tps stable: {self.rolling_tps.stable} "
f"-tps cur avg: {self.rolling_tps.avg_val:,} -tps prev: {self.rolling_tps.avg_val_last:,} "
f"-delta tps: {self.rolling_tps.increase_avg} -tps list:{self.rolling_tps.list} "
f"\n-cps: {self.c_tcp_established_conn_rate:,} -cps stable: {self.rolling_cps.stable} "
f"-cps cur avg: {self.rolling_cps.avg_val:,} -cps prev: {self.rolling_cps.avg_val_last:,} "
f"-delta cps: {self.rolling_cps.increase_avg} -cps list:{self.rolling_cps.list} "
f"\n-conns: {self.c_tcp_established_conns:,} -conns stable: {self.rolling_conns.stable} "
f"-conns cur avg: {self.rolling_conns.avg_val:,} -conns prev: {self.rolling_conns.avg_val_last:,} "
f"-delta conns: {self.rolling_cps.increase_avg} -conns list:{self.rolling_conns.list} "
f"\n-bw: {self.c_total_bandwidth:,} -bw stable: {self.rolling_bw.stable} "
f"-bw cur avg: {self.rolling_bw.avg_val:,} -bw prev: {self.rolling_bw.avg_val_last:,} "
f"-delta bw: {self.rolling_bw.increase_avg} -bw list:{self.rolling_bw.list} "
f"\n-ttfb: {self.c_tcp_avg_ttfb:,} -ttfb stable: {self.rolling_ttfb.stable} "
f"-ttfb cur avg: {self.rolling_ttfb.avg_val:,} -ttfb prev: {self.rolling_ttfb.avg_val_last:,} "
f"-delta ttfb: {self.rolling_ttfb.increase_avg} -ttfb list:{self.rolling_ttfb.list} "
# f"\n-total bw: {self.c_total_bandwidth:,} -rx bw: {self.c_rx_bandwidth:,}"
# f" tx bw: {self.c_tx_bandwidth:,}"
# f"\n-ttfb cur avg: {self.rolling_ttfb.avg_val} -ttfb prev: {self.rolling_ttfb.avg_val_last} "
# f"-delta ttfb: {self.rolling_ttfb.increase_avg} -ttfb list:{self.rolling_ttfb.list}"
)
print(stats)
log.debug(stats)
def wait_for_running_status(self):
"""
Wait for the current test to return a 'running' status.
:return: True if no statements failed and there were no exceptions. False otherwise.
"""
log.debug("Inside the RunTest/wait_for_running_status method.")
i = 0
while True:
time.sleep(4)
self.timer = int(round(time.time() - self.start_time))
i += 4
if not self.update_test_run():
return False
if self.status == "running":
print(f"{self.timer}s - status: {self.status}")
break
print(
f"{self.timer}s - status: {self.status} sub status: {self.sub_status}"
)
if self.status in {"failed", "finished"}:
log.error("Test failed")
return False
# check to see if another test with the same ID is running
# (can happen due to requests retry)
if i > 120 and self.status == "waiting":
self.check_running_tests()
# stop after 1800 seconds of waiting
if i > 1800:
log.error(
"Waited for 1800 seconds, test did not transition to a running status."
)
return False
self.time_to_run = self.timer
log.debug(f"Test {self.name} successfully went to running status.")
log.debug(json.dumps(self.test_run_update, indent=4))
self.run_id = self.test_run_update.get("runId")
self.report_link = (
"https://"
+ self.cf.controller_ip
+ "/#results/"
+ self.type_v1
+ "/"
+ self.run_id
)
return True
def check_running_tests(self):
"""Checks if tests with same ID is running and changes control to this test
This function can be triggered if waiting status is too long because the requests module retry mechanism has
kicked off two duplicate tests in error. It will look for matching running tests and switch control over to the
already running duplicate test.
:return: None
"""
# get list of run IDs and test IDs with status
test_runs = self.cf.list_test_runs()
# look for running status and compare ID
for run in test_runs:
if run["status"] == "running":
log.debug(
f"check_running_tests found running test: {json.dumps(run, indent=4)}"
)
# if waiting and running test IDs match, change the running test
if self.test_id == run["testId"]:
log.debug(
f"check_running_tests found matching test_id {self.test_id}"
)
# stop current waiting test
response = self.cf.stop_test(self.id)
log.debug(
f"change_running_test, stopped duplicate waiting test: {response}"
)
# change over to running test
self.id = run["id"]
else:
log.debug(
f"check_running_tests test_id: {self.test_id} "
f"does not match running test_id: {run['testId']}"
)
def wait_for_running_sub_status(self):
"""
Wait for the current test to return a 'None' sub status.
:return: True if no statements failed and there were no exceptions. False otherwise.
"""
log.debug("Inside the RunTest/wait_for_running_sub_status method.")
i = 0
while True:
time.sleep(4)
self.timer = int(round(time.time() - self.start_time))
i += 4
if not self.update_test_run():
return False
print(
f"{self.timer}s - status: {self.status} sub status: {self.sub_status}"
)
if self.sub_status is None:
break
if self.status in {"failed", "finished"}:
log.error("Test failed")
return False
# stop after 0 seconds of waiting
if i > 360:
log.error(
"Waited for 360 seconds, test did not transition to traffic state."
)
return False
self.time_to_start = self.timer - self.time_to_run
log.debug(f"Test {self.name} successfully went to traffic state.")
log.debug(json.dumps(self.test_run_update, indent=4))
return True
def stop_wait_for_finished_status(self):
"""
Stop and wait for the current test to return a 'finished' status.
:return: True if no statements failed and there were no exceptions.
False otherwise.
"""
log.debug("Inside the stop_test/wait_for_finished_status method.")
self.time_to_stop_start = self.timer
if self.status == "running":
self.cf.stop_test(self.id)
i = 0
while True:
time.sleep(4)
self.timer = int(round(time.time() - self.start_time))
i += 4
if not self.update_test_run():
return False
if self.status in {"stopped", "finished", "failed"}:
print(f"{self.timer} status: {self.status}")
break
if self.status == "failed":
print(f"{self.timer} status: {self.status}")
return False
print(
f"{self.timer}s - status: {self.status} sub status: {self.sub_status}"
)
if i > 1800:
error_msg = (
"Waited for 1800 seconds, "
"test did not transition to a finished status."
)
log.error(error_msg)
print(error_msg)
return False
self.time_to_stop = self.timer - self.time_to_stop_start
log.debug(
f"Test {self.name} successfully went to finished status in "
f"{self.time_to_stop} seconds."
)
return True
def wait_for_test_activity(self):
"""
Wait for the current test to show activity - metric(s) different than 0.
:return: True if no statements failed and there were no exceptions.
False otherwise.
"""
log.debug("Inside the RunTest/wait_for_test_activity method.")
test_generates_activity = False
i = 0
while not test_generates_activity:
self.timer = int(round(time.time() - self.start_time))
self.update_test_run()
self.update_run_stats()
# self.print_test_status()
if self.sub_status is None:
self.print_test_stats()
self.save_results()
if self.c_http_successful_txns_sec > 0:
test_generates_activity = True
if self.status in {"failed", "finished"}:
log.error("Test failed")
return False
if i > 180:
error_msg = (
"Waited for 180 seconds, test did not have successful transactions"
)
log.error(error_msg)
print(error_msg)
return False
time.sleep(4)
i = i + 4
print(f"")
self.time_to_activity = self.timer - self.time_to_start - self.time_to_run
return True
@staticmethod
def countdown(t):
"""countdown function
Can be used after load increase for results to update
:param t: countdown in seconds
:return: None
"""
while t:
mins, secs = divmod(t, 60)
time_format = "{:02d}:{:02d}".format(mins, secs)
print(time_format, end="\r")
time.sleep(1)
t -= 1
def goal_seek(self):
log.info(f"In goal_seek function")
if self.c_current_load == 0:
self.stop = True
log.info(f"goal_seek stop, c_current_load == 0")
return False
if self.first_goal_load_increase:
self.first_goal_load_increase = False
new_load = self.c_current_load + (self.in_incr_low *
self.in_capacity_adjust)
else:
if self.check_if_load_type_simusers():
new_load = self.goal_seek_set_simuser_kpi(self.kpi_1)
log.info(f"new_load = {new_load}")
elif self.check_if_load_type_default():
new_load = self.goal_seek_set_default()
log.info(f"new_load = {new_load}")
else:
report_error = f"Unknown load type: " \
f"{self.test_config['config']['loadSpecification']['type']}"
log.error(report_error)
print(report_error)
return False
if new_load is False:
log.info(
f"Config load spec type: {self.test_config['config']['loadSpecification']['type']}"
)
log.info(f"Goal_seek return, new_load is False")
return False
self.change_update_load(new_load, 16)
return True
def ramp_seek(self, ramp_kpi, ramp_to_value):
log.info(f"In ramp_seek function")
if self.c_current_load == 0:
self.stop = True
log.info(f"ramp_seek stop, c_current_load == 0")
return False
# if self.first_ramp_load_increase:
# self.first_ramp_load_increase = False
# new_load = self.c_current_load * 2
if self.in_ramp_step < 1:
self.ramp_seek_complete = True
return
if ramp_kpi.current_value < ramp_to_value:
load_increase_multiple = round(ramp_to_value / ramp_kpi.current_value, 3)
load_increase = (self.c_current_load * load_increase_multiple) - self.c_current_load
load_increase = round(load_increase / self.in_ramp_step, 3)
new_load = self.round_up_to_even(self.c_current_load + load_increase)
self.in_ramp_step = self.in_ramp_step - 1
log.info(f"new load: {new_load}, current_load: {self.c_current_load}"
f" * {load_increase} load_increase "
f"ramp_step left: {self.in_ramp_step} "
f"\n ramp_to_value: {ramp_to_value} "
f"ramp_kpi.current_value: {ramp_kpi.current_value}"
)
self.in_incr_low = self.round_up_to_even(new_load * self.in_ramp_low/100)
self.in_incr_med = self.round_up_to_even(new_load * self.in_ramp_med/100)
self.in_incr_high = self.round_up_to_even(new_load * self.in_ramp_high/100)
else:
self.ramp_seek_complete = True
self.change_update_load(new_load, 8)
return True
@staticmethod
def round_up_to_even(v):
return math.ceil(v / 2.) * 2
def check_if_load_type_simusers(self):
if self.test_config["config"]["loadSpecification"]["type"].lower() in {
"simusers",
"simusers/second",
}:
return True
return False
def check_if_load_type_default(self):
if self.test_config["config"]["loadSpecification"]["type"].lower() in {
"bandwidth",
"connections",
"connections/second",
}:
return True
return False
def change_update_load(self, new_load, count_down):
new_load = self.round_up_to_even(new_load)
log_msg = f"\nchanging load from: {self.c_current_load} to: {new_load} status: {self.status}"
log.info(log_msg)
print(log_msg)
try:
self.cf.change_load(self.id, new_load)
self.rolling_tps.load_increase_complete()
self.rolling_ttfb.load_increase_complete()
self.rolling_current_load.load_increase_complete()
self.rolling_cps.load_increase_complete()
self.rolling_conns.load_increase_complete()
self.rolling_bw.load_increase_complete()
except Exception as detailed_exception:
log.error(
f"Exception occurred when changing test: " f"\n<{detailed_exception}>"
)
self.countdown(count_down)
return True
def goal_seek_set_default(self):
set_load = 0
if self.c_current_desired_load_variance >= 0.97:
if self.c_current_load <= self.in_threshold_low:
set_load = self.c_current_load + (
self.in_incr_low * self.in_capacity_adjust
)
elif self.c_current_load <= self.in_threshold_med:
set_load = self.c_current_load + (
self.in_incr_med * self.in_capacity_adjust
)
elif self.c_current_load <= self.in_threshold_high:
set_load = self.c_current_load + (
self.in_incr_high * self.in_capacity_adjust
)
elif self.c_current_load > self.in_threshold_high:
return False
else:
return False
if self.in_threshold_high < set_load:
if self.c_current_desired_load_variance > 0.99:
return False
else:
set_load = self.in_threshold_high
return set_load
def goal_seek_set_simuser_kpi(self, kpi):
log.debug(f"in goal_seek_set_simuser_kpi function")
set_load = 0
if kpi.increase_avg >= self.in_threshold_low:
set_load = self.c_current_load + (self.in_incr_low *
self.in_capacity_adjust)
elif kpi.increase_avg >= self.in_threshold_med:
set_load = self.c_current_load + (self.in_incr_med *
self.in_capacity_adjust)
elif kpi.increase_avg >= self.in_threshold_high:
set_load = self.c_current_load + (self.in_incr_high *
self.in_capacity_adjust)
elif kpi.increase_avg < self.in_threshold_high:
log.info(
f"rolling_tps.increase_avg {kpi.increase_avg} < "
f"{self.in_threshold_high} in_threshold_high"
)
return False
if kpi.avg_max_load_variance < 0.97:
set_load = self.c_current_load
self.max_load_reached = True
log.info(
f"set_load = {set_load} "
f"kpi_avg_max_load_variance: {kpi.avg_max_load_variance}"
)
return set_load
def update_rolling_averages(self):
"""Updates rolling statistics averages used to make test control decisions
:return: None
"""
self.rolling_tps.update(self.c_http_successful_txns_sec)
self.rolling_tps.check_if_stable(self.max_var_reference)
self.rolling_ttfb.update(self.c_tcp_avg_ttfb)
self.rolling_ttfb.check_if_stable(self.max_var_reference)
self.rolling_current_load.update(self.c_current_load)
self.rolling_current_load.check_if_stable(self.max_var_reference)
self.rolling_cps.update(self.c_tcp_established_conn_rate)
self.rolling_cps.check_if_stable(self.max_var_reference)
self.rolling_conns.update(self.c_tcp_established_conns)
self.rolling_conns.check_if_stable(self.max_var_reference)
self.rolling_bw.update(self.c_total_bandwidth)
self.rolling_bw.check_if_stable(self.max_var_reference)
self.rolling_count_since_goal_seek.update(1)
self.rolling_count_since_goal_seek.check_if_stable(0)
def check_kpi(self):
self.in_kpi_1 = self.in_kpi_1.lower()
if self.in_kpi_1 == "tps":
self.kpi_1 = self.rolling_tps
elif self.in_kpi_1 == "cps":
self.kpi_1 = self.rolling_cps
elif self.in_kpi_1 == "conns":
self.kpi_1 = self.rolling_conns
elif self.in_kpi_1 == "bw":
self.kpi_1 = self.rolling_bw
elif self.in_kpi_1 == "ttfb":
self.kpi_1 = self.rolling_ttfb
else:
log.debug(f"check_kpi unknown kpi_1, setting to TPS")
self.kpi_1 = self.rolling_tps
self.in_kpi_2 = self.in_kpi_2.lower()
if self.in_kpi_2 == "tps":
self.kpi_2 = self.rolling_tps
elif self.in_kpi_2 == "cps":
self.kpi_2 = self.rolling_cps
elif self.in_kpi_2 == "conns":
self.kpi_2 = self.rolling_conns
elif self.in_kpi_2 == "bw":
self.kpi_2 = self.rolling_bw
elif self.in_kpi_2 == "ttfb":
self.kpi_2 = self.rolling_ttfb
else:
log.debug(f"check_kpi unknown kpi_2, setting to CPS")
self.kpi_2 = self.rolling_cps
def check_ramp_seek_kpi(self):
if self.in_ramp_seek_kpi == "tps":
self.ramp_seek_kpi = self.rolling_tps
elif self.in_ramp_seek_kpi == "cps":
self.ramp_seek_kpi = self.rolling_cps
elif self.in_ramp_seek_kpi == "conns":
self.ramp_seek_kpi = self.rolling_conns
elif self.in_ramp_seek_kpi == "bw":
self.ramp_seek_kpi = self.rolling_bw
elif self.in_ramp_seek_kpi == "ttfb":
self.ramp_seek_kpi = self.rolling_ttfb
else:
log.debug(f"check_ramp_seek_kpi unknown kpi, setting to TPS")
self.ramp_seek_kpi = self.rolling_tps
@staticmethod
def return_bool_true(check_if, is_value):
if isinstance(check_if, bool):
return check_if
if isinstance(check_if, str) and check_if.lower() == is_value:
return True
return False
def control_test(self):
"""Main test control
Runs test. Start by checking if test is in running state followed by checking
for successful connections.
First updates stats, checks the phase test is in based on elapsed time, then updates
rolloing averages.
:return: True if test completed successfully
"""
# exit control_test if test does not go into running state
if not self.wait_for_running_status():
log.info(f"control_test end, wait_for_running_status False")
return False
# exit control_test if test does not go into running state
if not self.wait_for_running_sub_status():
log.info(f"control_test end, wait_for_running_sub_status False")
return False
# exit control_test if test does not have successful transactions
if not self.wait_for_test_activity():
self.stop_wait_for_finished_status()
log.info(f"control_test end, wait_for_test_activity False")
return False
self.check_ramp_seek_kpi()
self.check_kpi()
self.rolling_count_since_goal_seek.reset()
# self.countdown(12)
# test control loop - runs until self.stop is set to True
while not self.stop:
self.update_run_stats()
self.update_phase()
self.check_stop_conditions()
self.update_rolling_averages()
# print stats if test is running
if self.sub_status is None:
self.print_test_stats()
self.save_results()
if self.in_ramp_seek and not self.ramp_seek_complete:
log.info(f"control_test going to ramp_seek")
self.control_test_ramp_seek(self.ramp_seek_kpi, self.in_ramp_seek_value)
if self.in_goal_seek and self.ramp_seek_complete:
log.info(f"control_test going to goal_seek")
self.control_test_goal_seek_kpi(self.kpi_1, self.kpi_2,
self.in_kpi_and_or)
print(f"")
time.sleep(4)
# if goal_seek is yes enter sustained steady phase
if self.in_goal_seek and self.in_sustain_period > 0:
self.sustain_test()
# stop test and wait for finished status
if self.stop_wait_for_finished_status():
self.time_to_stop = self.timer - self.time_to_stop_start
self.save_results()
return True
return False
def check_stop_conditions(self):
log.debug(f"in check_stop_conditions method")
# stop test if time_remaining returned from controller == 0
if self.time_remaining == 0:
self.phase = "timeout"
log.info(f"control_test end, time_remaining == 0")
self.stop = True
# stop goal seeking test if time remaining is less than 30s
if self.time_remaining < 30 and self.in_goal_seek:
self.phase = "timeout"
log.info(f"control_test end goal_seek, time_remaining < 30")
self.stop = True
elif self.time_remaining < 30 and self.in_ramp_seek:
self.phase = "timeout"
log.info(f"control_test end ramp_seek, time_remaining < 30")
self.stop = True
if self.phase == "finished":
log.info(f"control_test end, over duration time > phase: finished")
self.stop = True
def control_test_ramp_seek(self, ramp_kpi, ramp_to_value):
"""
Increases load to a configured tps, cps, conns or bandwidth level.
:return: True if no statements failed and there were no exceptions.
False otherwise.
"""
ramp_seek_count = 1
#log.debug("Inside the RunTest/ramp_to_seek method.")
log.info(
f"Inside the RunTest/ramp_to_seek method.\n"
f"rolling_count_list stable: {self.rolling_count_since_goal_seek.stable} "
f"list: {self.rolling_count_since_goal_seek.list} "
f"\nramp_to_value: {ramp_to_value} ramp_kpi current: {ramp_kpi.current_value}"
f" increase: {ramp_kpi.increase_avg}"
f"\n current load: {self.c_current_load}"
f" desired_load: {self.c_desired_load}"
)
if self.phase is not "rampseek":
log.info(f"phase {self.phase} is not 'rampseek', "
f"returning from contol_test_ramp_seek")
return
if not self.rolling_count_since_goal_seek.stable:
log.info(f"count since goal seek is not stable. "
f"count list: {self.rolling_count_since_goal_seek.list}"
f"returning from control_test_ramp_seek")
return
if self.max_load_reached:
log.info(f"control_test_ramp_seek end, max_load_reached")
self.stop = True
return
# check if kpi avg is under set avg - if not, stop loop
if ramp_to_value < ramp_kpi.current_value:
log.info(f"ramp_to_value {ramp_to_value} < ramp_kpi.current_value {ramp_kpi.current_value}"
f"completed ramp_seek")
self.ramp_seek_complete = True
self.in_capacity_adjust = 1
return
if self.ramp_seek(ramp_kpi, ramp_to_value):
# reset rolling count > no load increase until
# at least the window size interval.
# allows stats to stabilize after an increase
self.rolling_count_since_goal_seek.reset()
else:
log.info(f"control_test_ramp_seek end, ramp_seek False")
self.ramp_seek_complete = True
self.in_capacity_adjust = 1
return
if (ramp_kpi.current_value / ramp_to_value) > 0.95:
log.info(
f"ramp_kpi.current_value {ramp_kpi.current_value} / "
f"ramp_to_value {ramp_to_value} > 0.95 "
f"increasing ramp_seek_count + 1")
ramp_seek_count = ramp_seek_count + 1
if ramp_seek_count == self.in_ramp_step:
log.info(f"ramp_seek_complete early")
self.ramp_seek_complete = True
self.in_capacity_adjust = 1
return
return
def control_test_goal_seek_kpi(self, kpi_1,
kpi_2, kpis_and_bool):
log.info(
f"rolling_count_list stable: {self.rolling_count_since_goal_seek.stable} "
f"list: {self.rolling_count_since_goal_seek.list} "
f"\nKpi1 stable: {kpi_1.stable} list: {kpi_1.list}"
f"\nKpi2 stable: {kpi_2.stable} list: {kpi_2.list}"
)
if self.phase is not "goalseek":
log.info(f"phase {self.phase} is not 'goalseek', "
f"returning from contol_test_goal_seek")
return
if not self.rolling_count_since_goal_seek.stable:
log.info(f"count since goal seek is not stable. "
f"count list: {self.rolling_count_since_goal_seek.list}")
return
if self.max_load_reached:
log.info(f"control_test end, max_load_reached")
self.stop = True
return
if kpis_and_bool:
if kpi_1.stable and kpi_2.stable:
goal_seek = True
else:
goal_seek = False
else:
if kpi_1.stable or kpi_2.stable:
goal_seek = True
else:
goal_seek = False
if goal_seek:
if self.goal_seek():
# reset rolling count > no load increase until
# at least the window size interval.
# allows stats to stabilize after an increase
self.rolling_count_since_goal_seek.reset()
else:
log.info(f"control_test end, goal_seek False")
self.stop = True
def sustain_test(self):
self.phase = "steady"
while self.in_sustain_period > 0:
self.timer = int(round(time.time() - self.start_time))
sustain_period_loop_time_start = time.time()
self.update_run_stats()
if self.time_remaining < 30 and self.in_goal_seek:
self.phase = "timeout"
self.in_sustain_period = 0
log.info(f"sustain_test end, time_remaining < 30")
# self.update_averages()
print(f"sustain period time left: {int(self.in_sustain_period)}")
# print stats if test is running
if self.sub_status is None:
self.print_test_stats()
self.save_results()
time.sleep(4)
self.in_sustain_period = self.in_sustain_period - (
time.time() - sustain_period_loop_time_start
)
self.phase = "stopping"
# self.stop_wait_for_finished_status()
return True
def save_results(self):
csv_list = [
self.in_name,
self.time_elapsed,
self.phase,
self.c_current_load,
self.c_desired_load,
self.rolling_count_since_goal_seek.stable,
self.c_http_successful_txns_sec,
self.rolling_tps.stable,
self.rolling_tps.increase_avg,
self.c_http_successful_txns,
self.c_http_unsuccessful_txns,
self.c_http_aborted_txns,
self.c_transaction_error_percentage,
self.c_tcp_established_conn_rate,
self.rolling_cps.stable,
self.rolling_cps.increase_avg,
self.c_tcp_established_conns,
self.rolling_conns.stable,
self.rolling_conns.increase_avg,
self.c_tcp_avg_tt_synack,
self.c_tcp_avg_ttfb,
self.rolling_ttfb.stable,
self.rolling_ttfb.increase_avg,
self.c_url_avg_response_time,
self.c_tcp_cumulative_established_conns,
self.c_tcp_cumulative_attempted_conns,
self.c_total_bandwidth,
self.rolling_bw.stable,
self.rolling_bw.increase_avg,
self.c_rx_bandwidth,
self.c_tx_bandwidth,
self.c_rx_packet_rate,
self.c_tx_packet_rate,
self.s_tcp_closed,
self.s_tcp_closed_reset,
self.s_tcp_closed_error,
self.c_simusers_alive,
self.c_simusers_animating,
self.c_simusers_blocking,
self.c_simusers_sleeping,
self.c_loadspec_avg_cpu,
self.c_memory_percent_used,
self.c_memory_packetmem_used,
self.c_memory_rcv_queue_length,
self.s_memory_avg_cpu,
self.s_memory_percent_used,
self.s_memory_packetmem_used,
self.s_memory_rcv_queue_length,
self.type_v1,
self.type_v2,
self.in_load_type,
self.test_id,
self.id,
self.time_to_run,
self.time_to_start,
self.time_to_activity,
self.time_to_stop,
script_version,
self.report_link,
]
self.result_file.append_file(csv_list)
class DetailedCsvReport:
def __init__(self, report_location):
log.debug("Initializing detailed csv result files.")
self.time_stamp = time.strftime("%Y%m%d-%H%M")
self.report_csv_file = report_location / f"{self.time_stamp}_Detailed.csv"
self.columns = [
"test_name",
"seconds",
"state",
"current_load",
"desired_load",
"seek_ready",
"tps",
"tps_stable",
"tps_delta",
"successful_txn",
"unsuccessful_txn",
"aborted_txn",
"txn_error_rate",
"cps",
"cps_stable",
"cps_delta",
"open_conns",
"conns_stable",
"conns_delta",
"tcp_avg_tt_synack",
"tcp_avg_ttfb",
"ttfb_stable",
"ttfb_delta",
"url_response_time",
"total_tcp_established",
"total_tcp_attempted",
"total_bandwidth",
"bw_stable",
"bw_delta",
"rx_bandwidth",
"tx_bandwidth",
"rx_packet_rate",
"tx_packet_rate",
"tcp_closed",
"tcp_reset",
"tcp_error",
"simusers_alive",
"simusers_animating",
"simusers_blocking",
"simusers_sleeping",
"client_cpu",
"client_mem",
"client_pkt_mem",
"client_rcv_queue",
"server_cpu",
"server_mem",
"server_pkt_mem",
"server_rcv_queue",
"test_type_v1",
"test_type_v2",
"load_type",
"test_id",
"run_id",
"t_run",
"t_start",
"t_tx",
"t_stop",
"version",
"report",
]
def append_columns(self):
"""
Appends the column headers to the detailed report file.
:return: no specific return value.
"""
try:
csv_header = ",".join(map(str, self.columns)) + "\n"
with open(self.report_csv_file, "a") as f:
f.write(csv_header)
except Exception as detailed_exception:
log.error(
f"Exception occurred writing to the detailed report file: \n<{detailed_exception}>\n"
)
log.debug(
f"Successfully appended columns to the detailed report file: {self.report_csv_file}."
)
def append_file(self, csv_list):
"""
Appends the detailed report csv file with csv_line.
:param csv_list: items to be appended as line to the file.
:return: no specific return value.
"""
try:
csv_line = ",".join(map(str, csv_list)) + "\n"
with open(self.report_csv_file, "a") as f:
f.write(csv_line)
except Exception as detailed_exception:
log.error(
f"Exception occurred writing to the detailed report file: \n<{detailed_exception}>\n"
)
class Report:
def __init__(self, report_csv_file, column_order):
self.report_csv_file = report_csv_file
self.col_order = column_order
self.df_base = pd.read_csv(self.report_csv_file)
self.df_steady = self.df_base[self.df_base.state == "steady"].copy()
self.unique_tests = self.df_base["test_name"].unique().tolist()
self.results = []
self.process_results()
self.format_results()
self.df_results = | pd.DataFrame(self.results) | pandas.DataFrame |
'''
This is an example of cross tabulation tables created from clustering.
Inspired from the "Unsupervised Learning" course on Datacamp.com
Author: <NAME>
'''
# Import pandas
import pandas as pd
# Fit the pipeline to samples (samples is an array)
pipeline.fit(samples)
# Calculate the cluster labels: labels
labels = pipeline.predict(samples)
# Create a DataFrame with labels and species as columns: df
df = | pd.DataFrame({'labels':labels, 'species':species}) | pandas.DataFrame |
from __future__ import print_function
import logging
import pandas as pd
import numpy as np
import scipy.stats as stats
from matplotlib.backends.backend_pdf import PdfPages
import os.path
from .storemanager import StoreManager
from .condition import Condition
from .constants import WILD_TYPE_VARIANT
from .sfmap import sfmap_plot
from .dataframe import singleton_dataframe
from .random_effects import rml_estimator
class Experiment(StoreManager):
"""
Class for a coordinating multiple :py:class:`~.selection.Selection`
objects. Creating an
:py:class:`~experiment.Experiment` requires a valid *config* object,
usually from a ``.json`` configuration file.
"""
store_suffix = "exp"
treeview_class_name = "Experiment"
def __init__(self):
StoreManager.__init__(self)
self.conditions = list()
self._wt = None
self.logger = logging.getLogger("{}.{}".format(__name__, self.__class__))
@property
def wt(self):
if self.has_wt_sequence():
if self._wt is None:
self._wt = self.selection_list()[0].wt.duplicate(self.name)
return self._wt
else:
if self._wt is not None:
raise ValueError(
"Experiment should not contain wild type "
"sequence [{}]".format(self.name)
)
else:
return None
def configure(self, cfg, configure_children=True):
"""
Set up the :py:class:`~experiment.Experiment` using the *cfg* object,
usually from a ``.json`` configuration file.
"""
StoreManager.configure(self, cfg)
self.logger = logging.getLogger(
"{}.{} - {}".format(__name__, self.__class__.__name__, self.name)
)
if configure_children:
if "conditions" not in cfg:
raise KeyError(
"Missing required config value {} [{}]"
"".format("conditions", self.name)
)
for cnd_cfg in cfg["conditions"]:
cnd = Condition()
cnd.configure(cnd_cfg)
self.add_child(cnd)
selection_names = [x.name for x in self.selection_list()]
if len(set(selection_names)) != len(selection_names):
raise ValueError("Non-unique selection names [{}]" "".format(self.name))
def serialize(self):
"""
Format this object (and its children) as a config object suitable for
dumping to a config file.
"""
cfg = StoreManager.serialize(self)
cfg["conditions"] = [child.serialize() for child in self.children]
return cfg
def _children(self):
"""
Method bound to the ``children`` property. Returns a list of all
:py:class:`~condition.Condition` objects belonging to this object,
sorted by name.
"""
return sorted(self.conditions, key=lambda x: x.name)
def add_child(self, child):
"""
Add a selection.
"""
if child.name in self.child_names():
raise ValueError(
"Non-unique condition name '{}' [{}]" "".format(child.name, self.name)
)
child.parent = self
self.conditions.append(child)
def remove_child_id(self, tree_id):
"""
Remove the reference to a :py:class:`~condition.Condition` with
Treeview id *tree_id*.
"""
self.conditions = [x for x in self.conditions if x.treeview_id != tree_id]
def selection_list(self):
"""
Return the :py:class:`~selection.Selection` objects as a list.
"""
selections = list()
for cnd in self.children:
selections.extend(cnd.children)
return selections
def validate(self):
"""
Calls validate on all child Conditions. Also checks the wild type
sequence status.
"""
# check the wild type sequences
if self.has_wt_sequence():
for child in self.selection_list()[1:]:
if self.selection_list()[0].wt != child.wt:
self.logger.warning("Inconsistent wild type sequences")
break
for child in self.children:
child.validate()
def is_coding(self):
"""
Return ``True`` if the all :py:class:`~selection.Selection` in the
:py:class:`~experiment.Experiment` count protein-coding variants, else
``False``.
"""
return all(x.is_coding() for x in self.selection_list())
def has_wt_sequence(self):
"""
Return ``True`` if the all :py:class:`~selection.Selection` in the
:py:class:`~experiment.Experiment` have a wild type sequence, else
``False``.
"""
return all(x.has_wt_sequence() for x in self.selection_list())
def calculate(self):
"""
Calculate scores for all :py:class:`~selection.Selection` objects.
"""
if len(self.labels) == 0:
raise ValueError(
"No data present across all conditions [{}]" "".format(self.name)
)
for s in self.selection_list():
s.calculate()
self.combine_barcode_maps()
for label in self.labels:
self.calc_counts(label)
if self.scoring_method != "counts":
self.calc_shared_full(label)
self.calc_shared(label)
self.calc_scores(label)
if label != "barcodes":
self.calc_pvalues_wt(label)
def combine_barcode_maps(self):
"""
Combine all barcode maps for :py:class:`~selection.Selection` objects
into a single data frame and store it in ``'/main/barcodemap'``.
If multiple variants or IDs map to the same barcode, only the first one
will be present in the barcode map table.
The ``'/main/barcodemap'`` table is not created if no
:py:class:`~selection.Selection` has barcode map information.
"""
if self.check_store("/main/barcodemap"):
return
bcm = None
for sel in self.selection_list():
if "/main/barcodemap" in sel.store.keys():
if bcm is None:
bcm = sel.store["/main/barcodemap"]
else:
bcm = bcm.join(
sel.store["/main/barcodemap"], rsuffix=".drop", how="outer"
)
new = bcm.loc[pd.isnull(bcm)["value"]]
bcm.loc[new.index, "value"] = new["value.drop"]
bcm.drop("value.drop", axis="columns", inplace=True)
if bcm is not None:
bcm.sort_values("value", inplace=True)
self.store.put(
"/main/barcodemap", bcm, format="table", data_columns=bcm.columns
)
def calc_counts(self, label):
"""
Create a data frame of all counts in this Experiment. This data frame
is not used for any calculations, but is provided to facilitate
exploration of the data set.
"""
if self.check_store("/main/{}/counts".format(label)):
return
# create columns multi-index
# has to be lex-sorted for multi-slicing to work
self.logger.info("Creating column multi-index for counts ({})".format(label))
conditions_index = list()
selections_index = list()
values_index = list()
for cnd in self.children:
for sel in cnd.children:
conditions_index.extend([cnd.name] * len(sel.timepoints))
selections_index.extend([sel.name] * len(sel.timepoints))
values_index.extend(["c_{}".format(x) for x in sorted(sel.timepoints)])
columns = pd.MultiIndex.from_tuples(
zip(conditions_index, selections_index, values_index),
names=["condition", "selection", "timepoint"],
)
# create union index
self.logger.info("Creating row index for counts ({})".format(label))
combined = None
first = True
for s in self.selection_list():
if first:
combined = s.store.select(
"/main/{}/counts_unfiltered" "".format(label), "columns='index'"
).index
first = False
else:
combined = combined.join(
s.store.select(
"/main/{}/counts_unfiltered".format(label), "columns='index'"
).index,
how="outer",
)
# create and fill the data frames
self.logger.info(
"Populating Experiment data frame with counts ({})".format(label)
)
data = pd.DataFrame(index=combined, columns=columns)
for cnd in self.children:
for sel in cnd.children:
sel_data = sel.store.select(
"/main/{}/counts_unfiltered" "".format(label)
)
for tp in sel.timepoints:
data.loc[:][cnd.name, sel.name, "c_{}".format(tp)] = sel_data[
"c_{}".format(tp)
]
self.store.put("/main/{}/counts".format(label), data, format="table")
def calc_shared_full(self, label):
"""
Use joins to create a data frame containing all scores across all
Selections in the Experiment.
"""
if self.check_store("/main/{}/scores_shared_full".format(label)):
return
# create columns multi-index
# has to be lex-sorted for multi-slicing to work
self.logger.info("Creating column multi-index for scores ({})")
conditions_index = list()
selections_index = list()
values_index = list()
if self.scoring_method == "simple":
values_list = ["score"]
else:
values_list = ["score", "SE"]
for cnd in self.children:
for sel in cnd.children:
conditions_index.extend([cnd.name] * len(values_list))
selections_index.extend([sel.name] * len(values_list))
values_index.extend(sorted(values_list))
columns = pd.MultiIndex.from_tuples(
zip(conditions_index, selections_index, values_index),
names=["condition", "selection", "value"],
)
# create union index
self.logger.info("Creating row index for scores ({})".format(label))
combined = None
first = True
for s in self.selection_list():
if first:
combined = s.store.select(
"/main/{}/scores".format(label), "columns='index'"
).index
first = False
else:
combined = combined.join(
s.store.select(
"/main/{}/scores".format(label), "columns='index'"
).index,
how="outer",
)
# create and fill the data frames
self.logger.info(
"Populating Experiment data frame with scores ({})".format(label)
)
data = pd.DataFrame(index=combined, columns=columns)
for cnd in self.children:
for sel in cnd.children:
sel_data = sel.store.select("/main/{}/scores".format(label))
for v in values_list:
data.loc[:, (cnd.name, sel.name, v)] = sel_data[v]
self.store.put(
"/main/{}/scores_shared_full".format(label), data, format="table"
)
def calc_shared(self, label):
"""
Get the subset of scores that are shared across all Selections in each
Condition.
"""
if self.check_store("/main/{}/scores_shared".format(label)):
return
idx = pd.IndexSlice
self.logger.info(
"Identifying subset shared across all Selections ({})".format(label)
)
data = self.store.select("/main/{}/scores_shared_full".format(label))
# identify variants found in all selections in at least one condition
complete = np.full(len(data.index), False, dtype=bool)
for cnd in data.columns.levels[0]:
complete = np.logical_or(
complete, data.loc[:, idx[cnd, :, :]].notnull().all(axis="columns")
)
data = data.loc[complete]
self.store.put("/main/{}/scores_shared".format(label), data, format="table")
def calc_scores(self, label):
"""
Combine the scores and standard errors within each condition.
"""
if self.check_store("/main/{}/scores".format(label)):
return
self.logger.info("Calculating per-condition scores ({})".format(label))
# set up new data frame
shared_index = self.store.select(
"/main/{}/scores_shared" "".format(label), columns="index"
).index
columns = pd.MultiIndex.from_product(
[sorted(self.child_names()), sorted(["score", "SE", "epsilon"])],
names=["condition", "value"],
)
data = | pd.DataFrame(np.nan, index=shared_index, columns=columns) | pandas.DataFrame |
import time
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import savefig
from sklearn import preprocessing
from sklearn.model_selection import KFold
from sklearn.naive_bayes import MultinomialNB
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from operator import itemgetter
import math
import csv
attributeType = ["qualitative", "numerical", "qualitative", "qualitative", "numerical", "qualitative", "qualitative",
"numerical", "qualitative", "qualitative", "numerical", "qualitative", "numerical", "qualitative",
"qualitative", "numerical", "qualitative", "numerical", "qualitative", "qualitative"]
def performLabelEncoding(dataframe):
le = preprocessing.LabelEncoder()
i = 0
# For every column
for column in dataframe:
# Excluding the last two
if i == 20:
break
# If attribute is qualitative
if attributeType[i] == "qualitative":
# Label encode it
dataframe[column] = le.fit_transform(dataframe[column])
i += 1
return dataframe
def createPlots(dataframe):
good = dataframe[dataframe["Label"] == 1]
bad = dataframe[dataframe["Label"] == 2]
i = 0
# For every column
for column in dataframe:
# Excluding the last two
if i == 20:
break
# If attribute is qualitative
if attributeType[i] == "qualitative":
plt.title(column + " Good")
good[column].value_counts().plot(kind='bar')
name = "output/Attribute" + str(i + 1) + "_" + "good.png"
savefig(name)
plt.figure()
plt.title(column + " Bad")
bad[column].value_counts().plot(kind='bar')
name = "output/Attribute" + str(i + 1) + "_" + "bad.png"
savefig(name)
if i < 19:
plt.figure()
# If attribute is numerical
elif attributeType[i] == "numerical":
plt.title(column + " Good")
good.boxplot(column)
name = "output/Attribute" + str(i + 1) + "_" + "good.png"
savefig(name)
plt.figure()
plt.title(column + " Bad")
bad.boxplot(column)
name = "output/Attribute" + str(i + 1) + "_" + "bad.png"
savefig(name)
if i < 19:
plt.figure()
i += 1
def classifiers(dataframe):
kf = KFold(n_splits=10)
attributeColumns = dataframe.iloc[:, 0:20]
svm_accuracy = 0
# Run SVM
print("Running SVM...(this might take some time)")
for train_index, test_index in kf.split(dataframe):
X_train_counts = np.array(attributeColumns)[train_index]
X_test_counts = np.array(attributeColumns)[test_index]
clf_cv = svm.SVC(gamma=1.0, C=1.0, kernel="rbf").fit(X_train_counts,
np.array(dataframe["Label"])[train_index])
yPred = clf_cv.predict(X_test_counts)
svm_accuracy += accuracy_score(np.array(dataframe["Label"])[test_index], yPred)
svm_accuracy /= 10
print("SVM Accuracy: ", svm_accuracy)
rf_accuracy = 0
# Run Random Forests
print("Running Random Forest...")
for train_index, test_index in kf.split(dataframe):
X_train_counts = np.array(attributeColumns)[train_index]
X_test_counts = np.array(attributeColumns)[test_index]
clf_cv = RandomForestClassifier().fit(X_train_counts, np.array(dataframe["Label"])[train_index])
yPred = clf_cv.predict(X_test_counts)
rf_accuracy += accuracy_score(np.array(dataframe["Label"])[test_index], yPred)
rf_accuracy /= 10
print("Random Forest Accuracy: ", rf_accuracy)
nb_accuracy = 0
# Run Naive Bayes
print("Running Naive Bayes...")
for train_index, test_index in kf.split(dataframe):
X_train_counts = np.array(attributeColumns)[train_index]
X_test_counts = np.array(attributeColumns)[test_index]
clf_cv = MultinomialNB().fit(X_train_counts, np.array(dataframe["Label"])[train_index])
yPred = clf_cv.predict(X_test_counts)
nb_accuracy += accuracy_score(np.array(dataframe["Label"])[test_index], yPred)
nb_accuracy /= 10
print("Naive Bayes Accuracy: ", nb_accuracy)
# Output to a .csv file
out_file = open("output/EvaluationMetric_10fold.csv", 'w')
wr = csv.writer(out_file, delimiter="\t")
firstLine = ["Statistic Measure", "Naive Bayes", "Random Forest", "SVM"]
wr.writerow(firstLine)
secondLine = ["Accuracy", nb_accuracy, rf_accuracy, svm_accuracy]
wr.writerow(secondLine)
def predictions(dataframe, test_dataframe):
test_dataframe = performLabelEncoding(test_dataframe)
# Convert to numpy array only the attributes (exclude label & id)
X_train = np.array(dataframe.iloc[:, 0:20])
X_test = np.array(test_dataframe.iloc[:, 0:20])
clf_cv = RandomForestClassifier().fit(X_train, np.array(dataframe["Label"]))
predicted = clf_cv.predict(X_test)
# Output to a .csv file
out_file = open("output/testSet_Predictions.csv", 'w')
wr = csv.writer(out_file, delimiter="\t")
firstLine = ["Client_ID", "Predicted_Label"]
# Write the first line
wr.writerow(firstLine)
# For every prediction
for i in range(len(test_dataframe)):
# If its good
if predicted[i] == 1:
line = [int(test_dataframe["Id"][i]), "Good"]
# If its bad
else:
line = [int(test_dataframe["Id"][i]), "Bad"]
# Write the line
wr.writerow(line)
def entropy(dataframe, attribute):
attributeFrequency = {}
entropy = 0.0
# For every row of the dataframe, count the frequencies per value
for i in range(len(dataframe)):
value = dataframe[attribute][i]
if value in attributeFrequency:
attributeFrequency[value] += 1.0
else:
attributeFrequency[value] = 1.0
# For each value apply the entropy formula
for frequency in attributeFrequency.values():
entropy += (-frequency / len(dataframe)) * math.log(frequency / len(dataframe), 2)
return entropy
def informationGain(dataframe, attribute):
attributeFrequency = {}
subsetEntropy = 0.0
# For every row of the dataframe, count the frequencies per value
for i in range(len(dataframe)):
value = dataframe[attribute][i]
if value in attributeFrequency:
attributeFrequency[value] += 1.0
else:
attributeFrequency[value] = 1.0
# For each value apply the information gain formula
for keyValue in attributeFrequency.keys():
weight = attributeFrequency[keyValue] / sum(attributeFrequency.values())
dataframeSubset = pd.DataFrame()
# Create a subset of the dataframe
for i in range(len(dataframe)):
value = dataframe[attribute][i]
if value == keyValue:
dataframeSubset.append(dataframe.iloc[i, 0:20])
subsetEntropy += weight * entropy(dataframeSubset, attribute)
return entropy(dataframe, attribute) - subsetEntropy
def featureSelection(dataframe, encodedDataframe):
print("Calculating information gain for every attribute...", end=' ')
attributeInfoGain = []
# For every column
i = 0
for column in dataframe:
# Excluding the last two
if i == 20:
break
i += 1
ig = informationGain(dataframe, column)
attributeInfoGain.append((column, ig))
accuracyArray = []
attributeInfoGain.sort(key=itemgetter(1))
print("Done!")
for t in attributeInfoGain:
print(t[0], "%.2f" % t[1])
attributeColumns = encodedDataframe.iloc[:, 0:20]
for attribute, infoGain in attributeInfoGain:
kf = KFold(n_splits=10)
rf_accuracy = 0
# Run Random Forests
print("Running Random Forest with", attributeColumns.shape[1], "features...", end=' ')
for train_index, test_index in kf.split(attributeColumns):
X_train_counts = np.array(attributeColumns)[train_index]
X_test_counts = np.array(attributeColumns)[test_index]
clf_cv = RandomForestClassifier().fit(X_train_counts, np.array(encodedDataframe["Label"])[train_index])
yPred = clf_cv.predict(X_test_counts)
rf_accuracy += accuracy_score(np.array(encodedDataframe["Label"])[test_index], yPred)
rf_accuracy /= 10
print("Accuracy: ", rf_accuracy)
accuracyArray.append(rf_accuracy)
attributeColumns = attributeColumns.drop(attribute, axis=1)
print(attribute, "with information gain %.2f" % infoGain, "removed\n")
sh = attributeColumns.shape
if sh[1] == 0:
break
x_axis = [i for i in range(1, 21)]
x_axis_reversed = [i for i in reversed(range(1, 21))]
t = []
for i in range(0, 19):
t.append((x_axis, accuracyArray))
plt.figure()
plt.plot(x_axis, accuracyArray)
plt.xticks(x_axis, x_axis_reversed)
plt.ylabel('Accuracy')
plt.xlabel('Number of features')
savefig("output/attribute_removal_accuracy_penalty.png")
def createBins(dataframe):
i = 0
# For every column
for column in dataframe:
# Excluding the last two
if i == 20:
break
# If attribute is numerical
if attributeType[i] == "numerical":
# Create bins
dataframe[column] = pd.cut(dataframe[column], bins=5, labels=False)
i += 1
return dataframe
if __name__ == "__main__":
os.makedirs(os.path.dirname("output/"), exist_ok=True)
start_time = time.time()
dataframe = | pd.read_csv('./datasets/train.tsv', sep='\t') | pandas.read_csv |
import pandas as pd
import time
import numpy as np
# Scrape ESPN Free Agent Tracker for free agent contracts
years = [2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021]
espn_salary_data = pd.DataFrame()
espn_urls = []
for y in years:
url = "http://www.espn.com/mlb/freeagents/_/year/" + str(y)
espn_urls.append(url)
for url in espn_urls:
salary_url = pd.Series(url)
espn_url = salary_url.str.split('/|-', expand=True)
espn_url.columns = ['protocol', 'blank', 'path_one', 'path_two', 'path_three', '_', 'fix', 'year']
espn_salary_table = pd.read_html(url)[0]
new_header = espn_salary_table.iloc[1] # grab the first row for the header
espn_salary_table = espn_salary_table[2:] # take the data less the header row
espn_salary_table.columns = new_header # set the header row as the df header
espn_salary_table = espn_salary_table.rename(columns={espn_salary_table.columns[4]: "PREV TEAM"})
espn_salary_table = espn_salary_table[espn_salary_table['PLAYER'] != "PLAYER"].reset_index(drop=True)
espn_salary_table['Season'] = espn_url['year'][0]
espn_salary_data = espn_salary_data.append(espn_salary_table, ignore_index=True)
time.sleep(5)
espn_salary_data['Season'] = espn_salary_data['Season'].astype('int') + 1
espn_salary_data['YRS'] = espn_salary_data['YRS'].fillna(0)
espn_salary_data['YRS'] = espn_salary_data['YRS'].astype('int')
espn_data = espn_salary_data[(espn_salary_data['Season']) < 2022]
# Scrape MLB Trade Rumors FA Tracker for option and qualifying offer data
tr_salary_data = pd.DataFrame()
mlb_tr_urls = []
for y in years:
url = "https://www.mlbtraderumors.com/" + str(y) + "-mlb-free-agent-tracker/"
mlb_tr_urls.append(url)
for url in mlb_tr_urls:
tr_salary_url = pd.Series(url)
tr_url = tr_salary_url.str.split('/|-', expand=True)
tr_url.columns = ['protocol', 'blank', 'path_one', 'year', 'path_two', 'path_three',
'path_four', 'path_five', 'blank_two']
tr_salary_table = pd.read_html(url)[1]
tr_salary_table = tr_salary_table.rename(columns={"Qual?": "Qual"})
tr_salary_table['Season'] = tr_url['year'][0]
tr_salary_data = tr_salary_data.append(tr_salary_table, ignore_index=True)
time.sleep(5)
mlb_tr_data = tr_salary_data
# Reformat dtypes
mlb_tr_data['Season'] = mlb_tr_data['Season'].astype('int')
mlb_tr_data['Years'] = mlb_tr_data['Years'].fillna(0)
mlb_tr_data['Years'] = mlb_tr_data['Years'].astype('int')
# Fix column names
espn_data.columns = ['Player', 'Position', 'Age', 'Status', 'Prev Team', 'Team', 'Years', 'Rank', 'Salary', 'Season']
mlb_tr_data.columns = ['Player', 'Position', 'Team', 'Qual', 'Years', 'Amount', 'AAV', 'Option', 'Season']
# Select certain columns for ease of view
mlb_tr_data = mlb_tr_data[['Player', 'Team', 'Qual', 'Years', 'Amount', 'Option', 'Season']]
# Merge espn and tr data on player name, team, and season year
salary_data = pd.merge(espn_data, mlb_tr_data, how='left',
left_on=['Player', 'Team', 'Season'], right_on=['Player', 'Team', 'Season'])
# salary_data['Qual'] = salary_data['Qual'].fillna(0)
# print(salary_data['Qual'].unique())
def salary_formatting(df):
# Take max years #, determined from vesting or misreporting on some outlets
df['Years_y'] = df['Years_y'].fillna(0)
df['Years'] = np.where(df['Years_x'] >= df['Years_y'],
df['Years_x'],
df['Years_y'])
del df['Years_x'], df['Years_y']
# Remove rows where years == 0 aka player unsigned
df = df[(df['Years'] > 0) | (df['Season'] == max(df['Season']))].reset_index(drop=True)
# Begin reformatting salary data for analysis, replace $ sign
df["Salary"] = df["Salary"].apply(lambda x: x.replace("$", ""))
# replace NA from espn with mlb_tr data
df['Salary'].fillna(df['Amount'], inplace=True)
# replace -- from ESPN with mlb_tr data
df['Salary'] = np.where(df['Salary'] == "--",
df['Amount'],
df['Salary'])
# Secondary column used to reformat salaries such as ($1.5MM)
df['Value'] = 0
df.loc[df['Amount'].str.contains('$', na=False), 'Value'] = 1
# Refine minor league definition
df['Salary'] = np.where((df['Value'] == 1) & (df['Salary'] == "Minor Lg"),
df['Amount'],
df['Salary'])
df['fix_salary_format'] = 0
df.loc[df['Salary'].str.contains('MM', na=False), 'fix_salary_format'] = df['Salary']
df['fix_salary_format'] = df['fix_salary_format'].str.replace("MM", "")
df['fix_salary_format'] = df['fix_salary_format'].str.replace("$", "")
df['fix_salary_format'] = df['fix_salary_format'].fillna(0)
df['fix_salary_format'] = pd.to_numeric(df['fix_salary_format'])
df['fix_salary_format'] = np.where(df['fix_salary_format'] > 0,
df['fix_salary_format'] * 1000000,
df['fix_salary_format'])
df['Salary'] = np.where(df['fix_salary_format'] > 0,
df['fix_salary_format'],
df['Salary'])
df['Salary'] = np.where((df['Salary'] == "Minor Lg") | (df['Salary'] == "Minor"),
600000,
df['Salary'])
df['Salary'] = df['Salary'].str.replace(",", "")
df['Salary'] = df['Salary'].fillna(0)
# fix "K" values
df['fix_salary_format'] = 0
df.loc[df['Salary'].str.contains('K', na=False), 'fix_salary_format'] = df['Salary']
df['fix_salary_format'] = df['fix_salary_format'].astype('str')
df['fix_salary_format'] = df['fix_salary_format'].str.replace("K", "")
df['fix_salary_format'] = df['fix_salary_format'].str.replace("$", "")
df['fix_salary_format'] = | pd.to_numeric(df['fix_salary_format']) | pandas.to_numeric |
from pandas.core.common import notnull, isnull
import pandas.core.common as common
import numpy as np
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
assert not notnull(np.inf)
assert not notnull(-np.inf)
def test_isnull():
assert not isnull(1.)
assert isnull(None)
assert isnull(np.NaN)
assert isnull(np.inf)
assert isnull(-np.inf)
def test_any_none():
assert(common._any_none(1, 2, 3, None))
assert(not common._any_none(1, 2, 3, 4))
def test_all_not_none():
assert(common._all_not_none(1, 2, 3, 4))
assert(not common._all_not_none(1, 2, 3, None))
assert(not common._all_not_none(None, None, None, None))
def test_rands():
r = common.rands(10)
assert(len(r) == 10)
def test_adjoin():
data = [['a', 'b', 'c'],
['dd', 'ee', 'ff'],
['ggg', 'hhh', 'iii']]
expected = 'a dd ggg\nb ee hhh\nc ff iii'
adjoined = common.adjoin(2, *data)
assert(adjoined == expected)
def test_iterpairs():
data = [1, 2, 3, 4]
expected = [(1, 2),
(2, 3),
(3, 4)]
result = list(common.iterpairs(data))
assert(result == expected)
def test_indent():
s = 'a b c\nd e f'
result = common.indent(s, spaces=6)
assert(result == ' a b c\n d e f')
def test_banner():
ban = common.banner('hi')
assert(ban == ('%s\nhi\n%s' % ('=' * 80, '=' * 80)))
def test_map_indices_py():
data = [4, 3, 2, 1]
expected = {4 : 0, 3 : 1, 2 : 2, 1 : 3}
result = common.map_indices_py(data)
assert(result == expected)
def test_union():
a = [1, 2, 3]
b = [4, 5, 6]
union = sorted(common.union(a, b))
assert((a + b) == union)
def test_difference():
a = [1, 2, 3]
b = [1, 2, 3, 4, 5, 6]
inter = sorted(common.difference(b, a))
assert([4, 5, 6] == inter)
def test_intersection():
a = [1, 2, 3]
b = [1, 2, 3, 4, 5, 6]
inter = sorted( | common.intersection(a, b) | pandas.core.common.intersection |
# -*- coding: utf-8 -*-
"""
Scrape match ids from datdota for pro matches
# Tournaments
- TI4
"""
import os
import json
import time
from lxml import html
import requests
import pandas as pd
from sqlalchemy import create_engine
with open(os.path.expanduser('/Users/tom.augspurger/Dropbox/bin/api-keys.txt')) as f:
KEY = json.load(f)['steam']
def get_tournaments():
url = "http://www.datdota.com/events.php"
tree = html.parse(url)
root = tree.getroot()
tournaments = filter(lambda x: 'tournament' in x[2], root.iterlinks())
pairs = ((x[0].text, x[2].split('&')[0]) for x in tournaments)
base = "http://www.datdota.com/"
seen = set()
for name, q in pairs:
if q not in seen:
df = scrape_tournament(base + q)
if df is None:
continue
df['tournament_name'] = name
seen.update(q)
yield df
def scrape_tournament(url):
"""
url -> Maybe DataFrame
"""
tables = | pd.read_html(url, attrs={'class': 'dataTable'}) | pandas.read_html |
# https://www.youtube.com/watch?v=bvDkel5whUY
import pandas as pd
import numpy as np
import requests
#!pip install yfinance
import yfinance as yf
from datetime import date
import datetime as dt
import streamlit as st
#!pip install PyPortfolioOpt
from pypfopt.efficient_frontier import EfficientFrontier
from pypfopt import risk_models
from pypfopt import expected_returns
#pip install pulp
# Get the discret allocation of each stock
from pypfopt.discrete_allocation import DiscreteAllocation, get_latest_prices
#pip install cvxpy
#pip install cvxopt
st.title('Killer Stock Portfolio App')
st.markdown("""
This app retrieves the list of the **S&P 500** and **FTSE 100** from Wikipedia. Then gets the corresponding **stock closing price** , and generate a killer portfolio with fund allocation!
* **Python libraries:** base64, pandas, streamlit, numpy, matplotlib, seaborn, yfinance
* **Data source:** [Wikipedia](https://en.wikipedia.org/wiki/List_of_S%26P_500_companies).
""")
st.sidebar.header('User Input Features')
# Web scraping of S&P 500 data
#
@st.cache
def load_data():
url = 'https://en.wikipedia.org/wiki/List_of_S%26P_500_companies'
html = pd.read_html(url, header = 0)
df = html[0]
return df
@st.cache
def load_ftse100data():
url = 'https://en.wikipedia.org/wiki/FTSE_100_Index'
html = pd.read_html(url, header = 0)
df = html[3]
return df
# Download stock data from Yahoo Finance
#
@st.cache
def get_data(symbols):
symbols2 =[]
t = []
today = date.today()
# End date of the stock
d1 = today.strftime("%Y-%m-%d")
d = periods*30
d0 = date.today()-dt.timedelta(days=d)
# Start date of the stock
#d0 = '2010-5-31'
#print("d1 =", d1)
l = -1
# get all the data
tDf = pd.DataFrame()
for tickerSymbol in symbols:
#get data on this ticker
tickerData = yf.Ticker(tickerSymbol)
#print(tickerData)
#get the historical prices for this ticker
#tickerDf = tickerData.history(period='1d', start='2010-5-31', end='2021-8-31')
tickerDf = tickerData.history(period='1d', start= d0, end= d1)
#print(tickerDf.empty)
if not tickerDf.empty:
#tDf.append(tickerDf.Close)
#tDf.append(tickerDf['Close'].values)
if tDf.empty:
tDf = | pd.DataFrame(tickerDf.Close) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[19]:
import numpy as np
import matplotlib.pyplot as plt
import h5py
import scipy
import pandas as pd
import scipy.special as special
import random
import sys
# In[20]:
#cd D:\ThisSemester\CompNeuro\Homeworks\Hw3\HW3_Can_Kocagil\Assignment
question = input('Please select question number [1/2]')
# ### Question 1
# In[21]:
def Can_Kocagil_21602218_Hw3(question):
if question == '1' :
f = h5py.File('hw3_data2.mat','r')
X = np.array(f.get('Xn')).T
y = np.array(f.get('Yn')).flatten()
print(X.shape,y.shape)
# In[22]:
def random_seed(seed:int = 42) -> None :
""" Random seeding for reproducebility
Arguments:
- seed (int) : random state
Returns:
- None
"""
np.random.seed(seed)
random.seed(seed)
# In[23]:
class RidgeRegression(object):
"""
Ridge regression is a method of estimating the coefficients of multiple-regression models in
scenarios where independent variables are highly correlated.
"""
def __init__(self,Lambda:float=1):
"""
Constructer method for initilization of ridge regression model.
Arguments:
- Lambda (float): is the parameter which balances the amount
of emphasis given to minimizing RSS vs minimizing sum of square of coefficients
"""
self.Lambda = Lambda
def fit(self, X:np.ndarray, y:np.ndarray) -> None:
"""
Given the pair of X,y, fit the data, i.e., find parameter W such that sum of square error
is minimized.
Arguments:
- X (np.ndarray) : Regressor data
- X (np.ndarray) : Ground truths for regressors
Returns:
- None
"""
I = np.eye(X.shape[1])
self.W = np.linalg.inv(
X.T.dot(X) + self.Lambda * I
).dot(X.T).dot(y)
return self
def predict(self,X:np.ndarray) -> np.ndarray :
"""
Given the test data X, we predict the target variable.
Arguments:
- X (np.ndarray) : The independant variable (regressor)
Returns:
- Y_hat (np.ndarray) : Estimated value of y
"""
return X.dot(self.W)
def parameters(self) -> None:
"""
Returns the estimated parameter W of the Ridge Regression
"""
return self.W
def eval_r2(self,y_true:np.ndarray, y_pred:np.ndarray) -> np.float:
"""
Given the true dependant variable and estimated variable, computes proportion of
explained variance R^2 by square the Pearson correlation between true dependant
variable and estimated variabl
Arguments:
- y_true (np.ndarray) : true dependant variable
- y_pred (np.ndarray) : estimated variable
Returns:
- r_squared (np.float) : Proportion of explained variance
"""
_pearson = np.corrcoef(y_true,y_pred)
pearson = _pearson[1][0]
r_squared = np.square(pearson)
return r_squared
@staticmethod
def R2(y_true:np.ndarray,y_pred:np.ndarray) -> np.float:
r_squared = (1 - (sum((y_true - (y_pred))**2) / ((len(y_true) - 1) * np.var(y_true.T, ddof=1)))) * 100
return r_squared
def __str__(self):
model = RidgeRegression().__class__.__name__
model += f" with parameter \n"
model += f"{self.Lambda}"
return model
def __repr__(self):
model = RidgeRegression().__class__.__name__
model += f" with parameter \n"
model += f"{self.Lambda}"
return model
# In[24]:
# In[25]:
class K_fold(object):
"""
Cross-validation, sometimes called rotation estimation or out-of-sample testing,
is any of various similar model validation techniques for assessing how the results
of a statistical analysis will generalize to an independent data set
"""
def __init__(self,sample_size:int = y.shape[0], folds:int = 10):
"""
Constructer method for initializing the sample size and the number of folds
Arguments:
- sample_size (int) : How many samples are in the dataset
- folds (int) : the number of folds
"""
self.sample_size = sample_size
self.folds = folds
self.fold_size = int(sample_size / folds)
def split(self):
"""
Generator function for splitting data as validation (10%), testing (10%) and
training (80%) as K-fold cross validation based resampling
"""
for idx in range(self.folds):
_val_idx = idx * self.fold_size
_test_idx = (idx + 1) * self.fold_size
_train_idx = (idx + 2) * self.fold_size
val_idx = np.arange(_val_idx, _test_idx) % self.sample_size
test_idx = np.arange(_test_idx, _train_idx) % self.sample_size
train_idx = np.arange(_train_idx, self.sample_size + _val_idx) % self.sample_size
yield val_idx, test_idx, train_idx
# In[26]:
dict_inference = {
'test' : dict(),
'val' : dict()
}
phases = [
'train',
'val',
'test'
]
log_lambda_arr = np.logspace(
start = 0,
stop = 12,
num = 500,
base = 10
)
cv = K_fold(folds = 10)
for val_idx, test_idx, train_idx in cv.split():
X_list = [
X[train_idx],
X[val_idx],
X[test_idx]
]
y_list = [
y[train_idx],
y[val_idx],
y[test_idx]
]
for _lambda in log_lambda_arr:
for phase, X_phase, y_phase in zip(phases, X_list, y_list):
if phase == 'train':
model = RidgeRegression(_lambda)
model.fit(X_phase, y_phase)
else:
preds = model.predict(X_phase)
r2_score = model.eval_r2(y_phase, preds)
dict_inference[phase].setdefault(
_lambda, list()).append(r2_score)
inference_r2 = {
phase : {
_lambda : np.mean(r2_score) for _lambda, r2_score in dict_inference[phase].items()
}
for phase in ['val','test']
}
# In[27]:
best_r2 = 0
for _lambda, r_2 in inference_r2['val'].items():
if r_2 > best_r2:
best_r2 = r_2
best_lambda = _lambda
print(f'Best lambda parameter that maximizes the R^2 is : {best_lambda}')
print('Best R^2 along the testing :', inference_r2['test'][best_lambda])
print('Best R^2 along the validation :', inference_r2['val'][best_lambda])
# In[28]:
lists1 = sorted(inference_r2['val'].items())
x1, y1 = zip(*lists1)
lists2 = sorted(inference_r2['test'].items())
x2, y2 = zip(*lists2)
plt.figure(figsize = (10,5))
plt.plot(x2, y2, color='orange')
plt.plot(x1, y1, color='g')
plt.legend(['test', 'validation'])
plt.ylabel('$R^2$')
plt.xlabel('$\lambda$')
plt.title('$R^2$ versus $\lambda$')
plt.xscale('log')
plt.grid()
plt.show()
# In[29]:
random_seed(10)
bootstrap_iters = range(500)
sample_idx = np.arange(X.shape[0])
parameters = list()
for idx in bootstrap_iters:
bootstrap_idx = np.random.choice(sample_idx, size = 1000, replace = True)
y_bootstrap = y[bootstrap_idx]
X_bootstrap = X[bootstrap_idx]
ridge = RidgeRegression(Lambda = 0)
ridge.fit(X_bootstrap,y_bootstrap)
parameters.append(ridge.parameters())
w_bootstrap = np.array(parameters)
w_mean = np.mean(w_bootstrap, axis=0)
w_std = np.std(w_bootstrap, axis=0)
# In[30]:
plt.figure(figsize = (10,5))
plt.errorbar(np.arange(1, 101),
w_mean,
yerr= w_std,
ecolor='red',
elinewidth=1,
capsize=1)
plt.title('Ridge Model OLS Weights')
plt.xlabel('i')
plt.ylabel('$W_i$')
plt.show()
# In[31]:
two_sided = 2
p_values = special.ndtr(- w_mean / w_std) * two_sided
alpha_level = 0.05
significants = np.argwhere(p_values < alpha_level).flatten()
print(f' Index of the parameters that are significantly different than 0: \n {significants}')
# In[32]:
random_seed(10)
bootstrap_iters = range(500)
sample_idx = np.arange(X.shape[0])
parameters = list()
for idx in bootstrap_iters:
bootstrap_idx = np.random.choice(sample_idx, size = 1000, replace = True)
y_bootstrap = y[bootstrap_idx]
X_bootstrap = X[bootstrap_idx]
ridge = RidgeRegression(Lambda = best_lambda)
ridge.fit(X_bootstrap,y_bootstrap)
parameters.append(ridge.parameters())
w_bootstrap = np.array(parameters)
w_mean = np.mean(w_bootstrap, axis=0)
w_std = np.std(w_bootstrap, axis=0)
# In[33]:
plt.figure(figsize = (10,5))
plt.errorbar(np.arange(1, 101),
w_mean,
yerr= w_std,
ecolor='red',
elinewidth=1,
capsize=1)
plt.title('Ridge Model $\lambda_{optimal}$ Weights')
plt.xlabel('i')
plt.ylabel('$W_i$')
plt.show()
# In[34]:
p_values = scipy.special.ndtr(- w_mean / w_std) * two_sided
significants = np.argwhere(p_values < alpha_level).flatten()
print(f' Index of the parameters that are significantly different than 0: \n {significants}')
elif question == '2' :
two_sided = 2
def random_seed(seed:int = 42) -> None :
""" Random seeding for reproducebility
Arguments:
- seed (int) : random state
Returns:
- None
"""
np.random.seed(seed)
random.seed(seed)
# ### Question 2
# ## Part A
# In[44]:
f = h5py.File('hw3_data3.mat','r')
pop1 = np.array(
f.get('pop1')
)
pop2 = np.array(
f.get('pop2')
)
# In[45]:
def bootstrap(sample:np.ndarray, bootstrap_iters:iter = range(10000), random_state:int = 11) -> np.ndarray:
"""
Generate bootstrap samples using random sampling with replacement.
Arguments:
- sample (np.ndarray) : Sample to be bootstraped
- bootstrap_iters (iterator object) : Specification of bootstrap iterations
- random_state (int) : Random seed for reproducibility
Returns:
- bootstrap_samples (np.ndarray) : Bootstrapped array
"""
random_seed(random_state)
size = sample.shape[0]
bootstrap_samples = list()
for idx in bootstrap_iters:
bootstrap_idx = np.random.choice(np.arange(sample.shape[0]), size = size, replace = True)
bootstrap_samples.append(sample[bootstrap_idx])
return np.array(bootstrap_samples)
# In[46]:
pop = np.vstack([pop1,pop2])
pop_bootstrap = bootstrap(pop)
sample_1 = pop_bootstrap[:,:len(pop1)].squeeze(2)
sample_2 = pop_bootstrap[:,len(pop1):].squeeze(2)
sample_1_bootstrap_mean = sample_1.mean(axis = 1)
sample_2_bootstrap_mean = sample_2.mean(axis = 1)
sample_diff_means = sample_1_bootstrap_mean - sample_2_bootstrap_mean
sample_mean_dist = pd.DataFrame()
sample_mean_dist['Mean Difference'] = sample_diff_means.flatten()
fig, ax = plt.subplots(figsize = (10,5))
sample_mean_dist.plot.kde(ax=ax, title='Difference of Means of Bootstrapped Populations 1 and 2')
sample_mean_dist.plot.hist(density=True, ax = ax, bins = 15)
ax.set_ylabel('Probability $P_X(x)$')
ax.set_xlabel('Difference in means (x)')
ax.grid(axis='y')
ax.set_yticks([])
# In[47]:
pop1_bootstrap = bootstrap(pop1)
pop2_bootstrap = bootstrap(pop2)
pop1_bootstrap_mean = np.mean(pop1_bootstrap, axis = 1)
pop2_bootstrap_mean = np.mean(pop2_bootstrap, axis = 1)
mean_dist = pd.DataFrame()
mean_dist['pop1 Mean'] = pop1_bootstrap_mean.flatten()
mean_dist['pop2 Mean'] = pop2_bootstrap_mean.flatten()
mean_dist['Mean Difference'] = pop1_bootstrap_mean - pop2_bootstrap_mean
fig, ax = plt.subplots(figsize = (10,5))
mean_dist.plot.kde(ax=ax, title='Difference of Means of Bootstrapped Populations 1 and 2')
mean_dist.plot.hist(density=True, ax = ax, bins = 15)
ax.set_ylabel('Probability $P_X(x)$')
ax.set_xlabel('Difference in means (x)')
ax.grid(axis='y')
ax.set_yticks([])
fig, ax = plt.subplots(figsize = (10,5))
mean_dist['Mean Difference'].plot.kde(ax=ax,legend = True, title='Difference of Means of Bootstrapped Populations 1 and 2')
mean_dist['Mean Difference'].plot.hist(density=True, ax = ax, bins = 15)
ax.set_ylabel('Probability $P_X(x)$')
ax.set_xlabel('Difference in means (x)')
ax.grid(axis='y')
ax.set_yticks([])
# In[48]:
actual_diff_means = pop1.mean() - pop2.mean()
std_test = sample_mean_dist['Mean Difference'].std()
mean_test = sample_mean_dist['Mean Difference'].mean()
z_cal = (mean_test - actual_diff_means) / std_test
p_values = scipy.special.ndtr(z_cal) * two_sided
print('The two sided p-value is:', p_values)
# ## Part B
# In[49]:
vox1 = np.array(
f.get('vox1')
).flatten()
vox2 = np.array(
f.get('vox2')
).flatten()
print(
vox1.shape,
vox2.shape
)
vox1_bootstrap = bootstrap(vox1)
vox2_bootstrap = bootstrap(vox2)
def corr(X: list or np.ndarray,Y: list or np.ndarray) -> list:
"""
Given the X,Y distributions, computes the Pearson Correlation element wise.
Arguments:
- X (list or np.ndarray) : First distribution
- Y (list or np.ndarray) : Second distribution
Returns:
- pearson_corrs (list[float]) : Computed correlations element wise
"""
assert X.shape == Y.shape, 'Dimension Mismatch!'
return [scipy.stats.pearsonr(X[i], Y[i])[0] for i in range(X.shape[0])]
corr_bootstrap = corr(vox1_bootstrap,vox2_bootstrap)
fig, ax = plt.subplots(figsize = (10,5))
pd.Series(corr_bootstrap).plot.kde(ax=ax, legend = False, title='Sampling Distribution of Correlation between vox1 and vox2')
pd.Series(corr_bootstrap).plot.hist(density=True, ax = ax, bins = 20, alpha = 0.8,color = 'red')
ax.set_ylabel('Probability $P_Y(y)$')
ax.set_xlabel('Pearson Correlation y')
ax.grid(axis='y')
ax.set_yticks([])
# Thanks to https://stackoverflow.com/questions/15033511/compute-a-confidence-interval-from-sample-data
def confidence_interval(data: list or np.ndarray, confidence:float=0.95) -> tuple:
"""
Given the distribution and confidence level, computes the confidence interval.
Arguments:
- data (list or np.ndarray) : Input distribution
- confidence (float) : confidence level in the range [0,1]
Returns:
- confidence_level (tuple[np.ndarray]) : lower, upper limits respectively
"""
a = 1.0 * np.array(data)
n = len(a)
m, se = np.mean(a), scipy.stats.sem(a)
h = se * scipy.stats.t.ppf((1 + confidence) / 2., n-1)
return m-h, m+h
def _confidence_interval(data, confidence=0.95):
return scipy.stats.t.interval(confidence, len(data)-1, loc=np.mean(data), scale=st.sem(data))
corr_mean = np.mean(corr_bootstrap)
lower, upper = confidence_interval(corr_bootstrap,confidence=0.95)
print('Mean correlation value:', corr_mean)
print(f'95% confidence interval of the correlation values: {lower, upper}')
is_corr_zero = np.argwhere(corr_bootstrap == 0)
corr_zero_percentage = 100 * is_corr_zero.shape[0] / 10000
print('Percentage of zero correlation values:', corr_zero_percentage)
# ## Part C
# In[50]:
vox1_ind = bootstrap(vox1, range(10000), random_state=42)
vox2_ind = bootstrap(vox2, range(10000), random_state=21)
_corr_ind = corr(vox1_ind,vox2_ind)
corr_ind = pd.Series(_corr_ind)
fig, ax = plt.subplots(figsize = (10,5))
corr_ind.plot.kde(ax=ax, legend = False, title='Sampling Distribution of Correlation between vox1 and vox2')
corr_ind.plot.hist(density=True, ax = ax, bins = 20, alpha = 0.8,color = 'red')
ax.set_ylabel('Probability $P_Y(y)$')
ax.set_xlabel('Pearson Correlation y')
ax.grid(axis='y')
ax.set_yticks([])
actual_corr, _ = scipy.stats.pearsonr(vox1,vox2)
mean_corr = corr_ind.mean()
std_corr = corr_ind.std()
z_score = mean_corr - actual_corr
z_score /= std_corr
p_value = scipy.special.ndtr(z_score)
print('The one sided p-value is:', p_value)
# ## Part D
# In[52]:
building = np.array(f.get('building')).flatten()
face = np.array(f.get('face')).flatten()
print(
building.shape,
face.shape
)
random_seed(31)
assert building.shape[0] == face.shape[0],'Dimensionality Mismatch!'
sample_size = np.arange(building.shape[0])
_mean_diff = list()
bootstrap_iters = np.arange(10000)
for ii in bootstrap_iters:
resample = []
for jj in sample_size:
bootstrap_idx = np.random.choice(np.arange(building.shape[0]), replace = True)
options = [0] * 2
_option = building[jj] - face[jj]
options.append(_option)
_option = face[jj] - building[jj]
options.append(_option)
resample.append(np.random.choice(options))
_mean_diff.append(np.mean(resample))
mean_diff = pd.Series(_mean_diff)
fig, ax = plt.subplots(figsize = (10,5))
mean_diff.plot.kde(ax=ax, legend = False, title='Difference in means of building and face')
mean_diff.plot.hist(density=True, ax = ax, bins = 40, alpha = 0.8, color = 'red')
ax.set_ylabel('Probability $P_X(x)$')
ax.set_xlabel('Difference in means (x)')
ax.grid(axis='y')
ax.set_yticks([])
x_actual = np.mean(building) - np.mean(face)
mean = mean_diff.mean()
std = mean_diff.std()
z_score = mean - x_actual
z_score /= std
p_value = scipy.special.ndtr(- z_score) * two_sided
print('The two sided p-value is:', p_value)
# ## Part E
# In[53]:
arr_stack = np.hstack((building, face))
arr_bootstrap = bootstrap(arr_stack)
samples1 = arr_bootstrap[:, :len(building)]
samples2 = arr_bootstrap[:, len(building):]
means1 = np.mean(samples1, axis=1)
means2 = np.mean(samples2, axis=1)
sample_diff_means = means1 - means2
sample_mean_dist = | pd.DataFrame() | pandas.DataFrame |
# coding=utf-8
# author=yphacker
import os
import argparse
from tqdm import tqdm
import numpy as np
import pandas as pd
import torch
from torch.utils.data import DataLoader
from conf import config
from model.net import Net
from utils.data_utils import MyDataset
from utils.data_utils import test_transform
import torch.nn as nn
import pandas as pd
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def predict(model):
#test_path_list = ['{}/{}.jpg'.format(config.image_test_path, x) for x in range(0, data_len)]
#test_path_list = ['{}/{}'.format(config.image_test_path, x) for x in os.listdir(config.image_test_path)]
csv_path = '/data/beijing/dataset/BOLD/BOLD_public/val.csv'
info = pd.read_csv(csv_path)
test = list(info['filename'])
test_path_list = []
for i in range(len(test)):
if os.path.exists(config.image_test_path + '/' + test[i]):
test_path_list.append(config.image_test_path + '/' + test[i])
#print(test_path_list)
#exit()
test_data = np.array(test_path_list)
test_dataset = MyDataset(test_data, test_transform, 'test')
test_loader = DataLoader(test_dataset, batch_size=config.batch_size, shuffle=False)
model.eval()
pred_list = []
with torch.no_grad():
for batch_x, _ in tqdm(test_loader):
batch_x = batch_x.to(device)
# compute output
probs = model(batch_x)
# preds = torch.argmax(probs, dim=1)
# pred_list += [p.item() for p in preds]
pred_list.extend(probs.cpu().numpy())
#print('----------------------------------------------------------------------')
#print('batch_x = ', batch_x)
#print('probs = ', probs)
#print(np.shape(batch_x))
#print(np.shape(probs))
#print()
#exit()
return pred_list,test_data
def multi_model_predict():
preds_dict = dict()
for model_name in model_name_list:
model = Net(model_name).to(device)
model_save_path = os.path.join(config.model_path, '{}.bin'.format(model_name))
print()
print('model path is : ',model_save_path)
#model_save_path = './save_model/model_1/se_densenet121.bin'
model = nn.DataParallel(model)
model.load_state_dict(torch.load(model_save_path))
pred_list, test_data = predict(model)
test_data = list(test_data)
submission = pd.DataFrame(pred_list)
# submission = pd.DataFrame({"id": range(len(pred_list)), "label": pred_list})
submission.to_csv('{}/{}_val.csv'
.format(config.submission_path, model_name), index=False, header=False)
#submission.to_csv('./submission_test/se_densenet121/submission_sedensenet121_val.csv')
preds_dict['{}'.format(model_name)] = pred_list
pred_list = get_pred_list(preds_dict)
#print()
#print(preds_dict.keys())
#a = preds_dict['se_densenet121']
#print(np.shape(a))
#print()
#print(pred_list)
#print(np.shape(pred_list))
#exit()
#submission = pd.DataFrame({"id": test_data, "label": pred_list})
test_data = | pd.DataFrame(test_data) | pandas.DataFrame |
"""
The entry point to running permutation-based statistics.
Usage
-----
run_permutation_stats.run(test_data/registration_test_data/baseline,
data/registration_test_data/mutant,
test_data/stats_test_data/test_output,
1000)
Currently, this module works only with organ volume data. The voxel-based methods are currently too big to do this.
Think about including voxel-based stats in the future
Outline of pipeline
-------------------
Before running the permutation statistics we need to have run jobrunner.py on the baseline and mutant data.
The main function in this module run() calles the following functions during the pipeline:
get_organ_volume_data and get_staging_data
search the registration output folders for the CSVs that contain the organ volumes
and staging data and collate into single csvs.
distributions.null and distributions.alternative
Use the dataframes from the precedding functions to generate null and alternative p-value distributiuon dataframes
p_thresholds.get_thresholds
Using the null and alternative distributions, these functions generate organ-spceific p-value thresholds.
These are generated for both line-level and specimen level calls.
annotate
This function creates final results CSVs.
Puts a line-level csv in the line/output/stats_/
Puts specimen-level csv files in line/output/stats_/specimen_level
"""
from pathlib import Path
from datetime import date
import pandas as pd
import numpy as np
from logzero import logger as logging
import yaml
from lama import common
from lama.stats.permutation_stats import distributions
from lama.stats.permutation_stats import p_thresholds
from lama.paths import specimen_iterator
from lama.qc.organ_vol_plots import make_plots, pvalue_dist_plots
from lama.common import write_array, read_array, init_logging
GENOTYPE_P_COL_NAME = 'genotype_effect_p_value'
PERM_SIGNIFICANT_COL_NAME = 'significant_cal_p'
def get_organ_volume_data(root_dir: Path) -> pd.DataFrame:
"""
Given a root registration directory, collate all the organ volume CSVs into one file.
Write out the combined organ volume CSV into the root registration directory.
Parameters
----------
root_dir
The path to the root registration directory
Returns
-------
The combined data frame of all the organ volumes
specimen id in index organs in rows
"""
output_dir = root_dir / 'output'
dataframes = []
for line_dir, specimen_dir in specimen_iterator(output_dir):
organ_vol_file = specimen_dir / 'output' / common.ORGAN_VOLUME_CSV_FILE
if not organ_vol_file.is_file():
raise FileNotFoundError(f'Cannot find organ volume file {organ_vol_file}')
df = pd.read_csv(organ_vol_file, index_col=0)
if len(df) == 0:
raise ValueError(f'{organ_vol_file} is empty')
dataframes.append(df)
# Write the concatenated organ vol file to single csv
all_organs = pd.concat(dataframes)
# outpath = output_dir / common.ORGAN_VOLUME_CSV_FILE
# all_organs.to_csv(outpath)
return all_organs
def get_staging_data(root_dir: Path) -> pd.DataFrame:
"""
Given a root registration directory, collate all the staging CSVs into one file.
Write out the combined organ volume CSV into the root registration directory.
Parameters
----------
root_dir
The path to the root registration directory
Returns
-------
The combined dataframe of all the organ volumes
"""
output_dir = root_dir / 'output'
dataframes = []
for line_dir, specimen_dir in specimen_iterator(output_dir):
staging_info = specimen_dir / 'output' / common.STAGING_INFO_FILENAME
if not staging_info.is_file():
raise FileNotFoundError(f'Cannot find staging info file {staging_info}')
df = pd.read_csv(staging_info, index_col=0)
df['line'] = line_dir.name
dataframes.append(df)
# Write the concatenated staging info to the
all_staging = pd.concat(dataframes)
outpath = output_dir / common.STAGING_INFO_FILENAME
all_staging.to_csv(outpath)
return all_staging
def annotate(thresholds: pd.DataFrame, lm_results: pd.DataFrame, lines_root_dir: Path, line_level: bool = True,
label_info: Path = None, label_map: Path = None, write_thresholded_inv_labels=False,
fdr_threshold: float=0.05):
"""
Using the p_value thresholds and the linear model p-value results,
create the following CSV files
Line-level results
specimen-level results
Parameters
----------
thresholds
columns label(index), p_thresh, fdr, num_hits_across_all_lines/specimens
lm_results
The alternative distribution
index: line/specimen id
cols: labels (+ line_id for specimen_level)
outdir
The root directory to save the annotated CSV files
line_level
if not True, place results in specimen-level sub directory
label_info
CSV to map label number to name
Notes
-----
Today's date added to the stats output folder in case it's run multiple times,
TODO: Add file number prefixes so we don't overwrite mulyiple analyses done on the same day
TODO: the organ_volumes folder name is hard-coded. What about if we add a new analysis type to the permutation stats pipeline?
"""
collated = []
if label_map:
label_map = read_array(label_map)
for id_, row in lm_results.iterrows():
# Create a dataframe containing p-value column. each organ on rows
df = row.to_frame()
if not line_level:
# specimne-level has an extra line column we need to remove
df = df.T.drop(columns=['line']).T
# Rename the line_specimen column to be more informative
df.rename(columns={id_: GENOTYPE_P_COL_NAME}, inplace=True)
if line_level:
line = id_
else:
line = row['line']
# Merge the permutation results (p-thresh, fdr, number of hit lines fo this label) with the mutant results
df.index = df.index.astype(np.int64) # Index needs to be cast from object to enable merge
df = df.merge(thresholds, left_index=True, right_index=True, validate='1:1')
df.index.name = 'label'
output_name = f'{id_}_organ_volumes_{str(date.today())}.csv'
line_output_dir = lines_root_dir / line
line_output_dir.mkdir(exist_ok=True)
if not line_level:
# If dealing with specimen-level stats, make subfolder to put results in
line_output_dir = line_output_dir / 'specimen_level' / id_
line_output_dir.mkdir(parents=True, exist_ok=True)
output_path = line_output_dir / output_name
add_significance(df, fdr_threshold)
if label_info:
df = add_label_names(df , label_info)
df.to_csv(output_path)
hit_df = df[df['significant_cal_p'] == True]
collated.append(hit_df)
hit_labels_out = line_output_dir / f'{line}__hit_labels.nrrd'
hits = hit_df.index
if write_thresholded_inv_labels:
_write_thresholded_label_map(label_map, hits, hit_labels_out)
return collated
def _write_thresholded_label_map(label_map: np.ndarray, hits, out: Path):
"""
Write a label map with only the 'hit' organs in it
"""
if label_map is None:
return
if len(hits) > 0:
# Make a copy as it may be being used elsewhere
l = np.copy(label_map)
# Clear any non-hits
l[~np.isin(l, hits)] = 0
write_array(l, out)
def add_label_names(df: pd.DataFrame, label_info: Path) -> pd.DataFrame:
label_df = pd.read_csv(label_info, index_col=0)
df = df.merge(right=label_df[['label_name']], left_index=True, right_index=True)
return df
def add_significance(df: pd.DataFrame, threshold: float):
"""
Add a significance column to the output csv in place.
Set significance to True if the genotype p-value is lower than the p threshold for that organ
and the fdr is lower than fdr threshold.
And sort values by significance
"""
df[PERM_SIGNIFICANT_COL_NAME] = (df[GENOTYPE_P_COL_NAME] <= df['p_thresh']) & (df['fdr'] <= threshold)
df.sort_values(by=[PERM_SIGNIFICANT_COL_NAME, GENOTYPE_P_COL_NAME], ascending=[False, True], inplace=True)
def prepare_data(wt_organ_vol: pd.DataFrame,
wt_staging: pd.DataFrame,
mut_organ_vol: pd.DataFrame,
mut_staging: pd.DataFrame,
label_meta: Path = None,
normalise_to_whole_embryo=False) -> pd.DataFrame:
"""
Do some pre-processing on the input DataFrames and concatenate into one data frame.
Normalise organ volumes by whole embryo volume (staging)
Returns
-------
Concatenated data with line, genotype staging + organ volume columns
"""
wt_staging.rename(columns={'value': 'staging'}, inplace=True)
mut_staging.rename(columns={'value': 'staging'}, inplace=True)
wt_staging.index = wt_staging.index.astype(str)
if normalise_to_whole_embryo:
wt_organ_vol = wt_organ_vol.divide(wt_staging['staging'], axis=0)
mut_organ_vol = mut_organ_vol.divide(mut_staging['staging'], axis=0)
logging.info('Normalising organ volume to whole embryo volume')
# merge the organ vol
organ_vols = pd.concat([wt_organ_vol, mut_organ_vol])
# Drop any organ columns that has only zero values. These are the gaps in the label map caused by merging labels
organ_vols = organ_vols.loc[:, (organ_vols != 0).any(axis=0)]
# For the statsmodels linear mode to work, column names cannot start with a digid. Prefix with 'x'
organ_vols.columns = [f'x{x}' if x.isdigit() else x for x in organ_vols.columns]
staging = pd.concat([wt_staging, mut_staging])
# Merge staging to the organvolume dataframe. First drop line so we don't get duplicate entries
# staging.drop(columns=['line'], inplace=True)
data = | pd.concat([organ_vols, staging], axis=1) | pandas.concat |
from tqdm import tqdm
import torch
import numbers
import logging
import pandas as pd
import numpy as np
from torch.utils.data import DataLoader
from pathlib import Path
from farm.evaluation.metrics import compute_metrics, compute_report_metrics
from farm.utils import to_numpy
from farm.utils import MLFlowLogger as MlLogger
from farm.modeling.adaptive_model import AdaptiveModel
from farm.modeling.biadaptive_model import BiAdaptiveModel
from farm.visual.ascii.images import BUSH_SEP
logger = logging.getLogger(__name__)
class Evaluator:
"""Handles evaluation of a given model over a specified dataset."""
def __init__(
self, data_loader, tasks, device, report=True
):
"""
:param data_loader: The PyTorch DataLoader that will return batches of data from the evaluation dataset
:type data_loader: DataLoader
:param label_maps:
:param device: The device on which the tensors should be processed. Choose from "cpu" and "cuda".
:param metrics: The list of metrics which need to be computed, one for each prediction head.
:param metrics: list
:param report: Whether an eval report should be generated (e.g. classification report per class).
:type report: bool
"""
self.data_loader = data_loader
self.tasks = tasks
self.device = device
self.report = report
def eval(self, model, return_preds_and_labels=False, calibrate_conf_scores=False):
"""
Performs evaluation on a given model.
:param model: The model on which to perform evaluation
:type model: AdaptiveModel
:param return_preds_and_labels: Whether to add preds and labels in the returned dicts of the
:type return_preds_and_labels: bool
:param calibrate_conf_scores: Whether to calibrate the temperature for temperature scaling of the confidence scores
:type calibrate_conf_scores: bool
:return all_results: A list of dictionaries, one for each prediction head. Each dictionary contains the metrics
and reports generated during evaluation.
:rtype all_results: list of dicts
"""
model.eval()
# init empty lists per prediction head
loss_all = [0 for _ in model.prediction_heads]
preds_all = [[] for _ in model.prediction_heads]
label_all = [[] for _ in model.prediction_heads]
ids_all = [[] for _ in model.prediction_heads]
passage_start_t_all = [[] for _ in model.prediction_heads]
logits_all = [[] for _ in model.prediction_heads]
for step, batch in enumerate(
tqdm(self.data_loader, desc="Evaluating", mininterval=10)
):
batch = {key: batch[key].to(self.device) for key in batch}
with torch.no_grad():
logits = model.forward(**batch)
losses_per_head = model.logits_to_loss_per_head(logits=logits, **batch)
preds = model.logits_to_preds(logits=logits, **batch)
labels = model.prepare_labels(**batch)
# stack results of all batches per prediction head
for head_num, head in enumerate(model.prediction_heads):
loss_all[head_num] += np.sum(to_numpy(losses_per_head[head_num]))
preds_all[head_num] += list(to_numpy(preds[head_num]))
label_all[head_num] += list(to_numpy(labels[head_num]))
if head.model_type == "span_classification":
ids_all[head_num] += list(to_numpy(batch["id"]))
passage_start_t_all[head_num] += list(to_numpy(batch["passage_start_t"]))
if calibrate_conf_scores:
logits_all[head_num] += list(to_numpy(logits))
# Evaluate per prediction head
all_results = []
for head_num, head in enumerate(model.prediction_heads):
if head.model_type == "multilabel_text_classification":
# converting from string preds back to multi-hot encoding
from sklearn.preprocessing import MultiLabelBinarizer
mlb = MultiLabelBinarizer(classes=head.label_list)
# TODO check why .fit() should be called on predictions, rather than on labels
preds_all[head_num] = mlb.fit_transform(preds_all[head_num])
label_all[head_num] = mlb.transform(label_all[head_num])
if head.model_type == "span_classification" and calibrate_conf_scores:
temperature_previous = head.temperature_for_confidence.item()
logger.info(f"temperature used for confidence scores before calibration: {temperature_previous}")
head.calibrate_conf(logits_all[head_num], label_all[head_num])
temperature_current = head.temperature_for_confidence.item()
logger.info(f"temperature used for confidence scores after calibration: {temperature_current}")
temperature_change = (abs(temperature_current - temperature_previous) / temperature_previous) * 100.0
if temperature_change > 50:
logger.warning(f"temperature used for calibration of confidence scores changed by more than {temperature_change} percent")
if hasattr(head, 'aggregate_preds'):
# Needed to convert NQ ids from np arrays to strings
ids_all_str = [x.astype(str) for x in ids_all[head_num]]
ids_all_list = [list(x) for x in ids_all_str]
head_ids = ["-".join(x) for x in ids_all_list]
preds_all[head_num], label_all[head_num] = head.aggregate_preds(preds=preds_all[head_num],
labels=label_all[head_num],
passage_start_t=passage_start_t_all[head_num],
ids=head_ids)
result = {"loss": loss_all[head_num] / len(self.data_loader.dataset),
"task_name": head.task_name}
result.update(
compute_metrics(metric=head.metric, preds=preds_all[head_num], labels=label_all[head_num]
)
)
# Select type of report depending on prediction head output type
if self.report:
try:
result["report"] = compute_report_metrics(head, preds_all[head_num], label_all[head_num])
except:
logger.error(f"Couldn't create eval report for head {head_num} with following preds and labels:"
f"\n Preds: {preds_all[head_num]} \n Labels: {label_all[head_num]}")
result["report"] = "Error"
if return_preds_and_labels:
result["preds"] = preds_all[head_num]
result["labels"] = label_all[head_num]
all_results.append(result)
return all_results
@staticmethod
def log_results(results, dataset_name, steps, logging=True, print=True, num_fold=None):
# Print a header
header = "\n\n"
header += BUSH_SEP + "\n"
header += "***************************************************\n"
if num_fold:
header += f"***** EVALUATION | FOLD: {num_fold} | {dataset_name.upper()} SET | AFTER {steps} BATCHES *****\n"
else:
header += f"***** EVALUATION | {dataset_name.upper()} SET | AFTER {steps} BATCHES *****\n"
header += "***************************************************\n"
header += BUSH_SEP + "\n"
logger.info(header)
for head_num, head in enumerate(results):
logger.info("\n _________ {} _________".format(head['task_name']))
for metric_name, metric_val in head.items():
# log with ML framework (e.g. Mlflow)
if logging:
if not metric_name in ["preds","labels"] and not metric_name.startswith("_"):
if isinstance(metric_val, numbers.Number):
MlLogger.log_metrics(
metrics={
f"{dataset_name}_{metric_name}_{head['task_name']}": metric_val
},
step=steps,
)
# print via standard python logger
if print:
if metric_name == "report":
if isinstance(metric_val, str) and len(metric_val) > 8000:
metric_val = metric_val[:7500] + "\n ............................. \n" + metric_val[-500:]
logger.info("{}: \n {}".format(metric_name, metric_val))
else:
if not metric_name in ["preds", "labels"] and not metric_name.startswith("_"):
logger.info("{}: {}".format(metric_name, metric_val))
@staticmethod
def log_results(eval_dir, results, dataset_name, epoch, steps, logging=False, print=True, dframe=True, num_fold=None):
# Print a header
header = "\n\n"
header += BUSH_SEP + "\n"
header += "***************************************************\n"
if num_fold:
header += f"***** EVALUATION | FOLD: {num_fold} | {dataset_name.upper()} SET | AFTER {steps} BATCHES *****\n"
else:
header += f"***** EVALUATION | {dataset_name.upper()} SET | AFTER {steps} BATCHES *****\n"
header += "***************************************************\n"
header += BUSH_SEP + "\n"
logger.info(header)
df_metrics = pd.DataFrame()
df_report = pd.DataFrame()
for head_num, head in enumerate(results):
logger.info("\n _________ {} _________".format(head['task_name']))
for metric_name, metric_val in head.items():
# log with ML framework (e.g. Mlflow)
if logging:
if not metric_name in ["preds","labels"] and not metric_name.startswith("_"):
if isinstance(metric_val, numbers.Number):
MlLogger.log_metrics(
metrics={
f"{dataset_name}_{metric_name}_{head['task_name']}": metric_val
},
step=steps,
)
# print via standard python logger
if print:
if metric_name == "report":
if isinstance(metric_val, str) and len(metric_val) > 8000:
metric_val = metric_val[:7500] + "\n ............................. \n" + metric_val[-500:]
logger.info("{}: \n {}".format(metric_name, metric_val))
else:
if not metric_name in ["preds", "labels"] and not metric_name.startswith("_"):
logger.info("{}: {}".format(metric_name, metric_val))
# save results in pandas dataframe
if dframe:
if metric_name == "report":
try:
lines = metric_val.split('\n')
for line in lines[2:]:
row = {}
row_data = line.split()
if len(row_data) == 6:
row['epoch'] = epoch
row['step'] = steps
row['class'] = row_data[0] + " " + row_data[1]
row['precision'] = row_data[2]
row['recall'] = row_data[3]
row['f1_score'] = row_data[4]
row['support'] = row_data[5]
df_tmp = pd.DataFrame(row, index=[0])
df_report = | pd.concat([df_report, df_tmp], ignore_index=True) | pandas.concat |
'''Original implementation at https://github.com/wangtongada/BOA
'''
import itertools
import operator
import os
import warnings
from os.path import join as oj
from bisect import bisect_left
from collections import defaultdict
from copy import deepcopy
from itertools import combinations
from random import sample
import numpy as np
import pandas as pd
from mlxtend.frequent_patterns import fpgrowth
from numpy.random import random
from pandas import read_csv
from scipy.sparse import csc_matrix
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils.validation import check_X_y, check_is_fitted
from imodels.rule_set.rule_set import RuleSet
class BayesianRuleSetClassifier(RuleSet, BaseEstimator, ClassifierMixin):
'''Bayesian or-of-and algorithm.
Generates patterns that satisfy the minimum support and maximum length and then select the Nrules rules that have the highest entropy.
In function SA_patternbased, each local maximum is stored in maps and the best BOA is returned.
Remember here the BOA contains only the index of selected rules from Nrules self.rules_
'''
def __init__(self, n_rules: int = 2000,
supp=5, maxlen: int = 10,
num_iterations=5000, num_chains=3, q=0.1,
alpha_pos=100, beta_pos=1,
alpha_neg=100, beta_neg=1,
alpha_l=None, beta_l=None,
discretization_method='randomforest', random_state=0):
'''
Params
------
n_rules
number of rules to be used in SA_patternbased and also the output of generate_rules
supp
The higher this supp, the 'larger' a pattern is. 5% is a generally good number
maxlen
maximum length of a pattern
num_iterations
number of iterations in each chain
num_chains
number of chains in the simulated annealing search algorithm
q
alpha_pos
$\rho = alpha/(alpha+beta)$. Make sure $\rho$ is close to one when choosing alpha and beta
The alpha and beta parameters alter the prior distributions for different rules
beta_pos
alpha_neg
beta_neg
alpha_l
beta_l
discretization_method
discretization method
'''
self.n_rules = n_rules
self.supp = supp
self.maxlen = maxlen
self.num_iterations = num_iterations
self.num_chains = num_chains
self.q = q
self.alpha_pos = alpha_pos
self.beta_pos = beta_pos
self.alpha_neg = alpha_neg
self.beta_neg = beta_neg
self.discretization_method = discretization_method
self.alpha_l = alpha_l
self.beta_l = beta_l
self.random_state = 0
def fit(self, X, y, feature_names: list = None, init=[], verbose=False):
'''
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data
y : array_like, shape = [n_samples]
Labels
feature_names : array_like, shape = [n_features], optional (default: [])
String labels for each feature.
If empty and X is a DataFrame, column labels are used.
If empty and X is not a DataFrame, then features are simply enumerated
'''
# check inputs
self.attr_level_num = defaultdict(int) # any missing value defaults to 0
self.attr_names = []
# get feature names
if feature_names is None:
if isinstance(X, pd.DataFrame):
feature_names = X.columns
else:
feature_names = ['X' + str(i) for i in range(X.shape[1])]
# checks
X, y = check_X_y(X, y) # converts df to ndarray
check_classification_targets(y)
assert len(feature_names) == X.shape[1], 'feature_names should be same size as X.shape[1]'
np.random.seed(self.random_state)
# convert to pandas DataFrame
X = pd.DataFrame(X, columns=feature_names)
for i, name in enumerate(X.columns):
self.attr_level_num[name] += 1
self.attr_names.append(name)
self.attr_names_orig = deepcopy(self.attr_names)
self.attr_names = list(set(self.attr_names))
# set up patterns
self._set_pattern_space()
# parameter checking
if self.alpha_l is None or self.beta_l is None or len(self.alpha_l) != self.maxlen or len(
self.beta_l) != self.maxlen:
if verbose:
print('No or wrong input for alpha_l and beta_l - the model will use default parameters.')
self.C = [1.0 / self.maxlen] * self.maxlen
self.C.insert(0, -1)
self.alpha_l = [10] * (self.maxlen + 1)
self.beta_l = [10 * self.pattern_space[i] / self.C[i] for i in range(self.maxlen + 1)]
else:
self.alpha_l = [1] + list(self.alpha_l)
self.beta_l = [1] + list(self.beta_l)
# setup
self._generate_rules(X, y, verbose)
n_rules_current = len(self.rules_)
self.rules_len_list = [len(rule) for rule in self.rules_]
maps = defaultdict(list)
T0 = 1000 # initial temperature for simulated annealing
split = 0.7 * self.num_iterations
# run simulated annealing
for chain in range(self.num_chains):
# initialize with a random pattern set
if init != []:
rules_curr = init.copy()
else:
assert n_rules_current > 1, f'Only {n_rules_current} potential rules found, change hyperparams to allow for more'
N = sample(range(1, min(8, n_rules_current), 1), 1)[0]
rules_curr = sample(range(n_rules_current), N)
rules_curr_norm = self._normalize(rules_curr)
pt_curr = -100000000000
maps[chain].append(
[-1, [pt_curr / 3, pt_curr / 3, pt_curr / 3], rules_curr, [self.rules_[i] for i in rules_curr]])
for iter in range(self.num_iterations):
if iter >= split:
p = np.array(range(1 + len(maps[chain])))
p = np.array(list(_accumulate(p)))
p = p / p[-1]
index = _find_lt(p, random())
rules_curr = maps[chain][index][2].copy()
rules_curr_norm = maps[chain][index][2].copy()
# propose new rules
rules_new, rules_norm = self._propose(rules_curr.copy(), rules_curr_norm.copy(), self.q, y)
# compute probability of new rules
cfmatrix, prob = self._compute_prob(rules_new, y)
T = T0 ** (1 - iter / self.num_iterations) # temperature for simulated annealing
pt_new = sum(prob)
with warnings.catch_warnings():
if not verbose:
warnings.simplefilter("ignore")
alpha = np.exp(float(pt_new - pt_curr) / T)
if pt_new > sum(maps[chain][-1][1]):
maps[chain].append([iter, prob, rules_new, [self.rules_[i] for i in rules_new]])
if verbose:
print((
'\n** chain = {}, max at iter = {} ** \n accuracy = {}, TP = {},FP = {}, TN = {}, FN = {}'
'\n pt_new is {}, prior_ChsRules={}, likelihood_1 = {}, likelihood_2 = {}\n').format(
chain, iter, (cfmatrix[0] + cfmatrix[2] + 0.0) / len(y), cfmatrix[0], cfmatrix[1],
cfmatrix[2], cfmatrix[3], sum(prob), prob[0], prob[1], prob[2])
)
self._print_rules(rules_new)
print(rules_new)
if random() <= alpha:
rules_curr_norm, rules_curr, pt_curr = rules_norm.copy(), rules_new.copy(), pt_new
pt_max = [sum(maps[chain][-1][1]) for chain in range(self.num_chains)]
index = pt_max.index(max(pt_max))
self.rules_ = maps[index][-1][3]
return self
def __str__(self):
return ' '.join(str(r) for r in self.rules_)
def predict(self, X):
check_is_fitted(self)
if isinstance(X, np.ndarray):
df = | pd.DataFrame(X, columns=self.attr_names_orig) | pandas.DataFrame |
#%%
import numpy as np
import pandas as pd
from scipy import stats
import tqdm
import prot.size as size
# Load the data quantifying absolute protein synthesis rates.
counts = | pd.read_csv('../../../data/peebo2015_raw_data/peebo2014_copynums_minimal.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
import nose
import numpy as np
from datetime import datetime
from pandas.util import testing as tm
from pandas.core import config as cf
from pandas.compat import u
from pandas.tslib import iNaT
from pandas import (NaT, Float64Index, Series,
DatetimeIndex, TimedeltaIndex, date_range)
from pandas.types.dtypes import DatetimeTZDtype
from pandas.types.missing import (array_equivalent, isnull, notnull,
na_value_for_dtype)
_multiprocess_can_split_ = True
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
with cf.option_context("mode.use_inf_as_null", False):
assert notnull(np.inf)
assert notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.all()
with cf.option_context("mode.use_inf_as_null", True):
assert not notnull(np.inf)
assert not notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.sum() == 2
with cf.option_context("mode.use_inf_as_null", False):
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
assert (isinstance(isnull(s), Series))
def test_isnull():
assert not isnull(1.)
assert isnull(None)
assert isnull(np.NaN)
assert not isnull(np.inf)
assert not isnull(-np.inf)
# series
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
assert (isinstance(isnull(s), Series))
# frame
for df in [tm.makeTimeDataFrame(), tm.makePeriodFrame(),
tm.makeMixedDataFrame()]:
result = isnull(df)
expected = df.apply(isnull)
tm.assert_frame_equal(result, expected)
# panel
for p in [tm.makePanel(), tm.makePeriodPanel(), tm.add_nans(tm.makePanel())
]:
result = isnull(p)
expected = p.apply(isnull)
tm.assert_panel_equal(result, expected)
# panel 4d
for p in [tm.makePanel4D(), tm.add_nans_panel4d(tm.makePanel4D())]:
result = isnull(p)
expected = p.apply(isnull)
tm.assert_panel4d_equal(result, expected)
def test_isnull_lists():
result = isnull([[False]])
exp = np.array([[False]])
assert (np.array_equal(result, exp))
result = isnull([[1], [2]])
exp = np.array([[False], [False]])
assert (np.array_equal(result, exp))
# list of strings / unicode
result = isnull(['foo', 'bar'])
assert (not result.any())
result = isnull([u('foo'), u('bar')])
assert (not result.any())
def test_isnull_nat():
result = isnull([NaT])
exp = np.array([True])
assert (np.array_equal(result, exp))
result = isnull(np.array([NaT], dtype=object))
exp = np.array([True])
assert (np.array_equal(result, exp))
def test_isnull_numpy_nat():
arr = np.array([NaT, np.datetime64('NaT'), np.timedelta64('NaT'),
np.datetime64('NaT', 's')])
result = isnull(arr)
expected = np.array([True] * 4)
tm.assert_numpy_array_equal(result, expected)
def test_isnull_datetime():
assert (not isnull(datetime.now()))
assert notnull(datetime.now())
idx = date_range('1/1/1990', periods=20)
assert (notnull(idx).all())
idx = np.asarray(idx)
idx[0] = iNaT
idx = DatetimeIndex(idx)
mask = isnull(idx)
assert (mask[0])
assert (not mask[1:].any())
# GH 9129
pidx = idx.to_period(freq='M')
mask = isnull(pidx)
assert (mask[0])
assert (not mask[1:].any())
mask = isnull(pidx[1:])
assert (not mask.any())
class TestIsNull(tm.TestCase):
def test_0d_array(self):
self.assertTrue(isnull(np.array(np.nan)))
self.assertFalse(isnull(np.array(0.0)))
self.assertFalse(isnull(np.array(0)))
# test object dtype
self.assertTrue(isnull(np.array(np.nan, dtype=object)))
self.assertFalse(isnull(np.array(0.0, dtype=object)))
self.assertFalse(isnull(np.array(0, dtype=object)))
def test_array_equivalent():
assert array_equivalent(np.array([np.nan, np.nan]),
np.array([np.nan, np.nan]))
assert array_equivalent(np.array([np.nan, 1, np.nan]),
np.array([np.nan, 1, np.nan]))
assert array_equivalent(np.array([np.nan, None], dtype='object'),
np.array([np.nan, None], dtype='object'))
assert array_equivalent(np.array([np.nan, 1 + 1j], dtype='complex'),
np.array([np.nan, 1 + 1j], dtype='complex'))
assert not array_equivalent(
np.array([np.nan, 1 + 1j], dtype='complex'), np.array(
[np.nan, 1 + 2j], dtype='complex'))
assert not array_equivalent(
np.array([np.nan, 1, np.nan]), np.array([np.nan, 2, np.nan]))
assert not array_equivalent(
np.array(['a', 'b', 'c', 'd']), np.array(['e', 'e']))
assert array_equivalent(Float64Index([0, np.nan]),
Float64Index([0, np.nan]))
assert not array_equivalent(
Float64Index([0, np.nan]), Float64Index([1, np.nan]))
assert array_equivalent(DatetimeIndex([0, np.nan]),
DatetimeIndex([0, np.nan]))
assert not array_equivalent(
DatetimeIndex([0, np.nan]), DatetimeIndex([1, np.nan]))
assert array_equivalent(TimedeltaIndex([0, np.nan]),
TimedeltaIndex([0, np.nan]))
assert not array_equivalent(
| TimedeltaIndex([0, np.nan]) | pandas.TimedeltaIndex |
from datetime import datetime
import numpy as np
import pytest
import pytz
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from pandas import (
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
Series,
Timestamp,
cut,
date_range,
)
import pandas._testing as tm
class TestDataFrameAlterAxes:
@pytest.fixture
def idx_expected(self):
idx = DatetimeIndex(["2013-1-1 13:00", "2013-1-2 14:00"], name="B").tz_localize(
"US/Pacific"
)
expected = Series(
np.array(
[
Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"),
],
dtype="object",
),
name="B",
)
assert expected.dtype == idx.dtype
return idx, expected
def test_to_series_keep_tz_deprecated_true(self, idx_expected):
# convert to series while keeping the timezone
idx, expected = idx_expected
msg = "stop passing 'keep_tz'"
with tm.assert_produces_warning(FutureWarning) as m:
result = idx.to_series(keep_tz=True, index=[0, 1])
assert msg in str(m[0].message)
tm.assert_series_equal(result, expected)
def test_to_series_keep_tz_deprecated_false(self, idx_expected):
idx, expected = idx_expected
with tm.assert_produces_warning(FutureWarning) as m:
result = idx.to_series(keep_tz=False, index=[0, 1])
tm.assert_series_equal(result, expected.dt.tz_convert(None))
msg = "do 'idx.tz_convert(None)' before calling"
assert msg in str(m[0].message)
def test_setitem_dt64series(self, idx_expected):
# convert to utc
idx, expected = idx_expected
df = DataFrame(np.random.randn(2, 1), columns=["A"])
df["B"] = idx
with tm.assert_produces_warning(FutureWarning) as m:
df["B"] = idx.to_series(keep_tz=False, index=[0, 1])
msg = "do 'idx.tz_convert(None)' before calling"
assert msg in str(m[0].message)
result = df["B"]
comp = Series(idx.tz_convert("UTC").tz_localize(None), name="B")
tm.assert_series_equal(result, comp)
def test_setitem_datetimeindex(self, idx_expected):
# setting a DataFrame column with a tzaware DTI retains the dtype
idx, expected = idx_expected
df = DataFrame(np.random.randn(2, 1), columns=["A"])
# assign to frame
df["B"] = idx
result = df["B"]
tm.assert_series_equal(result, expected)
def test_setitem_object_array_of_tzaware_datetimes(self, idx_expected):
# setting a DataFrame column with a tzaware DTI retains the dtype
idx, expected = idx_expected
df = DataFrame(np.random.randn(2, 1), columns=["A"])
# object array of datetimes with a tz
df["B"] = idx.to_pydatetime()
result = df["B"]
| tm.assert_series_equal(result, expected) | pandas._testing.assert_series_equal |
import sys
import time
import pandas as pd
import numpy as np
import copyreg, types
from tqdm import tqdm
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('seaborn-talk')
plt.style.use('bmh')
#plt.rcParams['font.family'] = 'DejaVu Sans Mono'
plt.rcParams['font.size'] = 9.5
plt.rcParams['font.weight'] = 'medium'
# =======================================================
# Symmetric CUSUM Filter [2.5.2.1]
def getTEvents(gRaw, h):
"""cusum filter
args
----
gRaw: array-like
h: int() or float()
returns
-------
pd.DatetimeIndex()
"""
tEvents, sPos, sNeg = [], 0, 0
diff = np.log(gRaw).diff().dropna().abs()
for i in tqdm(diff.index[1:]):
try:
pos, neg = float(sPos+diff.loc[i]), float(sNeg+diff.loc[i])
except Exception as e:
print(e)
print(sPos+diff.loc[i], type(sPos+diff.loc[i]))
print(sNeg+diff.loc[i], type(sNeg+diff.loc[i]))
break
sPos, sNeg=max(0., pos), min(0., neg)
if sNeg<-h:
sNeg=0;tEvents.append(i)
elif sPos>h:
sPos=0;tEvents.append(i)
return pd.DatetimeIndex(tEvents)
# =======================================================
# Daily Volatility Estimator [3.1]
## for wtvr reason dates are not aligned for return calculation
## must account for it for computation
def getDailyVol(close,span0=100):
# daily vol reindexed to close
df0=close.index.searchsorted(close.index-pd.Timedelta(days=1))
#bp()
df0=df0[df0>0]
#bp()
df0=(pd.Series(close.index[df0-1],
index=close.index[close.shape[0]-df0.shape[0]:]))
#bp()
try:
df0=close.loc[df0.index]/close.loc[df0.values].values-1 # daily rets
except Exception as e:
print(e)
print('adjusting shape of close.loc[df0.index]')
cut = close.loc[df0.index].shape[0] - close.loc[df0.values].shape[0]
df0=close.loc[df0.index].iloc[:-cut]/close.loc[df0.values].values-1
df0=df0.ewm(span=span0).std().rename('dailyVol')
return df0
# =======================================================
# Triple-Barrier Labeling Method [3.2]
def applyPtSlOnT1(close,events,ptSl,molecule):
# apply stop loss/profit taking, if it takes place before t1 (end of event)
events_=events.loc[molecule]
out=events_[['t1']].copy(deep=True)
if ptSl[0]>0: pt=ptSl[0]*events_['trgt']
else: pt=pd.Series(index=events.index) # NaNs
if ptSl[1]>0: sl=-ptSl[1]*events_['trgt']
else: sl=pd.Series(index=events.index) # NaNs
for loc,t1 in events_['t1'].fillna(close.index[-1]).iteritems():
df0=close[loc:t1] # path prices
df0=(df0/close[loc]-1)*events_.at[loc,'side'] # path returns
out.loc[loc,'sl']=df0[df0<sl[loc]].index.min() # earliest stop loss
out.loc[loc,'pt']=df0[df0>pt[loc]].index.min() # earliest profit taking
return out
# =======================================================
# Gettting Time of First Touch (getEvents) [3.3]
def getEvents(close, tEvents, ptSl, trgt, minRet, numThreads,t1=False, side=None):
#1) get target
trgt=trgt.loc[tEvents]
trgt=trgt[trgt>minRet] # minRet
#2) get t1 (max holding period)
if t1 is False:t1=pd.Series(pd.NaT, index=tEvents)
#3) form events object, apply stop loss on t1
if side is None:side_,ptSl_=pd.Series(1.,index=trgt.index), [ptSl[0],ptSl[0]]
else: side_,ptSl_=side.loc[trgt.index],ptSl[:2]
events=(pd.concat({'t1':t1,'trgt':trgt,'side':side_}, axis=1)
.dropna(subset=['trgt']))
df0=mpPandasObj(func=applyPtSlOnT1,pdObj=('molecule',events.index),
numThreads=numThreads,close=close,events=events,
ptSl=ptSl_)
events['t1']=df0.dropna(how='all').min(axis=1) #pd.min ignores nan
if side is None:events=events.drop('side',axis=1)
return events
# =======================================================
# Adding Vertical Barrier [3.4]
def addVerticalBarrier(tEvents, close, numDays=1):
t1=close.index.searchsorted(tEvents+pd.Timedelta(days=numDays))
t1=t1[t1<close.shape[0]]
t1=(pd.Series(close.index[t1],index=tEvents[:t1.shape[0]]))
return t1
# =======================================================
# Labeling for side and size [3.5, 3.8]
def getBins(events, close, t1=None):
'''
Compute event's outcome (including side information, if provided).
events is a DataFrame where:
-events.index is event's starttime
-events['t1'] is event's endtime
-events['trgt'] is event's target
-events['side'] (optional) implies the algo's position side
-t1 is original vertical barrier series
Case 1: ('side' not in events): bin in (-1,1) <-label by price action
Case 2: ('side' in events): bin in (0,1) <-label by pnl (meta-labeling)
'''
# 1) prices aligned with events
events_ = events.dropna(subset=['t1'])
px = events_.index.union(events_['t1'].values).drop_duplicates()
px = close.reindex(px, method='bfill')
# 2) create out object
out = pd.DataFrame(index=events_.index)
out['ret'] = px.loc[events_['t1'].values].values / px.loc[
events_.index] - 1
if 'side' in events_: out['ret'] *= events_['side'] # meta-labeling
out['bin'] = np.sign(out['ret'])
if 'side' not in events_:
# only applies when not meta-labeling.
# to update bin to 0 when vertical barrier is touched, we need the
# original vertical barrier series since the events['t1'] is the time
# of first touch of any barrier and not the vertical barrier
# specifically. The index of the intersection of the vertical barrier
# values and the events['t1'] values indicate which bin labels needs
# to be turned to 0.
vtouch_first_idx = events[events['t1'].isin(t1.values)].index
out.loc[vtouch_first_idx, 'bin'] = 0.
if 'side' in events_: out.loc[out['ret'] <= 0, 'bin'] = 0 # meta-labeling
return out
# =======================================================
# Expanding getBins to Incorporate Meta-Labeling [3.7]
def getBinsOld(events, close):
'''
Compute event's outcome (including side information, if provided).
events is a DataFrame where:
-events.index is event's starttime
-events['t1'] is event's endtime
-events['trgt'] is event's target
-events['side'] (optional) implies the algo's position side
Case 1: ('side' not in events): bin in (-1,1) <-label by price action
Case 2: ('side' in events): bin in (0,1) <-label by pnl (meta-labeling)
'''
#1) prices aligned with events
events_=events.dropna(subset=['t1'])
px=events_.index.union(events_['t1'].values).drop_duplicates()
px=close.reindex(px,method='bfill')
#2) create out object
out=pd.DataFrame(index=events_.index)
out['ret']=px.loc[events_['t1'].values].values/px.loc[events_.index]-1
if 'side' in events_:out['ret']*=events_['side'] # meta-labeling
out['bin']=np.sign(out['ret'])
if 'side' in events_:out.loc[out['ret']<=0,'bin']=0 # meta-labeling
return out
# =======================================================
# Dropping Unnecessary Labels [3.8]
def dropLabels(events, minPct=.05):
# apply weights, drop labels with insufficient examples
while True:
df0=events['bin'].value_counts(normalize=True)
if df0.min()>minPct or df0.shape[0]<3:break
print('dropped label: ', df0.argmin(),df0.min())
events=events[events['bin']!=df0.argmin()]
return events
# =======================================================
# Linear Partitions [20.4.1]
def linParts(numAtoms,numThreads):
# partition of atoms with a single loop
parts=np.linspace(0,numAtoms,min(numThreads,numAtoms)+1)
parts=np.ceil(parts).astype(int)
return parts
def nestedParts(numAtoms,numThreads,upperTriang=False):
# partition of atoms with an inner loop
parts,numThreads_=[0],min(numThreads,numAtoms)
for num in range(numThreads_):
part=1+4*(parts[-1]**2+parts[-1]+numAtoms*(numAtoms+1.)/numThreads_)
part=(-1+part**.5)/2.
parts.append(part)
parts=np.round(parts).astype(int)
if upperTriang: # the first rows are heaviest
parts=np.cumsum(np.diff(parts)[::-1])
parts=np.append(np.array([0]),parts)
return parts
# =======================================================
# multiprocessing snippet [20.7]
def mpPandasObj(func,pdObj,numThreads=24,mpBatches=1,linMols=True,**kargs):
'''
Parallelize jobs, return a dataframe or series
+ func: function to be parallelized. Returns a DataFrame
+ pdObj[0]: Name of argument used to pass the molecule
+ pdObj[1]: List of atoms that will be grouped into molecules
+ kwds: any other argument needed by func
Example: df1=mpPandasObj(func,('molecule',df0.index),24,**kwds)
'''
import pandas as pd
#if linMols:parts=linParts(len(argList[1]),numThreads*mpBatches)
#else:parts=nestedParts(len(argList[1]),numThreads*mpBatches)
if linMols:parts=linParts(len(pdObj[1]),numThreads*mpBatches)
else:parts=nestedParts(len(pdObj[1]),numThreads*mpBatches)
jobs=[]
for i in range(1,len(parts)):
job={pdObj[0]:pdObj[1][parts[i-1]:parts[i]],'func':func}
job.update(kargs)
jobs.append(job)
if numThreads==1:out=processJobs_(jobs)
else: out=processJobs(jobs,numThreads=numThreads)
if isinstance(out[0],pd.DataFrame):df0=pd.DataFrame()
elif isinstance(out[0],pd.Series):df0=pd.Series()
else:return out
for i in out:df0=df0.append(i)
df0=df0.sort_index()
return df0
# =======================================================
# single-thread execution for debugging [20.8]
def processJobs_(jobs):
# Run jobs sequentially, for debugging
out=[]
for job in jobs:
out_=expandCall(job)
out.append(out_)
return out
# =======================================================
# Example of async call to multiprocessing lib [20.9]
import multiprocessing as mp
import datetime as dt
#________________________________
def reportProgress(jobNum,numJobs,time0,task):
# Report progress as asynch jobs are completed
msg=[float(jobNum)/numJobs, (time.time()-time0)/60.]
msg.append(msg[1]*(1/msg[0]-1))
timeStamp=str(dt.datetime.fromtimestamp(time.time()))
msg=timeStamp+' '+str(round(msg[0]*100,2))+'% '+task+' done after '+ \
str(round(msg[1],2))+' minutes. Remaining '+str(round(msg[2],2))+' minutes.'
if jobNum<numJobs:sys.stderr.write(msg+'\r')
else:sys.stderr.write(msg+'\n')
return
#________________________________
def processJobs(jobs,task=None,numThreads=24):
# Run in parallel.
# jobs must contain a 'func' callback, for expandCall
if task is None:task=jobs[0]['func'].__name__
pool=mp.Pool(processes=numThreads)
outputs,out,time0=pool.imap_unordered(expandCall,jobs),[],time.time()
# Process asyn output, report progress
for i,out_ in enumerate(outputs,1):
out.append(out_)
reportProgress(i,len(jobs),time0,task)
pool.close();pool.join() # this is needed to prevent memory leaks
return out
# =======================================================
# Unwrapping the Callback [20.10]
def expandCall(kargs):
# Expand the arguments of a callback function, kargs['func']
func=kargs['func']
del kargs['func']
out=func(**kargs)
return out
# =======================================================
# Pickle Unpickling Objects [20.11]
def _pickle_method(method):
func_name=method.im_func.__name__
obj=method.im_self
cls=method.im_class
return _unpickle_method, (func_name,obj,cls)
#________________________________
def _unpickle_method(func_name,obj,cls):
for cls in cls.mro():
try:func=cls.__dict__[func_name]
except KeyError:pass
else:break
return func.__get__(obj,cls)
#________________________________
# =======================================================
# Estimating uniqueness of a label [4.1]
def mpNumCoEvents(closeIdx,t1,molecule):
'''
Compute the number of concurrent events per bar.
+molecule[0] is the date of the first event on which the weight will be computed
+molecule[-1] is the date of the last event on which the weight will be computed
Any event that starts before t1[modelcule].max() impacts the count.
'''
#1) find events that span the period [molecule[0],molecule[-1]]
t1=t1.fillna(closeIdx[-1]) # unclosed events still must impact other weights
t1=t1[t1>=molecule[0]] # events that end at or after molecule[0]
t1=t1.loc[:t1[molecule].max()] # events that start at or before t1[molecule].max()
#2) count events spanning a bar
iloc=closeIdx.searchsorted(np.array([t1.index[0],t1.max()]))
count=pd.Series(0,index=closeIdx[iloc[0]:iloc[1]+1])
for tIn,tOut in t1.iteritems():count.loc[tIn:tOut]+=1.
return count.loc[molecule[0]:t1[molecule].max()]
# =======================================================
# Estimating the average uniqueness of a label [4.2]
def mpSampleTW(t1,numCoEvents,molecule):
# Derive avg. uniqueness over the events lifespan
wght=pd.Series(index=molecule)
for tIn,tOut in t1.loc[wght.index].iteritems():
wght.loc[tIn]=(1./numCoEvents.loc[tIn:tOut]).mean()
return wght
# =======================================================
# Sequential Bootstrap [4.5.2]
## Build Indicator Matrix [4.3]
def getIndMatrix(barIx,t1):
# Get Indicator matrix
indM=(pd.DataFrame(0,index=barIx,columns=range(t1.shape[0])))
for i,(t0,t1) in enumerate(t1.iteritems()):indM.loc[t0:t1,i]=1.
return indM
# =======================================================
# Compute average uniqueness [4.4]
def getAvgUniqueness(indM):
# Average uniqueness from indicator matrix
c=indM.sum(axis=1) # concurrency
u=indM.div(c,axis=0) # uniqueness
avgU=u[u>0].mean() # avg. uniqueness
return avgU
# =======================================================
# return sample from sequential bootstrap [4.5]
def seqBootstrap(indM,sLength=None):
# Generate a sample via sequential bootstrap
if sLength is None:sLength=indM.shape[1]
phi=[]
while len(phi)<sLength:
avgU=pd.Series()
for i in indM:
indM_=indM[phi+[i]] # reduce indM
avgU.loc[i]=getAvgUniqueness(indM_).iloc[-1]
prob=avgU/avgU.sum() # draw prob
phi+=[np.random.choice(indM.columns,p=prob)]
return phi
# =======================================================
# Determination of sample weight by absolute return attribution [4.10]
def mpSampleW(t1,numCoEvents,close,molecule):
# Derive sample weight by return attribution
ret=np.log(close).diff() # log-returns, so that they are additive
wght=pd.Series(index=molecule)
for tIn,tOut in t1.loc[wght.index].iteritems():
wght.loc[tIn]=(ret.loc[tIn:tOut]/numCoEvents.loc[tIn:tOut]).sum()
return wght.abs()
# =======================================================
# fractionally differentiated features snippets
# =======================================================
# get weights
def getWeights(d,size):
# thres>0 drops insignificant weights
w=[1.]
for k in range(1,size):
w_ = -w[-1]/k*(d-k+1)
w.append(w_)
w=np.array(w[::-1]).reshape(-1,1)
return w
def getWeights_FFD(d,thres):
w,k=[1.],1
while True:
w_=-w[-1]/k*(d-k+1)
if abs(w_)<thres:break
w.append(w_);k+=1
return np.array(w[::-1]).reshape(-1,1)
# =======================================================
# expanding window fractional differentiation
def fracDiff(series, d, thres=0.01):
'''
Increasing width window, with treatment of NaNs
Note 1: For thres=1, nothing is skipped
Note 2: d can be any positive fractional, not necessarily
bounded between [0,1]
'''
#1) Compute weights for the longest series
w=getWeights(d, series.shape[0])
#2) Determine initial calcs to be skipped based on weight-loss threshold
w_=np.cumsum(abs(w))
w_ /= w_[-1]
skip = w_[w_>thres].shape[0]
#3) Apply weights to values
df={}
for name in series.columns:
seriesF, df_=series[[name]].fillna(method='ffill').dropna(), pd.Series()
for iloc in range(skip, seriesF.shape[0]):
loc=seriesF.index[iloc]
if not np.isfinite(series.loc[loc,name]).any():continue # exclude NAs
try:
df_.loc[loc]=np.dot(w[-(iloc+1):,:].T, seriesF.loc[:loc])[0,0]
except:
continue
df[name]=df_.copy(deep=True)
df=pd.concat(df,axis=1)
return df
# =======================================================
# fixed-width window fractional differentiation
def fracDiff_FFD(series,d,thres=1e-5):
# Constant width window (new solution)
w = getWeights_FFD(d,thres)
width = len(w)-1
df={}
for name in series.columns:
seriesF, df_=series[[name]].fillna(method='ffill').dropna(), pd.Series()
for iloc1 in range(width,seriesF.shape[0]):
loc0,loc1=seriesF.index[iloc1-width], seriesF.index[iloc1]
test_val = series.loc[loc1,name] # must resample if duplicate index
if isinstance(test_val, (pd.Series, pd.DataFrame)):
test_val = test_val.resample('1m').mean()
if not np.isfinite(test_val).any(): continue # exclude NAs
try:
df_.loc[loc1]=np.dot(w.T, seriesF.loc[loc0:loc1])[0,0]
except:
continue
df[name]=df_.copy(deep=True)
df=pd.concat(df,axis=1)
return df
"""
def fracDiff_FFD(series,d,thres=1e-5):
'''
Constant width window (new solution)
Note 1: thres determines the cut-off weight for the window
Note 2: d can be any positive fractional, not necessarily
bounded [0,1].
'''
#1) Compute weights for the longest series
w=getWeights_FFD(d, thres) ## WHERE IS THIS FUNCTION IN THE BOOK
width=len(w)-1
#2) Apply weights to values
df={}
for name in series.columns:
seriesF, df_=series[[name]].fillna(method='ffill').dropna(), pd.Series()
for iloc1 in range(width,seriesF.shape[0]):
loc0,loc1=seriesF.index[iloc1-width], seriesF.index[iloc1]
if not np.isfinite(series.loc[loc1,name]): continue # exclude NAs
df_.loc[loc1]=np.dot(w.T, seriesF.loc[loc0:loc1])[0,0]
df[name]=df_.copy(deep=True)
df=pd.concat(df,axis=1)
return df
"""
# =======================================================
# finding the min. D value that passes ADF test
def plotMinFFD(df0, thres=1e-5):
# pg. 85
from statsmodels.tsa.stattools import adfuller
import matplotlib.pyplot as plt
out=pd.DataFrame(columns=['adfStat','pVal','lags','nObs','95% conf','corr'])
for d in np.linspace(0,1,11):
df1=np.log(df0[['close']]).resample('1D').last() # downcast to daily obs
df2=fracDiff_FFD(df1,d,thres=thres)
corr=np.corrcoef(df1.loc[df2.index,'close'],df2['close'])[0,1]
df2=adfuller(df2['close'],maxlag=1,regression='c',autolag=None)
out.loc[d]=list(df2[:4])+[df2[4]['5%']]+[corr] # with critical value
f,ax=plt.subplots(figsize=(9,5))
out[['adfStat','corr']].plot(ax=ax, secondary_y='adfStat')
plt.axhline(out['95% conf'].mean(),linewidth=1,color='r',linestyle='dotted')
return out
# =======================================================
# Modeling snippets
# =======================================================
# =======================================================
# Purging observations in the training set (7.1)
def getTrainTimes(t1,testTimes):
"""
Given testTimes, find the times of the training observations
-t1.index: Time when the observation started
-t1.value: Time when the observation ended
-testTimes: Times of testing observations
"""
trn=t1.copy(deep=True)
for i,j in testTimes.iteritems():
df0=trn[(i<=trn.index)&(trn.index<=j)].index # train starts within test
df1=trn[(i<=trn)&(trn<=j)].index # train ends within test
df2=trn[(trn.index<=i)&(j<=trn)].index # train envelops test
trn=trn.drop(df0.union(df1).union(df2))
return trn
# =======================================================
# Embargo on Training Observations (7.2)
def getEmbargoTimes(times,pctEmbargo):
# Get embargo time for each bar
step=int(times.shape[0]*pctEmbargo)
if step==0:
mbrg=pd.Series(times,index=times)
else:
mbrg=pd.Series(times[step:],index=times[:-step])
mbrg=mbrg.append(pd.Series(times[-1],index=times[-step:]))
return mbrg
## Examples
# testtimes=pd.Series(mbrg[dt1],index=[dt0]) # include embargo before purge
# trainTimes=getTrainTimes(t1,testTimes)
# testTimes=t1.loc[dt0:dt1].index
# =======================================================
# Cross-validation class when observations overlap (7.3)
from sklearn.model_selection._split import _BaseKFold
class PurgedKFold(_BaseKFold):
"""
Extend KFold class to work with labels that span intervals
The train is purged of observations overlapping test-label intervals
Test set is assumed contiguous (shuffle=False), w/o training samples in between
"""
def __init__(self,n_splits=3,t1=None,pctEmbargo=0.):
if not isinstance(t1,pd.Series):
raise ValueError('Label Through Dates must be a pd.Series')
super(PurgedKFold,self).__init__(n_splits,shuffle=False,random_state=None)
self.t1=t1
self.pctEmbargo=pctEmbargo
def split(self,X,y=None,groups=None):
if (X.index==self.t1.index).sum()!=len(self.t1):
raise ValueError('X and ThruDateValues must have the same index')
# TODO: grouping function combinations insert here??
# manage groups by using label in dataframe?
# use combinations + group label to split into chunks??
indices=np.arange(X.shape[0])
mbrg=int(X.shape[0]*self.pctEmbargo)
test_starts=[
(i[0],i[-1]+1) for i in np.array_split(np.arange(X.shape[0]),
self.n_splits)
]
for i,j in test_starts:
t0=self.t1.index[i] # start of test set
test_indices=indices[i:j]
maxT1Idx=self.t1.index.searchsorted(self.t1[test_indices].max())
train_indices=self.t1.index.searchsorted(self.t1[self.t1<=t0].index)
if maxT1Idx<X.shape[0]: # right train ( with embargo)
train_indices=np.concatenate((train_indices, indices[maxT1Idx+mbrg:]))
yield train_indices,test_indices
# =======================================================
# CV score implements purgedKfold & embargo (7.4)
def cvScore(clf,X,y,sample_weight,scoring='neg_log_loss',
t1=None,cv=None,cvGen=None,pctEmbargo=None):
if scoring not in ['neg_log_loss','accuracy']:
raise Exception('wrong scoring method.')
from sklearn.metrics import log_loss,accuracy_score
idx = pd.IndexSlice
if cvGen is None:
cvGen=PurgedKFold(n_splits=cv,t1=t1,pctEmbargo=pctEmbargo) # purged
score=[]
for train,test in cvGen.split(X=X):
fit=clf.fit(X=X.iloc[idx[train],:],y=y.iloc[idx[train]],
sample_weight=sample_weight.iloc[idx[train]].values)
if scoring=='neg_log_loss':
prob=fit.predict_proba(X.iloc[idx[test],:])
score_=-log_loss(y.iloc[idx[test]], prob,
sample_weight=sample_weight.iloc[idx[test]].values,
labels=clf.classes_)
else:
pred=fit.predict(X.iloc[idx[test],:])
score_=accuracy_score(y.iloc[idx[test]],pred,
sample_weight=sample_weight.iloc[idx[test]].values)
score.append(score_)
return np.array(score)
# =======================================================
# Plot ROC-AUC for purgedKFold
def crossValPlot(skf,classifier,X,y):
"""Code adapted from:
sklearn crossval example
"""
from itertools import cycle
from sklearn.metrics import roc_curve, auc
from scipy import interp
tprs = []
aucs = []
mean_fpr = np.linspace(0, 1, 100)
idx = pd.IndexSlice
f,ax = plt.subplots(figsize=(10,7))
i = 0
for train, test in skf.split(X, y):
probas_ = (classifier.fit(X.iloc[idx[train]], y.iloc[idx[train]])
.predict_proba(X.iloc[idx[test]]))
# Compute ROC curve and area the curve
fpr, tpr, thresholds = roc_curve(y.iloc[idx[test]], probas_[:, 1])
tprs.append(interp(mean_fpr, fpr, tpr))
tprs[-1][0] = 0.0
roc_auc = auc(fpr, tpr)
aucs.append(roc_auc)
ax.plot(fpr, tpr, lw=1, alpha=0.3,
label='ROC fold %d (AUC = %0.2f)' % (i, roc_auc))
i += 1
ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
label='Luck', alpha=.8)
mean_tpr = np.mean(tprs, axis=0)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(aucs)
ax.plot(mean_fpr, mean_tpr, color='b',
label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc),
lw=2, alpha=.8)
std_tpr = np.std(tprs, axis=0)
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,
label=r'$\pm$ 1 std. dev.')
ax.set_xlim([-0.05, 1.05])
ax.set_ylim([-0.05, 1.05])
ax.set_xlabel('False Positive Rate')
ax.set_ylabel('True Positive Rate')
ax.set_title('Receiver operating characteristic example')
ax.legend(bbox_to_anchor=(1,1))
#=======================================================
# Feature Importance snippets
#=======================================================
#=======================================================
# 8.2 Mean Decrease Impurity (MDI)
def featImpMDI(fit,featNames):
# feat importance based on IS mean impurity reduction
# only works with tree based classifiers
df0={i:tree.feature_importances_ for i,tree
in enumerate(fit.estimators_)}
df0=pd.DataFrame.from_dict(df0,orient='index')
df0.columns=featNames
df0=df0.replace(0,np.nan) # b/c max_features=1
imp=(pd.concat({'mean':df0.mean(),
'std':df0.std()*df0.shape[0]**-0.5},
axis=1))
imp/=imp['mean'].sum()
return imp
#=======================================================
# 8.3 Mean Decrease Accuracy (MDA)
def featImpMDA(clf,X,y,cv,sample_weight,t1,pctEmbargo,scoring='neg_log_loss'):
# feat imporant based on OOS score reduction
if scoring not in ['neg_log_loss','accuracy']:
raise ValueError('wrong scoring method.')
from sklearn.metrics import log_loss, accuracy_score
cvGen=PurgedKFold(n_splits=cv,t1=t1,pctEmbargo=pctEmbargo) # purged cv
scr0,scr1=pd.SEries(), pd.DataFrame(columns=X.columns)
for i,(train,test) in enumerate(cvGen.split(X=X)):
X0,y0,w0=X.iloc[train,:],y.iloc[train],sample_weight.iloc[train]
X1,y1,w1=X.iloc[test,:],y.iloc[test],sample_weight.iloc[test]
fit=clf.fit(X=X0,y=y0,sample_weight=w0.values)
if scoring=='neg_log_loss':
prob=fit.predict_proba(X1)
scr0.loc[i]=-log_loss(y1,prob,sample_weight=w1.values,
labels=clf.classes_)
else:
pred=fit.predict(X1)
scr0.loc[i]=accuracy_score(y1,pred,sample_weight=w1.values)
for j in X.columns:
X1_=X1.copy(deep=True)
np.random.shuffle(X1_[j].values) # permutation of a single column
if scoring=='neg_log_loss':
prob=fit.predict_proba(X1_)
scr1.loc[i,j]=-log_loss(y1,prob,sample_weight=w1.values,
labels=clf.classes_)
else:
pred=fit.predict(X1_)
scr1.loc[i,j]=accuracy_score(y1,pred,sample_weight=w1.values)
imp=(-scr1).add(scr0,axis=0)
if scoring=='neg_log_loss':imp=imp/-scr1
else: imp=imp/(1.-scr1)
imp=(pd.concat({'mean':imp.mean(),
'std':imp.std()*imp.shape[0]**-0.5},
axis=1))
return imp,scr0.mean()
#=======================================================
# 8.4 Single Feature Importance (SFI)
def auxFeatImpSFI(featNames,clf,trnsX,cont,scoring,cvGen):
imp=pd.DataFrame(columns=['mean','std'])
for featName in featNames:
df0=cvScore(clf,X=trnsX[[featName]],y=cont['bin'],
sample_weight=cont['w'],scoring=scoring,cvGen=cvGen)
imp.loc[featName,'mean']=df0.mean()
imp.loc[featName,'std']=df0.std()*df0.shape[0]**-0.5
return imp
#=======================================================
# 8.5 Computation of Orthogonal Features
def get_eVec(dot,varThres):
# compute eVec from dot proc matrix, reduce dimension
eVal,eVec=np.linalg.eigh(dot)
idx=eVal.argsort()[::-1] # arugments for sorting eVal desc.
eVal,eVec=eVal[idx],eVec[:,idx]
#2) only positive eVals
eVal=(pd.Series(eVal,index=['PC_'+str(i+1)
for i in range(eVal.shape[0])]))
eVec=(pd.DataFrame(eVec,index=dot.index,columns=eVal.index))
eVec=eVec.loc[:,eVal.index]
#3) reduce dimension, form PCs
cumVar=eVal.cumsum()/eVal.sum()
dim=cumVar.values.searchsorted(varThres)
eVal,eVec=eVal.iloc[:dim+1],eVec.iloc[:,:dim+1]
return eVal,eVec
def orthoFeats(dfx,varThres=0.95):
# given a DataFrame, dfx, of features, compute orthofeatures dfP
dfZ=dfx.sub(dfx.mean(),axis=1).div(dfx.std(),axis=1) # standardize
dot=(pd.DataFrame(np.dot(dfZ.T,dfZ),
index=dfx.columns,
columns=dfx.columns))
eVal,eVec=get_eVec(dot,varThres)
dfP=np.dot(dfZ,eVec)
return dfP
#=======================================================
# 8.6 Computation of weighted kendall's tau between feature importance and inverse PCA ranking
#from scipy.stats import weightedtau
#featImp=np.array([0.55,0.33,0.07,0.05]) # feature importance
#pcRank=np.array([1,2,3,4],dtype=np.float) # PCA rank
#weightedtau(featImp,pcRank**-1)[0]
#=======================================================
# 8.7 Creating a Synthetic Dataset
def getTestData(n_features=40,n_informative=10,n_redundant=10,n_samples=10_000):
# generate a random dataset for a classification problem
from sklearn.datasets import make_classification
kwds=dict(n_samples=n_samples,n_features=n_feautres,
n_informative=n_informative,n_redundant=n_redundant,
random_state=0,shuffle=False)
trnsX,cont=make_classification(**kwds)
df0=(pd.DatetimeIndex(periods=n_samples, freq=pd.tseries.offsets.BDay(),
end=pd.datetime.today()))
trnsX,cont=(pd.DataFrame(trnsX,index=df0),
| pd.Series(cont,index=df0) | pandas.Series |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 8 18:31:16 2020
@author: arti
"""
import pandas as pd
df = | pd.read_csv('stock-data.csv') | pandas.read_csv |
# Download images using csv of ids:
# python download_image_by_ids.py ~/data/iMaterialist/train.json ~/data/iMaterialist/train_30k ~/data/iMaterialist/train_30k_labels.csv
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import os
import sys
import json
import urllib3
import multiprocessing
import pandas as pd
from PIL import Image
from tqdm import tqdm
from urllib3.util import Retry
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def load_dataset(_dataset, _outdir, _filter_ids):
with open(_dataset, 'r') as f:
data_dict = json.load(f)
imgs = pd.DataFrame(data_dict['images'])
imgs.columns = ['id', 'url']
imgs['fn'] = _outdir + imgs['id'] + '.jpg'
imgs = imgs.loc[imgs['id'].isin(_filter_ids), :]
return imgs
def format_urls_for_download(_data_df):
_fnames_urls = _data_df[['fn', 'url']].values.tolist()
return _fnames_urls
def download_image(fnames_and_urls):
"""
download image and save its with 90% quality as JPG format
skip image downloading if image already exists at given path
:param fnames_and_urls: tuple containing absolute path and url of image
"""
fname, url = fnames_and_urls
if not os.path.exists(fname):
http = urllib3.PoolManager(retries=Retry(connect=3, read=2, redirect=3))
response = http.request("GET", url)
image = Image.open(io.BytesIO(response.data))
image_rgb = image.convert("RGB")
image_rgb.save(fname, format='JPEG', quality=90)
if __name__ == '__main__':
if len(sys.argv) < 3:
print("error: not enough arguments")
sys.exit(0)
# get args and create output directory
train_json = sys.argv[1]
outdir = sys.argv[2]
if not outdir.endswith('/'):
outdir += '/'
labels = | pd.read_csv(sys.argv[3], dtype=str) | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math
import utm
from shapely import wkt
import csv
from PointCloudVisualization import convert_fusedata,readFusedata_toDF
# In[2]:
def readxyz(filename,extra = []):
data = []
with open(filename) as f:
line = f.readline()
while line:
line = line.rstrip("\n")
d = line.split(",")
data.append(d)
line = f.readline()
a = np.array(data)[0]
x_min = float(a[0])
y_min = float(a[1])
z_min = float(a[2])
number = letter = 0
if len(extra) > 0:
number = float(a[3])
letter = str(a[4])
return x_min,y_min,z_min,number,letter
# In[3]:
def readXYZdata_toDF(namedf,ignoreline = False, delimeter = " "):
data = []
with open(namedf) as f:
line = f.readline()
while line:
if ignoreline == True:
line = f.readline()
ignoreline = False
continue
d = line.split(delimeter)
data.append(d)
line = f.readline()
a = np.array(data)
df_PointCloud = pd.DataFrame()
df_PointCloud["East"] = | pd.to_numeric(a[:,0]) | pandas.to_numeric |
from collections import (
abc,
deque,
)
from decimal import Decimal
from warnings import catch_warnings
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
PeriodIndex,
Series,
concat,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
class TestConcatenate:
def test_append_concat(self):
# GH#1815
d1 = date_range("12/31/1990", "12/31/1999", freq="A-DEC")
d2 = date_range("12/31/2000", "12/31/2009", freq="A-DEC")
s1 = Series(np.random.randn(10), d1)
s2 = Series(np.random.randn(10), d2)
s1 = s1.to_period()
s2 = s2.to_period()
# drops index
result = concat([s1, s2])
assert isinstance(result.index, PeriodIndex)
assert result.index[0] == s1.index[0]
def test_concat_copy(self, using_array_manager):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for arr in result._mgr.arrays:
assert arr.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for arr in result._mgr.arrays:
if arr.dtype.kind == "f":
assert arr.base is df._mgr.arrays[0].base
elif arr.dtype.kind in ["i", "u"]:
assert arr.base is df2._mgr.arrays[0].base
elif arr.dtype == object:
if using_array_manager:
# we get the same array object, which has no base
assert arr is df3._mgr.arrays[0]
else:
assert arr.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for arr in result._mgr.arrays:
if arr.dtype.kind == "f":
if using_array_manager:
# this is a view on some array in either df or df4
assert any(
np.shares_memory(arr, other)
for other in df._mgr.arrays + df4._mgr.arrays
)
else:
# the block was consolidated, so we got a copy anyway
assert arr.base is None
elif arr.dtype.kind in ["i", "u"]:
assert arr.base is df2._mgr.arrays[0].base
elif arr.dtype == object:
# this is a view on df3
assert any(np.shares_memory(arr, other) for other in df3._mgr.arrays)
def test_concat_with_group_keys(self):
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [["foo", "baz"], ["one", "two"]]
names = ["first", "second"]
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
names=names,
)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(
levels=levels + [[0]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
names=names + [None],
)
expected.index = exp_index
tm.assert_frame_equal(result, expected)
# no names
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
)
assert result.index.names == (None,) * 3
# no levels
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
names=["first", "second"],
)
assert result.index.names == ("first", "second", None)
tm.assert_index_equal(
result.index.levels[0], Index(["baz", "foo"], name="first")
)
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
msg = "Values not found in passed level"
with pytest.raises(ValueError, match=msg):
concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
msg = "Key one not in level"
with pytest.raises(ValueError, match=msg):
concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
def test_crossed_dtypes_weird_corner(self):
columns = ["A", "B", "C", "D"]
df1 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="f8"),
"B": np.array([1, 2, 3, 4], dtype="i8"),
"C": np.array([1, 2, 3, 4], dtype="f8"),
"D": np.array([1, 2, 3, 4], dtype="i8"),
},
columns=columns,
)
df2 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="i8"),
"B": np.array([1, 2, 3, 4], dtype="f8"),
"C": np.array([1, 2, 3, 4], dtype="i8"),
"D": np.array([1, 2, 3, 4], dtype="f8"),
},
columns=columns,
)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(
np.concatenate([df1.values, df2.values], axis=0), columns=columns
)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
assert result.index.names == ("first", "second")
def test_with_mixed_tuples(self, sort):
# 10697
# columns have mixed tuples, so handle properly
df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2))
df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2))
# it works
concat([df1, df2], sort=sort)
def test_concat_mixed_objs(self):
# concat mixed series/frames
# G2385
# axis 1
index = date_range("01-Jan-2013", periods=10, freq="H")
arr = np.arange(10, dtype="int64")
s1 = Series(arr, index=index)
s2 = Series(arr, index=index)
df = DataFrame(arr.reshape(-1, 1), index=index)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 0]
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 1]
)
result = concat([s1, s2], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 5).reshape(-1, 5), index=index, columns=[0, 0, 1, 2, 3]
)
result = concat([s1, df, s2, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
# with names
s1.name = "foo"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, 0]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
s2.name = "bar"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, "bar"]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
# ignore index
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, df, s2], axis=1, ignore_index=True)
tm.assert_frame_equal(result, expected)
# axis 0
expected = DataFrame(
np.tile(arr, 3).reshape(-1, 1), index=index.tolist() * 3, columns=[0]
)
result = concat([s1, df, s2])
tm.assert_frame_equal(result, expected)
expected = DataFrame(np.tile(arr, 3).reshape(-1, 1), columns=[0])
result = concat([s1, df, s2], ignore_index=True)
tm.assert_frame_equal(result, expected)
def test_dtype_coerceion(self):
# 12411
df = DataFrame({"date": [pd.Timestamp("20130101").tz_localize("UTC"), pd.NaT]})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 12045
import datetime
df = DataFrame(
{"date": [datetime.datetime(2012, 1, 1), datetime.datetime(1012, 1, 2)]}
)
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 11594
df = DataFrame({"text": ["some words"] + [None] * 9})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
def test_concat_single_with_key(self):
df = DataFrame(np.random.randn(10, 4))
result = concat([df], keys=["foo"])
expected = concat([df, df], keys=["foo", "bar"])
tm.assert_frame_equal(result, expected[:10])
def test_concat_no_items_raises(self):
with pytest.raises(ValueError, match="No objects to concatenate"):
concat([])
def test_concat_exclude_none(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df[:5], None, None, df[5:]]
result = concat(pieces)
tm.assert_frame_equal(result, df)
with pytest.raises(ValueError, match="All objects passed were None"):
concat([None, None])
def test_concat_keys_with_none(self):
# #1649
df0 = DataFrame([[10, 20, 30], [10, 20, 30], [10, 20, 30]])
result = concat({"a": None, "b": df0, "c": df0[:2], "d": df0[:1], "e": df0})
expected = concat({"b": df0, "c": df0[:2], "d": df0[:1], "e": df0})
tm.assert_frame_equal(result, expected)
result = concat(
[None, df0, df0[:2], df0[:1], df0], keys=["a", "b", "c", "d", "e"]
)
expected = concat([df0, df0[:2], df0[:1], df0], keys=["b", "c", "d", "e"])
tm.assert_frame_equal(result, expected)
def test_concat_bug_1719(self):
ts1 = tm.makeTimeSeries()
ts2 = tm.makeTimeSeries()[::2]
# to join with union
# these two are of different length!
left = concat([ts1, ts2], join="outer", axis=1)
right = concat([ts2, ts1], join="outer", axis=1)
assert len(left) == len(right)
def test_concat_bug_2972(self):
ts0 = Series(np.zeros(5))
ts1 = Series(np.ones(5))
ts0.name = ts1.name = "same name"
result = concat([ts0, ts1], axis=1)
expected = DataFrame({0: ts0, 1: ts1})
expected.columns = ["same name", "same name"]
tm.assert_frame_equal(result, expected)
def test_concat_bug_3602(self):
# GH 3602, duplicate columns
df1 = DataFrame(
{
"firmNo": [0, 0, 0, 0],
"prc": [6, 6, 6, 6],
"stringvar": ["rrr", "rrr", "rrr", "rrr"],
}
)
df2 = DataFrame(
{"C": [9, 10, 11, 12], "misc": [1, 2, 3, 4], "prc": [6, 6, 6, 6]}
)
expected = DataFrame(
[
[0, 6, "rrr", 9, 1, 6],
[0, 6, "rrr", 10, 2, 6],
[0, 6, "rrr", 11, 3, 6],
[0, 6, "rrr", 12, 4, 6],
]
)
expected.columns = ["firmNo", "prc", "stringvar", "C", "misc", "prc"]
result = concat([df1, df2], axis=1)
tm.assert_frame_equal(result, expected)
def test_concat_iterables(self):
# GH8645 check concat works with tuples, list, generators, and weird
# stuff like deque and custom iterables
df1 = DataFrame([1, 2, 3])
df2 = DataFrame([4, 5, 6])
expected = DataFrame([1, 2, 3, 4, 5, 6])
tm.assert_frame_equal(concat((df1, df2), ignore_index=True), expected)
tm.assert_frame_equal(concat([df1, df2], ignore_index=True), expected)
tm.assert_frame_equal(
concat((df for df in (df1, df2)), ignore_index=True), expected
)
tm.assert_frame_equal(concat(deque((df1, df2)), ignore_index=True), expected)
class CustomIterator1:
def __len__(self) -> int:
return 2
def __getitem__(self, index):
try:
return {0: df1, 1: df2}[index]
except KeyError as err:
raise IndexError from err
tm.assert_frame_equal(concat(CustomIterator1(), ignore_index=True), expected)
class CustomIterator2(abc.Iterable):
def __iter__(self):
yield df1
yield df2
tm.assert_frame_equal(concat(CustomIterator2(), ignore_index=True), expected)
def test_concat_order(self):
# GH 17344
dfs = [DataFrame(index=range(3), columns=["a", 1, None])]
dfs += [DataFrame(index=range(3), columns=[None, 1, "a"]) for i in range(100)]
result = concat(dfs, sort=True).columns
expected = dfs[0].columns
tm.assert_index_equal(result, expected)
def test_concat_different_extension_dtypes_upcasts(self):
a = Series(pd.array([1, 2], dtype="Int64"))
b = Series(to_decimal([1, 2]))
result = concat([a, b], ignore_index=True)
expected = Series([1, 2, Decimal(1), Decimal(2)], dtype=object)
tm.assert_series_equal(result, expected)
def test_concat_ordered_dict(self):
# GH 21510
expected = concat(
[Series(range(3)), Series(range(4))], keys=["First", "Another"]
)
result = concat({"First": Series(range(3)), "Another": Series(range(4))})
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("pdt", [Series, DataFrame])
@pytest.mark.parametrize("dt", np.sctypes["float"])
def test_concat_no_unnecessary_upcast(dt, pdt):
# GH 13247
dims = pdt(dtype=object).ndim
dfs = [
pdt(np.array([1], dtype=dt, ndmin=dims)),
pdt(np.array([np.nan], dtype=dt, ndmin=dims)),
pdt(np.array([5], dtype=dt, ndmin=dims)),
]
x = concat(dfs)
assert x.values.dtype == dt
@pytest.mark.parametrize("pdt", [create_series_with_explicit_dtype, DataFrame])
@pytest.mark.parametrize("dt", np.sctypes["int"])
def test_concat_will_upcast(dt, pdt):
with catch_warnings(record=True):
dims = pdt().ndim
dfs = [
pdt(np.array([1], dtype=dt, ndmin=dims)),
pdt(np.array([np.nan], ndmin=dims)),
pdt(np.array([5], dtype=dt, ndmin=dims)),
]
x = concat(dfs)
assert x.values.dtype == "float64"
def test_concat_empty_and_non_empty_frame_regression():
# GH 18178 regression test
df1 = DataFrame({"foo": [1]})
df2 = DataFrame({"foo": []})
expected = DataFrame({"foo": [1.0]})
result = concat([df1, df2])
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
import covasim as cv
import covasim.defaults as cvd
import covasim.utils as cvu
import numba as nb
import numpy as np
import pandas as pd
def generate_people(n_people: int, n_contacts: int, dispersion=None) -> cv.People:
people = cv.People(pars={'pop_size': n_people})
people.contacts['a'] = RandomLayer(people.indices(), n_contacts, dispersion)
people.age = np.zeros(int(n_people))+40
return people
class RandomLayer(cv.Layer):
"""
Generate and dynamically update random layer
"""
def __init__(self, inds, mean_contacts, dispersion=None, dynamic=False):
super().__init__()
self.inds = inds
self.mean_contacts = mean_contacts
self.dispersion = dispersion
self.dynamic = dynamic
self.update(force=True)
@staticmethod
@nb.njit
def _get_contacts(inds, number_of_contacts):
"""
Configuration model network generation
Args:
inds: person indices
number_of_contacts: number of contacts for each ind
"""
total_number_of_half_edges = np.sum(number_of_contacts)
count = 0
source = np.zeros((total_number_of_half_edges,), dtype=cvd.default_int)
for i, person_id in enumerate(inds):
n_contacts = number_of_contacts[i]
source[count:count + n_contacts] = person_id
count += n_contacts
target = np.random.permutation(source)
return source, target
def update(self, force: bool = False) -> None:
#Dynimically update network contacts
if not self.dynamic and not force:
return
n_people = len(self.inds)
# sample from pois or nb
if | pd.isna(self.dispersion) | pandas.isna |
""" test the scalar Timedelta """
from datetime import timedelta
import numpy as np
import pytest
from pandas._libs import lib
from pandas._libs.tslibs import (
NaT,
iNaT,
)
import pandas as pd
from pandas import (
Timedelta,
TimedeltaIndex,
offsets,
to_timedelta,
)
import pandas._testing as tm
class TestTimedeltaUnaryOps:
def test_unary_ops(self):
td = Timedelta(10, unit="d")
# __neg__, __pos__
assert -td == Timedelta(-10, unit="d")
assert -td == Timedelta("-10d")
assert +td == Timedelta(10, unit="d")
# __abs__, __abs__(__neg__)
assert abs(td) == td
assert abs(-td) == td
assert abs(-td) == Timedelta("10d")
class TestTimedeltas:
@pytest.mark.parametrize(
"unit, value, expected",
[
("us", 9.999, 9999),
("ms", 9.999999, 9999999),
("s", 9.999999999, 9999999999),
],
)
def test_rounding_on_int_unit_construction(self, unit, value, expected):
# GH 12690
result = Timedelta(value, unit=unit)
assert result.value == expected
result = Timedelta(str(value) + unit)
assert result.value == expected
def test_total_seconds_scalar(self):
# see gh-10939
rng = Timedelta("1 days, 10:11:12.100123456")
expt = 1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456.0 / 1e9
tm.assert_almost_equal(rng.total_seconds(), expt)
rng = Timedelta(np.nan)
assert np.isnan(rng.total_seconds())
def test_conversion(self):
for td in [Timedelta(10, unit="d"), Timedelta("1 days, 10:11:12.012345")]:
pydt = td.to_pytimedelta()
assert td == Timedelta(pydt)
assert td == pydt
assert isinstance(pydt, timedelta) and not isinstance(pydt, Timedelta)
assert td == np.timedelta64(td.value, "ns")
td64 = td.to_timedelta64()
assert td64 == np.timedelta64(td.value, "ns")
assert td == td64
assert isinstance(td64, np.timedelta64)
# this is NOT equal and cannot be roundtripped (because of the nanos)
td = Timedelta("1 days, 10:11:12.012345678")
assert td != td.to_pytimedelta()
def test_fields(self):
def check(value):
# that we are int
assert isinstance(value, int)
# compat to datetime.timedelta
rng = to_timedelta("1 days, 10:11:12")
assert rng.days == 1
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 0
assert rng.nanoseconds == 0
msg = "'Timedelta' object has no attribute '{}'"
with pytest.raises(AttributeError, match=msg.format("hours")):
rng.hours
with pytest.raises(AttributeError, match=msg.format("minutes")):
rng.minutes
with pytest.raises(AttributeError, match=msg.format("milliseconds")):
rng.milliseconds
# GH 10050
check(rng.days)
check(rng.seconds)
check(rng.microseconds)
check(rng.nanoseconds)
td = Timedelta("-1 days, 10:11:12")
assert abs(td) == Timedelta("13:48:48")
assert str(td) == "-1 days +10:11:12"
assert -td == Timedelta("0 days 13:48:48")
assert -Timedelta("-1 days, 10:11:12").value == 49728000000000
assert Timedelta("-1 days, 10:11:12").value == -49728000000000
rng = to_timedelta("-1 days, 10:11:12.100123456")
assert rng.days == -1
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 100 * 1000 + 123
assert rng.nanoseconds == 456
msg = "'Timedelta' object has no attribute '{}'"
with pytest.raises(AttributeError, match=msg.format("hours")):
rng.hours
with pytest.raises(AttributeError, match=msg.format("minutes")):
rng.minutes
with pytest.raises(AttributeError, match=msg.format("milliseconds")):
rng.milliseconds
# components
tup = to_timedelta(-1, "us").components
assert tup.days == -1
assert tup.hours == 23
assert tup.minutes == 59
assert tup.seconds == 59
assert tup.milliseconds == 999
assert tup.microseconds == 999
assert tup.nanoseconds == 0
# GH 10050
check(tup.days)
check(tup.hours)
check(tup.minutes)
check(tup.seconds)
check(tup.milliseconds)
check(tup.microseconds)
check(tup.nanoseconds)
tup = Timedelta("-1 days 1 us").components
assert tup.days == -2
assert tup.hours == 23
assert tup.minutes == 59
assert tup.seconds == 59
assert tup.milliseconds == 999
assert tup.microseconds == 999
assert tup.nanoseconds == 0
def test_iso_conversion(self):
# GH #21877
expected = Timedelta(1, unit="s")
assert to_timedelta("P0DT0H0M1S") == expected
def test_nat_converters(self):
result = to_timedelta("nat").to_numpy()
assert result.dtype.kind == "M"
assert result.astype("int64") == iNaT
result = to_timedelta("nan").to_numpy()
assert result.dtype.kind == "M"
assert result.astype("int64") == iNaT
@pytest.mark.parametrize(
"unit, np_unit",
[(value, "W") for value in ["W", "w"]]
+ [(value, "D") for value in ["D", "d", "days", "day", "Days", "Day"]]
+ [
(value, "m")
for value in [
"m",
"minute",
"min",
"minutes",
"t",
"Minute",
"Min",
"Minutes",
"T",
]
]
+ [
(value, "s")
for value in [
"s",
"seconds",
"sec",
"second",
"S",
"Seconds",
"Sec",
"Second",
]
]
+ [
(value, "ms")
for value in [
"ms",
"milliseconds",
"millisecond",
"milli",
"millis",
"l",
"MS",
"Milliseconds",
"Millisecond",
"Milli",
"Millis",
"L",
]
]
+ [
(value, "us")
for value in [
"us",
"microseconds",
"microsecond",
"micro",
"micros",
"u",
"US",
"Microseconds",
"Microsecond",
"Micro",
"Micros",
"U",
]
]
+ [
(value, "ns")
for value in [
"ns",
"nanoseconds",
"nanosecond",
"nano",
"nanos",
"n",
"NS",
"Nanoseconds",
"Nanosecond",
"Nano",
"Nanos",
"N",
]
],
)
@pytest.mark.parametrize("wrapper", [np.array, list, pd.Index])
def test_unit_parser(self, unit, np_unit, wrapper):
# validate all units, GH 6855, GH 21762
# array-likes
expected = TimedeltaIndex(
[np.timedelta64(i, np_unit) for i in np.arange(5).tolist()]
)
result = to_timedelta(wrapper(range(5)), unit=unit)
tm.assert_index_equal(result, expected)
result = TimedeltaIndex(wrapper(range(5)), unit=unit)
tm.assert_index_equal(result, expected)
str_repr = [f"{x}{unit}" for x in np.arange(5)]
result = to_timedelta(wrapper(str_repr))
tm.assert_index_equal(result, expected)
result = to_timedelta(wrapper(str_repr))
tm.assert_index_equal(result, expected)
# scalar
expected = Timedelta(np.timedelta64(2, np_unit).astype("timedelta64[ns]"))
result = to_timedelta(2, unit=unit)
assert result == expected
result = Timedelta(2, unit=unit)
assert result == expected
result = to_timedelta(f"2{unit}")
assert result == expected
result = Timedelta(f"2{unit}")
assert result == expected
@pytest.mark.parametrize("unit", ["Y", "y", "M"])
def test_unit_m_y_raises(self, unit):
msg = "Units 'M', 'Y', and 'y' are no longer supported"
with pytest.raises(ValueError, match=msg):
Timedelta(10, unit)
with pytest.raises(ValueError, match=msg):
to_timedelta(10, unit)
with pytest.raises(ValueError, match=msg):
to_timedelta([1, 2], unit)
def test_numeric_conversions(self):
assert Timedelta(0) == np.timedelta64(0, "ns")
assert Timedelta(10) == np.timedelta64(10, "ns")
assert Timedelta(10, unit="ns") == np.timedelta64(10, "ns")
assert Timedelta(10, unit="us") == np.timedelta64(10, "us")
assert Timedelta(10, unit="ms") == np.timedelta64(10, "ms")
assert Timedelta(10, unit="s") == np.timedelta64(10, "s")
assert Timedelta(10, unit="d") == np.timedelta64(10, "D")
def test_timedelta_conversions(self):
assert Timedelta(timedelta(seconds=1)) == np.timedelta64(1, "s").astype(
"m8[ns]"
)
assert Timedelta(timedelta(microseconds=1)) == np.timedelta64(1, "us").astype(
"m8[ns]"
)
assert Timedelta(timedelta(days=1)) == np.timedelta64(1, "D").astype("m8[ns]")
def test_to_numpy_alias(self):
# GH 24653: alias .to_numpy() for scalars
td = Timedelta("10m7s")
assert td.to_timedelta64() == td.to_numpy()
@pytest.mark.parametrize(
"freq,s1,s2",
[
# This first case has s1, s2 being the same as t1,t2 below
(
"N",
Timedelta("1 days 02:34:56.789123456"),
Timedelta("-1 days 02:34:56.789123456"),
),
(
"U",
Timedelta("1 days 02:34:56.789123000"),
Timedelta("-1 days 02:34:56.789123000"),
),
(
"L",
Timedelta("1 days 02:34:56.789000000"),
Timedelta("-1 days 02:34:56.789000000"),
),
("S", Timedelta("1 days 02:34:57"), Timedelta("-1 days 02:34:57")),
("2S", Timedelta("1 days 02:34:56"), Timedelta("-1 days 02:34:56")),
("5S", Timedelta("1 days 02:34:55"), Timedelta("-1 days 02:34:55")),
("T", Timedelta("1 days 02:35:00"), Timedelta("-1 days 02:35:00")),
("12T", Timedelta("1 days 02:36:00"), Timedelta("-1 days 02:36:00")),
("H", Timedelta("1 days 03:00:00"), Timedelta("-1 days 03:00:00")),
("d", Timedelta("1 days"), Timedelta("-1 days")),
],
)
def test_round(self, freq, s1, s2):
t1 = Timedelta("1 days 02:34:56.789123456")
t2 = Timedelta("-1 days 02:34:56.789123456")
r1 = t1.round(freq)
assert r1 == s1
r2 = t2.round(freq)
assert r2 == s2
def test_round_invalid(self):
t1 = Timedelta("1 days 02:34:56.789123456")
for freq, msg in [
("Y", "<YearEnd: month=12> is a non-fixed frequency"),
("M", "<MonthEnd> is a non-fixed frequency"),
("foobar", "Invalid frequency: foobar"),
]:
with pytest.raises(ValueError, match=msg):
t1.round(freq)
def test_round_implementation_bounds(self):
# See also: analogous test for Timestamp
# GH#38964
result = Timedelta.min.ceil("s")
expected = Timedelta.min + Timedelta(seconds=1) - Timedelta(145224193)
assert result == expected
result = Timedelta.max.floor("s")
expected = Timedelta.max - Timedelta(854775807)
assert result == expected
with pytest.raises(OverflowError, match="value too large"):
Timedelta.min.floor("s")
# the second message here shows up in windows builds
msg = "|".join(
["Python int too large to convert to C long", "int too big to convert"]
)
with pytest.raises(OverflowError, match=msg):
Timedelta.max.ceil("s")
@pytest.mark.parametrize("n", range(100))
@pytest.mark.parametrize(
"method", [Timedelta.round, Timedelta.floor, Timedelta.ceil]
)
def test_round_sanity(self, method, n, request):
val = np.random.randint(iNaT + 1, lib.i8max, dtype=np.int64)
td = Timedelta(val)
assert method(td, "ns") == td
res = method(td, "us")
nanos = 1000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "ms")
nanos = 1_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "s")
nanos = 1_000_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "min")
nanos = 60 * 1_000_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "h")
nanos = 60 * 60 * 1_000_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "D")
nanos = 24 * 60 * 60 * 1_000_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
def test_contains(self):
# Checking for any NaT-like objects
# GH 13603
td = to_timedelta(range(5), unit="d") + offsets.Hour(1)
for v in [NaT, None, float("nan"), np.nan]:
assert not (v in td)
td = to_timedelta([NaT])
for v in [NaT, None, float("nan"), np.nan]:
assert v in td
def test_identity(self):
td = Timedelta(10, unit="d")
assert isinstance(td, Timedelta)
assert isinstance(td, timedelta)
def test_short_format_converters(self):
def conv(v):
return v.astype("m8[ns]")
assert Timedelta("10") == np.timedelta64(10, "ns")
assert Timedelta("10ns") == np.timedelta64(10, "ns")
assert Timedelta("100") == np.timedelta64(100, "ns")
assert Timedelta("100ns") == np.timedelta64(100, "ns")
assert Timedelta("1000") == np.timedelta64(1000, "ns")
assert Timedelta("1000ns") == np.timedelta64(1000, "ns")
assert Timedelta("1000NS") == np.timedelta64(1000, "ns")
assert Timedelta("10us") == np.timedelta64(10000, "ns")
assert Timedelta("100us") == np.timedelta64(100000, "ns")
assert Timedelta("1000us") == np.timedelta64(1000000, "ns")
assert Timedelta("1000Us") == np.timedelta64(1000000, "ns")
assert Timedelta("1000uS") == np.timedelta64(1000000, "ns")
assert Timedelta("1ms") == np.timedelta64(1000000, "ns")
assert Timedelta("10ms") == np.timedelta64(10000000, "ns")
assert Timedelta("100ms") == np.timedelta64(100000000, "ns")
assert Timedelta("1000ms") == np.timedelta64(1000000000, "ns")
assert Timedelta("-1s") == -np.timedelta64(1000000000, "ns")
assert Timedelta("1s") == np.timedelta64(1000000000, "ns")
assert Timedelta("10s") == np.timedelta64(10000000000, "ns")
assert Timedelta("100s") == np.timedelta64(100000000000, "ns")
assert Timedelta("1000s") == np.timedelta64(1000000000000, "ns")
assert Timedelta("1d") == conv(np.timedelta64(1, "D"))
assert Timedelta("-1d") == -conv(np.timedelta64(1, "D"))
assert Timedelta("1D") == conv(np.timedelta64(1, "D"))
assert Timedelta("10D") == conv(np.timedelta64(10, "D"))
assert Timedelta("100D") == conv(np.timedelta64(100, "D"))
assert Timedelta("1000D") == conv(np.timedelta64(1000, "D"))
assert Timedelta("10000D") == conv(np.timedelta64(10000, "D"))
# space
assert Timedelta(" 10000D ") == conv(np.timedelta64(10000, "D"))
assert Timedelta(" - 10000D ") == -conv(np.timedelta64(10000, "D"))
# invalid
msg = "invalid unit abbreviation"
with pytest.raises(ValueError, match=msg):
Timedelta("1foo")
msg = "unit abbreviation w/o a number"
with pytest.raises(ValueError, match=msg):
Timedelta("foo")
def test_full_format_converters(self):
def conv(v):
return v.astype("m8[ns]")
d1 = np.timedelta64(1, "D")
assert Timedelta("1days") == conv(d1)
assert Timedelta("1days,") == conv(d1)
assert Timedelta("- 1days,") == -conv(d1)
assert Timedelta("00:00:01") == conv(np.timedelta64(1, "s"))
assert Timedelta("06:00:01") == conv(np.timedelta64(6 * 3600 + 1, "s"))
assert Timedelta("06:00:01.0") == conv(np.timedelta64(6 * 3600 + 1, "s"))
assert Timedelta("06:00:01.01") == conv(
np.timedelta64(1000 * (6 * 3600 + 1) + 10, "ms")
)
assert Timedelta("- 1days, 00:00:01") == conv(-d1 + np.timedelta64(1, "s"))
assert Timedelta("1days, 06:00:01") == conv(
d1 + np.timedelta64(6 * 3600 + 1, "s")
)
assert Timedelta("1days, 06:00:01.01") == conv(
d1 + np.timedelta64(1000 * (6 * 3600 + 1) + 10, "ms")
)
# invalid
msg = "have leftover units"
with pytest.raises(ValueError, match=msg):
Timedelta("- 1days, 00")
def test_pickle(self):
v = Timedelta("1 days 10:11:12.0123456")
v_p = tm.round_trip_pickle(v)
assert v == v_p
def test_timedelta_hash_equality(self):
# GH 11129
v = Timedelta(1, "D")
td = timedelta(days=1)
assert hash(v) == hash(td)
d = {td: 2}
assert d[v] == 2
tds = [Timedelta(seconds=1) + Timedelta(days=n) for n in range(20)]
assert all(hash(td) == hash(td.to_pytimedelta()) for td in tds)
# python timedeltas drop ns resolution
ns_td = Timedelta(1, "ns")
assert hash(ns_td) != hash(ns_td.to_pytimedelta())
def test_implementation_limits(self):
min_td = Timedelta(Timedelta.min)
max_td = Timedelta(Timedelta.max)
# GH 12727
# timedelta limits correspond to int64 boundaries
assert min_td.value == iNaT + 1
assert max_td.value == lib.i8max
# Beyond lower limit, a NAT before the Overflow
assert (min_td - Timedelta(1, "ns")) is NaT
msg = "int too (large|big) to convert"
with pytest.raises(OverflowError, match=msg):
min_td - Timedelta(2, "ns")
with pytest.raises(OverflowError, match=msg):
max_td + Timedelta(1, "ns")
# Same tests using the internal nanosecond values
td = Timedelta(min_td.value - 1, "ns")
assert td is NaT
with pytest.raises(OverflowError, match=msg):
Timedelta(min_td.value - 2, "ns")
with pytest.raises(OverflowError, match=msg):
Timedelta(max_td.value + 1, "ns")
def test_total_seconds_precision(self):
# GH 19458
assert Timedelta("30S").total_seconds() == 30.0
assert Timedelta("0").total_seconds() == 0.0
assert | Timedelta("-2S") | pandas.Timedelta |
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
CategoricalIndex,
DataFrame,
Index,
NaT,
Series,
date_range,
offsets,
)
import pandas._testing as tm
class TestDataFrameShift:
@pytest.mark.parametrize(
"input_data, output_data",
[(np.empty(shape=(0,)), []), (np.ones(shape=(2,)), [np.nan, 1.0])],
)
def test_shift_non_writable_array(self, input_data, output_data, frame_or_series):
# GH21049 Verify whether non writable numpy array is shiftable
input_data.setflags(write=False)
result = frame_or_series(input_data).shift(1)
if frame_or_series is not Series:
# need to explicitly specify columns in the empty case
expected = frame_or_series(
output_data,
index=range(len(output_data)),
columns=range(1),
dtype="float64",
)
else:
expected = frame_or_series(output_data, dtype="float64")
tm.assert_equal(result, expected)
def test_shift_mismatched_freq(self, frame_or_series):
ts = frame_or_series(
np.random.randn(5), index=date_range("1/1/2000", periods=5, freq="H")
)
result = ts.shift(1, freq="5T")
exp_index = ts.index.shift(1, freq="5T")
tm.assert_index_equal(result.index, exp_index)
# GH#1063, multiple of same base
result = ts.shift(1, freq="4H")
exp_index = ts.index + offsets.Hour(4)
tm.assert_index_equal(result.index, exp_index)
@pytest.mark.parametrize(
"obj",
[
Series([np.arange(5)]),
date_range("1/1/2011", periods=24, freq="H"),
Series(range(5), index=date_range("2017", periods=5)),
],
)
@pytest.mark.parametrize("shift_size", [0, 1, 2])
def test_shift_always_copy(self, obj, shift_size, frame_or_series):
# GH#22397
if frame_or_series is not Series:
obj = obj.to_frame()
assert obj.shift(shift_size) is not obj
def test_shift_object_non_scalar_fill(self):
# shift requires scalar fill_value except for object dtype
ser = Series(range(3))
with pytest.raises(ValueError, match="fill_value must be a scalar"):
ser.shift(1, fill_value=[])
df = ser.to_frame()
with pytest.raises(ValueError, match="fill_value must be a scalar"):
df.shift(1, fill_value=np.arange(3))
obj_ser = ser.astype(object)
result = obj_ser.shift(1, fill_value={})
assert result[0] == {}
obj_df = obj_ser.to_frame()
result = obj_df.shift(1, fill_value={})
assert result.iloc[0, 0] == {}
def test_shift_int(self, datetime_frame, frame_or_series):
ts = tm.get_obj(datetime_frame, frame_or_series).astype(int)
shifted = ts.shift(1)
expected = ts.astype(float).shift(1)
tm.assert_equal(shifted, expected)
def test_shift_32bit_take(self, frame_or_series):
# 32-bit taking
# GH#8129
index = date_range("2000-01-01", periods=5)
for dtype in ["int32", "int64"]:
arr = np.arange(5, dtype=dtype)
s1 = frame_or_series(arr, index=index)
p = arr[1]
result = s1.shift(periods=p)
expected = frame_or_series([np.nan, 0, 1, 2, 3], index=index)
tm.assert_equal(result, expected)
@pytest.mark.parametrize("periods", [1, 2, 3, 4])
def test_shift_preserve_freqstr(self, periods, frame_or_series):
# GH#21275
obj = frame_or_series(
range(periods),
index=date_range("2016-1-1 00:00:00", periods=periods, freq="H"),
)
result = obj.shift(1, "2H")
expected = frame_or_series(
range(periods),
index=date_range("2016-1-1 02:00:00", periods=periods, freq="H"),
)
tm.assert_equal(result, expected)
def test_shift_dst(self, frame_or_series):
# GH#13926
dates = date_range("2016-11-06", freq="H", periods=10, tz="US/Eastern")
obj = frame_or_series(dates)
res = obj.shift(0)
tm.assert_equal(res, obj)
assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]"
res = obj.shift(1)
exp_vals = [NaT] + dates.astype(object).values.tolist()[:9]
exp = frame_or_series(exp_vals)
tm.assert_equal(res, exp)
assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]"
res = obj.shift(-2)
exp_vals = dates.astype(object).values.tolist()[2:] + [NaT, NaT]
exp = frame_or_series(exp_vals)
tm.assert_equal(res, exp)
assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]"
for ex in [10, -10, 20, -20]:
res = obj.shift(ex)
exp = frame_or_series([NaT] * 10, dtype="datetime64[ns, US/Eastern]")
tm.assert_equal(res, exp)
assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]"
def test_shift_by_zero(self, datetime_frame, frame_or_series):
# shift by 0
obj = tm.get_obj(datetime_frame, frame_or_series)
unshifted = obj.shift(0)
tm.assert_equal(unshifted, obj)
def test_shift(self, datetime_frame):
# naive shift
ser = datetime_frame["A"]
shifted = datetime_frame.shift(5)
tm.assert_index_equal(shifted.index, datetime_frame.index)
shifted_ser = ser.shift(5)
tm.assert_series_equal(shifted["A"], shifted_ser)
shifted = datetime_frame.shift(-5)
tm.assert_index_equal(shifted.index, datetime_frame.index)
shifted_ser = ser.shift(-5)
tm.assert_series_equal(shifted["A"], shifted_ser)
unshifted = datetime_frame.shift(5).shift(-5)
tm.assert_numpy_array_equal(
unshifted.dropna().values, datetime_frame.values[:-5]
)
unshifted_ser = ser.shift(5).shift(-5)
tm.assert_numpy_array_equal(unshifted_ser.dropna().values, ser.values[:-5])
def test_shift_by_offset(self, datetime_frame, frame_or_series):
# shift by DateOffset
obj = tm.get_obj(datetime_frame, frame_or_series)
offset = offsets.BDay()
shifted = obj.shift(5, freq=offset)
assert len(shifted) == len(obj)
unshifted = shifted.shift(-5, freq=offset)
tm.assert_equal(unshifted, obj)
shifted2 = obj.shift(5, freq="B")
tm.assert_equal(shifted, shifted2)
unshifted = obj.shift(0, freq=offset)
tm.assert_equal(unshifted, obj)
d = obj.index[0]
shifted_d = d + offset * 5
if frame_or_series is DataFrame:
tm.assert_series_equal(obj.xs(d), shifted.xs(shifted_d), check_names=False)
else:
tm.assert_almost_equal(obj.at[d], shifted.at[shifted_d])
def test_shift_with_periodindex(self, frame_or_series):
# Shifting with PeriodIndex
ps = tm.makePeriodFrame()
ps = tm.get_obj(ps, frame_or_series)
shifted = ps.shift(1)
unshifted = shifted.shift(-1)
tm.assert_index_equal(shifted.index, ps.index)
tm.assert_index_equal(unshifted.index, ps.index)
if frame_or_series is DataFrame:
tm.assert_numpy_array_equal(
unshifted.iloc[:, 0].dropna().values, ps.iloc[:-1, 0].values
)
else:
tm.assert_numpy_array_equal(unshifted.dropna().values, ps.values[:-1])
shifted2 = ps.shift(1, "B")
shifted3 = ps.shift(1, offsets.BDay())
tm.assert_equal(shifted2, shifted3)
tm.assert_equal(ps, shifted2.shift(-1, "B"))
msg = "does not match PeriodIndex freq"
with pytest.raises(ValueError, match=msg):
ps.shift(freq="D")
# legacy support
shifted4 = ps.shift(1, freq="B")
tm.assert_equal(shifted2, shifted4)
shifted5 = ps.shift(1, freq=offsets.BDay())
tm.assert_equal(shifted5, shifted4)
def test_shift_other_axis(self):
# shift other axis
# GH#6371
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat(
[DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]],
ignore_index=True,
axis=1,
)
result = df.shift(1, axis=1)
tm.assert_frame_equal(result, expected)
def test_shift_named_axis(self):
# shift named axis
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat(
[DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]],
ignore_index=True,
axis=1,
)
result = df.shift(1, axis="columns")
tm.assert_frame_equal(result, expected)
def test_shift_bool(self):
df = DataFrame({"high": [True, False], "low": [False, False]})
rs = df.shift(1)
xp = DataFrame(
np.array([[np.nan, np.nan], [True, False]], dtype=object),
columns=["high", "low"],
)
tm.assert_frame_equal(rs, xp)
def test_shift_categorical1(self, frame_or_series):
# GH#9416
obj = frame_or_series(["a", "b", "c", "d"], dtype="category")
rt = obj.shift(1).shift(-1)
tm.assert_equal(obj.iloc[:-1], rt.dropna())
def get_cat_values(ndframe):
# For Series we could just do ._values; for DataFrame
# we may be able to do this if we ever have 2D Categoricals
return ndframe._mgr.arrays[0]
cat = get_cat_values(obj)
sp1 = obj.shift(1)
tm.assert_index_equal(obj.index, sp1.index)
assert np.all(get_cat_values(sp1).codes[:1] == -1)
assert np.all(cat.codes[:-1] == get_cat_values(sp1).codes[1:])
sn2 = obj.shift(-2)
tm.assert_index_equal(obj.index, sn2.index)
assert np.all(get_cat_values(sn2).codes[-2:] == -1)
assert np.all(cat.codes[2:] == get_cat_values(sn2).codes[:-2])
tm.assert_index_equal(cat.categories, get_cat_values(sp1).categories)
tm.assert_index_equal(cat.categories, get_cat_values(sn2).categories)
def test_shift_categorical(self):
# GH#9416
s1 = Series(["a", "b", "c"], dtype="category")
s2 = Series(["A", "B", "C"], dtype="category")
df = DataFrame({"one": s1, "two": s2})
rs = df.shift(1)
xp = DataFrame({"one": s1.shift(1), "two": s2.shift(1)})
tm.assert_frame_equal(rs, xp)
def test_shift_categorical_fill_value(self, frame_or_series):
ts = frame_or_series(["a", "b", "c", "d"], dtype="category")
res = ts.shift(1, fill_value="a")
expected = frame_or_series(
pd.Categorical(
["a", "a", "b", "c"], categories=["a", "b", "c", "d"], ordered=False
)
)
tm.assert_equal(res, expected)
# check for incorrect fill_value
msg = r"Cannot setitem on a Categorical with a new category \(f\)"
with pytest.raises(TypeError, match=msg):
ts.shift(1, fill_value="f")
def test_shift_fill_value(self, frame_or_series):
# GH#24128
dti = date_range("1/1/2000", periods=5, freq="H")
ts = frame_or_series([1.0, 2.0, 3.0, 4.0, 5.0], index=dti)
exp = frame_or_series([0.0, 1.0, 2.0, 3.0, 4.0], index=dti)
# check that fill value works
result = ts.shift(1, fill_value=0.0)
tm.assert_equal(result, exp)
exp = frame_or_series([0.0, 0.0, 1.0, 2.0, 3.0], index=dti)
result = ts.shift(2, fill_value=0.0)
tm.assert_equal(result, exp)
ts = frame_or_series([1, 2, 3])
res = ts.shift(2, fill_value=0)
assert tm.get_dtype(res) == tm.get_dtype(ts)
# retain integer dtype
obj = frame_or_series([1, 2, 3, 4, 5], index=dti)
exp = frame_or_series([0, 1, 2, 3, 4], index=dti)
result = obj.shift(1, fill_value=0)
tm.assert_equal(result, exp)
exp = frame_or_series([0, 0, 1, 2, 3], index=dti)
result = obj.shift(2, fill_value=0)
tm.assert_equal(result, exp)
def test_shift_empty(self):
# Regression test for GH#8019
df = DataFrame({"foo": []})
rs = df.shift(-1)
tm.assert_frame_equal(df, rs)
def test_shift_duplicate_columns(self):
# GH#9092; verify that position-based shifting works
# in the presence of duplicate columns
column_lists = [list(range(5)), [1] * 5, [1, 1, 2, 2, 1]]
data = np.random.randn(20, 5)
shifted = []
for columns in column_lists:
df = DataFrame(data.copy(), columns=columns)
for s in range(5):
df.iloc[:, s] = df.iloc[:, s].shift(s + 1)
df.columns = range(5)
shifted.append(df)
# sanity check the base case
nulls = shifted[0].isna().sum()
tm.assert_series_equal(nulls, Series(range(1, 6), dtype="int64"))
# check all answers are the same
tm.assert_frame_equal(shifted[0], shifted[1])
tm.assert_frame_equal(shifted[0], shifted[2])
def test_shift_axis1_multiple_blocks(self, using_array_manager):
# GH#35488
df1 = DataFrame(np.random.randint(1000, size=(5, 3)))
df2 = DataFrame(np.random.randint(1000, size=(5, 2)))
df3 = pd.concat([df1, df2], axis=1)
if not using_array_manager:
assert len(df3._mgr.blocks) == 2
result = df3.shift(2, axis=1)
expected = df3.take([-1, -1, 0, 1, 2], axis=1)
expected.iloc[:, :2] = np.nan
expected.columns = df3.columns
tm.assert_frame_equal(result, expected)
# Case with periods < 0
# rebuild df3 because `take` call above consolidated
df3 = pd.concat([df1, df2], axis=1)
if not using_array_manager:
assert len(df3._mgr.blocks) == 2
result = df3.shift(-2, axis=1)
expected = df3.take([2, 3, 4, -1, -1], axis=1)
expected.iloc[:, -2:] = np.nan
expected.columns = df3.columns
tm.assert_frame_equal(result, expected)
@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) axis=1 support
def test_shift_axis1_multiple_blocks_with_int_fill(self):
# GH#42719
df1 = DataFrame(np.random.randint(1000, size=(5, 3)))
df2 = DataFrame(np.random.randint(1000, size=(5, 2)))
df3 = pd.concat([df1.iloc[:4, 1:3], df2.iloc[:4, :]], axis=1)
result = df3.shift(2, axis=1, fill_value=np.int_(0))
assert len(df3._mgr.blocks) == 2
expected = df3.take([-1, -1, 0, 1], axis=1)
expected.iloc[:, :2] = np.int_(0)
expected.columns = df3.columns
tm.assert_frame_equal(result, expected)
# Case with periods < 0
df3 = pd.concat([df1.iloc[:4, 1:3], df2.iloc[:4, :]], axis=1)
result = df3.shift(-2, axis=1, fill_value=np.int_(0))
assert len(df3._mgr.blocks) == 2
expected = df3.take([2, 3, -1, -1], axis=1)
expected.iloc[:, -2:] = np.int_(0)
expected.columns = df3.columns
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
# -*- coding: utf-8 -*-
from .._utils import color_digits, color_background
from ..data import Data, DataSamples
#from ..woe import WOE
import pandas as pd
#import math as m
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from matplotlib.colors import LinearSegmentedColormap
import seaborn as sns
from sklearn.model_selection import cross_val_score, StratifiedKFold, train_test_split, GridSearchCV, PredefinedSplit
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import roc_auc_score, roc_curve, auc
#rom scipy.stats import chi2, chisquare, ks_2samp, ttest_ind
#import statsmodels.formula.api as sm
import warnings
from abc import ABCMeta, abstractmethod
#from sklearn.feature_selection import GenericUnivariateSelect, f_classif
from patsy import dmatrices
from statsmodels.stats.outliers_influence import variance_inflation_factor
import re
import ast
import os
import xlsxwriter
from PIL import Image
import datetime
from dateutil.relativedelta import *
import gc
#import weakref
import copy
import itertools
import calendar
#from ..cross import DecisionTree, Crosses
import networkx as nx
from operator import itemgetter
import matplotlib.ticker as mtick
try:
import fastcluster
except Exception:
print('For fullness analysis using hierarchical clustering please install fastcluster package.')
from scipy.cluster.hierarchy import fcluster
try:
import hdbscan
except Exception:
print('For fullness analysis using HDBSCAN clustering please install hdbscan package.')
from sklearn.cluster import KMeans
from sklearn.tree import export_graphviz
from os import system
from IPython.display import Image as Display_Image
#from joblib import Parallel, delayed
# Created by <NAME> and <NAME>
warnings.simplefilter('ignore')
plt.rc('font', family='Verdana')
plt.style.use('seaborn-darkgrid')
pd.set_option('display.precision', 3)
class Processor(metaclass = ABCMeta):
"""
Base class for processing objects of Data class
"""
@abstractmethod
def __init__(self):
'''
self.stats is a DataFrame with statistics about self.work()
'''
self.stats = pd.DataFrame()
@abstractmethod
def work(self, data, parameters):
pass
def param_dict_to_stats(self, data, d):
'''
TECH
Transforms a dict of parameters to self.stats
Parameters
-----------
data: Data object being processed
d: dictionary {action : list_of_features} where action is a string with action description and list_of_features is a list of features' names to apply the action to
'''
col1 = []
col2 = []
for (how, features) in d.items():
col1 = col1 + [how + ' (' + str(round(data.dataframe[features[i]].mean(), 3)) + ')' if how == 'mean' else how for i in range(len(features))]
col2 = col2 + features
self.stats = pd.DataFrame({'action' : col1, 'features': col2})
#---------------------------------------------------------------
class MissingProcessor(Processor):
'''
Class for missing values processing
'''
def __init__(self):
self.stats = pd.DataFrame()
def work(self, data, parameters, quantiles=100, precision=4):
'''
Deals with missing values
Parameters:
-----------
data: an object of Data type that should be processed
inplace: whether to change the data or to create a new Data object
parameters: {how_to_process : features_to_process}
how_to_process takes:
'delete' - to delete samples where the value of any feature from features_to_process is missing
'mean' - for each feature from features_to_process to fill missings with the mean value
'distribution' - for each feature from features_to_process to fill missings according to non-missing distribution
a value - for each feature from features_to_process to fill missings with this value
features_to_process takes list of features from data
quantiles: number of quantiles for 'distribution' type of missing process - all values are divided into quantiles,
then missing values are filled with average values of quantiles. If number of unique values is less then number of quantiles
or field type is not int, float, etc, then no quantiles are calculated - missings are filled with existing values according
to their frequency
precision: precision for quantile edges and average quantile values
Returns:
----------
A copy of data with missings processed for features mentioned in parameters
'''
for how in parameters:
if isinstance(parameters[how], str):
parameters[how] = [parameters[how]]
result = data.dataframe.copy()
for how in parameters:
if how == 'delete':
for feature in parameters[how]:
result = result[result[feature].isnull() == False]
if data.features != None and feature in data.features:
data.features.remove(feature)
elif how == 'mean':
for feature in parameters[how]:
result[feature].fillna(result[feature].mean(), inplace = True)
elif how == 'distribution':
for feature in parameters[how]:
if data.dataframe[feature].dtype not in (float, np.float32, np.float64, int, np.int32, np.int64) or data.dataframe[feature].unique().shape[0]<quantiles:
summarized=data.dataframe[[feature]].dropna().groupby(feature).size()
summarized=summarized.reset_index().rename({feature:'mean', 0:'size'}, axis=1)
else:
summarized=data.dataframe[[feature]].rename({feature:'value'}, axis=1).join(pd.qcut(data.dataframe[feature].dropna(), q=quantiles, precision=4, duplicates='drop')).groupby(feature).agg(['mean', 'size'])
summarized.columns=summarized.columns.droplevel()
summarized=summarized.reset_index(drop=True)
#summarized=summarized.reset_index()
summarized['p']=summarized['size']/summarized['size'].sum()
result[feature]=result[feature].apply(lambda x: np.random.choice(summarized['mean'].round(precision), p=summarized['p']) if pd.isnull(x) else x)
else:
result[parameters[how]] = result[parameters[how]].fillna(how)
# statistics added on Dec-04-2018
self.param_dict_to_stats(data, parameters)
return Data(result, data.target, data.features, data.weights, data.name)
#---------------------------------------------------------------
class StabilityAnalyzer(Processor):
'''
For stability analysis
'''
def __init__(self):
self.stats = pd.DataFrame({'sample_name' : [], 'parameter' : [], 'meaning': []})
def work(self, data, time_column, sample_name = None, psi = None, event_rate=None, normalized=True, date_format = "%d.%m.%Y", time_func = (lambda x: 100*x.year + x.month),
yellow_zone = 0.1, red_zone = 0.25, figsize = None, out = True, out_images = 'StabilityAnalyzer/', sep=';', base_period_index=0):
'''
Calculates the dynamic of feature (or groups of values) changes over time so it should be used only for discrete or WOE-transformed
features. There are 2 types of analysis:
PSI. Represents a heatmap (Stability Table) of features stability that contains 3 main zones: green (the feature is
stable), yellow (the feature is not very stable) and red (the feature is unstable). StabilityIndex (PSI) is calculated for each
time period relatively to the first period.
Stability index algorithm:
For each feature value and time period number of samples is calculated: e.g., N[i, t] is number of samples for value i and time period t.
StabilityIndex[t] = (N[i, t]/sum_i(N[i, t]) - (N[i, 0]/sum_i(N[i, 0])))* log(N[i, t]/sum_i(N[i, t])/(N[i, 0]/sum_i(N[i, 0])))
ER (event rate). Calculates average event rate and number of observations for each feature's value over time.
After calculation displays the Stability Table (a heatmap with stability indexes for each feature value and time period)
and Event rate graphs
Parameters:
-----------
data: data to analyze (type Data)
time_column: name of a column with time values to calculate time periods
sample_name: name of sample for report
psi: list of features for PSI analysis (if None then all features from the input Data object will be used)
event_rate: list of features for event rate and distribution in time analysis (if None then all features from the input Data object will be used)
date_format: format of time values in time_column. Codes for format:
%a Weekday as locale’s abbreviated name. Sun, Mon, …, Sat (en_US)
%A Weekday as locale’s full name. Sunday, Monday, …, Saturday (en_US)
%w Weekday as a decimal number, where 0 is Sunday and 6 is Saturday. 0, 1, …, 6
%d Day of the month as a zero-padded decimal number. 01, 02, …, 31
%b Month as locale’s abbreviated name. Jan, Feb, …, Dec (en_US)
%B Month as locale’s full name. January, February, …, December (en_US)
%m Month as a zero-padded decimal number. 01, 02, …, 12
%y Year without century as a zero-padded decimal number. 00, 01, …, 99
%Y Year with century as a decimal number. 1970, 1988, 2001, 2013
%H Hour (24-hour clock) as a zero-padded decimal number. 00, 01, …, 23
%I Hour (12-hour clock) as a zero-padded decimal number. 01, 02, …, 12
%p Locale’s equivalent of either AM or PM. AM, PM (en_US)
%M Minute as a zero-padded decimal number. 00, 01, …, 59
%S Second as a zero-padded decimal number. 00, 01, …, 59
%f Microsecond as a decimal number, zero-padded on the left. 000000, 000001, …, 999999
%z UTC offset in the form +HHMM or -HHMM (empty string if the the
object is naive). (empty), +0000, -0400, +1030
%Z Time zone name (empty string if the object is naive). (empty), UTC, EST, CST
%j Day of the year as a zero-padded decimal number. 001, 002, …, 366
%U Week number of the year (Sunday as the first day of the week)
as a zero padded decimal number. All days in a new year preceding
the first Sunday are considered to be in week 0. 00, 01, …, 53 (6)
%W Week number of the year (Monday as the first day of the week) as
a decimal number. All days in a new year preceding the first
Monday are considered to be in week 0. 00, 01, …, 53 (6)
%c Locale’s appropriate date and time representation. Tue Aug 16 21:30:00 1988 (en_US)
%x Locale’s appropriate date representation. 08/16/88 (None); 08/16/1988 (en_US)
%X Locale’s appropriate time representation. 21:30:00 (en_US)
time_func: function for time_column parsing (changes date to some value, representing time period) or
a period type for dt.to_period() function. Codes for available periods:
B business day frequency
C custom business day frequency (experimental)
D calendar day frequency
W weekly frequency
M month end frequency
BM business month end frequency
CBM custom business month end frequency
MS month start frequency
BMS business month start frequency
CBMS custom business month start frequency
Q quarter end frequency
BQ business quarter endfrequency
QS quarter start frequency
BQS business quarter start frequency
A year end frequency
BA business year end frequency
AS year start frequency
BAS business year start frequency
BH business hour frequency
H hourly frequency
T, min minutely frequency
S secondly frequency
L, ms milliseconds
U, us microseconds
N nanoseconds
yellow_zone: the lower border for the yellow stability zone ('not very stable') in percents of derivation
red_zone: the lower border for the red stability zone ('unstable') in percents of derivation
figsize: matplotlib figsize of the Stability Table
out: a boolean for image output or a path for xlsx output file to export the Stability Tables
out_images: a path for image output (default - StabilityAnalyzer/)
sep: the separator to be used in case of csv export
base_period_index: index of period (starting from 0) for other periods to compare with (0 for the first, -1 for the last)
'''
print('Warning: only for discrete features!!!')
if sample_name is None:
if pd.isnull(data.name):
sample_name = 'sample'
else:
sample_name = data.name
out_images = out_images + sample_name + '/'
self.stats = self.stats.append(pd.DataFrame({'sample_name' : [sample_name], 'parameter' : ['out'], 'meaning' : [out]}))
self.stats = self.stats.append(pd.DataFrame({'sample_name' : [sample_name], 'parameter' : ['out_images'], 'meaning' : [out_images]}))
psi = data.features.copy() if psi is None else [x for x in psi if x in data.features]
event_rate = data.features.copy() if event_rate is None else [x for x in event_rate if x in data.features]
all_features=list(set(psi+event_rate))
if figsize is None:
figsize=(12, max(1,round(len(psi)/2,0)))
if out==True or isinstance(out, str):
directory = os.path.dirname(out_images)
if not os.path.exists(directory):
os.makedirs(directory)
if isinstance(out, str):
writer = pd.ExcelWriter(out, engine='openpyxl')
tmp_dataset = data.dataframe[all_features + [time_column, data.target] + ([] if data.weights is None else [data.weights])].copy()
tmp_dataset[time_column] = pd.to_datetime(tmp_dataset[time_column], format=date_format, errors='coerce')
if callable(time_func):
tmp_dataset['tt'] = tmp_dataset[time_column].map(time_func)
elif isinstance(time_func, str):
try:
tmp_dataset['tt'] = tmp_dataset[time_column].dt.to_period(time_func).astype(str)
except Exception:
print('No function or correct period code was provided. Return None.')
return None
c = 0
for feature in sorted(all_features):
print (feature)
if data.weights is not None:
feature_stats=tmp_dataset[[feature, 'tt', data.target, data.weights]]
feature_stats['---weight---']=feature_stats[data.weights]
else:
feature_stats=tmp_dataset[[feature, 'tt', data.target]]
feature_stats['---weight---']=1
feature_stats[data.target]=feature_stats[data.target]*feature_stats['---weight---']
feature_stats=feature_stats[[feature, 'tt', data.target, '---weight---']].groupby([feature, 'tt'], as_index=False).\
agg({'---weight---':'size', data.target:'mean'}).rename({feature:'value', '---weight---':'size', data.target:'mean'}, axis=1)
feature_stats['feature']=feature
if c == 0:
all_stats = feature_stats
c = c+1
else:
all_stats = all_stats.append(feature_stats, ignore_index=True)
all_stats['size']=all_stats['size'].astype(float)
all_stats['mean']=all_stats['mean'].astype(float)
if len(psi)>0:
stability1=all_stats[all_stats.feature.isin(psi)][['feature', 'value', 'tt', 'size']].pivot_table(values='size', columns='tt', index=['feature', 'value']).reset_index().fillna(0)
stability1.columns.name=None
#display(stability1)
dates = stability1.drop(['feature', 'value'], 1).columns.copy()
stability2 = stability1[['feature', 'value']].copy()
for date in dates:
stability2[date] = list(stability1[date]/list(stability1.drop(['value'], 1).groupby(by = 'feature').sum()[date][:1])[0])
#display(stability2)
start_date = dates[base_period_index]
stability3 = stability2[['feature', 'value']]
for date in dates:
stability3[date] = round(((stability2[date]-stability2[start_date])*np.log(stability2[date]/stability2[start_date])).fillna(0), 2).replace([])
#display(stability3)
stability4 = stability3.drop(['value'], 1).groupby(by = 'feature').sum()
#display(stability4)
fig, ax = plt.subplots(figsize = figsize)
ax.set_facecolor("red")
sns.heatmap(stability4, ax=ax, yticklabels=stability4.index, annot = True, cmap = 'RdYlGn_r', center = yellow_zone, vmax = red_zone, linewidths = .05, xticklabels = True)
if out==True or isinstance(out, str):
plt.savefig(out_images+"stability.png", dpi=100, bbox_inches='tight')
plt.show()
if isinstance(out, str):
if out[-4:]=='.xls' or out[-5:]=='.xlsx':
stability4.style.apply(color_background,
mn=0, mx=red_zone, cntr=yellow_zone).to_excel(writer, engine='openpyxl', sheet_name='PSI')
worksheet = writer.sheets['PSI']
for x in worksheet.columns:
if x[0].column=='A':
worksheet.column_dimensions[x[0].column].width = 40
else:
worksheet.column_dimensions[x[0].column].width = 12
worksheet.freeze_panes = worksheet['B2']
else:
print('Unknown or unacceptable format for export several tables. Use .xlsx. Skipping export.')
if len(event_rate)>0:
for_event_rate=all_stats[all_stats['feature'].isin(event_rate)]
date_base=pd.DataFrame(all_stats['tt'].unique(), columns=['tt']).sort_values('tt')
for feature in sorted(for_event_rate['feature'].unique()):
cur_feature_data=for_event_rate[for_event_rate['feature']==feature].copy()
#display(cur_feature_data)
if normalized:
for tt in sorted(cur_feature_data['tt'].unique(), reverse=True):
cur_feature_data.loc[cur_feature_data['tt']==tt, 'percent']=cur_feature_data[cur_feature_data['tt']==tt]['size']/cur_feature_data[cur_feature_data['tt']==tt]['size'].sum()
#display(cur_feature_data)
fig, ax = plt.subplots(1,1, figsize=(15, 5))
ax2 = ax.twinx()
ax.grid(False)
ax2.grid(False)
sorted_values=sorted(cur_feature_data['value'].unique(), reverse=True)
for value in sorted_values:
to_visualize='percent' if normalized else 'size'
value_filter = (cur_feature_data['value']==value)
er=date_base.merge(cur_feature_data[value_filter], on='tt', how='left')['mean']
height=date_base.merge(cur_feature_data[value_filter], on='tt', how='left')[to_visualize].fillna(0)
bottom=date_base.merge(cur_feature_data[['tt',to_visualize]][cur_feature_data['value']>value].groupby('tt', as_index=False).sum(), on='tt', how='left')[to_visualize].fillna(0)
ax.bar(range(date_base.shape[0]), height, bottom=bottom if value!=sorted_values[0] else None, edgecolor='white', alpha=0.3)
ax2.plot(range(date_base.shape[0]), er, label=str(round(value,3)), linewidth=2)
plt.xticks(range(date_base.shape[0]), date_base['tt'])
fig.autofmt_xdate()
ax2.set_ylabel('Event Rate')
ax2.yaxis.set_major_formatter(FuncFormatter(lambda y, _: '{:.2%}'.format(y)))
if normalized:
ax.yaxis.set_major_formatter(FuncFormatter(lambda y, _: '{:.2%}'.format(y)))
ax2.annotate('Obs:', xy=(0, 1), xycoords=('axes fraction', 'axes fraction'), xytext=(-25, 5), textcoords='offset pixels', color='blue', size=11)
for i in range(date_base.shape[0]):
ax2.annotate(str(int(cur_feature_data[cur_feature_data['tt']==date_base['tt'][i]]['size'].sum())),
xy=(i, 1),
xycoords=('data', 'axes fraction'),
xytext=(0, 5),
textcoords='offset pixels',
#rotation=60,
ha='center',
#va='bottom',
color='blue',
size=11)
ax.set_ylabel('Total obs')
plt.xlabel(time_column)
plt.suptitle(feature + ' event rate in time' if callable(time_func) else feature + ' event rate in time, period = '+time_func)
handles, labels = ax2.get_legend_handles_labels()
ax2.legend(handles[::-1], labels[::-1], loc=0, fancybox=True, framealpha=0.3)
if out==True or isinstance(out, str):
plt.savefig(out_images+feature+".png", dpi=100, bbox_inches='tight')
plt.show()
if isinstance(out, str):
if out[-4:]=='.xls' or out[-5:]=='.xlsx':
event_rate_df=all_stats[['feature', 'value', 'tt', 'mean']].pivot_table(values='mean', columns='tt', index=['feature', 'value']).reset_index().fillna(0)
event_rate_df.columns.name=None
event_rate_df.style.apply(color_background,
mn=0, mx=all_stats['mean'].mean()+2*all_stats['mean'].std(), cntr=None,
cmap=matplotlib.cm.RdYlGn_r, subset=pd.IndexSlice[:, [x for x in event_rate_df.columns if x not in ['value','feature']]]).to_excel(writer, engine='openpyxl', sheet_name='Event Rate', index=False)
worksheet = writer.sheets['Event Rate']
for x in worksheet.columns:
if x[0].column=='A':
worksheet.column_dimensions[x[0].column].width = 40
else:
worksheet.column_dimensions[x[0].column].width = 12
if x[0].column!='B':
for cell in worksheet[x[0].column]:
if cell.row!=1:
cell.number_format = '0.000%'
worksheet.freeze_panes = worksheet['C2']
size_df=all_stats[['feature', 'value', 'tt', 'size']].pivot_table(values='size', columns='tt', index=['feature', 'value']).reset_index().fillna(0)
size_df.columns.name=None
size_df.style.apply(color_background,
mn=0, mx=all_stats['size'].mean()+2*all_stats['size'].std(), cntr=None,
cmap=matplotlib.cm.RdYlGn, subset=pd.IndexSlice[:, [x for x in size_df.columns if x not in ['value','feature']]]).to_excel(writer, engine='openpyxl', sheet_name='Observations', index=False)
worksheet = writer.sheets['Observations']
for x in worksheet.columns:
if x[0].column=='A':
worksheet.column_dimensions[x[0].column].width = 40
else:
worksheet.column_dimensions[x[0].column].width = 12
worksheet.freeze_panes = worksheet['C2']
else:
print('Unknown or unacceptable format for export several tables. Use .xlsx. Skipping export.')
if isinstance(out, str):
writer.close()
#---------------------------------------------------------------
class DataVisualizer(Processor):
'''
Supports different types of data visualization
'''
def __init__(self):
self.stats = pd.DataFrame()
def work(self, data, distribution = True, factorplot = True, factorplot_separate = False, pairplot = None,
out=False, out_images='DataVisualizer/', plot_cells=20, categorical=None):
'''
Produces distribution plot, factorplot, pairplot
Parameters:
-----------
data: data to visualize
distribution: parameter for a distribution plot,
if True - plot for data.features, if list - plot for features from the list, if False - do not use distribution plot
factorplot: parameter for a factorplot,
if True - plot for data.features, if list - plot for features from the list, if False - do not use factorplot
factorplot_separate: if True then separate plots for each target value
pairplot: list of features to make a pairplot for
out: a boolean for images output or a path for xlsx output file
out_images: a path for images output (default - DataVisualizer/)
plot_cells: how many cells would plots get in output excel
categorical: a list of features to be treated as categorical (countplots will be produced instead of distplots)
'''
if pairplot is None:
pairplot=[]
if categorical is None:
categorical=[]
dataframe_t = data.dataframe[data.features + [data.target]].copy()
data = Data(dataframe_t, features = data.features, target = data.target)
if out is not None:
if out==True or isinstance(out, str):
directory = os.path.dirname(out_images)
if not os.path.exists(directory):
os.makedirs(directory)
if isinstance(out, str):
# Create an new Excel file and add a worksheet.
workbook = xlsxwriter.Workbook(out)
worksheet = workbook.add_worksheet('Data Visualization')
# Widen the first column to make the text clearer.
worksheet.set_column('A:A', 100)
current_plot_number=0
if distribution:
print ('Distributions of features: ')
if type(distribution) == type([1, 1]):
features = distribution
else:
if data.features == None:
print ('No features claimed. Please set data.features = ')
return None
features = data.features
for feature in features:
current_plot_number=current_plot_number+1
if data.dataframe[feature].dtype==object or feature in categorical:
f, axes = plt.subplots()
sns.countplot(data.dataframe[feature].dropna())
f.autofmt_xdate()
else:
sns.distplot(data.dataframe[feature].dropna())
if data.dataframe[feature].isnull().any():
plt.title(feature+' (miss = ' + str(round(data.dataframe[feature].isnull().value_counts()[True]/data.dataframe.shape[0],3))+')')
else:
plt.title(feature+' (miss = 0)')
if out==True or isinstance(out, str):
plt.savefig(out_images+feature+"_d.png", dpi=100, bbox_inches='tight')
if isinstance(out, str):
scale=(20*plot_cells)/Image.open(out_images+feature+"_d.png").size[1]
worksheet.write((current_plot_number-1)*(plot_cells+1), 0, 'Distribution plot for '+feature+":")
worksheet.insert_image((current_plot_number-1)*(plot_cells+1)+1, 0, out_images+feature+"_d.png",
{'x_scale': scale, 'y_scale': scale})
plt.show()
print ('---------------------------------------\n')
if factorplot:
print ('Factorplot: ')
if type(factorplot) == type([1, 1]):
features = factorplot
else:
if data.features == None:
print ('No features claimed. Please set data.features = ')
return None
features = data.features
if factorplot_separate:
for feature in features:
current_plot_number=current_plot_number+1
# edited 21-Jun-2018 by <NAME>
f, axes = plt.subplots(data.dataframe[data.target].drop_duplicates().shape[0], 1, figsize=(4, 4), sharex=True)
f.autofmt_xdate()
#for target_v in data.dataframe[data.target].drop_duplicates():
targets = list(data.dataframe[data.target].drop_duplicates())
for target_i in range(len(targets)):
if data.dataframe[data.dataframe[data.target]==targets[target_i]][feature].isnull().any():
x_label=feature + ': ' + data.target + ' = ' + str(targets[target_i]) + ', miss = ' + str(round(data.dataframe[data.dataframe[data.target]==targets[target_i]][feature].isnull().value_counts()[True]/data.dataframe[data.dataframe[data.target]==targets[target_i]].shape[0],3))
else:
x_label=feature + ': ' + data.target + ' = ' + str(targets[target_i]) + ', miss = 0'
if data.dataframe[feature].dtype==object or feature in categorical:
ax=sns.countplot(data.dataframe[data.dataframe[data.target] == targets[target_i]][feature].dropna(),
ax=axes[target_i], color = 'm')
ax.set(xlabel=x_label)
else:
sns.distplot(data.dataframe[data.dataframe[data.target] == targets[target_i]][feature].dropna(),
ax=axes[target_i],
axlabel=x_label, color = 'm')
if out==True or isinstance(out, str):
plt.savefig(out_images+feature+"_f.png", dpi=100, bbox_inches='tight')
if isinstance(out, str):
scale=(20*plot_cells)/Image.open(out_images+feature+"_f.png").size[1]
worksheet.write((current_plot_number-1)*(plot_cells+1), 0, 'Factor plot for '+feature+":")
worksheet.insert_image((current_plot_number-1)*(plot_cells+1)+1, 0, out_images+feature+"_f.png",
{'x_scale': scale, 'y_scale': scale})
plt.show()
else:
for feature in features:
current_plot_number=current_plot_number+1
sns.factorplot(x=feature, hue = data.target, data = data.dataframe, kind='count', palette = 'Set1')
if out==True or isinstance(out, str):
plt.savefig(out_images+feature+"_f.png", dpi=100, bbox_inches='tight')
if isinstance(out, str):
scale=(20*plot_cells)/Image.open(out_images+feature+"_f.png").size[1]
worksheet.write((current_plot_number-1)*(plot_cells+1), 0, 'Factor plot for '+feature+":")
worksheet.insert_image((current_plot_number-1)*(plot_cells+1)+1, 0, out_images+feature+"_f.png",
{'x_scale': scale, 'y_scale': scale})
plt.show()
print ('---------------------------------------\n')
if pairplot != []:
current_plot_number=current_plot_number+1
print ('Pairplot')
sns.pairplot(data.dataframe[pairplot].dropna())
if out==True or isinstance(out, str):
plt.savefig(out_images+"pairplot.png", dpi=100, bbox_inches='tight')
if isinstance(out, str):
worksheet.write((current_plot_number-1)*(plot_cells+1), 0, 'Pair plot for '+str(pairplot)+":")
worksheet.insert_image((current_plot_number-1)*(plot_cells+1)+1, 0, out_images+"pairplot.png")
plt.show()
if isinstance(out, str):
workbook.close()
#---------------------------------------------------------------
class TargetTrendVisualizer(Processor):
'''
Supports target trend visualization
'''
def __init__(self):
self.stats = pd.DataFrame()
def work(self, data, features=None, quantiles=100, magnify_trend=False, magnify_std_number=2, hide_every_even_tick_from=50,
min_size=10, out=False, out_images='TargetTrendVisualizer/', plot_cells=20):
'''
Calculates specified quantiles/takes categories, calculates target rates and sizes, then draws target trends
Parameters:
-----------
data: an object of Data type
features: the list of features to visualize, can be omitted
quantiles: number of quantiles to cut feature values on
magnify_trend: if True, then axis scale for target rate will be corrected to exclude outliers
magnify_std_number: how many standard deviations should be included in magnified scale
hide_every_even_tick_from: if there is too many quantiles then every second tick on x axis will be hidden
out: a boolean for images output or a path for xlsx output file
out_images: a path for images output (default - TargetTrendVisualizer/)
plot_cells: how many cells would plots get in output excel
'''
if features is None:
cycle_features=data.features.copy()
else:
cycle_features=features.copy()
if out is not None:
if out==True or isinstance(out, str):
directory = os.path.dirname(out_images)
if not os.path.exists(directory):
os.makedirs(directory)
if isinstance(out, str):
# Create an new Excel file and add a worksheet.
workbook = xlsxwriter.Workbook(out)
worksheet = workbook.add_worksheet('Target Trend Visualization')
# Widen the first column to make the text clearer.
worksheet.set_column('A:A', 100)
current_feature_number=0
for f in cycle_features:
if f not in data.dataframe:
print('Feature', f, 'not in input dataframe. Skipping..')
else:
print('Processing', f,'..')
current_feature_number=current_feature_number+1
if data.dataframe[f].dtype not in (float, np.float32, np.float64, int, np.int32, np.int64) or data.dataframe[f].unique().shape[0]<quantiles:
summarized=data.dataframe[[f, data.target]].groupby([f]).agg(['mean', 'size'])
else:
if data.dataframe[f].dropna().shape[0]<min_size*quantiles:
current_quantiles=int(data.dataframe[f].dropna().shape[0]/min_size)
if current_quantiles==0:
print('The number of non-missing observations is less then', min_size,'. No trend to visualize.')
if isinstance(out, str):
worksheet.write((current_feature_number-1)*(plot_cells+1), 0, 'Target trend for '+f+":")
worksheet.write((current_feature_number-1)*(plot_cells+1)+1, 0, 'The number of non-missing observations is less then '+str(min_size)+'. No trend to visualize.')
continue
else:
print('Too few non-missing observations for', quantiles, 'quantiles. Calculating', current_quantiles, 'quantiles..')
else:
current_quantiles=quantiles
summarized=data.dataframe[[data.target]].join(pd.qcut(data.dataframe[f], q=current_quantiles, precision=4, duplicates='drop')).groupby([f]).agg(['mean', 'size'])
small_quantiles=summarized[data.target][summarized[data.target]['size']<min_size]['size']
#display(small_quantiles)
if small_quantiles.shape[0]>0:
current_quantiles=int(small_quantiles.sum()/min_size)+summarized[data.target][summarized[data.target]['size']>=min_size].shape[0]
print('There are quantiles with size less then', min_size,'. Attempting', current_quantiles, 'quantiles..')
summarized=data.dataframe[[data.target]].join(pd.qcut(data.dataframe[f], q=current_quantiles, precision=4, duplicates='drop')).groupby([f]).agg(['mean', 'size'])
summarized.columns=summarized.columns.droplevel()
summarized=summarized.reset_index()
if pd.isnull(data.dataframe[f]).any():
with_na=data.dataframe[[f,data.target]][pd.isnull(data.dataframe[f])]
summarized.loc[-1]=[np.nan, with_na[data.target].mean(), with_na.shape[0]]
summarized=summarized.sort_index().reset_index(drop=True)
if summarized.shape[0]==1:
print('Too many observations in one value, so only 1 quantile was created. Increasing quantile number is recommended. No trend to visualize.')
if isinstance(out, str):
worksheet.write((current_feature_number-1)*(plot_cells+1), 0, 'Target trend for '+f+":")
worksheet.write((current_feature_number-1)*(plot_cells+1)+1, 0, 'Too many observations in one value, so only 1 quantile was created. Increasing quantile number is recommended. No trend to visualize.')
continue
fig = plt.figure(figsize=(15,5))
ax = fig.add_subplot(111)
ax.set_ylabel('Observations')
# blue is for the distribution
if summarized.shape[0]>hide_every_even_tick_from:
plt.xticks(range(summarized.shape[0]), summarized[f].astype(str), rotation=60, ha="right")
xticks = ax.xaxis.get_major_ticks()
for i in range(len(xticks)):
if i%2==0:
xticks[i].label1.set_visible(False)
else:
plt.xticks(range(summarized.shape[0]), summarized[f].astype(str), rotation=45, ha="right")
ax.bar(range(summarized.shape[0]), summarized['size'], zorder=0, alpha=0.3)
ax.grid(False)
ax.grid(axis='y', zorder=1, alpha=0.6)
ax2 = ax.twinx()
ax2.set_ylabel('Target Rate')
ax2.grid(False)
#display(summarized)
if magnify_trend:
ax2.set_ylim([0, np.average(summarized['mean'], weights=summarized['size'])+magnify_std_number*np.sqrt(np.cov(summarized['mean'], aweights=summarized['size']))])
for i in range(len(summarized['mean'])):
if summarized['mean'][i]>np.average(summarized['mean'], weights=summarized['size'])+magnify_std_number*np.sqrt(np.cov(summarized['mean'], aweights=summarized['size'])):
ax2.annotate(str(round(summarized['mean'][i],4)),
xy=(i, np.average(summarized['mean'], weights=summarized['size'])+magnify_std_number*np.sqrt(np.cov(summarized['mean'], aweights=summarized['size']))),
xytext=(i, np.average(summarized['mean'], weights=summarized['size'])+(magnify_std_number+0.05)*np.sqrt(np.cov(summarized['mean'], aweights=summarized['size']))),
rotation=60,
ha='left',
va='bottom',
color='red',
size=8.5
)
# red is for the target rate values
ax2.plot(range(summarized.shape[0]), summarized['mean'], 'ro-', linewidth=2.0, zorder=4)
if out==True or isinstance(out, str):
plt.savefig(out_images+f+".png", dpi=100, bbox_inches='tight')
if isinstance(out, str):
scale=(20*plot_cells)/Image.open(out_images+f+".png").size[1]
worksheet.write((current_feature_number-1)*(plot_cells+1), 0, 'Target trend for '+f+":")
worksheet.insert_image((current_feature_number-1)*(plot_cells+1)+1, 0, out_images+f+".png",
{'x_scale': scale, 'y_scale': scale})
plt.show()
if isinstance(out, str):
workbook.close()
class CorrelationAnalyzer(Processor):
'''
Produces correlation analysis
'''
def __init__(self):
self.stats = pd.DataFrame()
def work(self, data, drop_features = True, features = None, features_to_leave = None, threshold=0.6, method = 'spearman',
drop_with_most_correlations=True, verbose=False, out_before=None, out_after=None, sep=';', cdict = None):
'''
Calculates the covariance matrix and correlation coefficients for each pair of features.
For each highly correlated pair the algorithm chooses the less significant feature and adds it to the delete list.
Parameters
-----------
data: a Data or DataSamples object to check (in case of DataSamples, train sample will be checked)
drop_features: permission to delete correlated features and return a Data object without them
features: a list of features to analyze; by default - all the features
features_to_leave: a list of features that must not be deleted from the feature list
threshold: the lowest value of a correlation coefficient for two features to be considered correlated
method: method for correlation calculation
drop_with_most_correlations: should the features with the highest number of correlations be excluded first (otherwise just with any number of correlations and the lowest gini)
verbose: flag for detailed output
out_before: file name for export of correlation table before feature exclusion (.csv and .xlsx types are supported)
out_after: file name for export of correlation table after feature exclusion (.csv and .xlsx types are supported)
sep: the separator in case of .csv export
Returns
--------
Resulting Data or DataSamples object and the correlation table
'''
if features is None:
features=[]
if features_to_leave is None:
features_to_leave=[]
self.stats = pd.DataFrame({'drop_features' : [drop_features], 'threshold' : [threshold], 'method' : [method], 'out_before' : out_before, 'out_after' : out_after})
if type(data)==DataSamples:
sample=data.train
else:
sample=data
if len(sample.ginis)==0:
print('No calculated ginis in datasamples.train/data object. Set calc_gini=True while using WOE.transform or use Data.calc_gini. Return None')
return None
if features == [] or features is None:
candidates = sample.features.copy()
else:
candidates = features.copy()
features_to_drop = []
correlations = sample.dataframe[candidates].corr(method = method)
cor_out=correlations.copy()
if cdict is None:
cdict = {'red' : ((0.0, 0.9, 0.9),
(0.5, 0.05, 0.05),
(1.0, 0.9, 0.9)),
'green': ((0.0, 0.0, 0.0),
(0.5, 0.8, 0.8),
(1.0, 0.0, 0.0)),
'blue' : ((0.0, 0.1, 0.1),
(0.5, 0.1, 0.1),
(1.0, 0.1, 0.1))}
#edited 21.08.2018 by <NAME> - added verbose variant, optimized feature dropping
# edited on Dec-06-18 by <NAME>: added png
draw_corr=correlations.copy()
draw_corr.index=[x+' (%i)' % i for i,x in enumerate(draw_corr.index)]
draw_corr.columns=range(len(draw_corr.columns))
if out_before is not None:
out_before_png = 'corr_before.png'
if out_before[-4:]=='.csv':
draw_corr.round(2).to_csv(out_before, sep = sep)
out_before_png = out_before[:-4] + '.png'
elif out_before[-5:]=='.xlsx' or out_before[-4:]=='.xls':
draw_corr.round(2).style.applymap(color_digits, threshold_red=threshold, threshold_yellow=threshold**2).to_excel(out_before, engine='openpyxl', sheet_name='Correlation (before)')
out_before_png = out_before[:-5] + '.png' if out_before[-5:]=='.xlsx' else out_before[:-4] + '.png'
elif out_before[-4:]=='.png':
out_before_png = out_before
else:
print('Unknown format for export file. Use .csv or .xlsx. Skipping export.')
fig_before = sns.heatmap(draw_corr.round(2), annot = True, cmap = LinearSegmentedColormap('mycmap', cdict), cbar = False, center = 0, yticklabels = True, xticklabels = True).figure
fig_before.set_size_inches(draw_corr.shape[0]/2, draw_corr.shape[0]/2)
fig_before.savefig(out_before_png, bbox_inches='tight')
plt.close()
self.stats['out_before'] = out_before_png
if verbose:
display(draw_corr.round(2).style.applymap(color_digits, threshold_red=threshold, threshold_yellow=threshold**2))
to_check_correlation=True
while to_check_correlation:
to_check_correlation=False
corr_number={}
significantly_correlated={}
for var in correlations:
var_corr=correlations[var].apply(lambda x: abs(x))
var_corr=var_corr[(var_corr.index!=var) & (var_corr>threshold)].sort_values(ascending=False).copy()
corr_number[var]=var_corr.shape[0]
significantly_correlated[var]=str(var_corr.index.tolist())
if drop_with_most_correlations:
with_correlation={x:sample.ginis[x] for x in corr_number if corr_number[x]==max({x:corr_number[x] for x in corr_number if x not in features_to_leave}.values()) and corr_number[x]>0 and x not in features_to_leave}
else:
with_correlation={x:sample.ginis[x] for x in corr_number if corr_number[x]>0 and x not in features_to_leave}
if len(with_correlation)>0:
feature_to_drop=min(with_correlation, key=with_correlation.get)
features_to_drop.append(feature_to_drop)
if verbose:
print('Dropping %(v)s because of high correlation with features: %(f)s (Gini=%(g)0.2f)' % {'v':feature_to_drop, 'f':significantly_correlated[feature_to_drop], 'g':with_correlation[feature_to_drop]})
correlations=correlations.drop(feature_to_drop,axis=1).drop(feature_to_drop,axis=0).copy()
to_check_correlation=True
draw_corr=correlations.copy()
draw_corr.index=[x+' (%i)' % i for i,x in enumerate(draw_corr.index)]
draw_corr.columns=range(len(draw_corr.columns))
out_after_png = 'corr_after.png'
if out_after is not None:
if out_after[-4:]=='.csv':
draw_corr.round(2).to_csv(out_after, sep = sep)
out_after_png = out_after[:-4] + '.png'
elif out_after[-5:]=='.xlsx' or out_after[-4:]=='.xls':
draw_corr.round(2).style.applymap(color_digits, threshold_red=threshold, threshold_yellow=threshold**2).to_excel(out_after, engine='openpyxl', sheet_name='Correlation (after)')
out_after_png = out_after[:-5] + '.png' if out_after[-5:]=='.xlsx' else out_after[:-4] + '.png'
elif out_after[-4:]=='.png':
out_after_png = out_after
else:
print('Unknown format for export file. Use .csv or .xlsx. Skipping export.')
#sns.heatmap(draw_corr.round(2), annot = True, cmap = 'RdBu_r', cbar = False, center = 0).figure.savefig(out_after_png, bbox_inches='tight')
fig_after = sns.heatmap(draw_corr.round(2), annot = True, cmap = LinearSegmentedColormap('mycmap', cdict), cbar = False, center = 0, yticklabels = True, xticklabels = True).figure
fig_after.set_size_inches(draw_corr.shape[0]/2, draw_corr.shape[0]/2)
fig_after.savefig(out_after_png, bbox_inches='tight')
plt.close()
if verbose:
display(draw_corr.round(2).style.applymap(color_digits, threshold_red=threshold, threshold_yellow=threshold**2))
self.stats['out_after'] = out_after_png
result_data = copy.deepcopy(data)
if drop_features:
result_data.features_exclude(features_to_drop, verbose=False)
if verbose:
print('Dropped (if drop_features=True):', features_to_drop)
return result_data, cor_out
def find_correlated_groups(self, data, features = None, features_to_leave = None, threshold=0.6, method = 'spearman',
verbose=False, figsize=(12,12), corr_graph_type='connected'):
'''
Calculates the covariance matrix and correlation coefficients for each pair of features and
returns groups of significantly correlated features
Parameters
-----------
data: a Data or DataSamples object to check (in case of DataSamples it's train sample will be checked)
features: a list of features to analyze; by default - all the features
features_to_leave: a list of features that must not be included in analysis
threshold: the lowest value of a correlation coefficient for two features to be considered correlated
method: method for correlation calculation
verbose: flag for detailed output
figsize: the size of correlation connections graph (printed if verbose)
corr_graph_type: type of connectivity to persue in finding groups of correlated features
'connected' - groups are formed from features directly or indirectly connected by singnificant correlation
'complete' - groups are formed from features that are directly connected to each other by significant
correlation (each pair of features from a group will have a significant connection)
Returns
--------
a list of lists representing correlated group
'''
if features is None:
features=[]
if features_to_leave is None:
features_to_leave=[]
if type(data)==DataSamples:
sample=data.train
else:
sample=data
if features == [] or features is None:
candidates = [x for x in sample.features if x not in features_to_leave]
else:
candidates = [x for x in features if x not in features_to_leave]
correlations = sample.dataframe[candidates].corr(method = method)
if verbose:
draw_corr=correlations.copy()
draw_corr.index=[x+' (%i)' % i for i,x in enumerate(draw_corr.index)]
draw_corr.columns=range(len(draw_corr.columns))
display(draw_corr.round(2).style.applymap(color_digits,threshold_red=threshold))
G=nx.Graph()
for i in range(correlations.shape[0]):
for j in range(i+1, correlations.shape[0]):
if correlations.loc[correlations.columns[i], correlations.columns[j]]>threshold:
G.add_nodes_from([correlations.columns[i], correlations.columns[j]])
G.add_edge(correlations.columns[i], correlations.columns[j], label=str(round(correlations.loc[correlations.columns[i], correlations.columns[j]],3)))
if verbose:
plt.figure(figsize=(figsize[0]*1.2, figsize[1]))
pos = nx.spring_layout(G, k=100)
edge_labels = nx.get_edge_attributes(G,'label')
nx.draw(G, pos, with_labels=True)
nx.draw_networkx_edge_labels(G, pos, edge_labels = edge_labels)
plt.margins(x=0.2)
plt.show()
correlated_groups=[]
if corr_graph_type=='connected':
for x in nx.connected_components(G):
correlated_groups.append(sorted(list(x)))
elif corr_graph_type=='complete':
for x in nx.find_cliques(G):
correlated_groups.append(sorted(x))
else:
print('Unknown correlation graph type. Please use "connected" or "complete". Return None.')
return None
return correlated_groups
#---------------------------------------------------------------
class VIF(Processor):
'''
Calculates variance inflation factor for each feature
'''
def __init__(self):
self.stats = pd.DataFrame()
def work(self, data, drop_features = False, features=None, features_to_leave=None, threshold = 5,
drop_with_highest_VIF=True, verbose=True, out=None, sep=';'):
'''
Parameters
-----------
data: a Data or DataSamples object to check VIF on (in case of DataSamples it's train sample will be checked)
drop_features: permition to delete excluded features and return a Data object without them
features: a list of features to analyze; by default - all the features
features_to_leave: a list of features that must not be deleted from the feature list
threshold: the lowest value of VIF for feature to be excluded
drop_with_highest_VIF: should the features with the highest VIF be excluded first (otherwise just with the lowest gini)
verbose: flag for detailed output
out: file name for export of VIF values (.csv and .xlsx types are supported)
sep: the separator in case of .csv export
Returns
---------
Data or DataSamples object without excluded features
A pandas DataFrame with VIF values on different iterations
'''
if features_to_leave is None:
features_to_leave=[]
self.stats = pd.DataFrame({'drop_features' : [drop_features], 'threshold' : [threshold], 'out' : [out]})
if type(data)==DataSamples:
sample=data.train
else:
sample=data
if len(sample.ginis)==0:
print('No calculated ginis in datasamples.train/data object. Set calc_gini=True while using WOE.transform or use Data.calc_gini. Return None')
return None
if features is None:
features = sample.features.copy()
features_to_drop = []
to_check_VIF = True
vifs_df=pd.DataFrame(index=features)
iteration=-1
while to_check_VIF:
to_check_VIF = False
iteration=iteration+1
s = sample.target + ' ~ '
for f in features:
s = s + f + '+'
s = s[:-1]
# Break into left and right hand side; y and X
y_, X_ = dmatrices(formula_like=s, data=sample.dataframe, return_type="dataframe")
# For each Xi, calculate VIF
vifs = {features[i-1]:variance_inflation_factor(X_.values, i) for i in range(1, X_.shape[1])}
vifs_df=vifs_df.join(pd.DataFrame(vifs, index=[iteration]).T)
if drop_with_highest_VIF:
with_high_vif={x:sample.ginis[x] for x in vifs if vifs[x]==max({x:vifs[x] for x in vifs if x not in features_to_leave}.values()) and vifs[x]>threshold and x not in features_to_leave}
else:
with_high_vif={x:sample.ginis[x] for x in vifs if vifs[x]>threshold and x not in features_to_leave}
if len(with_high_vif)>0:
feature_to_drop=min(with_high_vif, key=with_high_vif.get)
features_to_drop.append(feature_to_drop)
if verbose:
print('Dropping %(v)s because of high VIF (VIF=%(vi)0.2f, Gini=%(g)0.2f)' % {'v':feature_to_drop, 'vi':vifs[feature_to_drop], 'g':with_high_vif[feature_to_drop]})
features.remove(feature_to_drop)
to_check_VIF=True
result_data = copy.deepcopy(data)
if drop_features:
result_data.features_exclude(features_to_drop, verbose=False)
out_png = 'VIF.png'
if out is not None:
if out[-4:]=='.csv':
vifs_df.round(2).to_csv(out, sep = sep)
out_png = out[:-4] + '.png'
elif out[-5:]=='.xlsx' or out[-4:]=='.xls':
vifs_df.round(2).style.applymap(color_digits, threshold_red=threshold).to_excel(out, engine='openpyxl', sheet_name='Variance Inflation Factor')
out_png = out[:-5] + '.png' if out[-5:]=='.xlsx' else out[:-4] + '.png'
elif out[-4:] == '.png':
out_png = out
else:
print('Unknown format for export file. Use .csv or .xlsx. Skipping export.')
vif_fig = sns.heatmap(vifs_df.round(2).sort_values(0, ascending = False), xticklabels = False, annot = True,
cmap = 'RdYlBu_r',
cbar = False, vmax = 5, yticklabels = True).figure
vif_fig.set_size_inches(vifs_df.shape[0]/4, vifs_df.shape[0]/2)
vif_fig.savefig(out_png, bbox_inches='tight')
plt.close()
self.stats['out'] = out_png
if verbose:
display(vifs_df.round(2).style.applymap(color_digits, threshold_red=threshold))
print('Dropped (if drop_features=True):', features_to_drop)
return result_data, vifs_df
#---------------------------------------------------------------
class FeatureEncoder(Processor):
'''
For processing non-numeric features
'''
def __init__(self):
self.stats = pd.DataFrame()
def work(self, data, how_to_code, inplace = False):
'''
Parameters
-----------
data: data to process, Data type
how_to_code: a dictionary {how: features_list} where 'how' can be 'one_hot' or 'seq'(means 'sequential') and 'features_list' is a list of columns in data to process
inplace: whether to change the data or to create a new Data object
Returns
---------
Data with additional features and dictionary for sequantial encoding
'''
result = data.dataframe.copy()
feature_list = data.features.copy()
d = {}
for how in how_to_code:
if how == 'one_hot':
for feature in how_to_code[how]:
one_hot = pd.get_dummies(result[feature])
one_hot.columns = [feature + '_' + str(c) for c in one_hot.columns]
feature_list = feature_list + list(one_hot.columns)
result = result.join(one_hot)
elif how == 'seq':
for feature in how_to_code[how]:
for (i, j) in enumerate(result[feature].drop_duplicates()):
d[j] = i
result[feature + '_code'] = result[feature].apply(lambda x: d[x])
feature_list = feature_list + [feature + '_code']
else:
print ('Do not understand your command. Please use "one_hot" or "seq" for how_to_code. Good luck.')
return None
self.param_dict_to_stats(data, how_to_code)
# for sequential, saves actual encoding
self.stats.loc[self.stats.action == 'seq', 'action'] = str(d)
if inplace:
data = Data(result, features = feature_list, target = data.target, weights = data.weights)
return d
else:
return Data(result, features = feature_list, target = data.target, weights = data.weights), d
#---------------------------------------------------------------
# Author - <NAME>
class GiniChecker(Processor):
'''
Class for gini checking
'''
def __init__(self):
self.stats = pd.DataFrame()
def work(self, feature, datasamples, gini_threshold=5, gini_decrease_threshold=0.2, gini_increase_restrict=True, verbose=False, with_test=False,
out=False, out_images='GiniChecker/'):
'''
Checks if gini of the feature is significant and stable enough
Parameters
-----------
feature: an object of FeatureWOE type that should be checked
datasamples: an object of DataSamples type containing the samples to check input feature on
gini_threshold: gini on train and validate/95% bootstrap should be greater then this
gini_decrease_threshold: gini decrease from train to validate/95% bootstrap deviation from mean to mean should be greater then this
gini_increase_restrict: if gini increase should also be restricted
verbose: if comments and graphs should be printed
with_test: add checking of gini values on test (calculation is always on)
out: a boolean for image output or a path for csv/xlsx output file to export gini values
out_images: a path for image output (default - GiniChecker/)
Returns
----------
Boolean - whether the check was successful
and if isinstance(out,str) then dictionary of gini values for all available samples
'''
if out:
directory = os.path.dirname(out_images)
if not os.path.exists(directory):
os.makedirs(directory)
if verbose:
print('Checking', feature.feature)
gini_correct=True
d=feature.transform(datasamples.train, original_values=True)
fpr, tpr, _ = roc_curve(d.dataframe[d.target], -d.dataframe[feature.feature+'_WOE'])
gini_train= (2*auc(fpr, tpr)-1)*100
if verbose:
print('Train gini = '+str(round(gini_train,2)))
if gini_train<gini_threshold:
gini_correct=False
if verbose:
print('Train gini is less then threshold '+str(gini_threshold))
samples=[datasamples.validate, datasamples.test]
sample_names=['Validate', 'Test']
gini_values={'Train':gini_train}
for si in range(len(samples)):
if samples[si] is not None:
d=feature.transform(samples[si], original_values=True)
fpr, tpr, _ = roc_curve(d.dataframe[d.target], -d.dataframe[feature.feature+'_WOE'])
gini = (2*auc(fpr, tpr)-1)*100
gini_values[samples[si].name]=gini
if verbose:
print(samples[si].name+' gini = '+str(round(gini,2)))
if with_test or samples[si].name!='Test':
if gini<gini_threshold:
gini_correct=False
if verbose:
print(samples[si].name+' gini is less then threshold '+str(gini_threshold))
decrease=1-gini/gini_train
if decrease>gini_decrease_threshold:
gini_correct=False
if verbose:
print('Gini decrease from Train to '+samples[si].name+' is greater then threshold: '+str(round(decrease,5))+' > '+str(gini_decrease_threshold))
if gini_increase_restrict and -decrease>gini_decrease_threshold:
gini_correct=False
if verbose:
print('Gini increase from Train to '+samples[si].name+' is greater then threshold: '+str(round(-decrease,5))+' > '+str(gini_decrease_threshold))
else:
gini_values[sample_names[si]]=None
gini_list=[]
if datasamples.bootstrap_base is not None:
db=feature.transform(datasamples.bootstrap_base.keep(feature.feature), original_values=True)
for bn in range(len(datasamples.bootstrap)):
d=db.dataframe.iloc[datasamples.bootstrap[bn]]
fpr, tpr, _ = roc_curve(d[db.target], -d[feature.feature+'_WOE'])
roc_auc = auc(fpr, tpr)
gini_list.append(round((roc_auc*2 - 1)*100, 2))
mean=np.mean(gini_list)
std=np.std(gini_list)
if verbose:
sns.distplot(gini_list)
plt.axvline(x=mean, linestyle='--', alpha=0.5)
plt.text(mean, 0, ' Mean = '+str(round(mean,2))+', std = '+str(round(std,2)),
horizontalalignment='right', verticalalignment='bottom', rotation=90)
plt.xlabel('Gini values in bootstrap')
plt.ylabel('Distribution')
plt.title(feature.feature, fontsize = 16)
if out:
plt.savefig(out_images+feature.feature+".png", dpi=100, bbox_inches='tight')
plt.show()
if mean-1.96*std<gini_threshold:
gini_correct=False
if verbose:
print('Less then 95% of gini distribution is greater then threshold: (mean-1.96*std) '+str(round(mean-1.96*std,5))+' < '+str(gini_threshold))
val_decrease=1.96*std/mean
if val_decrease>gini_decrease_threshold:
gini_correct=False
if verbose:
print('Gini deviation from mean for 95% of distribution is greater then threshold: (1.96*std/mean) '+str(round(val_decrease,5))+' > '+str(gini_decrease_threshold))
if isinstance(out, str):
gini_values.update({'Bootstrap'+str(i):gini_list[i] for i in range(len(gini_list))})
return gini_correct, gini_values
else:
return gini_correct
#added 13.08.2018 by <NAME>
def work_all(self, woe, features=None, drop_features=False, gini_threshold=5, gini_decrease_threshold=0.2,
gini_increase_restrict=True, verbose=False, with_test=False, out=False, out_images='GiniChecker/', sep=';'):
'''
Checks if gini of all features from WOE object is significant and stable enough
Parameters
-----------
woe: an object of WOE type that should be checked
drop_features: should the features be dropped from WOE.feature_woes list in case of failed checks
gini_threshold: gini on train and validate/95% bootstrap should be greater then this
gini_decrease_threshold: gini decrease from train to validate/95% bootstrap deviation from mean to mean should be greater then this
gini_increase_restrict: if gini increase should also be restricted
verbose: if comments and graphs should be printed
with_test: add checking of gini values on test (calculation is always on)
out: a boolean for image output or a path for csv/xlsx output file to export gini values
out_images: a path for image output (default - GiniChecker/)
sep: the separator to be used in case of csv export
Returns
----------
Dictionary with results of check for all features from input WOE object
'''
if features is None:
cycle_features=list(woe.feature_woes)
else:
cycle_features=list(features)
not_in_features_woe=[x for x in cycle_features if x not in woe.feature_woes]
if len(not_in_features_woe)>0:
print('No', not_in_features_woe, 'in WOE.feature_woes. Abort.')
return None
if out:
directory = os.path.dirname(out_images)
if not os.path.exists(directory):
os.makedirs(directory)
gini_correct={}
if isinstance(out, str):
gini_df=pd.DataFrame(columns=['Train', 'Validate', 'Test']+['Bootstrap'+str(i) for i in range(len(woe.datasamples.bootstrap))])
for feature in cycle_features:
if isinstance(out, str):
gini_correct[feature], gini_values=self.work(woe.feature_woes[feature], datasamples=woe.datasamples, gini_threshold=gini_threshold,
gini_decrease_threshold=gini_decrease_threshold,
gini_increase_restrict=gini_increase_restrict, verbose=verbose, with_test=with_test,
out=out, out_images=out_images)
#print(feature, gini_values)
gini_df=gini_df.append(pd.DataFrame(gini_values, index=[feature]))
else:
gini_correct[feature]=self.work(woe.feature_woes[feature], datasamples=woe.datasamples, gini_threshold=gini_threshold,
gini_decrease_threshold=gini_decrease_threshold,
gini_increase_restrict=gini_increase_restrict, verbose=verbose, with_test=with_test,
out=out, out_images=out_images)
if isinstance(out, str):
gini_df=gini_df[['Train', 'Validate', 'Test']+['Bootstrap'+str(i) for i in range(len(woe.datasamples.bootstrap))]].dropna(axis=1)
if out[-4:]=='.csv':
gini_df.to_csv(out, sep = sep)
elif out[-4:]=='.xls' or out[-5:]=='.xlsx':
writer = pd.ExcelWriter(out, engine='openpyxl')
gini_df.style.apply(color_background,
mn=gini_df.min().min(), mx=gini_df.max().max(), cmap='RdYlGn').to_excel(writer, sheet_name='Gini by Samples')
# Get the openpyxl objects from the dataframe writer object.
worksheet = writer.sheets['Gini by Samples']
for x in worksheet.columns:
worksheet.column_dimensions[x[0].column].width = 40 if x[0].column=='A' else 12
writer.save()
else:
print('Unknown format for export file. Use .csv or .xlsx. Skipping export.')
if drop_features:
woe.excluded_feature_woes.update({x:woe.feature_woes[x] for x in woe.feature_woes if gini_correct[x]==False})
woe.feature_woes={x:woe.feature_woes[x] for x in woe.feature_woes if gini_correct[x]}
return gini_correct
def work_tree(self, dtree, input_df=None, gini_threshold=5, gini_decrease_threshold=0.2, gini_increase_restrict=True,
verbose=False, with_test=False, out=False):
'''
Checks if gini of the tree is significant and stable enough
Parameters
-----------
dtree: a cross.DecisionTree object
input_df: a DataFrame, containing tree description
datasamples: an object of DataSamples type containing the samples to check input tree on
gini_threshold: gini on train and validate/95% bootstrap should be greater then this
gini_decrease_threshold: gini decrease from train to validate/95% bootstrap deviation from mean to mean should be greater then this
gini_increase_restrict: if gini increase should also be restricted
verbose: if comments and graphs should be printed
with_test: add checking of gini values on test (calculation is always on)
out: a boolean flag for gini values output
Returns
----------
Boolean - whether the check was successful
and if out==True then dictionary of gini values for all available samples
'''
if input_df is None:
tree_df=dtree.tree.copy()
else:
tree_df=input_df.copy()
datasamples=dtree.datasamples
features=[x for x in dtree.features if x in tree_df]
#[x for x in tree_df.columns[:tree_df.columns.get_loc('node')] if tree_df[x].dropna().shape[0]>0]
if verbose:
print('Checking tree on', str(features))
gini_correct=True
samples=[datasamples.train, datasamples.validate, datasamples.test]
sample_names=['Train', 'Validate', 'Test']
gini_values={}
for si in range(len(samples)):
if samples[si] is not None:
to_check=samples[si].keep(features=features).dataframe
to_check['woe']=dtree.transform(to_check, tree_df, ret_values=['woe'])
fpr, tpr, _ = roc_curve(to_check[samples[si].target], -to_check['woe'])
gini = (2*auc(fpr, tpr)-1)*100
gini_values[samples[si].name]=gini
if verbose:
print(samples[si].name+' gini = '+str(round(gini,2)))
if with_test or samples[si].name!='Test':
if gini<gini_threshold:
gini_correct=False
if verbose:
print(samples[si].name+' gini is less then threshold '+str(gini_threshold))
if samples[si].name!='Train':
decrease=1-gini/gini_values['Train']
if decrease>gini_decrease_threshold:
gini_correct=False
if verbose:
print('Gini decrease from Train to '+samples[si].name+' is greater then threshold: '+str(round(decrease,5))+' > '+str(gini_decrease_threshold))
if gini_increase_restrict and -decrease>gini_decrease_threshold:
gini_correct=False
if verbose:
print('Gini increase from Train to '+samples[si].name+' is greater then threshold: '+str(round(-decrease,5))+' > '+str(gini_decrease_threshold))
else:
gini_values[sample_names[si]]=None
gini_list=[]
if datasamples.bootstrap_base is not None:
base_with_woe=datasamples.bootstrap_base.keep(features=features).dataframe
base_with_woe['woe']=dtree.transform(base_with_woe, tree_df, ret_values=['woe'])
for bn in range(len(datasamples.bootstrap)):
to_check=base_with_woe.iloc[datasamples.bootstrap[bn]]
fpr, tpr, _ = roc_curve(to_check[datasamples.bootstrap_base.target], -to_check['woe'])
roc_auc = auc(fpr, tpr)
gini_list.append(round((roc_auc*2 - 1)*100, 2))
mean=np.mean(gini_list)
std=np.std(gini_list)
if verbose>True:
sns.distplot(gini_list)
plt.axvline(x=mean, linestyle='--', alpha=0.5)
plt.text(mean, 0, ' Mean = '+str(round(mean,2))+', std = '+str(round(std,2)),
horizontalalignment='right', verticalalignment='bottom', rotation=90)
plt.xlabel('Gini values in bootstrap')
plt.ylabel('Distribution')
plt.title('Tree on '+str(features), fontsize = 16)
plt.show()
elif verbose:
print('Bootstrap: mean = '+str(round(mean,2))+', std = '+str(round(std,2)))
if mean-1.96*std<gini_threshold:
gini_correct=False
if verbose:
print('Less then 95% of gini distribution is greater then threshold: (mean-1.96*std) '+str(round(mean-1.96*std,5))+' < '+str(gini_threshold))
val_decrease=1.96*std/mean
if val_decrease>gini_decrease_threshold:
gini_correct=False
if verbose:
print('Gini deviation from mean for 95% of distribution is greater then threshold: (1.96*std/mean) '+str(round(val_decrease,5))+' > '+str(gini_decrease_threshold))
if out:
gini_values.update({'Bootstrap'+str(i):gini_list[i] for i in range(len(gini_list))})
return gini_correct, gini_values
else:
return gini_correct
#---------------------------------------------------------------
# Author - <NAME>
class BusinessLogicChecker(Processor):
'''
Class for business logic checking
'''
def __init__(self):
self.stats = pd.DataFrame()
def work(self, feature, conditions='', verbose=False, out=None):
'''
Checks if the business logic condition is True
Parameters
-----------
feature: an object of FeatureWOE type that should be checked
conditions: a string with business logic conditions
for feature.categorical==True: 'cond_1;cond_2;...;cond_n', where cond_i
is 'A sign B', where A and B
are comma-separated lists of values (or nothing, but not both at the same time)
and where sign
is one of the following: <, >, =, <=, >=
each condition compares risk of bins with values from A to risk of bins with values from B (if B is omitted,
then risk of bins with values from A is compared to risk of bins with values not in A);
> means that risk of the second values group is smaller then the risk of the first values group (and values from
different groups cannot be in one bin), < means the opposite (again, values from different groups cannot be in one
bin), adding = allows values from different groups to be in one bin;
ALL of the conditions should be True or conditions should be empty for the input feature to pass the check
-----------------------------------------------------------------------------------------------------------
for feature.categorical==False:'cond_1;cond_2;....;cond_n (excl_1;...;excl_n)', where cond_i
is 'sign_1 value_2 sign_2 value_3 sign_3 ... value_n sign_n', where sign_i
is one of the following: <, >
and where value_i
is a float/int and can be omitted
and where excl_i
is a float/int and can be omitted (if there is not excl_i at all, then parentheses can be omitted too)
each condition describes how should risk be changing when feature values are increasing;
> means that risk will be monotonicaly decreasing with increase of values, < means the opposite, >< means that
risk decrease and then increase, adding value between signs tells, that in the bin with this value should be
the local risk extremum (>N< means that the bin with N in it should have the least risk);
adding values in () will result in exclusion of bins with these values before risk trend checking (and so bins
with these values are ignored);
each condition should start with a sign and end with a sign, one sign is permitter, values between signs
can be omitted;
ANY one of the conditions should be True for the input feature to pass the check
in case of conditions==None or conditions=='' checker wil return True is risk trend is monotonicaly
increasing/decresing (the same check will be processed if only values to exclude are provided)
verbose: if comments and graphs should be printed
out: a path for csv/xlsx output file to export business logic check results
Returns
----------
Boolean - whether the check was successful
and if out is not None then dataframe of check log
'''
if out is not None:
out_df=pd.DataFrame(columns=['feature', 'categorical', 'condition', 'fact', 'condition_result'])
if feature.categorical == False:
woes_dropna={feature.groups[x][0]:feature.woes[x] for x in feature.woes if isinstance(feature.groups[x],list)}
groups_info=pd.DataFrame(woes_dropna, index=['woe']).transpose().reset_index().rename({'index':'lower'}, axis=1)
groups_info['upper']=groups_info['lower'].shift(-1).fillna(np.inf)
if groups_info.shape[0]==1:
if verbose:
print('Only 1 group with non-missing values is present. Skipping trend check..')
all_cond_correct=True
else:
all_cond_correct=False
for c in conditions.split(';'):
#find all floats/ints between > and < - minimal risk
#first there should be >, then + or - or nothing, then at least one digit, then . or , or nothing, then zero or more digits and < after that
min_risk = re.findall('(?<=>)[-+]?\d+[.,]?\d*(?=<)', c)
#find all floats/ints between < and > - maximal risk
max_risk = re.findall('(?<=<)[-+]?\d+[.,]?\d*(?=>)', c)
#find all floats/ints between ( and ), ( and ; or ; and ) - values to exclude (without risk checking)
excl_risk = re.findall('(?<=[(;])[-+]?\d+[.,]?\d*(?=[;)])', c)
clear_condition=''.join(x for x in c if x in '<>')
gi_check=groups_info.dropna(how='all', subset=['lower','upper'])[['woe','lower','upper']].copy()
for excl in excl_risk:
gi_check=gi_check[((gi_check['lower']<=float(excl)) & (gi_check['upper']>float(excl)))==False]
gi_check['risk_trend']=np.sign((gi_check['woe']-gi_check['woe'].shift(1)).dropna()).apply(lambda x: '+' if (x<0) else '-' if (x>0) else '0')
trend=gi_check['risk_trend'].str.cat()
reg_exp=r''
for s in clear_condition:
if s=='>':
reg_exp=reg_exp+r'-+'
if s=='<':
reg_exp=reg_exp+r'\++'
if len(reg_exp)==0:
reg_exp='-*|\+*'
if re.fullmatch(reg_exp, trend):
trend_correct=True
if verbose:
print('Risk trend in data is consistent with input trend: input ', clear_condition, ', data ', trend)
else:
trend_correct=False
if verbose:
print('Risk trend in data is not consistent with input trend: input ', clear_condition, ', data ', trend)
#local risk minimums
min_risk_data=gi_check[(gi_check['risk_trend']=='-') & (gi_check['risk_trend'].shift(-1)=='+')].reset_index(drop=True)
min_risk_correct=True
for mr in range(len(min_risk)):
if mr+1<=min_risk_data.shape[0]:
if verbose:
print(feature.feature+': checking min risk in', min_risk[mr], '(between ', min_risk_data['lower'].loc[mr], ' and ', min_risk_data['upper'].loc[mr], ')')
min_risk_correct=min_risk_correct and (float(min_risk[mr])>=min_risk_data['lower'].loc[mr] and float(min_risk[mr])<min_risk_data['upper'].loc[mr])
else:
if verbose:
print(feature.feature+': not enough minimums in data to check', min_risk[mr])
min_risk_correct=False
#local risk maximums
max_risk_data=gi_check[(gi_check['risk_trend']=='+') & (gi_check['risk_trend'].shift(-1)=='-')].reset_index(drop=True)
max_risk_correct=True
for mr in range(len(max_risk)):
if mr+1<=max_risk_data.shape[0]:
if verbose:
print(feature.feature+': checking max risk in', max_risk[mr], '(between ', max_risk_data['lower'].loc[mr], ' and ', max_risk_data['upper'].loc[mr], ')')
max_risk_correct=max_risk_correct and (float(max_risk[mr])>=max_risk_data['lower'].loc[mr] and float(max_risk[mr])<max_risk_data['upper'].loc[mr])
else:
if verbose:
print(feature.feature+': not enough maximums in data to check', max_risk[mr])
min_risk_correct=False
all_cond_correct=all_cond_correct or (trend_correct and min_risk_correct and max_risk_correct)
if out is not None:
out_df=out_df.append(dict(feature=feature.feature, categorical=feature.categorical, condition=c, fact=trend, condition_result=trend_correct and min_risk_correct and max_risk_correct), ignore_index=True)
if verbose:
if all_cond_correct:
print(feature.feature+': business logic check succeeded.')
else:
fig=plt.figure(figsize=(5,0.5))
plt.plot(range(len(groups_info.dropna(how='all', subset=['lower','upper'])['lower'])),
groups_info.dropna(how='all', subset=['lower','upper'])['woe'], color='red')
plt.xticks(range(len(groups_info.dropna(how='all', subset=['lower','upper'])['lower'])),
round(groups_info.dropna(how='all', subset=['lower','upper'])['lower'],3))
plt.ylabel('WoE')
fig.autofmt_xdate()
plt.show()
print(feature.feature+': business logic check failed.')
if out is not None:
return all_cond_correct, out_df[['feature', 'categorical', 'condition', 'fact', 'condition_result']]
else:
return all_cond_correct
else:
all_cond_correct=True
if conditions!='':
w={}
for x in feature.groups:
for y in feature.groups[x]:
w[y]=feature.woes[x]
groups_info=pd.DataFrame(w, index=['woe']).transpose().reset_index().rename({'index':'categories'}, axis=1)
groups_info=groups_info[groups_info['categories']!=-np.inf].reset_index(drop=True).copy()
cond_types2=['>=','=>','<=','=<']
cond_types1=['>','<','=']
for c in conditions.split(';'):
c0=[]
c1=[]
cond_type=[x for x in cond_types2 if x in c]
if len(cond_type)==0:
cond_type=[x for x in cond_types1 if x in c]
cond_type=cond_type[0]
if cond_type in ['>=', '=>', '>']:
c0=ast.literal_eval('['+c[:c.find(cond_type)]+']')
c1=ast.literal_eval('['+c[c.find(cond_type)+len(cond_type):]+']')
elif cond_type in ['<=', '=<', '<']:
c0=ast.literal_eval('['+c[c.find(cond_type)+len(cond_type):]+']')
c1=ast.literal_eval('['+c[:c.find(cond_type)]+']')
elif cond_type=='=':
c0=ast.literal_eval('['+c[c.find(cond_type)+len(cond_type):]+']')
c1=ast.literal_eval('['+c[:c.find(cond_type)]+']')
can_be_equal=('=' in cond_type)
groups_info['risk_group']=groups_info['categories'].apply(lambda x: 0 if (x in c0 or (len(c0)==0 and x not in c1)) else 1 if (x in c1 or (len(c1)==0 and x not in c0)) else np.nan)
cond_correct = (cond_type!='=' and groups_info[groups_info['risk_group']==0]['woe'].max()<groups_info[groups_info['risk_group']==1]['woe'].min()) or (can_be_equal and (groups_info[groups_info['risk_group']==0]['woe'].max()==groups_info[groups_info['risk_group']==1]['woe'].min() or c0==c1))
all_cond_correct=all_cond_correct and cond_correct
if verbose:
print(feature.feature+': checking condition '+ c + ' => ' + str(cond_correct))
if out is not None:
out_df=out_df.append(dict(feature=feature.feature, categorical=feature.categorical, condition=c, fact='', condition_result=cond_correct), ignore_index=True)
if verbose:
print(feature.feature+': conditions ' + conditions + ' => ' + str(all_cond_correct))
else:
if verbose:
print(feature.feature+': no conditions were specified, business logic check succeeded.')
if out is not None:
return all_cond_correct, out_df[['feature', 'categorical', 'condition', 'fact', 'condition_result']]
else:
return all_cond_correct
#added 13.08.2018 by <NAME>
def work_all(self, woe, features=None, input_conditions=None, drop_features=False, verbose=False, out=None, sep=';'):
'''
Checks if business logic conditions for all features from the WOE object are True
Parameters
-----------
woe: an object of FeatureWOE type that should be checked
input_conditions: adress for excel-file with business logic conditions (columns 'variable' and 'condition' are mandatory)
drop_features: should the features be dropped from WOE.feature_woes list in case of failed checks
verbose: if comments and graphs should be printed
out: a path for csv/xlsx output file to export business logic check results
sep: the separator to be used in case of csv export
Returns
----------
Dictionary with results of check for all features from input WOE object
'''
if out is not None:
out_df=pd.DataFrame(columns=['feature', 'categorical', 'condition', 'fact', 'condition_result', 'overall_result'])
if features is None:
cycle_features=list(woe.feature_woes)
else:
cycle_features=list(features)
not_in_features_woe=[x for x in cycle_features if x not in woe.feature_woes]
if len(not_in_features_woe)>0:
print('No', not_in_features_woe, 'in self.feature_woes. Abort.')
return None
business_logic_correct={}
'''
if conditions_dict is not None:
if isinstance(conditions_dict, dict):
conditions_dict=pd.DataFrame(conditions_dict, index=['conditions']).T
elif isinstance(conditions_dict, str) and (conditions_dict[-5:]=='.xlsx' or conditions_dict[-4:]=='.xls'):
try:
conditions=pd.read_excel(conditions_dict).set_index('variable')
conditions['conditions']=conditions['conditions'].apply(lambda x: '' if (pd.isnull(x)) else x)
except Exception:
print('No conditions dictionary was found / no "variable" or "conditions" fields were found. Abort.')
return None
elif isinstance(conditions_dict, str):
conditions_dict=pd.DataFrame({x:conditions_dict for x in cycle_features},
index=['conditions']).T
else:
conditions=pd.DataFrame()
'''
if input_conditions is None:
conditions_dict=pd.DataFrame(columns=['feature', 'conditions'])
elif isinstance(input_conditions, dict) or isinstance(input_conditions, pd.DataFrame):
conditions_dict=input_conditions.copy()
elif isinstance(input_conditions, str):
if input_conditions[-4:]=='.csv':
conditions_dict=pd.read_csv(input_conditions, sep = sep)
elif input_conditions[-4:]=='.xls' or input_conditions[-5:]=='.xlsx':
conditions_dict=pd.read_excel(input_conditions)
else:
print('Unknown format for path to conditions dictionary file. Return None.')
elif isinstance(input_conditions, tuple):
conditions_dict={x:input_conditions[0] if x not in woe.categorical else input_conditions[1] for x in cycle_features}
else:
print('Unknown format for conditions dictionary file. Return None')
return None
if isinstance(conditions_dict, pd.DataFrame):
for v in ['feature', 'variable', 'var']:
if v in conditions_dict:
break
try:
conditions_dict=dict(conditions_dict.fillna('').set_index(v)['conditions'])
except Exception:
print("No 'feature' ,'variable', 'var' or 'conditions' field in input pandas.DataFrame. Return None.")
return None
for feature in cycle_features:
if feature not in conditions_dict:
current_conditions=''
else:
current_conditions=conditions_dict[feature]
if out is not None:
business_logic_correct[feature], out_feature_df=self.work(woe.feature_woes[feature], conditions=current_conditions, verbose=verbose, out=out)
out_feature_df['overall_result']=business_logic_correct[feature]
out_df=out_df.append(out_feature_df, ignore_index=True)
else:
business_logic_correct[feature]=self.work(woe.feature_woes[feature], conditions=current_conditions, verbose=verbose, out=out)
if drop_features:
woe.excluded_feature_woes.update({x:woe.feature_woes[x] for x in woe.feature_woes if business_logic_correct[x]==False})
woe.feature_woes={x:woe.feature_woes[x] for x in woe.feature_woes if business_logic_correct[x]}
if out is not None:
out_df=out_df[['feature', 'categorical', 'condition', 'fact', 'condition_result', 'overall_result']]
#display(out_df)
if out[-4:]=='.csv':
out_df.to_csv(out, sep = sep)
elif out[-4:]=='.xls' or out[-5:]=='.xlsx':
writer = pd.ExcelWriter(out, engine='openpyxl')
out_df.style.apply(self.color_result, subset=pd.IndexSlice[:,['condition_result', 'overall_result']]).to_excel(writer, sheet_name='Business Logic', index=False)
# Get the openpyxl objects from the dataframe writer object.
worksheet = writer.sheets['Business Logic']
for x in worksheet.columns:
worksheet.column_dimensions[x[0].column].width = 40 if x[0].column=='A' else 20
writer.save()
else:
print('Unknown format for export file. Use .csv or .xlsx. Skipping export.')
return business_logic_correct
def work_tree(self, dtree, input_df=None, input_conditions=None, max_corrections=None, sep=';', to_correct=False, verbose=False):
'''
Checks if the business logic conditions are True in every node of the input tree and corrects the tree for it to pass the check
Parameters
-----------
dtree: a cross.DecisionTree object to check
input_df: a DataFrame, containing tree description
input_conditions: a DataFrame, a dictionary or a string with a path to conditions dictionary (in case of DataFrame or string
the field with features' names should be called 'feature', 'variable' or 'var')
for categorical features: 'cond_1;cond_2;...;cond_n', where cond_i
is 'A sign B', where A and B
are comma-separated lists of values (or nothing, but not both at the same time)
and where sign
is one of the following: <, >, =, <=, >=
each condition compares risk of bins with values from A to risk of bins with values from B (if B is omitted,
then risk of bins with values from A is compared to risk of bins with values not in A);
> means that risk of the second values group is smaller then the risk of the first values group (and values from
different groups cannot be in one bin), < means the opposite (again, values from different groups cannot be in one
bin), adding = allows values from different groups to be in one bin;
ALL of the conditions should be True or conditions should be empty for the input feature to pass the check
-----------------------------------------------------------------------------------------------------------
for interval features:'cond_1;cond_2;....;cond_n (excl_1;...;excl_n)', where cond_i
is 'sign_1 sign_2 sign_3 ... sign_n', where sign_i
is one of the following: <, >
and where excl_i
is a float/int and can be omitted (if there is not excl_i at all, then parentheses can be omitted too)
each condition describes how should risk be changing when feature values are increasing;
> means that risk will be monotonicaly decreasing with increase of values, < means the opposite, >< means that
risk decrease and then increase, values between signs will be ignored because for most of nodes entire sample won't be
available for division and extremum values' absense or the presence of new local extremums should not be prohibited;
adding values in () will result in exclusion of bins with these values before risk trend checking (and so bins
with these values are ignored);
each condition should start with a sign and end with a sign, one sign is permitted;
ANY one of the conditions should be True for the input feature to pass the check
in case of conditions==None or conditions=='' checker wil return True is risk trend is monotonicaly
increasing/decresing (the same check will be processed if only values to exclude are provided)
max_corrections: maximal number of corrections in attempt to change the tree so it will pass the check
sep: a separator in case of csv import for conditions dictionary
to_correct: should there be attempts to correct tree by uniting nodes or not
verbose: if comments and graphs should be printed
Returns
----------
if to_correct:
True and a DataFrame with tree description - corrected or initial
else:
result of the input tree check and the input tree itself
'''
#-----------------------------------------------Subsidiary functions--------------------------------------------------
def bl_check_categorical(df, conditions, verbose=False, missing_group_is_correct=True):
'''
TECH
Check correctness of conditions for a categorical feature
Parameters
-----------
df: a DataFrame, containing lists of categories and WoE values
conditions: a string, containing business logic conditions for a feature
verbose: if comments should be printed
missing_group_is_correct: should missing of any value from condition in input data be considered as
successful check or not
Returns
----------
boolean flag of successful check
'''
all_cond_correct=True
if conditions!='':
tree_df=df.copy()
#display(tree_df)
cat_woes=[]
for i in tree_df.index:
categories, n, w = tree_df.loc[i]
#display(tree_df.loc[i])
#display(categories)
for c in categories:
cat_woes.append([c, n, w])
groups_info=pd.DataFrame(cat_woes, columns=['categories', 'nodes', 'woe'])
#display(groups_info)
cond_types2=['>=','=>','<=','=<']
cond_types1=['>','<','=']
for c in conditions.split(';'):
c0=[]
c1=[]
cond_type=[x for x in cond_types2 if x in c]
if len(cond_type)==0:
cond_type=[x for x in cond_types1 if x in c]
cond_type=cond_type[0]
if cond_type in ['>=', '=>', '>']:
c0=ast.literal_eval('['+c[:c.find(cond_type)]+']')
c1=ast.literal_eval('['+c[c.find(cond_type)+len(cond_type):]+']')
elif cond_type in ['<=', '=<', '<']:
c0=ast.literal_eval('['+c[c.find(cond_type)+len(cond_type):]+']')
c1=ast.literal_eval('['+c[:c.find(cond_type)]+']')
elif cond_type=='=':
c0=ast.literal_eval('['+c[c.find(cond_type)+len(cond_type):]+']')
c1=ast.literal_eval('['+c[:c.find(cond_type)]+']')
can_be_equal=('=' in cond_type)
groups_info['risk_group']=groups_info['categories'].apply(lambda x: 0 if (x in c0 or (len(c0)==0 and x not in c1)) else 1 if (x in c1 or (len(c1)==0 and x not in c0)) else np.nan)
cond_correct = (cond_type!='=' and groups_info[groups_info['risk_group']==0]['woe'].max()<groups_info[groups_info['risk_group']==1]['woe'].min()) or \
(can_be_equal and (groups_info[groups_info['risk_group']==0]['woe'].max()==groups_info[groups_info['risk_group']==1]['woe'].min() or c0==c1)) or \
(missing_group_is_correct and len(groups_info['risk_group'].dropna().unique())<2)
all_cond_correct=all_cond_correct and cond_correct
if verbose:
print('\tChecking condition '+ c + ' => ' + str(cond_correct))
if verbose:
print('\tConditions ' + conditions + ' => ' + str(all_cond_correct))
elif verbose:
print('\tNo conditions were specified, business logic check succeeded.')
return all_cond_correct
def bl_check_interval(df, conditions, verbose=False):
'''
TECH
Check correctness of conditions for an interval feature
Parameters
-----------
df: a DataFrame, containing intervals' descriptions and WoE values
conditions: a string, containing business logic conditions for a feature
verbose: if comments should be printed
Returns
----------
boolean flag of successful check
'''
tree_df=df.copy()
split_feature=tree_df.columns[0]
groups_info=tree_df[pd.isnull(tree_df[split_feature])==False]
groups_info['upper']=groups_info[split_feature].apply(lambda x: x[0][1] if pd.isnull(x[1]) else x[1])
groups_info['lower']=groups_info[split_feature].apply(lambda x: x[0][0] if pd.isnull(x[1]) else x[0])
#display(groups_info)
if groups_info.shape[0]==1:
if verbose:
print('\tOnly 1 group with non-missing values is present. Skipping trend check..')
all_cond_correct=True
else:
all_cond_correct=False
for c in conditions.split(';'):
#find all floats/ints between > and < - minimal risk
#first there should be >, then + or - or nothing, then at least one digit, then . or , or nothing, then zero or more digits and < after that
#min_risk = re.findall('(?<=>)[-+]?\d+[.,]?\d*(?=<)', c)
#find all floats/ints between < and > - maximal risk
#max_risk = re.findall('(?<=<)[-+]?\d+[.,]?\d*(?=>)', c)
#find all floats/ints between ( and ), ( and ; or ; and ) - values to exclude (without risk checking)
excl_risk = re.findall('(?<=[(;])[-+]?\d+[.,]?\d*(?=[;)])', c)
clear_condition=''.join(x for x in c if x in '<>')
gi_check=groups_info.dropna(how='all', subset=['lower','upper'])[['woe','lower','upper']].copy()
for excl in excl_risk:
gi_check=gi_check[((gi_check['lower']<=float(excl)) & (gi_check['upper']>float(excl)))==False]
gi_check['risk_trend']=np.sign((gi_check['woe']-gi_check['woe'].shift(1)).dropna()).apply(lambda x: '+' if (x<0) else '-' if (x>0) else '0')
trend=gi_check['risk_trend'].str.cat()
reg_exp=r''
for s in clear_condition:
if s=='>':
reg_exp=reg_exp+r'-+'
if s=='<':
reg_exp=reg_exp+r'\++'
if len(reg_exp)==0:
reg_exp='-*|\+*'
if re.fullmatch(reg_exp, trend):
trend_correct=True
if verbose:
print('\tRisk trend in data is consistent with input trend: input ', clear_condition, ', data ', trend)
else:
trend_correct=False
if verbose:
print('\tRisk trend in data is not consistent with input trend: input ', clear_condition, ', data ', trend)
'''#local risk minimums
min_risk_data=gi_check[(gi_check['risk_trend']=='-') & (gi_check['risk_trend'].shift(-1)=='+')].reset_index(drop=True)
min_risk_correct=True
for mr in range(len(min_risk)):
if mr+1<=min_risk_data.shape[0]:
if verbose:
print('\tChecking min risk in', min_risk[mr], '(between ', min_risk_data['lower'].loc[mr], ' and ', min_risk_data['upper'].loc[mr], ')')
min_risk_correct=min_risk_correct and (float(min_risk[mr])>=min_risk_data['lower'].loc[mr] and float(min_risk[mr])<min_risk_data['upper'].loc[mr])
else:
if verbose:
print('\tNot enough minimums in data to check', min_risk[mr])
min_risk_correct=False
#local risk maximums
max_risk_data=gi_check[(gi_check['risk_trend']=='+') & (gi_check['risk_trend'].shift(-1)=='-')].reset_index(drop=True)
max_risk_correct=True
for mr in range(len(max_risk)):
if mr+1<=max_risk_data.shape[0]:
if verbose:
print('\tChecking max risk in', max_risk[mr], '(between ', max_risk_data['lower'].loc[mr], ' and ', max_risk_data['upper'].loc[mr], ')')
max_risk_correct=max_risk_correct and (float(max_risk[mr])>=max_risk_data['lower'].loc[mr] and float(max_risk[mr])<max_risk_data['upper'].loc[mr])
else:
if verbose:
print('\tNot enough maximums in data to check', max_risk[mr])
min_risk_correct=False
all_cond_correct=all_cond_correct or (trend_correct and min_risk_correct and max_risk_correct)'''
all_cond_correct=all_cond_correct or trend_correct
if verbose:
if all_cond_correct:
print('\tBusiness logic check succeeded.')
else:
fig=plt.figure(figsize=(5,0.5))
plt.plot(range(len(groups_info.dropna(how='all', subset=['lower','upper'])['lower'])),
groups_info.dropna(how='all', subset=['lower','upper'])['woe'], color='red')
plt.xticks(range(len(groups_info.dropna(how='all', subset=['lower','upper'])['lower'])),
round(groups_info.dropna(how='all', subset=['lower','upper'])['lower'],3))
plt.ylabel('WoE')
fig.autofmt_xdate()
plt.show()
print('\tBusiness logic check failed.')
return all_cond_correct
def bl_recursive_correct(tree_df, node, allowed_corrections=1, corrections=None, conditions='', max_corrections=1,
verbose=False):
'''
TECH
Recursive search of corrections needed for tree to pass business logic checks
Parameters
-----------
tree_df: a DataFrame, containing tree description
node: a node number, whose children are corrected and checked
allowed_corrections: a number of remaining corrections, that are allowed
max_corrections: maximal number of corrections in attempt to change the tree so it will pass the check
corrections: the list of current corrections
conditions: a string, containing business logic conditions for a feature, by which current node was split
verbose: if comments and graphs should be printed
Returns
----------
boolean flag of corrected tree passing the check and
the list of corrections, that were made
'''
if corrections is None:
corrections=[]
split_feature=tree_df[(tree_df['node']==node)]['split_feature'].values[0]
if allowed_corrections>0:
possible_nodes_to_correct=sorted(tree_df[(tree_df['parent_node']==node)]['node'].tolist())
combinations=[]
for n1 in range(len(possible_nodes_to_correct)):
for n2 in range(len(possible_nodes_to_correct[n1+1:])):
if dtree.check_unitability(tree_df, [possible_nodes_to_correct[n1], possible_nodes_to_correct[n1+1:][n2]]):
first_condition=tree_df[(tree_df['node']==possible_nodes_to_correct[n1])][split_feature].values[0]
if not(isinstance(first_condition, list) or isinstance(first_condition, tuple)):
nodes_combination=[possible_nodes_to_correct[n1+1:][n2], possible_nodes_to_correct[n1]]
else:
nodes_combination=[possible_nodes_to_correct[n1], possible_nodes_to_correct[n1+1:][n2]]
combinations.append([nodes_combination,
abs(tree_df[tree_df['node']==possible_nodes_to_correct[n1]]['woe'].values[0]- \
tree_df[tree_df['node']==possible_nodes_to_correct[n1+1:][n2]]['woe'].values[0])])
combinations.sort(key=itemgetter(1))
for nodes_to_unite, woe in combinations:
if verbose:
print('Checking (',(max_corrections-allowed_corrections+1),'): for node', node, 'uniting children', str(nodes_to_unite), 'with woe difference =', woe)
tree_df_corrected=dtree.unite_nodes(tree_df, nodes_to_unite)
#display(tree_df_corrected)
if tree_df_corrected.shape[0]!=tree_df.shape[0]:
correct, final_corrections=bl_recursive_correct(tree_df_corrected, node, allowed_corrections-1, corrections+[nodes_to_unite],
conditions, max_corrections=max_corrections, verbose=verbose)
else:
correct=False
if correct:
return correct, final_corrections
else:
return False, corrections
else:
df_to_check=tree_df[(tree_df['parent_node']==node)][[split_feature, 'node', 'woe']]
categorical=sum([isinstance(x, list) for x in df_to_check[split_feature]])>0
if verbose:
print('Node', node, split_feature, (': Checking categorical business logic..' if categorical \
else ': Checking interval business logic..'))
correct=bl_check_categorical(df_to_check, conditions, verbose=verbose) if categorical \
else bl_check_interval(df_to_check, conditions, verbose=verbose)
return correct, corrections
#---------------------------------------------------------------------------------------------------------------------
if input_df is None:
tree_df=dtree.tree.copy()
else:
tree_df=input_df.copy()
features=[x for x in dtree.features if x in tree_df]
if input_conditions is None:
conditions_dict=pd.DataFrame(columns=['feature', 'conditions'])
elif isinstance(input_conditions, dict) or isinstance(input_conditions, pd.DataFrame):
conditions_dict=input_conditions.copy()
elif isinstance(input_conditions, str):
if input_conditions[-4:]=='.csv':
conditions_dict=pd.read_csv(input_conditions, sep = sep)
elif input_conditions[-4:]=='.xls' or input_conditions[-5:]=='.xlsx':
conditions_dict=pd.read_excel(input_conditions)
else:
print('Unknown format for path to conditions dictionary file. Return None.')
elif isinstance(input_conditions, tuple):
conditions_dict={x:input_conditions[0] if x not in dtree.categorical else input_conditions[1] for x in features}
else:
print('Unknown format for conditions dictionary file. Return None')
return None
if isinstance(conditions_dict, pd.DataFrame):
for v in ['feature', 'variable', 'var']:
if v in conditions_dict:
break
try:
conditions_dict=dict(conditions_dict.fillna('').set_index(v)['conditions'])
except Exception:
print("No 'feature' ,'variable', 'var' or 'conditions' field in input pandas.DataFrame. Return None.")
return None
#tree_df['split_feature'].dropna().unique().tolist()
categorical={}
for f in features:
if f not in conditions_dict:
conditions_dict[f]=''
categorical[f]=sum([isinstance(x,list) for x in tree_df[f]])>0
nodes_to_check=tree_df[tree_df['leaf']==False].sort_values(['depth', 'node'])['node'].tolist()
current_node_index=0
to_check=True
correct_all=True
while to_check:
node=nodes_to_check[current_node_index]
to_check=False
split_feature=tree_df.loc[tree_df['node']==node, 'split_feature'].values[0]
conditions=conditions_dict[split_feature]
if conditions is None:
if verbose:
print('Node', node, split_feature, ': <None> conditions specified, skipping..')
correct=True
else:
df_to_check=tree_df[(tree_df['parent_node']==node)][[split_feature, 'node', 'woe']]
if verbose:
print('Node', node, split_feature, (': Checking categorical business logic..' if categorical[split_feature] \
else ': Checking interval business logic..'))
correct=bl_check_categorical(df_to_check, conditions, verbose=verbose) if categorical[split_feature] \
else bl_check_interval(df_to_check, conditions, verbose=verbose)
correct_all=correct_all and correct
if correct==False and to_correct:
new_correct=False
if len(df_to_check['node'].unique())>2:
nodes_to_correct=sorted(df_to_check['node'].unique().tolist())
if max_corrections is None:
allowed_corrections=len(nodes_to_correct)-1
else:
allowed_corrections=min(len(nodes_to_correct)-1, max_corrections)
#print('correct', nodes_to_correct)
for cur_allowed_corrections in range(1,allowed_corrections):
new_correct, corrections=bl_recursive_correct(tree_df, node, allowed_corrections=cur_allowed_corrections, conditions=conditions,
max_corrections=allowed_corrections, verbose=verbose)
if new_correct:
break
if new_correct:
if verbose:
print('Successful corrections:', str(corrections))
for correction in corrections:
tree_df=dtree.unite_nodes(tree_df, correction)
if new_correct==False:
if verbose:
print('No successful corrections were found. Pruning node', node)
tree_df=dtree.prune(tree_df, node)
nodes_to_check=tree_df[tree_df['leaf']==False].sort_values(['depth', 'node'])['node'].tolist()
if current_node_index+1<len(nodes_to_check):
current_node_index+=1
to_check=True
if to_correct:
return True, tree_df
else:
return correct_all, tree_df
def color_result(self, x):
'''
TECH
Defines result cell color for excel export
Parameters
-----------
x: input values
Returns
--------
color description for style.apply()
'''
colors=[]
for e in x:
if e:
colors.append('background-color: green')
else:
colors.append('background-color: red')
return colors
#---------------------------------------------------------------
#added 13.08.2018 by <NAME>
class WOEOrderChecker(Processor):
'''
Class for WoE order checking
'''
def __init__(self):
self.stats = pd.DataFrame()
def work(self, feature, datasamples, dr_threshold=0.01, correct_threshold=0.85, woe_adjust=0.5, miss_is_incorrect=True,
verbose=False, out=False, out_images='WOEOrderChecker/'):
'''
Checks if WoE order of the feature remains stable in bootstrap
Parameters
-----------
feature: an object of FeatureWOE type that should be checked
datasamples: an object of DataSamples type containing the samples to check input feature on
dr_threshold: if WoE order is not correct, then default rate difference between swaped bins is checked
correct_threshold: what part of checks on bootstrap should be correct for feature to pass the check
woe_adjust: woe adjustment factor (for Default_Rate_i formula)
miss_is_incorrect: is there is no data for a bin on bootstrap sample, should it be treated as error or not
verbose: if comments and graphs should be printed
out: a boolean for image output or a path for csv/xlsx output file to export woe and er values
out_images: a path for image output (default - WOEOrderChecker/)
Returns
----------
Boolean - whether the check was successful
and if isinstance(out, str) then dataframes with WoE and ER values for groups per existing sample
'''
if out:
directory = os.path.dirname(out_images)
if not os.path.exists(directory):
os.makedirs(directory)
w={x:feature.woes[x] for x in feature.woes if feature.woes[x] is not None}
woes_df=pd.DataFrame(w, index=['Train']).transpose().reset_index().rename({'index':'group'},axis=1).sort_values('group')
if isinstance(out, str):
out_woes=woes_df.copy()
if feature.data.weights is None:
out_er=woes_df.drop('Train', axis=1).merge(feature.data.dataframe[['group', feature.data.target]].groupby('group', as_index=False).mean(),
on='group').rename({feature.data.target:'Train'}, axis=1)
else:
for_er=feature.data.dataframe[['group', feature.data.target, feature.data.weights]]
for_er[feature.data.target]=for_er[feature.data.target]*for_er[feature.data.weights]
out_er=woes_df.drop('Train', axis=1).merge(for_er[['group', feature.data.target]].groupby('group', as_index=False).mean(),
on='group').rename({feature.data.target:'Train'}, axis=1)
cur_sample_woe=pd.DataFrame(columns=['group', 'woe', 'event_rate'])
samples=[datasamples.validate, datasamples.test]
sample_names=['Validate', 'Test']
for si in range(len(samples)):
if samples[si] is not None:
to_keep=[feature.feature, samples[si].target]
if samples[si].weights is not None:
to_keep.append(samples[si].weights)
cur_sample=samples[si].dataframe[to_keep]
cur_sample['group']=feature.set_groups(woes=feature.woes, original_values=True, data=cur_sample[feature.feature])
#cur_sample=cur_sample.sort_values('group')
if samples[si].weights is None:
N_b = cur_sample[samples[si].target].sum()
N_g = (1-cur_sample[samples[si].target]).sum()
else:
N_b = cur_sample[cur_sample[samples[si].target] == 1][samples[si].weights].sum()
N_g = cur_sample[cur_sample[samples[si].target] == 0][samples[si].weights].sum()
DR = N_b*1.0/N_g
index=-1
# for each interval
for gr_i in sorted(cur_sample['group'].unique()):
index=index+1
if samples[si].weights is None:
N_b_i = cur_sample[cur_sample['group']==gr_i][samples[si].target].sum()
N_g_i = cur_sample[cur_sample['group']==gr_i].shape[0] - N_b_i
else:
N_b_i = cur_sample[(cur_sample['group']==gr_i)&(cur_sample[samples[si].target] == 1)][samples[si].weights].sum()
N_g_i = cur_sample[(cur_sample['group']==gr_i)&(cur_sample[samples[si].target] == 0)][samples[si].weights].sum()
if not(N_b_i==0 and N_g_i==0):
DR_i = (N_b_i + woe_adjust)/(N_g_i + woe_adjust)
ER_i=N_b_i/(N_b_i+N_g_i)
n = N_g_i + N_b_i
smoothed_woe_i = np.log(DR*(feature.alpha + n)/(n*DR_i + feature.alpha))#*DR))
cur_sample_woe.loc[index]=[gr_i, smoothed_woe_i, ER_i]
out_woes=out_woes.merge(cur_sample_woe.drop('event_rate', axis=1), on='group').rename({'woe':samples[si].name}, axis=1)
out_er=out_er.merge(cur_sample_woe.drop('woe', axis=1), on='group').rename({'event_rate':samples[si].name}, axis=1)
else:
out_woes[sample_names[si]]=np.nan
out_er[sample_names[si]]=np.nan
if datasamples.bootstrap_base is not None:
if verbose:
fig = plt.figure(figsize=(15,7))
bootstrap_correct=[]
to_keep=[feature.feature, datasamples.bootstrap_base.target]+([datasamples.bootstrap_base.weights] if datasamples.bootstrap_base.weights is not None else [])
base_with_group=datasamples.bootstrap_base.dataframe[to_keep]
base_with_group['group']=feature.set_groups(woes=feature.woes, original_values=True, data=base_with_group[feature.feature])
for bn in range(len(datasamples.bootstrap)):
cur_sample_woe=pd.DataFrame(columns=['group', 'train_woe', 'woe', 'event_rate'])
cur_sample=base_with_group.iloc[datasamples.bootstrap[bn]]
#cur_sample['train_woe']=cur_sample['group'].apply(lambda x: feature.woes[x])
#cur_sample=cur_sample.sort_values('group')
if datasamples.bootstrap_base.weights is None:
N_b = cur_sample[datasamples.bootstrap_base.target].sum()
N_g = (1-cur_sample[datasamples.bootstrap_base.target]).sum()
else:
N_b = cur_sample[cur_sample[datasamples.bootstrap_base.target] == 1][datasamples.bootstrap_base.weights].sum()
N_g = cur_sample[cur_sample[datasamples.bootstrap_base.target] == 0][datasamples.bootstrap_base.weights].sum()
DR = N_b*1.0/N_g
index=-1
# for each interval
for gr_i in sorted(cur_sample['group'].unique()):
index=index+1
if datasamples.bootstrap_base.weights is None:
N_b_i = cur_sample[cur_sample['group']==gr_i][datasamples.bootstrap_base.target].sum()
N_g_i = cur_sample[cur_sample['group']==gr_i].shape[0] - N_b_i
else:
N_b_i = cur_sample[(cur_sample['group']==gr_i)&(cur_sample[datasamples.bootstrap_base.target] == 1)][datasamples.bootstrap_base.weights].sum()
N_g_i = cur_sample[(cur_sample['group']==gr_i)&(cur_sample[datasamples.bootstrap_base.target] == 0)][datasamples.bootstrap_base.weights].sum()
if not(N_b_i==0 and N_g_i==0):
DR_i = (N_b_i + woe_adjust)/(N_g_i + woe_adjust)
ER_i=N_b_i/(N_b_i+N_g_i)
n = N_g_i + N_b_i
smoothed_woe_i = np.log(DR*(feature.alpha + n)/(n*DR_i + feature.alpha))#*DR))
cur_sample_woe.loc[index]=[gr_i, feature.woes[gr_i], smoothed_woe_i, ER_i]
if isinstance(out, str):
out_woes=out_woes.merge(cur_sample_woe.drop('event_rate', axis=1), on='group').rename({'woe':'Bootstrap'+str(bn)}, axis=1).drop('train_woe', axis=1)
out_er=out_er.merge(cur_sample_woe.drop('woe', axis=1), on='group').rename({'event_rate':'Bootstrap'+str(bn)}, axis=1).drop('train_woe', axis=1)
cur_sample_woe['trend_train']=np.sign((cur_sample_woe['train_woe']-cur_sample_woe['train_woe'].shift(1)).dropna())
cur_sample_woe['trend']=np.sign((cur_sample_woe['woe']-cur_sample_woe['woe'].shift(1)).dropna())
cur_sample_woe['prev_event_rate']=cur_sample_woe['event_rate'].shift(1)
cur_sample_woe_error=cur_sample_woe[cur_sample_woe['trend_train']!=cur_sample_woe['trend']].dropna(how='all', subset=['trend_train','trend'])
cur_sample_correct=True
if cur_sample_woe.shape[0]!=0:
for ind, row in cur_sample_woe_error.iterrows():
if abs(row['event_rate']-row['prev_event_rate'])>dr_threshold:
cur_sample_correct=False
if miss_is_incorrect:
cur_sample_correct=cur_sample_correct and woes_df.merge(cur_sample_woe, on='group', how='left')['woe'].notnull().all()
if verbose:
line_color='green' if cur_sample_correct else 'red'
plt.plot(range(woes_df.shape[0]), woes_df.merge(cur_sample_woe, on='group', how='left')['woe'],
color=line_color, alpha=0.4)
bootstrap_correct.append(cur_sample_correct*1)
bootstrap_correct_part=sum(bootstrap_correct)/len(bootstrap_correct)
result=(bootstrap_correct_part>=correct_threshold)
if verbose:
plt.plot(range(woes_df.shape[0]), woes_df['Train'], color='blue', linewidth=5.0)
plt.ylabel('WoE')
plt.xticks(range(woes_df.shape[0]), woes_df['group'])
plt.suptitle(feature.feature, fontsize = 16)
fig.autofmt_xdate()
if out:
plt.savefig(out_images+feature.feature+".png", dpi=100, bbox_inches='tight')
plt.show()
print('Correct WoE order part = '+str(round(bootstrap_correct_part,4))+' ('+str(sum(bootstrap_correct))+' out of '+str(len(bootstrap_correct))+'), threshold = '+str(correct_threshold))
if bootstrap_correct_part<correct_threshold:
print('Not stable enough WoE order.')
else:
if verbose:
print('No bootstrap samples were found is DataSamples object. Return True.')
result=True
if isinstance(out, str):
return result, out_woes, out_er
else:
return result
def work_all(self, woe, features=None, drop_features=False, dr_threshold=0.01, correct_threshold=0.85, woe_adjust=0.5,
miss_is_incorrect=True, verbose=False, out=False, out_images='WOEOrderChecker/',
out_woe_low=0, out_woe_high=0, out_er_low=0, out_er_high=0):
'''
Checks if WoE order of all features from WOE object remains stable in bootstrap
Parameters
-----------
woe: an object of FeatureWOE type that should be checked
features: a list of features to check (if None, then all features from woe object will be checked)
drop_features: should the features be dropped from WOE.feature_woes list in case of failed checks
dr_threshold: if WoE order is not correct, then default rate difference between swaped bins is checked
correct_threshold: what part of checks on bootstrap should be correct for feature to pass the check
woe_adjust: woe adjustment factor (for Default_Rate_i formula)
miss_is_incorrect: is there is no data for a bin on bootstrap sample, should it be treated as error or not
verbose: if comments and graphs should be printed
out: a boolean for image output or a path for xlsx output file to export woe and er values
out_images: a path for image output (default - WOEOrderChecker/)
out_woe_low: correcting coefficient for lower edge of WoE gradient scale (if out is str)
out_woe_high: correcting coefficient for upper edge of WoE gradient scale (if out is str)
out_er_low: correcting coefficient for lower edge of ER gradient scale (if out is str)
out_er_high: correcting coefficient for upper edge of ER gradient scale (if out is str)
Returns
----------
Dictionary with results of check for all features from input WOE object
'''
if features is None:
cycle_features=list(woe.feature_woes)
else:
cycle_features=list(features)
not_in_features_woe=[x for x in cycle_features if x not in woe.feature_woes]
if len(not_in_features_woe)>0:
print('No', not_in_features_woe, 'in self.feature_woes. Abort.')
return None
if out:
directory = os.path.dirname(out_images)
if not os.path.exists(directory):
os.makedirs(directory)
woe_order_correct={}
if isinstance(out, str):
woes_df=pd.DataFrame(columns=['feature', 'group', 'Train', 'Validate', 'Test']+['Bootstrap'+str(i) for i in range(len(woe.datasamples.bootstrap))])
er_df=pd.DataFrame(columns=['feature', 'group', 'Train', 'Validate', 'Test']+['Bootstrap'+str(i) for i in range(len(woe.datasamples.bootstrap))])
for feature in cycle_features:
if isinstance(out, str):
woe_order_correct[feature], woes_df_feature, er_df_feature=self.work(woe.feature_woes[feature], datasamples=woe.datasamples, dr_threshold=dr_threshold,
correct_threshold=correct_threshold, woe_adjust=woe_adjust,
miss_is_incorrect=miss_is_incorrect, verbose=verbose, out=out, out_images=out_images)
woes_df_feature['feature']=feature
er_df_feature['feature']=feature
woes_df=woes_df.append(woes_df_feature, ignore_index=True)
er_df=er_df.append(er_df_feature, ignore_index=True)
else:
woe_order_correct[feature]=self.work(woe.feature_woes[feature], datasamples=woe.datasamples, dr_threshold=dr_threshold,
correct_threshold=correct_threshold, woe_adjust=woe_adjust,
miss_is_incorrect=miss_is_incorrect, verbose=verbose, out=out, out_images=out_images)
if isinstance(out, str):
woes_df=woes_df[['feature', 'group', 'Train', 'Validate', 'Test']+['Bootstrap'+str(i) for i in range(len(woe.datasamples.bootstrap))]].dropna(axis=1)
er_df=er_df[['feature', 'group', 'Train', 'Validate', 'Test']+['Bootstrap'+str(i) for i in range(len(woe.datasamples.bootstrap))]].dropna(axis=1)
df_columns=[x for x in woes_df.columns if x not in ['feature', 'group']]
if out[-4:]=='.csv':
print('Unappropriate format for exporting two tables. Use .xlsx. Skipping export.')
elif out[-4:]=='.xls' or out[-5:]=='.xlsx':
writer = pd.ExcelWriter(out, engine='openpyxl')
woes_values=woes_df[df_columns].values.reshape(-1,).tolist()
woes_df.style.apply(color_background,
mn=np.mean(woes_values)-2*np.std(woes_values),
mx=np.mean(woes_values)+2*np.std(woes_values),
cmap='RdYlGn', subset=df_columns,
high=out_woe_high, low=out_woe_low).to_excel(writer, sheet_name='WoE by Samples', index=False)
er_values=er_df[df_columns].values.reshape(-1,).tolist()
er_df.style.apply(color_background,
mn=max([0,np.mean(er_values)-2*np.std(er_values)]),
mx=np.mean(er_values)+2*np.std(er_values),
cmap='RdYlGn_r', subset=df_columns,
high=out_er_high, low=out_er_low).to_excel(writer, sheet_name='Event Rate by Samples', index=False)
# Get the openpyxl objects from the dataframe writer object.
for worksheet in writer.sheets:
for x in writer.sheets[worksheet].columns:
writer.sheets[worksheet].column_dimensions[x[0].column].width = 40 if x[0].column=='A' else 12
writer.save()
else:
print('Unknown format for export file. Use .xlsx. Skipping export.')
if drop_features:
woe.excluded_feature_woes.update({x:woe.feature_woes[x] for x in woe.feature_woes if woe_order_correct[x]==False})
woe.feature_woes={x:woe.feature_woes[x] for x in woe.feature_woes if woe_order_correct[x]}
return woe_order_correct
def work_tree(self, dtree, input_df=None, er_threshold=0.01, correct_threshold=0.85,
miss_is_incorrect=True, to_correct=False, max_corrections=2, verbose=False):
'''
Checks if WoE order of the tree remains stable in bootstrap and corrects the tree for it to pass the check
Parameters
-----------
dtree: a cross.DecisionTree object
input_df: a DataFrame, containing tree description
er_threshold: if WoE order is not correct, then event rate difference between swaped bins is checked
correct_threshold: what part of checks on bootstrap should be correct for tree to pass the check
miss_is_incorrect: if there is no data for a bin on bootstrap sample, should it be treated as error or not
to_correct: should there be attempts to correct tree by uniting nodes/groups or not
max_corrections: maximal number of corrections in attempt to change the tree so it will pass the check
verbose: if comments and graphs should be printed (False - no output, True or 1 - actions log, 2 - actions and interim checks)
Returns
----------
if to_correct:
True and a DataFrame with tree description - corrected or initial
else:
result of the input tree check and the input tree itself
'''
#-----------------------------------------------Subsidiary functions--------------------------------------------------
def woeo_check(input_df, bootstrap_tree_list, er_threshold=0.01, correct_threshold=0.85, miss_is_incorrect=True, verbose=False):
'''
TECH
WoE order stabitility check
Parameters
-----------
input_df: a DataFrame, containing tree description
bootstrap_sample_list: a list of DataFrames, cantaining the same tree, as in tree_df, but with stats from bootstrap samples
er_threshold: if WoE order is not correct, then event rate difference between swaped bins is checked
correct_threshold: what part of checks on bootstrap should be correct for tree to pass the check
miss_is_incorrect: if there is no data for a bin on bootstrap sample, should it be treated as error or not
verbose: if comments and graphs should be printed
Returns
----------
boolean flag of current tree passing the check and
the dictionary of {group:number of errors}
'''
tree_df=input_df.copy()
tree_groups_stats=tree_df[['group','group_woe', 'group_target', 'group_amount']].rename({'group_woe':'train_woe'}, axis=1).\
dropna().drop_duplicates().reset_index(drop=True).sort_values('group')
tree_groups_stats['group_er']=tree_groups_stats['group_target']/tree_groups_stats['group_amount']
features=list(tree_df.columns[:tree_df.columns.get_loc('node')])
if verbose>True:
fig = plt.figure(figsize=(15,7))
bootstrap_correct=[]
bootstrap_groups_error={}
for i in range(len(bootstrap_tree_list)):
#display(bootstrap_tree_list[i][bootstrap_tree_list[i]['leaf']])
groups_stats=bootstrap_tree_list[i][bootstrap_tree_list[i]['leaf']][['group', 'group_woe']].drop_duplicates().\
merge(tree_groups_stats, on='group', how='left').sort_values('group')
groups_stats['trend_train']=np.sign((groups_stats['train_woe']-groups_stats['train_woe'].shift(1)).dropna())
groups_stats['trend']=np.sign((groups_stats['group_woe']-groups_stats['group_woe'].shift(1)).dropna())
groups_stats['prev_er']=groups_stats['group_er'].shift(1)
#display(groups_stats)
groups_error=groups_stats[groups_stats['trend_train']!=groups_stats['trend']].dropna(how='all', subset=['trend_train','trend'])
sample_correct=True
if groups_stats.shape[0]!=0:
for ind, row in groups_error.iterrows():
if abs(row['group_er']-row['prev_er'])>er_threshold:
if row['group'] in bootstrap_groups_error:
bootstrap_groups_error[row['group']]+=1
else:
bootstrap_groups_error[row['group']]=1
sample_correct=False
if miss_is_incorrect:
sample_correct=sample_correct and tree_groups_stats.merge(groups_stats, on='group', how='left')['group_woe'].notnull().all()
if verbose>True:
line_color='green' if sample_correct else 'red'
plt.plot(range(tree_groups_stats.shape[0]), tree_groups_stats.merge(groups_stats, on='group', how='left')['group_woe'],
color=line_color, alpha=0.4)
bootstrap_correct.append(sample_correct*1)
bootstrap_correct_part=sum(bootstrap_correct)/len(bootstrap_correct)
result=(bootstrap_correct_part>=correct_threshold)
if verbose>True:
plt.plot(range(tree_groups_stats.shape[0]), tree_groups_stats['train_woe'], color='blue', linewidth=5.0)
plt.ylabel('WoE')
plt.xticks(range(tree_groups_stats.shape[0]), tree_groups_stats['group'])
plt.suptitle('Tree on '+str(features), fontsize = 16)
fig.autofmt_xdate()
plt.show()
if verbose:
print('Correct WoE order part = '+str(round(bootstrap_correct_part,4))+' ('+str(sum(bootstrap_correct))+' out of '+str(len(bootstrap_correct))+'), threshold = '+str(correct_threshold))
if bootstrap_correct_part<correct_threshold:
print('Not stable enough WoE order.')
return result, bootstrap_groups_error
def woeo_recursive_correct(tree_df, bootstrap_sample_list, worst_group, allowed_corrections=1, corrections=None, verbose=False,
er_threshold=0.01, correct_threshold=0.85, miss_is_incorrect=True,
max_corrections=1):
'''
TECH
Recursive search of corrections needed for tree to pass WoE order stabitility check
Parameters
-----------
tree_df: a DataFrame, containing tree description
bootstrap_sample_list: a list of DataFrames, cantaining the same tree, as in tree_df, but with stats from bootstrap samples
worst_group: a number of group, in which the most part of errors was found during WoE order check (so this group switched places with the one before it)
allowed_corrections: a number of remaining corrections, that are allowed
corrections: the list of current corrections
verbose: if comments and graphs should be printed
er_threshold: if WoE order is not correct, then event rate difference between swaped bins is checked
correct_threshold: what part of checks on bootstrap should be correct for tree to pass the check
miss_is_incorrect: if there is no data for a bin on bootstrap sample, should it be treated as error or not
max_corrections: maximal number of corrections in attempt to change the tree so it will pass the check
Returns
----------
boolean flag of corrected tree passing the check and
the list of corrections, that were made
'''
if corrections is None:
corrections=[]
if allowed_corrections>0:
possible_nodes_to_correct=[]
#each group is compared to the previous one and the worst_group has the most number of errors with the previous
#group (by woe), also groups are numbered by woe, so we need to check the worst, the previous one and also the one
#before previous and the one after the worst (because maybe we need to unite two groups before the worst, or
#the worst and the next or both variants)
for g in range(worst_group-2, worst_group+2):
group_filter=(tree_df['group']==g)
#if group exists and it is not a united group
if group_filter.sum()==1:
add_node_series=tree_df[group_filter]
possible_nodes_to_correct.append(add_node_series['node'].values[0])
#also for groups that were changing their places in woe order we are looking for adjacent nodes that are leaves
#and are not in united groups (because there are cases like this one: groups (3,1), (2,0), most errors are
#between 0 and 1, but above we are looking only at groups -1, 0, 1 and 2, so we also need to check the factual
#tree structure, not only groups woe order)
if g in [worst_group-1, worst_group]:
adjacent_filter=(tree_df['parent_node']==add_node_series['parent_node'].values[0])&(tree_df['node']!=add_node_series['node'].values[0])
if adjacent_filter.sum()==1:
adjacent_node_series=tree_df[adjacent_filter]
if tree_df[tree_df['group']==adjacent_node_series['group'].values[0]].shape[0]==1 and adjacent_node_series['leaf'].values[0]:
possible_nodes_to_correct.append(adjacent_node_series['node'].values[0])
possible_nodes_to_correct=sorted(list(set(possible_nodes_to_correct)))
combinations=[]
for n1 in range(len(possible_nodes_to_correct)):
first_node=possible_nodes_to_correct[n1]
for n2 in range(len(possible_nodes_to_correct[n1+1:])):
second_node=possible_nodes_to_correct[n1+1:][n2]
if dtree.check_unitability(tree_df, [first_node, second_node]):
first_node_series=tree_df[tree_df['node']==first_node]
parent_node=first_node_series['parent_node'].values[0]
split_feature=tree_df[(tree_df['node']==parent_node)]['split_feature'].values[0]
first_condition=first_node_series[split_feature].values[0]
if not(isinstance(first_condition, list) or isinstance(first_condition, tuple)):
nodes_combination=[second_node, first_node]
else:
nodes_combination=[first_node, second_node]
combinations.append([nodes_combination,
abs(first_node_series['woe'].values[0]- \
tree_df[tree_df['node']==second_node]['woe'].values[0])])
combinations.sort(key=itemgetter(1))
if verbose:
print('\t'*(max_corrections-allowed_corrections)+'Possible corrections (by nodes):',
str([x[0] for x in combinations]))
for nodes_to_unite, woe in combinations:
if verbose:
print('\t'*(max_corrections-allowed_corrections+1)+'Checking (level =',max_corrections-allowed_corrections+1,
'): uniting nodes', str(nodes_to_unite), 'with woe difference =', woe)
corrected_tree_df=dtree.unite_nodes(tree_df, nodes_to_unite)
corrected_bootstrap_sample_list=[]
for bs_tree in bootstrap_sample_list:
corrected_bootstrap_sample=dtree.unite_nodes(bs_tree, nodes_to_unite)
corrected_bootstrap_sample=corrected_bootstrap_sample.drop('group', axis=1).merge(corrected_tree_df[['node', 'group']], on='node', how='left')
corrected_bootstrap_sample_list.append(corrected_bootstrap_sample)
correct, errors = woeo_check(corrected_tree_df, corrected_bootstrap_sample_list, verbose=(verbose>True),
er_threshold=er_threshold, correct_threshold=correct_threshold,
miss_is_incorrect=miss_is_incorrect)
if correct:
if verbose:
print('\t'*(max_corrections-allowed_corrections+1)+'Corrections',
str(corrections+[nodes_to_unite]), 'succeeded!')
return correct, corrections+[nodes_to_unite]
else:
if allowed_corrections==1:
if verbose:
print('\t'*(max_corrections-allowed_corrections+1)+'Maximum correction level reached. Corrections',
str(corrections+[nodes_to_unite]), 'failed.')
else:
new_worst_group=int(max(errors, key=errors.get))
if verbose:
print('\t'*(max_corrections-allowed_corrections+1)+'Most errors were produced by',
new_worst_group, 'group. Trying to correct..')
correct, final_corrections=woeo_recursive_correct(corrected_tree_df, corrected_bootstrap_sample_list,
new_worst_group, allowed_corrections-1,
corrections+[nodes_to_unite], verbose,
er_threshold=er_threshold, correct_threshold=correct_threshold,
miss_is_incorrect=miss_is_incorrect, max_corrections=max_corrections)
if correct:
return correct, final_corrections
return False, corrections
#---------------------------------------------------------------------------------------------------------------
if input_df is None:
tree_df=dtree.tree.copy()
else:
tree_df=input_df.copy()
datasamples=dtree.datasamples
features=[x for x in dtree.features if x in tree_df]
#list(tree_df.columns[:tree_df.columns.get_loc('node')])
if max_corrections is None:
max_corrections=2
alpha=dtree.alpha
woe_adjust=dtree.woe_adjust
bootstrap_sample_list=[]
if datasamples.bootstrap_base is not None:
base_for_woe=datasamples.bootstrap_base.keep(features=features).dataframe
base_for_woe['node']=dtree.transform(base_for_woe, tree_df, ret_values=['node'])
base_for_woe['---weight---']=base_for_woe[datasamples.bootstrap_base.weights] if datasamples.bootstrap_base.weights is not None else 1
base_for_woe[datasamples.bootstrap_base.target]=base_for_woe[datasamples.bootstrap_base.target]*base_for_woe['---weight---']
base_for_woe=base_for_woe[['node', datasamples.bootstrap_base.target, '---weight---']]
for i in range(len(datasamples.bootstrap)):
for_woe=base_for_woe.iloc[datasamples.bootstrap[i]]
groups_stats=for_woe.groupby('node', as_index=False).sum()
groups_stats.columns=['node', 'target', 'amount']
sample_tree_df=tree_df[features+['node', 'parent_node', 'depth', 'leaf', 'split_feature', 'group']].merge(groups_stats, on=['node'], how='left')
for parent_node in sample_tree_df.sort_values('depth', ascending=False)['parent_node'].unique():
for v in ['target', 'amount']:
sample_tree_df.loc[sample_tree_df['node']==parent_node, v]=sample_tree_df.loc[sample_tree_df['parent_node']==parent_node, v].sum()
sample_tree_df['nontarget']=sample_tree_df['amount']-sample_tree_df['target']
good_bad=[sample_tree_df[sample_tree_df['leaf']]['nontarget'].sum(),
sample_tree_df[sample_tree_df['leaf']]['target'].sum()]
sample_tree_df['er']=sample_tree_df['target']/sample_tree_df['amount']
sample_tree_df['woe']=np.log(((good_bad[1]/good_bad[0])*(alpha + sample_tree_df['amount'])/ \
(sample_tree_df['amount']*((sample_tree_df['target'] + woe_adjust)/(sample_tree_df['nontarget'] + woe_adjust)) + alpha)).astype(float))
groupped=sample_tree_df[['group', 'target', 'nontarget', 'amount']].groupby('group', as_index=False).sum().rename({'target':'group_target', 'nontarget':'group_nontarget', 'amount':'group_amount'}, axis=1)
groupped['group_woe']=np.log(((good_bad[1]/good_bad[0])*(alpha + groupped['group_amount'])/ \
(groupped['group_amount']*((groupped['group_target'] + woe_adjust)/(groupped['group_nontarget'] + woe_adjust)) + alpha)).astype(float))
sample_tree_df=sample_tree_df.merge(groupped, on=['group'], how='left')
bootstrap_sample_list.append(sample_tree_df)
else:
if verbose:
print('No bootstrap samples were found. Skipping WoE order check..')
return True, tree_df
correct, errors = woeo_check(tree_df, bootstrap_sample_list, verbose=verbose,
er_threshold=er_threshold, correct_threshold=correct_threshold,
miss_is_incorrect=miss_is_incorrect)
if to_correct and correct==False:
worst_group=int(max(errors, key=errors.get))
if verbose:
print('Most errors were produced by', worst_group, 'group. Trying to correct..')
new_correct=False
allowed_corrections=min(tree_df[tree_df['leaf']].shape[0]-2, max_corrections)
#print('allowed_corrections', allowed_corrections)
for cur_allowed_corrections in range(1,allowed_corrections+1):
if verbose:
print('Current maximum number of corrections =', cur_allowed_corrections)
new_correct, corrections=woeo_recursive_correct(tree_df, bootstrap_sample_list, worst_group,
allowed_corrections=cur_allowed_corrections, corrections=[],
verbose=verbose,
er_threshold=er_threshold, correct_threshold=correct_threshold,
miss_is_incorrect=miss_is_incorrect, max_corrections=cur_allowed_corrections)
#print('new_correct', new_correct)
if new_correct:
break
if new_correct:
if verbose:
print('Successful corrections:', str(corrections))
for correction in corrections:
tree_df=dtree.unite_nodes(tree_df, correction)
correct=new_correct
else:
if verbose:
print('No successful corrections were found. Proceed to uniting groups..')
while correct==False:
if verbose:
print('Uniting groups:', worst_group, worst_group-1)
tree_df=dtree.unite_groups(tree_df, [worst_group, worst_group-1])
corrected_bootstrap_sample_list=[]
for bs_tree in bootstrap_sample_list:
corrected_bootstrap_sample=dtree.unite_groups(bs_tree, [worst_group, worst_group-1])
corrected_bootstrap_sample=corrected_bootstrap_sample.drop('group', axis=1).merge(tree_df[['node', 'group']], on='node', how='left')
corrected_bootstrap_sample_list.append(corrected_bootstrap_sample)
bootstrap_sample_list=corrected_bootstrap_sample_list.copy()
correct, errors = woeo_check(tree_df, bootstrap_sample_list, verbose=verbose,
er_threshold=er_threshold, correct_threshold=correct_threshold,
miss_is_incorrect=miss_is_incorrect)
worst_group=int(max(errors, key=errors.get)) if len(errors)>0 else None
if to_correct:
for g in tree_df['group'].unique():
group_nodes=tree_df[tree_df['group']==g]['node'].tolist()
for i in range(len(group_nodes)):
for j in range(len(group_nodes[i+1:])):
if dtree.check_unitability(tree_df, [group_nodes[i], group_nodes[i+1:][j]]):
if verbose:
print('Unitable nodes', str([group_nodes[i], group_nodes[i+1:][j]]), 'were found in the same group. Uniting..')
tree_df=dtree.unite_nodes(tree_df, [group_nodes[i], group_nodes[i+1:][j]])
return True, tree_df
else:
return correct, tree_df
#---------------------------------------------------------------
# Author - <NAME> 24.08.2018
class WaldBSChecker(Processor):
'''
Class for coefficient significance checking on bootstrap samples
'''
def __init__(self):
self.stats = pd.DataFrame()
def work(self, model, woe, refit_data=None, drop_features=False, features_to_leave=None,
pvalue_threshold=0.05, correctness_threshold=0.85, verbose=False, out=False, out_images='WaldBSChecker/'):
'''
Checks model's coefficients significance in bootstrap. If drop_features==True, then features will be
dropped and the model will be refitted
Parameters
-----------
model: an object of LogisticRegressionModel type, whose coefficients should be checked
woe: a WOE object, containing feature transformations to WoE
refit_data: data with woe-transformed feature to refit model in case of drop_features=True
drop_features: should the features be dropped in case of not stable enough significance of their coefficients
features_to_leave: features not to be dropped in any case
pvalue_threshold: maximal p-value for Wald chi-square statistic for coefficient to be considered significant
correctness_threshold: minimal part of bootstrap samples on which coefficient stays significant for feature
to be considered having coefficient with stable enough significance
verbose: if comments and graphs should be printed
out: a boolean for image output or a path for xlsx output file to export p-value by iteration values
out_images: a path for image output (default - WaldBSChecker/)
Returns
----------
a dataframe with standard error, wald statistic and p-value for feature coefficients in case of not dropping them or
a list of features to stay
'''
if features_to_leave is None:
features_to_leave=[]
if out:
directory = os.path.dirname(out_images)
if not os.path.exists(directory):
os.makedirs(directory)
to_check_wald=True
if woe.datasamples.bootstrap_base is not None:
if isinstance(out,str) and (out[-4:]=='.xls' or out[-5:]=='.xlsx'):
writer = pd.ExcelWriter(out, engine='openpyxl')
with_woe=woe.transform(woe.datasamples.bootstrap_base, original_values=True, calc_gini=False)
it=0
while to_check_wald:
it=it+1
if out:
wald_df=pd.DataFrame(index=['intercept']+model.selected)
samples=[woe.datasamples.train, woe.datasamples.validate, woe.datasamples.test]
sample_names=['Train', 'Validate', 'Test']
for si in range(len(samples)):
if samples[si] is not None:
sample_wald=model.wald_test(samples[si], woe_transform=woe)
wald_df=wald_df.join(sample_wald[['feature', 'p-value']].set_index('feature')).rename({'p-value':sample_names[si]}, axis=1)
else:
wald_df[sample_names[si]]=np.nan
wald_df=wald_df.join(sample_wald[['feature','coefficient']].set_index('feature'))#.rename({0:'Train', 1:'Validate', 2:'Test'}, axis=1)
to_check_wald=False
wald_correct={}
for f in model.selected:
wald_correct[f]=0
for bn in range(len(woe.datasamples.bootstrap)):
if woe.datasamples.bootstrap_base.weights is not None:
w=model.wald_test(Data(with_woe.dataframe.iloc[woe.datasamples.bootstrap[bn]][model.selected+[woe.datasamples.bootstrap_base.target, woe.datasamples.bootstrap_base.weights]],
woe.datasamples.bootstrap_base.target, features=model.selected, weights=woe.datasamples.bootstrap_base.weights))
else:
w=model.wald_test(Data(with_woe.dataframe.iloc[woe.datasamples.bootstrap[bn]][model.selected+[woe.datasamples.bootstrap_base.target]],
woe.datasamples.bootstrap_base.target, features=model.selected))
if out:
wald_df=wald_df.join(w[['feature', 'p-value']].set_index('feature')).rename({'p-value':'Bootstrap'+str(bn)}, axis=1)
for f in model.selected:
wald_correct[f]=wald_correct[f]+(w[w['feature']==f]['p-value'].values[0]<pvalue_threshold)
for f in model.selected:
wald_correct[f]=wald_correct[f]/len(woe.datasamples.bootstrap)
if out:
#display(wald_df)
wald_df=wald_df[['coefficient', 'Train', 'Validate', 'Test']+['Bootstrap'+str(i) for i in range(len(woe.datasamples.bootstrap))]].dropna(axis=1)
wald_columns=[x for x in wald_df.columns if x!='coefficient']
for ind in wald_df.index:
p_value_list=wald_df[[x for x in wald_df.columns if x[:9]=='Bootstrap']].loc[ind].tolist()
mean=np.mean(p_value_list)
std=np.std(p_value_list)
sns.distplot(p_value_list)
plt.axvline(x=mean, linestyle='--', alpha=0.5)
plt.text(mean, 0, ' Mean = '+str(round(mean,8))+', std = '+str(round(std,8)),
horizontalalignment='right', verticalalignment='bottom', rotation=90)
plt.xlabel('Wald p-values in bootstrap')
plt.ylabel('Distribution')
plt.title(str(ind), fontsize = 16)
plt.savefig(out_images+str(ind)+"_"+str(it)+".png", dpi=100, bbox_inches='tight')
plt.close()
if isinstance(out,str):
if out[-4:]=='.xls' or out[-5:]=='.xlsx':
wald_df.style.apply(color_background,
mn=0,
mx=pvalue_threshold,
cmap='RdYlGn_r',
subset=pd.IndexSlice[:, wald_columns]).to_excel(writer, sheet_name='Iteration '+str(it))
# Get the openpyxl objects from the dataframe writer object.
worksheet = writer.sheets['Iteration '+str(it)]
for x in worksheet.columns:
if x[0].column=='A':
worksheet.column_dimensions[x[0].column].width = 40
else:
worksheet.column_dimensions[x[0].column].width = 12
for cell in worksheet[x[0].column]:
cell.number_format = '0.00000000'
worksheet.freeze_panes = worksheet['C2']
else:
print('Unknown or unappropriate format for export several tables. Use .xlsx. Skipping export.')
if drop_features and refit_data is not None:
if verbose:
display(wald_correct)
insignificant={x:refit_data.ginis[x] for x in wald_correct if wald_correct[x]<correctness_threshold and x not in features_to_leave}
if len(insignificant)>0:
feature_to_drop=min(insignificant, key=insignificant.get)
if verbose:
print('Dropping', feature_to_drop, 'because of not stable enough significance of coefficient: ', wald_correct[feature_to_drop], '(gini =', refit_data.ginis[feature_to_drop],')')
model.selected.remove(feature_to_drop)
model.fit(refit_data, selected_features=True)
if verbose:
model.draw_coefs()
to_check_wald=True
else:
if verbose:
print('drop_features==False or no refit_data was specified, so returning wald test results')
return wald_correct
if isinstance(out,str) and (out[-4:]=='.xls' or out[-5:]=='.xlsx'):
writer.save()
return model.selected
else:
print('No bootstrap samples!')
return None
#---------------------------------------------------------------
class FullnessAnalyzer():
'''
Visualizing data fullness by blocks (if provided), using cluster analysis to split data according
to its fullness and explaining clusters by training sklearn's Decision Tree on its labels (for
easier interpretation and determining, to which cluster each observation belongs)
For full functionality it needs hdbscan and fastcluster packages to be installed (because other methods
works too slow with highly-dimensional data exceeding 50000 obs). Otherwise only sklearn's k-means
clustering is available.
Also for Decision Tree visualization graphviz needs to be installed in OS and graphviz_converter_path
should be provided to .work and .explain_clusters methods (it is a path to dot.exe, which can transform
.dot file to .png for it to be displayed in the notebook)
Because sklearn's Decision Tree is used for explanation only interval values without inf or nan should be used.
The safe (but maybe not the best) aproach is to base explanation on already transformed features.
'''
def __init__(self, data=None, linked=None, clusterer=None, categorical=None):
'''
Parameters
-----------
stats: a pandas DataFrame, containing technical information for report generation
data: a Data object with transformed features and cluster labels (if the process has already reached the .get_clusters step)
linked: a linkage matrix, which is generated by hierarchy clustering (used for getting cluster labels from)
clusterer: a trained clusterer object (hdbscan.HDBSCAN or sklearn.cluster.KMeans) (used for getting cluster labels from)
categorical: a list of categorical features (their alues will be changed to -1 and 1 without 0)
'''
self.stats = pd.DataFrame()
self.data = data
self.linked = linked
self.clusterer = clusterer
self.conditions = None
self.categorical = None
def work(self, input_data=None, to_transform=True, to_train=True, to_explain=True,
features=None, categorical=None, exclude=None, blocks=None, interval_min_unique=None,
clusterer=None, clusterer_options=None, clusters_number=None,
explainer_features=None, explainer_max_leaf_nodes=None, explainer_max_depth=None, explainer_min_samples_leaf=None,
graphviz_converter_path='C:\\Program Files (x86)\\Graphviz2.38\\bin\\dot.exe', verbose=True):
'''
Fully process input data for fullness clustering meaning transformation to fullness values, visualizing this data,
clustering, getting cluster labels, visualizing fullness by clusters and explaining clusters with Decision Tree
Because sklearn's Decision Tree is used for explanation only interval values without inf or nan should be used. The
safe (but maybe not the best) aproach is to base explanation on already transformed features.
Parameters
-----------
input_data: a pandas DataFrame or a Data object to analyze
to_transform: should the input data be transformed to fullness values (-1 for missing, 0 for interval features' zero, 1 for the rest)
to_train: should the clusterer be trained (it can be omitted if it was already trained and we only need to get/explain clusters)
to_explain: should the clusters be explained
features: a list of features to analyze
categorical: a list of categorical features (their alues will be changed to -1 and 1 without 0)
exclude: a list of features to exclude from analysis (they can be used later for explanation)
blocks: a path to Excel file or a pandas DataFrame with blocks information (features will be sorted by blocks during visualization)
interval_min_unique: a minimal number of unique values for feature to be considered interval (if categorical is None)
clusterer: a type of clusterer to be used. Default is k-means.
'k-means' - partitions observations into k clusters in which each observation belongs to the cluster with the nearest mean,
serving as a prototype of the cluster. This is the fastest method with usually poor results.
'hierarchy'- hierarchical agglomerative clustering seeks to build a hierarchy of clusters. This is a "bottom-up" approach:
each observation starts in its own cluster, and pairs of clusters are merged as one moves up the hierarchy. For
60000 observations and 500 features it works for about 40 minutes. Number of clusters can be adjusted without
need to retrain the clusterer.
'hdbscan' - Hierarchical Density-Based Spatial Clustering of Applications with Noise. Searches for the areas with the higher
density allowing different values of variance for each cluster and existance of points without clusters (label -1).
For 60000 observations and 500 features it works for about 55 minutes. Number of clusters cannot be adjusted directly,
only by changing clusterer options like min_cluster_size, min_samples etc, clusterer must be retrained.
clusterer_options: a dictionary of options for clusterer. Contents depend on clusterer type (see main options below):
'k-means' (for more information look for sklearn.cluster.KMeans):
init - the method for choosing centroids, 'random' and 'k-means++' are available
n_clusters - the desired number of clusters
n_init - the number of iterations to choose best clustering from
'hierarchy' (for more information look for http://danifold.net/fastcluster.html):
method - for memory-friendly clustering available methods are centroid (worked best so far), median, ward and single.
For memory-hungry clustering other methods can be used: complete, average, weighted. For a dataset with 60000 observations
and 500 features 'complete' method used about 10 GB RAM and crushed. Use on your own risk.
metric - for centroid, median and ward methods only euclidian metric can be used. For other methods these metrics are available:
euclidian, sqeuclidean, seuclidean, mahalanobis, cityblock, chebychev, minkowski, cosine, correlation, canberra, braycurtis,
hamming, jaccard, yule, dice, rogerstanimoto, russellrao, sokalsneath, kulsinski, matching (sokalmichener), user-defined.
'hdbscan' (for more information look for https://hdbscan.readthedocs.io/en/latest/):
min_cluster_size - the minimal number of observations to form a cluster
min_samples - number of samples in a neighbourhood for a point to be considered a core point. The larger this values is
the more conservative clustering will be (more points will be considered as noise)
clusters_number: the desired number of clusters to get from clusterer (used for k-means during training and for hierarchy after training,
for hdbscan this option is ignored)
explainer_features: a list of features to train Decision Tree for cluster labels prediction on (if None all features are used)
explainer_max_leaf_nodes: a maximal number of explanation tree leaves (if None, then number of clusters is used)
explainer_max_depth: a maximal depth of explanation tree (if None, then number of clusters - 1 is used)
explainer_min_samples_leaf: a minimal number of observations in an explanation tree leaf (if None, then minimal cluster size is used)
graphviz_converter_path: a path to dot.exe, which can transform .dot file to .png for it to be displayed in the notebook
verbose: a flag for detailed output (including fullness visualization)
Returns
----------
a dataframe with clusters explanation and statistics
'''
to_self=False
if input_data is None:
to_self=True
input_data=self.data
if to_transform:
print('Transforming data according to its fullness..')
processed_data=self.transform(input_data, features=features, categorical=categorical, exclude=exclude,
interval_min_unique=interval_min_unique)
to_self=True
else:
processed_data=copy.deepcopy(input_data)
if verbose:
print('Visualizing data fullness..')
self.visualize(processed_data, features=features, blocks=blocks, to_transform=False, exclude=exclude,
text_color = 'k', figsize=(15,5))
if to_train:
print('Training', clusterer if clusterer is not None else '', 'clusterer..')
self.train_clusterer(processed_data, features=features, exclude=exclude, clusterer=clusterer,
options=clusterer_options, clusters_number=clusters_number, verbose=verbose)
print('Getting clusters..')
processed_data.dataframe['cluster']=self.get_clusters(clusterer=clusterer, number=clusters_number, to_self=to_self)
if verbose:
print('Visualizing data fullness by clusters..')
self.visualize(processed_data, features=features, blocks=blocks, groups=['cluster'], to_transform=False,
text_color = 'k', figsize=(15,5))
if to_explain:
print('Training explanation Decision Tree..')
return self.explain_clusters(processed_data, cluster='cluster', features=explainer_features,
max_leaf_nodes=explainer_max_leaf_nodes,
max_depth=explainer_max_depth, min_samples_leaf=explainer_min_samples_leaf,
graphviz_converter_path=graphviz_converter_path)
def transform(self, input_data=None, features=None, categorical=None, exclude=None, interval_min_unique=None):
'''
Transforms input data features to fullness values (-1 for missing, 0 for interval features' zero, 1 for the rest)
Parameters
-----------
input_data: a pandas DataFrame or a Data object to analyze
features: a list of features to analyze
categorical: a list of categorical features (their values will be changed to -1 and 1 without 0)
exclude: a list of features to exclude from analysis (they can be used later for explanation)
interval_min_unique: a minimal number of unique values for feature to be considered interval (if categorical is None)
Returns
----------
a Data object, containing transformed data
'''
def lists_contents_equal(a,b):
return sorted([x for x in a if pd.isnull(x)==False])==sorted([x for x in b if pd.isnull(x)==False]) and \
((np.nan in a and np.nan in b) or (np.nan not in a and np.nan not in b))
if interval_min_unique is None:
interval_min_unique=30
if input_data is None:
input_data=self.data
if isinstance(input_data, pd.DataFrame):
data=Data(input_data)
else:
data=copy.deepcopy(input_data)
if features is None:
features=data.features
if exclude is not None:
features=[x for x in features if x not in exclude]
if categorical is None and self.categorical is not None:
categorical=self.categorical.copy()
if categorical is None:
categorical=[]
interval=[]
for f in features:
unique=data.dataframe[f].unique()
field_type=data.dataframe[f].dtype
if field_type==object or unique.shape[0]<interval_min_unique or \
(lists_contents_equal(unique, [0,1]) or \
lists_contents_equal(unique, [0,1, np.nan])):
categorical.append(f)
else:
interval.append(f)
else:
interval=[x for x in features if x not in categorical]
self.categorical=categorical.copy()
result_data=data.dataframe.copy()
result_data[categorical]=result_data[categorical].applymap(lambda x: -1 if pd.isnull(x) else 1)
result_data[interval]=result_data[interval].applymap(lambda x: -1 if pd.isnull(x) else 1 if x!=0 else 0)
self.data=Data(result_data)
return self.data
def visualize(self, input_data=None, features=None, groups=None, blocks=None, to_transform=True, exclude=None,
show_features_labels=False, text_color = 'k', figsize=(15,5)):
'''
Visualize data fullness by features, sorted in their blocks order (if provided). For each combination of groups' features' values
separate plot is made.
Parameters
-----------
input_data: a pandas DataFrame or a Data object to analyze
features: a list of features to analyze
groups: a list of features to draw separate plots for (a plot for a combination of features' values)
blocks: a path to Excel file or a pandas DataFrame with blocks information (features will be sorted by blocks during visualization)
to_transform: should the input data be transformed to fullness values (-1 for missing, 0 for interval features' zero, 1 for the rest)
exclude: a list of features to exclude from analysis (they can be used later for explanation)
show_features_labels: should feature labels be shown on the plot or not (if True, then no blocks information will be displayed)
text_color: a color for all text in plots (including ticks, axis and legend)
figsize: a size for plots' figure
'''
if input_data is None:
input_data=self.data
if isinstance(input_data, pd.DataFrame):
data=Data(input_data)
else:
data=copy.deepcopy(input_data)
if features is None:
features=data.features
if groups is not None:
features=[x for x in features if x not in groups]
try:
groups_data=data.dataframe[groups].drop_duplicates().sort_values(groups).reset_index(drop=True).reset_index()
except Exception:
print("No specified groups columns in input data. Return None.")
return None
groups_number=groups_data.shape[0]
else:
groups_number=1
if exclude is not None:
features=[x for x in features if x not in exclude]
if blocks is not None and show_features_labels==False:
if isinstance(blocks, str):
blocks=pd.read_excel(blocks)
blocks.columns=blocks.columns.str.lower()
for v in ['feature', 'variable', 'var', 'column']:
if v in blocks:
break
try:
blocks=blocks.sort_values(['block', v])
except Exception:
print("No 'feature' ,'variable', 'var', 'column' or 'block' field in input data. Return None.")
return None
blocks=blocks[blocks[v].isin(features)].reset_index(drop=True)
nd_block=[{v:f, 'block':'Not defined'} for f in features if f not in blocks[v].tolist()]
blocks=blocks.append(pd.DataFrame(nd_block), ignore_index=True).sort_values(['block', v]).reset_index(drop=True)
blocks_first=blocks.groupby('block').first().reset_index()
blocks_edges_x=list(blocks[blocks[v].isin(blocks_first[v])].index)+[blocks.shape[0]]
blocks_labels_x=[(blocks_edges_x[i]+blocks_edges_x[i+1])/2 for i in range(len(blocks_edges_x)-1)]
blocks_edges_x=blocks_edges_x[1:-1]
features=blocks[v].tolist()
else:
features=sorted(features)
if to_transform:
print('Transforming data according to its fullness..')
data=self.transform(data, features=features)
c_text=matplotlib.rcParams['text.color']
c_axes=matplotlib.rcParams['axes.labelcolor']
c_xtick=matplotlib.rcParams['xtick.color']
c_ytick=matplotlib.rcParams['ytick.color']
matplotlib.rcParams['text.color'] = text_color
matplotlib.rcParams['axes.labelcolor'] = text_color
matplotlib.rcParams['xtick.color'] = text_color
matplotlib.rcParams['ytick.color'] = text_color
group_stats={}
f, axes = plt.subplots(groups_number, 1, sharex=True, figsize=figsize)
if isinstance(axes, np.ndarray)==False:
axes=[axes]
for g in range(groups_number):
if groups is not None:
current_data=data.dataframe.merge(groups_data[groups_data['index']==g].drop('index', axis=1), on=groups, how='inner')
else:
current_data=data.dataframe
group_stats[g]=current_data[features].apply(pd.value_counts)
group_stats[g]=group_stats[g]/group_stats[g].sum()
group_stats[g].T.plot(kind='bar', ax=axes[g], stacked=True, width=1, legend=False, grid=False, ylim=(0,1))
handles, _ = axes[g].get_legend_handles_labels()
if groups_number>1:
axes[g].set_ylabel(str(dict(groups_data[groups].iloc[g])).replace('{', '').replace('}', '').replace("'", '')+'\namount = '+str(current_data.shape[0]),
rotation=0, ha='right', va='center')
if blocks is not None and show_features_labels==False:
axes[g].set_xticks(blocks_labels_x)
axes[g].set_xticklabels(blocks_first.block.tolist())
for edge in blocks_edges_x:
axes[g].axvline(edge-0.5, ymin=-0.5 if g!=groups_number-1 else 0, ymax=1.5 if g!=0 else 1,
linestyle='--', color='red', alpha=1, lw=1, clip_on=False)
elif show_features_labels:
axes[g].set_xticks([i for i in range(len(features))])
axes[g].set_xticklabels(features)
else:
axes[g].set_xticks([])
axes[g].yaxis.set_major_formatter(mtick.PercentFormatter(1))
labels=['Пропущенные' if x==-1 else '0 (для интервальных)' if x==0 else 'Не пропущенные' if x==1 else x \
for x in sorted(np.unique(data.dataframe[features]))]
f.legend(handles=handles, labels=labels,ncol=len(labels), bbox_to_anchor=(0.5,1), loc='center')
plt.tight_layout()
plt.show()
matplotlib.rcParams['text.color'] = c_text
matplotlib.rcParams['axes.labelcolor'] = c_axes
matplotlib.rcParams['xtick.color'] = c_xtick
matplotlib.rcParams['ytick.color'] = c_ytick
def train_clusterer(self, input_data=None, features=None, exclude=None, clusterer=None, options=None,
clusters_number=None, verbose=True):
'''
Trains the chosen clusterer to obtain cluster labels or a linkage matrix for them (for hierarchy clusterer)
Parameters
-----------
input_data: a pandas DataFrame or a Data object to analyze
features: a list of features to analyze
exclude: a list of features to exclude from analysis (they can be used later for explanation)
clusterer: a type of clusterer to be used. Default is k-means.
'k-means' - partitions observations into k clusters in which each observation belongs to the cluster with the nearest mean,
serving as a prototype of the cluster. This is the fastest method with usually poor results.
'hierarchy'- hierarchical agglomerative clustering seeks to build a hierarchy of clusters. This is a "bottom-up" approach:
each observation starts in its own cluster, and pairs of clusters are merged as one moves up the hierarchy. For
60000 observations and 500 features it works for about 40 minutes. Number of clusters can be adjusted without
need to retrain the clusterer.
'hdbscan' - Hierarchical Density-Based Spatial Clustering of Applications with Noise. Searches for the areas with the higher
density allowing different values of variance for each cluster and existance of points without clusters (label -1).
For 60000 observations and 500 features it works for about 55 minutes. Number of clusters cannot be adjusted directly,
only by changing clusterer options like min_cluster_size, min_samples etc, clusterer must be retrained.
options: a dictionary of options for clusterer. Contents depend on clusterer type (see main options below):
'k-means' (for more information look for sklearn.cluster.KMeans):
init - the method for choosing centroids, 'random' and 'k-means++' are available
n_clusters - the desired number of clusters
n_init - the number of iterations to choose best clustering from
'hierarchy' (for more information look for http://danifold.net/fastcluster.html):
method - for memory-friendly clustering available methods are centroid (worked best so far), median, ward and single.
For memory-hungry clustering other methods can be used: complete, average, weighted. For a dataset with 60000 observations
and 500 features 'complete' method used about 10 GB RAM and crushed. Use on your own risk.
metric - for centroid, median and ward methods only euclidian metric can be used. For other methods these metrics are available:
euclidian, sqeuclidean, seuclidean, mahalanobis, cityblock, chebychev, minkowski, cosine, correlation, canberra, braycurtis,
hamming, jaccard, yule, dice, rogerstanimoto, russellrao, sokalsneath, kulsinski, matching (sokalmichener), user-defined.
'hdbscan' (for more information look for https://hdbscan.readthedocs.io/en/latest/):
min_cluster_size - the minimal number of observations to form a cluster
min_samples - number of samples in a neighbourhood for a point to be considered a core point. The larger this values is
the more conservative clustering will be (more points will be considered as noise)
clusters_number: the desired number of clusters to get from clusterer (used for k-means during training and for hierarchy after training,
for hdbscan this option is ignored)
verbose: a flag for detailed output
'''
if input_data is None:
input_data=self.data
if isinstance(input_data, pd.DataFrame):
data=Data(input_data)
else:
data=copy.deepcopy(input_data)
if features is None:
features=data.features.copy()
if exclude is not None:
features=[x for x in features if x not in exclude]
if clusterer is None:
clusterer='k-means'
if options is None:
if clusterer=='hierarchy':
options={'method':'centroid'}
if clusterer=='hdbscan':
options={'min_cluster_size':1000, 'min_samples':500}
if clusterer=='k-means':
options={'init':'random', 'n_clusters':3 if clusters_number is None else clusters_number, 'n_init':10}
if verbose:
print('-- Starting at: '+str(datetime.datetime.now()))
if clusterer=='hierarchy':
if 'method' in options and options['method'] in ['complete', 'average', 'weighted']:
self.linked = fastcluster.linkage(data.dataframe[features], **options)
else:
self.linked = fastcluster.linkage_vector(data.dataframe[features], **options)
if clusterer=='hdbscan':
self.clusterer = hdbscan.HDBSCAN(**options)
self.clusterer.fit(data.dataframe[features])
if clusterer=='k-means':
self.clusterer = KMeans(**options)
self.clusterer.fit(data.dataframe[features])
if verbose:
print('-- Finished at: '+str(datetime.datetime.now()))
def get_clusters(self, clusterer=None, number=None, to_self=True):
'''
Gets cluster labels as a pandas Series object for the data, stored in self.data (basicaly for the data, that was used
to train clusterer on)
Parameters
-----------
clusterer: a type of clusterer that was used (there is a different behavior for retrieving cluster labels for different clusterers).
Available clusterers: 'k-means' (default), 'hierarchy', 'hdbscan'
number: the desired number of clusters to get from clusterer (used for hierarchy clusterer, for other clusterers is ignored)
to_self: should cluster labels be written to self.data.dataframe.cluster
Returns
----------
a pandas Series object, containing cluster labels
'''
if clusterer is None:
clusterer='k-means'
if clusterer=='hdbscan' and number is not None:
print('With hdbscan clusterer there is no way to directly choose the number of clusters. Use k-means of hierarchy instead.')
if clusterer=='hierarchy':
result=fcluster(self.linked, 3 if number is None else number , criterion='maxclust')
if clusterer in ['hdbscan', 'k-means']:
result=self.clusterer.labels_
if to_self:
self.data.dataframe['cluster']=result
return result
def change_clusters(self, replace_dict, cluster='cluster', input_data=None):
'''
Changes values of clusters based on replace dictionary in format {old_value: new_value}
Parameters
-----------
replace_dict: a dictionary in format {old_value: new_value} to change cluster labels
cluster: a field with cluster labels
input_data: a pandas DataFrame or score-kit Data object, containing the filed with cluster labels
Returns
----------
a changed pandas DataFrame or score-kit Data object
'''
to_self=False
if input_data is None:
to_self=True
input_data=self.data
if isinstance(input_data, pd.DataFrame):
data=Data(input_data)
else:
data=copy.deepcopy(input_data)
data.dataframe[cluster]=data.dataframe[cluster].replace(replace_dict)
if to_self:
self.data=copy.deepcopy(data)
return data
def explain_clusters(self, input_data=None, cluster='cluster', features=None,
max_leaf_nodes=None, max_depth=None, min_samples_leaf=None,
graphviz_converter_path='C:\\Program Files (x86)\\Graphviz2.38\\bin\\dot.exe'):
'''
Trains an sklearn's Decision tree to predict cluster labels based on input features, then visualizes it
and returns a pandas DataFrame with clusters explanation
For Decision Tree visualization graphviz needs to be installed in OS and graphviz_converter_path
should be provided to .work and .explain_clusters methods (it is a path to dot.exe, which can transform
.dot file to .png for it to be displayed in the notebook)
Parameters
-----------
input_data: a pandas DataFrame or a Data object to analyze
cluster: the name of column, containing cluster labels
features: a list of features to train Decision Tree for cluster labels prediction on (if None all features are used)
max_leaf_nodes: a maximal number of explanation tree leaves (if None, then number of clusters is used)
max_depth: a maximal depth of explanation tree (if None, then number of clusters - 1 is used)
min_samples_leaf: a minimal number of observations in an explanation tree leaf (if None, then minimal cluster size is used)
graphviz_converter_path: a path to dot.exe, which can transform .dot file to .png for it to be displayed in the notebook
Returns
----------
a dataframe with clusters explanation and statistics
'''
def recursive_tree_conditions_generator(tree, features, node, leaves=None, input_leaf=None):
'''
TECH
Recursively passes through the tree to get each leaf's splits and statistics
'''
if tree.children_left[node]==-1 and tree.children_right[node]==-1:
current_leaf=copy.deepcopy(input_leaf)
for cn in range(len(tree.value[node][0])):
current_leaf[cn]=tree.value[node][0][cn]
leaves.append(current_leaf)
return leaves
else:
current_leaf=copy.deepcopy(input_leaf)
if features[tree.feature[node]] in current_leaf:
condition=current_leaf[features[tree.feature[node]]]
current_leaf[features[tree.feature[node]]]=[condition[0], tree.threshold[node]]
else:
current_leaf[features[tree.feature[node]]]=[-np.inf, tree.threshold[node]]
leaves=recursive_tree_conditions_generator(tree, features, tree.children_left[node], leaves, current_leaf)
current_leaf=copy.deepcopy(input_leaf)
if features[tree.feature[node]] in current_leaf:
condition=current_leaf[features[tree.feature[node]]]
current_leaf[features[tree.feature[node]]]=[tree.threshold[node], condition[1]]
else:
current_leaf[features[tree.feature[node]]]=[tree.threshold[node], np.inf]
leaves=recursive_tree_conditions_generator(tree, features, tree.children_right[node], leaves, current_leaf)
return leaves
if input_data is None:
input_data=self.data
if isinstance(input_data, pd.DataFrame):
data=Data(input_data)
else:
data=copy.deepcopy(input_data)
if features is None:
features=data.features.copy()
features=[ x for x in features if x!=cluster]
if max_leaf_nodes is None:
max_leaf_nodes=data.dataframe[cluster].unique().shape[0]
if max_depth is None:
max_depth=data.dataframe[cluster].unique().shape[0]-1
if min_samples_leaf is None:
min_samples_leaf=min(data.dataframe[cluster].value_counts())
clf = DecisionTreeClassifier(criterion='gini',
max_leaf_nodes=max_leaf_nodes,
max_depth=max_depth,
min_samples_leaf=min_samples_leaf)
clf.fit(data.dataframe[features], y=data.dataframe[cluster])
if graphviz_converter_path is not None:
export_graphviz(clf, out_file="cluster_tree.dot", feature_names=features, filled = True, rounded = True)
try:
system('"'+graphviz_converter_path+'" -Tpng cluster_tree.dot -o cluster_tree.png')
display(Display_Image(filename='cluster_tree.png'))
except Exception:
print('Executable dot.exe was not found at the specified address. \n'+
'Please make sure, that graphviz is installed on your system and provide the correct address for dot converter.')
conditions_df=pd.DataFrame(recursive_tree_conditions_generator(clf.tree_, features, 0, [], {}))
conditions_df=conditions_df.rename({i:clf.classes_[i] for i in range(len(clf.classes_))}, axis=1)
conditions_df=conditions_df[[x for x in conditions_df if x not in clf.classes_.tolist()] +clf.classes_.tolist()]
self.conditions=conditions_df.copy()
return conditions_df
def split_data(self, input_data, not_transformed=None, use_index_as_cluster=False):
'''
Splits input pandas DataFrame of Data object into clusters according to conditions table and returns the
dictionary of Data objects. The key of this dictionary is the cluster number and it is determined as
the cluster with most observations in the current part. Sometimes it may not work correctly, in that case
cluster number can be set to the conditions table index (with use_index_as_cluster option)
Parameters
-----------
input_data: a pandas DataFrame or a Data object to split
not_transformed: a list of features that were not transformed to their's fullness values, for them conditions
will be interpreted as the simple conditions for interval features (for the rest of the features
conditions will be changed according to the fullness coding)
use_index_as_cluster: if True the key of the result dictionary will be set to the index value of conditions
Returns
----------
a dictionary with pandas DataFrame or Data objects
'''
if self.conditions is None:
print('No conditions were found. Please, tun .explain_clusters method. Return None.')
return None
if isinstance(input_data, Data):
data=input_data.dataframe.copy()
else:
data=input_data.copy()
if not_transformed is None:
not_transformed=[]
features=[]
for f in self.conditions:
if self.conditions[f].apply(lambda x: isinstance(x, list) or pd.isnull(x)).all():
features.append(f)
result={}
for ind, conditions in self.conditions[features].iterrows():
df_filter=pd.Series([True]*data.shape[0], index=data.index)
for fn in range(len(features)):
f=features[fn]
if isinstance(conditions[fn], list):
if f in not_transformed:
df_filter=df_filter & ((data[f]>conditions[f][0])&(data[f]<=conditions[f][1]))
else:
new_filter=pd.Series([False]*data.shape[0], index=data.index)
if -1>conditions[f][0] and -1<=conditions[f][1]:
new_filter=new_filter | ( | pd.isnull(data[f]) | pandas.isnull |
import pandas as pd
import glob
import os
import numpy as np
import time
import fastparquet
import argparse
from multiprocessing import Pool
import multiprocessing as mp
from os.path import isfile
parser = argparse.ArgumentParser(description='Program to run google compounder for a particular file and setting')
parser.add_argument('--data', type=str,
help='location of the pickle file')
# don't use this for now
parser.add_argument('--word', action='store_true',
help='Extracting context for words only?')
parser.add_argument('--output', type=str,
help='directory to save dataset in')
args = parser.parse_args()
with open('/mnt/dhr/CreateChallenge_ICC_0821/no_ner_0_50000.txt','r') as f:
contexts=f.read().split("\n")
contexts=contexts[:-1]
def left_side_parser(df): # N N _ _ _
cur_df=df.copy()
try:
cur_df[['modifier','head','w1','w2','w3']]=cur_df.lemma_pos.str.split(' ',expand=True)
except ValueError:
compound_df=pd.DataFrame()
modifier_df=pd.DataFrame()
head_df=pd.DataFrame()
return compound_df,modifier_df,head_df
compound_df=pd.melt(cur_df,id_vars=['modifier','head','year','count'],value_vars=['w1','w2','w3'],value_name='context')
compound_df=compound_df.loc[compound_df.context.isin(contexts)]
modifier_df=pd.melt(cur_df,id_vars=['modifier','year','count'],value_vars=['head','w1','w2'],value_name='context')
modifier_df=modifier_df.loc[modifier_df.context.isin(contexts)]
head_df= | pd.melt(cur_df,id_vars=['head','year','count'],value_vars=['modifier','w1','w2','w3'],value_name='context') | pandas.melt |
import numpy as np
import pandas as pd
import pkg_resources
import seaborn as sns
import matplotlib.pyplot as plt
from math import sqrt
from GIPlot import GIPlot
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import mean_squared_error
from adjustText import adjust_text
from dualguide import read_gi_library
from crispy.DataImporter import CRISPR
from crispy.CRISPRData import ReadCounts
from crispy import CrispyPlot, QCplot, Utils
DPATH = pkg_resources.resource_filename("data", "dualguide/")
RPATH = pkg_resources.resource_filename("notebooks", "dualguide/reports/")
def pairgrid(df, annotate_guides=False, ntop_annotate=3):
def triu_plot(x, y, color, label, **kwargs):
ax = plt.gca()
z = CrispyPlot.density_interpolate(x, y)
idx = z.argsort()
x, y, z = x[idx], y[idx], z[idx]
ax.scatter(x, y, c=z, **kwargs)
ax.axhline(0, ls=":", lw=0.1, c="#484848", zorder=0)
ax.axvline(0, ls=":", lw=0.1, c="#484848", zorder=0)
(x0, x1), (y0, y1) = ax.get_xlim(), ax.get_ylim()
lims = [max(x0, y0), min(x1, y1)]
ax.plot(lims, lims, ls=":", lw=0.1, c="#484848", zorder=0)
if annotate_guides:
diff = (x - y).sort_values()
idxs = list(diff.head(ntop_annotate).index) + list(
diff.tail(ntop_annotate).index
)
texts = [
ax.text(
x.loc[i],
y.loc[i],
";".join(lib.loc[i, ["gene1", "gene2"]]),
color="k",
fontsize=4,
)
for i in idxs
]
adjust_text(
texts,
arrowprops=dict(arrowstyle="-", color="k", alpha=0.75, lw=0.3),
ax=ax,
)
def diag_plot(x, color, label, **kwargs):
sns.distplot(x, label=label)
grid = sns.PairGrid(df, height=1.1, despine=False)
grid.fig.subplots_adjust(wspace=0.05, hspace=0.05)
plt.gcf().set_size_inches(2 * df.shape[1], 2 * df.shape[1])
grid.map_diag(diag_plot, kde=True, hist_kws=dict(linewidth=0), bins=30)
for i, j in zip(*np.tril_indices_from(grid.axes, -1)):
ax = grid.axes[i, j]
r, p = spearmanr(df.iloc[:, i], df.iloc[:, j], nan_policy="omit")
rmse = sqrt(mean_squared_error(df.iloc[:, i], df.iloc[:, j]))
ax.annotate(
f"R={r:.2f}\nRMSE={rmse:.2f}",
xy=(0.5, 0.5),
xycoords=ax.transAxes,
ha="center",
va="center",
fontsize=9,
)
grid.map_upper(triu_plot, marker="o", edgecolor="", cmap="Spectral_r", s=2)
def scattergrid(
df,
columns,
xprefix,
yprefix,
density=True,
highlight_text=None,
xlabel="",
ylabel="",
add_corr=True,
n_highlight=10,
fontsize=3,
):
fig, axs = plt.subplots(
1, len(columns), figsize=(2 * len(columns), 2), sharex="all", sharey="all"
)
for i, s in enumerate(columns):
ax = axs[i]
df_s = pd.concat(
[df[f"{xprefix}_{s}"].rename("x"), df[f"{yprefix}_{s}"].rename("y")],
axis=1,
sort=False,
).dropna(subset=["x", "y"])
if density:
df_s["z"] = CrispyPlot.density_interpolate(df_s["x"], df_s["y"])
df_s = df_s.sort_values("z")
ax.scatter(
df_s["x"],
df_s["y"],
c=df_s["z"] if density else CrispyPlot.PAL_DBGD[0],
marker="o",
edgecolor="",
cmap="Spectral_r" if density else None,
s=3,
)
ax.axhline(0, ls=":", lw=0.1, c="#484848", zorder=0)
ax.axvline(0, ls=":", lw=0.1, c="#484848", zorder=0)
(x0, x1), (y0, y1) = ax.get_xlim(), ax.get_ylim()
lims = [max(x0, y0), min(x1, y1)]
ax.plot(lims, lims, ls=":", lw=0.1, c="#484848", zorder=0)
if highlight_text is not None:
diff = (df_s["x"] - df_s["y"]).sort_values()
idxs = list(diff.head(n_highlight).index) + list(
diff.tail(n_highlight).index
)
texts = [
ax.text(
df_s["x"].loc[i],
df_s["y"].loc[i],
";".join(df.loc[i, highlight_text]),
color="k",
fontsize=fontsize,
)
for i in idxs
]
adjust_text(
texts,
arrowprops=dict(arrowstyle="-", color="k", alpha=0.75, lw=0.3),
ax=ax,
)
if add_corr:
r, p = spearmanr(df_s["x"], df_s["y"])
ax.annotate(
f"R={r:.2f},p={p:.1e}" if p != 0 else f"R={r:.2f},p<0.0001",
xy=(0.01, 0.01),
xycoords=ax.transAxes,
ha="left",
va="bottom",
fontsize=5,
)
ax.set_title(f"{s} (N={df_s.shape[0]})")
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel if i == 0 else "")
fig.subplots_adjust(wspace=0.05, hspace=0.05)
ESS_GENES = Utils.get_essential_genes(return_series=False)
NESS_GENES = Utils.get_non_essential_genes(return_series=False)
def classify_gene(gene="ARID1A", gene_chr="1"):
if (str(gene) == "nan") & (str(gene_chr) == "nan"):
return "non-targeting"
if (str(gene) == "nan") & (str(gene_chr) != "nan"):
return "intergenic"
elif gene in ESS_GENES:
return "essential"
elif gene in NESS_GENES:
return "non-essential"
else:
return "unclassified"
if __name__ == "__main__":
# Project score
#
cscore_obj = CRISPR()
cscore = cscore_obj.filter(dtype="merged")
cscore_ht29 = cscore["SIDM00136"]
# Samplesheet
#
lib_name = "2gCRISPR_Pilot_library_v2.0.0.xlsx"
lib_ss = pd.read_excel(f"{DPATH}/gi_samplesheet.xlsx")
lib_ss = lib_ss.query(f"library == '{lib_name}'")
lib = read_gi_library(lib_name)
lib["sgRNA1_class"] = [classify_gene(g, c) for g, c in lib[["sgRNA1_Approved_Symbol", "sgRNA1_Chr"]].values]
lib["sgRNA2_class"] = [classify_gene(g, c) for g, c in lib[["sgRNA2_Approved_Symbol", "sgRNA2_Chr"]].values]
lib["vector_class"] = lib["sgRNA1_class"] + " + " + lib["sgRNA2_class"]
samples = list(set(lib_ss["name"]))
samples_pal = lib_ss.groupby("name")["palette"].first()
# Counts
#
counts = ReadCounts(
| pd.read_excel(f"{DPATH}/{lib_name}_samples_counts.xlsx", index_col=0) | pandas.read_excel |
import numpy as np
from sklearn.utils.multiclass import type_of_target
from mindware.base_estimator import BaseEstimator
from mindware.components.utils.constants import type_dict, MULTILABEL_CLS, IMG_CLS, TEXT_CLS, OBJECT_DET
from mindware.components.feature_engineering.transformation_graph import DataNode
class Classifier(BaseEstimator):
"""This class implements the classification task. """
def initialize(self, data: DataNode, **kwargs):
if self.metric is None:
self.metric = 'acc'
# Check the task type: {binary, multiclass}
task_type = type_of_target(data.data[1])
if task_type in type_dict:
task_type = type_dict[task_type]
else:
raise ValueError("Invalid Task Type: %s!" % task_type)
self.task_type = task_type
super().initialize(data=data, **kwargs)
def fit(self, data: DataNode, **kwargs):
"""
Fit the classifier to given training data.
:param data: instance of DataNode
:return: self
"""
if self._ml_engine is None:
self.initialize(data=data, **kwargs)
super().fit(data, **kwargs)
return self
def predict(self, X, batch_size=None, n_jobs=1):
"""
Predict classes for X.
:param X: Datanode
:param batch_size: int
:param n_jobs: int
:return: y : array of shape = [n_samples]
The predicted classes.
"""
if not isinstance(X, DataNode):
raise ValueError("X is supposed to be a Data Node, but get %s" % type(X))
return super().predict(X, batch_size=batch_size, n_jobs=n_jobs)
def refit(self):
return super().refit()
def predict_proba(self, X, batch_size=None, n_jobs=1):
"""
Predict probabilities of classes for all samples X.
:param X: Datanode
:param batch_size: int
:param n_jobs: int
:return: y : array of shape = [n_samples, n_classes]
The predicted class probabilities.
"""
if not isinstance(X, DataNode):
raise ValueError("X is supposed to be a Data Node, but get %s" % type(X))
pred_proba = super().predict_proba(X, batch_size=batch_size, n_jobs=n_jobs)
if self.task_type != MULTILABEL_CLS:
assert (
np.allclose(
np.sum(pred_proba, axis=1),
np.ones_like(pred_proba[:, 0]))
), "Prediction probability does not sum up to 1!"
# Check that all probability values lie between 0 and 1.
assert (
(pred_proba >= 0).all() and (pred_proba <= 1).all()
), "Found prediction probability value outside of [0, 1]!"
return pred_proba
def get_tree_importance(self, data: DataNode):
from lightgbm import LGBMClassifier
import pandas as pd
X, y = self.data_transformer(data).data
lgb = LGBMClassifier(random_state=1)
lgb.fit(X, y)
_importance = lgb.feature_importances_
h = {}
h['feature_id'] = np.array(range(len(_importance)))
h['feature_importance'] = _importance
return pd.DataFrame(h)
def get_linear_importance(self, data: DataNode):
from sklearn.linear_model import LogisticRegression
import pandas as pd
X, y = self.data_transformer(data).data
clf = LogisticRegression(random_state=1)
clf.fit(X, y)
_ef = clf.coef_
std_array = np.std(_ef, ddof=1, axis=0)
abs_array = abs(_ef)
mean_array = np.mean(abs_array, axis=0)
_importance = std_array / mean_array
h = {}
h['feature_id'] = np.array(range(len(_importance)))
h['feature_importance'] = _importance
return pd.DataFrame(h)
def get_linear_impact(self, data: DataNode):
from sklearn.linear_model import LogisticRegression
import pandas as pd
if (len(set(data.data[1]))) > 2:
print('ERROR! Only binary classification is supported!')
return 0
X, y = self.data_transformer(data).data
clf = LogisticRegression(random_state=1)
clf.fit(X, y)
_ef = clf.coef_
_impact = _ef[0]
h = {}
h['feature_id'] = np.array(range(len(_impact)))
h['feature_impact'] = _impact
return | pd.DataFrame(h) | pandas.DataFrame |
import numpy as np
import scipy.stats as stats
import pandas as pd
import math
"""
Obtain 39 sleep feats, following https://www.nature.com/articles/s41598-020-79217-x.pdf
"""
def get_pre_mean_diff(data, index, num_ele):
pre_eles = data[index - num_ele : index]
pre_mean = np.mean(pre_eles)
return pre_mean - data[index]
def get_next_mean_diff(data, index, num_ele):
next_eles = data[index + 1 : index + num_ele + 1]
next_mean = np.mean(next_eles)
return data[index] - next_mean
def moving_sum(a, n=30 * 60 * 30):
# n = 30 min x 60 sec x 30 hz
# since n is even, so we need to shift to left or right by 1
half_win_len = int(n / 2)
a = np.pad(a, pad_width=[half_win_len, half_win_len])
ret = np.cumsum(a, dtype=float)
ret[half_win_len:-half_win_len] = ret[n:] - ret[:-n]
return ret[half_win_len - 1 : -half_win_len - 1]
def moving_avg(a, n=30 * 60 * 30):
# n = 30 min x 60 sec x 30 hz
# since n is even, so we need to shift to left or right by 1
win_sum = moving_sum(a, n)
# will have to apply division considering boundary condiiton
half_win_len = int(n / 2)
win_sum[half_win_len:-half_win_len] = win_sum[half_win_len:-half_win_len] / n
for i in range(half_win_len):
win_sum[i] = win_sum[i] / n
win_sum[-i - 1] = win_sum[-i - 1] / n
return win_sum
def get_stats_measures(signal, signal_name="signal"):
"""
Obtain seven stat measure for a sleep signal
signal: N x 1: N = sample_rate * window length
"""
feats = {
signal_name + "_mean": np.mean(signal),
signal_name + "_std": np.std(signal),
signal_name + "_min": np.min(signal),
signal_name + "_max": np.max(signal),
signal_name + "_mad": stats.median_abs_deviation(signal),
signal_name + "_entropy20": stats.entropy(np.histogram(signal, bins=20)[0]),
signal_name + "_entropy200": stats.entropy(np.histogram(signal, bins=200)[0]),
}
return feats
def win2frame(data):
# data (narray) of shape M x 3 x N: N = sample_rate * window_len
# M is the epoch count
# output long_format (narray) of shape MN x 3
x = data[:, 0, :]
y = data[:, 1, :]
z = data[:, 2, :]
long_format = np.array([x.flatten(), y.flatten(), z.flatten()]).T
return long_format
def get_enmo(x, y, z):
x_sq = x ** 2
y_sq = y ** 2
z_sq = z ** 2
tmp = np.sqrt(x_sq + y_sq + z_sq) - 1
enmo = np.maximum(0, tmp)
return enmo, x_sq, y_sq
def get_LISD(enmo):
pre_activity_count = np.maximum(0, enmo - 0.02)
win_len = 10 # min
activity_count = moving_sum(pre_activity_count, n=win_len * 60 * 30)
LIDS = 100.0 / (activity_count + 1)
win_len = 30 # min
LIDS = moving_avg(LIDS, n=win_len * 60 * 30)
return LIDS
def get_epoch_feats(enmo, angleZ, LIDS, epoch_len=30, sample_rate=30):
# Get stats at epoch level
# Epoch_len (sec)
# Sample_len (sec)
enmo = enmo.reshape(-1, epoch_len * sample_rate)
angleZ = angleZ.reshape(-1, epoch_len * sample_rate)
LIDS = LIDS.reshape(-1, epoch_len * sample_rate)
enmo_feats = pd.DataFrame([get_stats_measures(x, signal_name="enmo") for x in enmo])
angleZ_feats = pd.DataFrame(
[get_stats_measures(x, signal_name="angleZ") for x in angleZ]
)
LIDS_feats = pd.DataFrame([get_stats_measures(x, signal_name="LIDS") for x in LIDS])
merged = pd.merge(
left=enmo_feats,
left_index=True,
right=angleZ_feats,
right_index=True,
how="inner",
)
merged = pd.merge(
left=merged, left_index=True, right=LIDS_feats, right_index=True, how="inner"
)
return merged
def getInterEpochFeat(signal_mean, signal_name):
# This only works when window size is 30sec
# default to 0 at boundary
# signale_mean (narray)
Prev30Diff = []
Next30Diff = []
Prev60Diff = []
Next60Diff = []
Prev120Diff = []
Next120Diff = []
epoch_len = 30
nrow_30 = int(30 / epoch_len)
nrow_60 = int(60 / epoch_len)
nrow_120 = int(120 / epoch_len)
for i in range(len(signal_mean)):
if i < nrow_30:
Prev30Diff.append(0)
else:
Prev30Diff.append(get_pre_mean_diff(signal_mean, i, nrow_30))
if i < nrow_60:
Prev60Diff.append(0)
else:
Prev60Diff.append(get_pre_mean_diff(signal_mean, i, nrow_60))
if i < nrow_120:
Prev120Diff.append(0)
else:
Prev120Diff.append(get_pre_mean_diff(signal_mean, i, nrow_120))
if i + nrow_30 >= len(signal_mean):
Next30Diff.append(0)
else:
Next30Diff.append(get_next_mean_diff(signal_mean, i, nrow_30))
if i + nrow_60 >= len(signal_mean):
Next60Diff.append(0)
else:
Next60Diff.append(get_next_mean_diff(signal_mean, i, nrow_60))
if i + nrow_120 >= len(signal_mean):
Next120Diff.append(0)
else:
Next120Diff.append(get_next_mean_diff(signal_mean, i, nrow_120))
tmp_feats = {
signal_name + "Prev30diff": Prev30Diff,
signal_name + "Prev60diff": Prev60Diff,
signal_name + "Prev120diff": Prev120Diff,
signal_name + "Next30diff": Next30Diff,
signal_name + "Next60diff": Next60Diff,
signal_name + "Next120diff": Next120Diff,
}
tmp_df = | pd.DataFrame(tmp_feats) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Retrieve bikeshare stations metadata."""
# pylint: disable=invalid-name
from typing import Dict, List
import pandas as pd
import pandera as pa
import requests
stations_schema = pa.DataFrameSchema(
columns={
"station_id": pa.Column(pa.Int),
"name": pa.Column(pd.StringDtype()),
"physical_configuration": pa.Column(pd.StringDtype()),
"lat": pa.Column(pa.Float64),
"lon": pa.Column(pa.Float64),
"altitude": pa.Column(pa.Float64, nullable=True),
"address": pa.Column(pd.StringDtype()),
"capacity": pa.Column(pa.Int),
"physicalkey": pa.Column(pa.Int),
"transitcard": pa.Column(pa.Int),
"creditcard": pa.Column(pa.Int),
"phone": pa.Column(pa.Int),
},
index=pa.Index(pa.Int),
)
def get_stations_metadata(
stations_url: str, stations_params: Dict
) -> pd.DataFrame:
"""Get bikeshare stations metadata from JSON feed."""
package = requests.get(stations_url, params=stations_params).json()
resources = package["result"]["resources"]
df_about = pd.DataFrame.from_records(resources)
r = requests.get(df_about["url"].tolist()[0]).json()
url_stations = r["data"]["en"]["feeds"][2]["url"]
df_stations = pd.DataFrame.from_records(
requests.get(url_stations).json()["data"]["stations"]
)
df_stations = df_stations.astype(
{
"physical_configuration": pd.StringDtype(),
"name": | pd.StringDtype() | pandas.StringDtype |
from collections import defaultdict
from shapely.geometry import Polygon
from shapely.ops import cascaded_union
from descartes import PolygonPatch
import seaborn as sns
import re
import theano.tensor as tt
import scipy.stats
from itertools import product
import datetime
import pickle as pkl
import numpy as np
import pandas as pd
import pymc3 as pm
# from pymc3.stats import quantiles
from collections import OrderedDict
import isoweek
import itertools as it
import os
from datetime import timedelta
yearweek_regex = re.compile(r"([0-9]+)-KW([0-9]+)")
def make_county_dict():
with open('../data/counties/counties.pkl', "rb") as f:
counties = pkl.load(f)
county_list = []
#print(counties)
for key, _ in counties.items():
county_name = counties[key]['name']
encoded_name = counties[key]['name'].encode('utf-8')
if b'\xc2\x96' in encoded_name:
ix = encoded_name.index(b'\xc2\x96')
county_name = counties[key]['name'][:ix]+'-'+counties[key]['name'][ix+1:]
county_list.append((county_name, key))
return OrderedDict(county_list)
def _parse_yearweek(yearweek):
"""Utility function to convert internal string representations of calender weeks into datetime objects. Uses strings of format `<year>-KW<week>`. Weeks are 1-based."""
year, week = yearweek_regex.search(yearweek).groups()
# datetime.combine(isoweek.Week(int(year), int(week)).wednesday(),time(0))
return isoweek.Week(int(year), int(week))
parse_yearweek = np.frompyfunc(_parse_yearweek, 1, 1)
def load_data(prediction_region, counties, csv_path, seperator=",", pad=None):
data = pd.read_csv(csv_path,
sep=seperator, encoding='iso-8859-1', index_col=0)
if "99999" in data.columns:
data.drop("99999", inplace=True, axis=1)
data = data.loc[:, list(
filter(lambda cid: prediction_region in counties[cid]["region"], data.columns))]
if pad is not None:
# get last date
last_date = pd.Timestamp(data.iloc[:, -1].index[-1])
extra_range = pd.date_range(
last_date+timedelta(1), last_date+timedelta(pad))
for x in extra_range:
data = data.append(pd.Series(name=str(x)[:11]))
data.index = [pd.Timestamp(date) for date in data.index]
return data
def load_data_n_weeks(
start,
n_weeks,
csv_path,
seperator=",",
pad = None
):
data = pd.read_csv(csv_path, sep=seperator, encoding='iso-8859-1', index_col=0)
if "99999" in data.columns:
data.drop("99999", inplace=True, axis=1)
data.index = [pd.Timestamp(date) for date in data.index]
start_day = pd.Timestamp('2020-01-28') + pd.Timedelta(days=start)
data = data.loc[start_day <= data.index]
if pad is not None:
last_date = data.index[-1]
extended_index = pd.date_range(last_date + pd.Timedelta(days=1),
last_date + pd.Timedelta(days=pad))
for x in extended_index:
data = data.append(pd.Series(name=x))
data.index = [ | pd.Timestamp(date) | pandas.Timestamp |
'''
(c) University of Liverpool 2019
All rights reserved.
@author: neilswainston
'''
# pylint: disable=invalid-name
# pylint: disable=too-many-branches
# pylint: disable=too-many-nested-blocks
# pylint: disable=too-many-statements
# pylint: disable=wrong-import-order
import ast
import re
from liv_ms.data import mona, rt
import pandas as pd
_FLOW_RATE_PATTERN = r'(\d+(?:\.\d+)?)\s?([um])[lL]\s?/\s?min' + \
r'(?:\s+at\s+(\d+(?:\.\d+)?)(?:-(\d+(?:\.\d+)?))? min)?'
_FLOW_GRAD_PATTERN_1 = r'(\d+(?:\.\d+)?)(?:/(\d+(?:\.\d+)?))?' + \
r'(?:/(\d+(?:\.\d+)?))?(?:\s+at\s+(\d+(?:\.\d+)?)' + \
r'(?:-(\d+(?:\.\d+)?))? min)?\.?$'
_FLOW_GRAD_PATTERN_2 = r'(\d+(?:\.\d+)?)(?:min)?:(\d+(?:\.\d+)?)%'
_FLOW_GRAD_PATTERN_3 = r'(\d+(?:\.\d+)?) % (\w) ?to ?(\d+(?:\.\d+)?)' + \
r' % (\w)\/(\d+(?:\.\d+)?) min'
_FLOW_GRAD_PATTERN_4 = r'linear from (\d+(?:\.\d+)?)\w\/(\d+(?:\.\d+)?)\w' + \
r' at (\d+(?:\.\d+)?) min to (\d+(?:\.\d+)?)\w\/(\d+(?:\.\d+)?)\w' + \
r' at (\d+(?:\.\d+)?) min(?:, hold (\d+(?:\.\d+)?) min' + \
r' at (\d+(?:\.\d+)?)\w\/(\d+(?:\.\d+)?)\w, reequilibration' + \
r' (\d+(?:\.\d+)?)\w\/(\d+(?:\.\d+)?)\w \((\d+(?:\.\d+)?) min\))?'
_SOL_REGEXP = r'(?:(\d+(?:\.\d+)?)' + \
r'\:?(\d+(?:\.\d+)?)?\:?(\d+(?:\.\d+)?)?)?' + \
r' ?([a-z\s]+)(?:\:([a-z\s]+))?(?:\:([a-z\s]+))?' + \
r' ?(?:(\d+(?:\.\d+)?)\:?(\d+(?:\.\d+)?)?\:?(\d+(?:\.\d+)?)?)?'
_DIM_PATTERN = \
r'(\d+(?:\.\d+)?)(?: )?(?:mm)?(?: )?(?:x|by)(?: )?(\d+(?:\.\d+)?) ?mm'
_PART_PATTERN = r'(\d+(?:\.\d+)?)(?: )?(?:um|micron|microm)'
_HYDROPHOBIC_PATTERN = r'C\d+|BEH'
def get_rt_data(filename, num_spec=1e32, regen_stats=True):
'''Get RT data.'''
if regen_stats:
# Get spectra:
df = mona.get_spectra(filename, num_spec=num_spec)
# Clean data:
df = _clean_ms_level(df)
df = _clean_rt(df)
df = _clean_flow_rate(df)
df = _clean_gradient(df)
# Encode column:
_encode_column(df)
# Get stats:
stats_df = rt.get_stats(df)
# Save stats_df:
_save_stats(stats_df)
else:
stats_df = pd.read_csv(
'mona_stats.csv',
converters={'column values': ast.literal_eval,
'flow rate values': ast.literal_eval,
'gradient values': ast.literal_eval})
return stats_df
def _save_stats(stats_df):
'''Save stats.'''
# Convert values to list to enable saving:
for col_name in ['column values',
'flow rate values',
'gradient values']:
stats_df.loc[:, col_name] = \
stats_df[col_name].apply(
lambda x: x if isinstance(x, float) else list(x))
stats_df.to_csv('mona_stats.csv', index=False)
def _clean_ms_level(df):
'''Clean MS level.'''
return df[df['ms level'] == 'MS2']
def _clean_rt(df):
'''Clean retention time.'''
df = df.dropna(subset=['retention time'])
res = df['retention time'].apply(_clean_rt_row)
df.loc[:, 'retention time'] = res
df.loc[:, 'retention time'] = df['retention time'].astype('float32')
return df.dropna(subset=['retention time'])
def _clean_rt_row(val):
'''Clean single retention time value.'''
try:
val = val.replace('N/A', 'NaN')
val = val.replace('min', '')
if 's' in val:
val = val.replace('sec', '')
val = val.replace('s', '')
return float(val) / 60.0
except AttributeError:
# Forgiveness, not permission. Assume float and pass:
pass
try:
return float(val)
except ValueError:
return float('NaN')
def _clean_flow_rate(df):
'''Clean flow rate.'''
df.loc[:, 'flow rate values'] = \
df['flow rate'].apply(_clean_flow_rate_row)
return df
def _clean_flow_rate_row(val):
'''Clean single flow rate value.'''
terms = []
try:
terms.extend([(0.0, float(val)),
(2**16, float(val))])
except ValueError:
val = val.lower()
for term in val.split(','):
term = term.strip()
term = term.replace('min-1', '/min')
mtch = re.match(_FLOW_RATE_PATTERN, term)
if mtch:
grps = mtch.groups()
factor = 1.0 if grps[1] == 'm' else 1000.0
rate = float(grps[0]) / factor
if grps[2]:
terms.extend([(float(grps[2]), rate)])
else:
terms.extend([(0.0, rate),
(2**16, rate)])
if grps[3]:
terms.extend([(float(grps[3]), rate)])
else:
terms.extend([(0.0, float('NaN')),
(2**16, float('NaN'))])
return rt.get_timecourse_vals(list(zip(*terms)))
def _clean_gradient(df):
'''Clean gradient.'''
_clean_solvents(df)
df.loc[:, 'gradient values'] = \
df.apply(_clean_gradient_row, axis=1)
return df
def _clean_solvents(df):
'''Clean solvent columns.'''
for col in ['solvent', 'solvent a', 'solvent a)', 'solvent b']:
# Lowercase:
df[col] = df[col].str.lower()
# Replace chemicals:
for trgt, rplcmnt in [('h2o', 'water'),
('acn', 'acetonitrile'),
('ch3cn', 'acetonitrile'),
('acetonitril ', 'acetonitrile '),
('meoh', 'methanol'),
('hcooh', 'formic acid'),
('fa', 'formic acid')]:
df[col] = [val.replace(trgt, rplcmnt)
if | pd.notnull(val) | pandas.notnull |
import yfinance as yf
import matplotlib.pyplot as plt
import numpy as npf
import pandas as pd
import datetime
from smart_weights import *
from connect_database import add_portfolio
# Variables
# Will have to globalize the yfinance data too cause having to constantly do api calls is going to make our code really slow
global stock
global stock_info
# two step process
# 1. Input a ticker
# 2. Then the user will input the function
def last_trading_day():
rightnow = datetime.datetime.now(datetime.timezone(datetime.timedelta(hours=-5), 'EST'))
# US Markets close at 4pm, but afterhours trading ends at 8pm.
# yFinance stubbornly only gives the day's data after 8pm, so we will wait until 9pm to pull data from
# the current day.
market_close = rightnow.replace(hour=21, minute=0, second=0, microsecond=0)
if rightnow < market_close:
DELTA = 1
# If it is saturday or sunday
elif rightnow.weekday() >= 5:
DELTA = 1
else:
DELTA = 0
start_date = (datetime.datetime.now() - datetime.timedelta(days=15)).strftime("%Y-%m-%d")
end_date = (datetime.datetime.now() - | pd.tseries.offsets.BDay(DELTA) | pandas.tseries.offsets.BDay |
import MDAnalysis
import MDAnalysis.analysis.hbonds
import pandas as pd
import numpy as np
import os
from collections import defaultdict
import networkx as nx
import matplotlib.pyplot as plt
import sys
import logging
logging.basicConfig(level=logging.INFO, format='%(message)s')
logger = logging.getLogger()
#logger.addHandler(logging.FileHandler('test.log', 'a'))
print = logger.info
sys.setrecursionlimit(1000)
print(sys.getrecursionlimit())
class HB_MD:
def __init__(self, frame):
self.direct_connection = pd.DataFrame(columns = ['donor_residue', 'acceptor_residue'])
self.one_water_connection = pd.DataFrame(columns = ['donor_residue', 'acceptor_residue'])
self.two_water_connection = pd.DataFrame(columns = ['donor_residue', 'acceptor_residue'])
self.three_water_connection = pd.DataFrame(columns = ['donor_residue', 'acceptor_residue'])
self.four_water_connection = pd.DataFrame(columns = ['donor_residue', 'acceptor_residue'])
self.hb_analysis(frame, self.direct_connection, self.one_water_connection, self.two_water_connection, self.three_water_connection, self.four_water_connection)
return
def addEdge(self, graph,u,v):
graph[u].append(v)
def generate_edges(self, graph):
edges = []
for node in graph:
for neighbour in graph[node]:
edges.append((node, neighbour))
return edges
def find_path(self, graph, start, end, path =[]):
path = path + [start]
if start == end:
return path
for node in graph[start]:
if node not in path:
newpath = find_path(graph, node, end, path)
if newpath:
return newpath
return None
def find_all_path(self, graph, start, path, paths):
if len(path) == 6:
return paths.append(list(path))
if len(graph[start]) == 0:
return paths.append(list(path))
for node in graph[start]:
if node in path:
continue
path.append(node)
self.find_all_path(graph, node, path, paths)
path.pop()
def get_chain(self, frame, chain):
i = 0
pdb = open(frame, 'r')
#os.system('sed -i "s/1H / H1/" hoh.pdb')
for line in pdb:
#line.replace('HOH', 'TIP3')
if line[0:4] != 'ATOM':
continue
chain[i] = line[21:22]
i += 1
return
def MDtraj(self, pdb):
#print('Getting coordinate')
h3 = MDAnalysis.analysis.hbonds.HydrogenBondAnalysis(pdb, 'not resname ALA and not resname GLN and not resname GLY and not resname ILE and not resname LEU and not resname PHE and not resname PRO and not resname VAL',
'not resname ALA and not resname GLN and not resname GLY and not resname ILE and not resname LEU and not resname PHE and not resname PRO and not resname VAL', distance=3.5, angle=90.0, acceptors = {'O1', 'O2'})
#print('Analyzing')
h3.run()
#print('Generating table')
h3.generate_table()
#print('Generating form')
df3 = pd.DataFrame.from_records(h3.table)
h3.generate_table()
df3 = pd.DataFrame.from_records(h3.table)
return df3
def get_all_connection(self, df3, chain, index_donor, index_accept):
for index2, row2 in df3.iterrows():
if row2['donor_resnm'] == 'TIP3'and row2['acceptor_resnm'] != 'TIP3':
if row2['donor_atom'] == 'H1':
index_donor.append(row2['donor_resnm'] + '_' + str(row2['donor_index']-1))
index_accept.append(row2['acceptor_resnm'] + '_' + chain[row2['acceptor_index']] + '_' + str(row2['acceptor_resid']))
if row2['donor_atom'] == 'H2':
index_donor.append(row2['donor_resnm'] + '_' + str(row2['donor_index']-2))
index_accept.append(row2['acceptor_resnm'] + '_' + chain[row2['acceptor_index']] + '_' + str(row2['acceptor_resid']))
elif row2['acceptor_resnm'] == 'TIP3' and row2['donor_resnm'] != 'TIP3':
index_accept.append(row2['acceptor_resnm'] + '_' + str(row2['acceptor_index']))
index_donor.append(row2['donor_resnm'] + '_' + chain[row2['donor_index']] + '_' + str(row2['donor_resid']))
elif row2['acceptor_resnm'] == 'TIP3' and row2['donor_resnm'] == 'TIP3':
if row2['donor_atom'] == 'H1':
index_donor.append(row2['donor_resnm'] + '_' + str(row2['donor_index']-1))
index_accept.append(row2['acceptor_resnm'] + '_' + str(row2['acceptor_index']))
if row2['donor_atom'] == 'H2':
index_donor.append(row2['donor_resnm'] + '_' + str(row2['donor_index']-2))
index_accept.append(row2['acceptor_resnm'] + '_' + str(row2['acceptor_index']))
else:
index_donor.append(row2['donor_resnm'] + '_' + chain[row2['donor_index']] + '_' + str(row2['donor_resid']))
index_accept.append(row2['acceptor_resnm'] + '_' + chain[row2['acceptor_index']] + '_' + str(row2['acceptor_resid']))
return
def divide_networks(self, hb_two, donor_residue, acceptor_residue, donor_residue2, acceptor_residue2):
#print('Divide networks')
for row in range(len(hb_two)):
if hb_two['donor_residue'][row][0:3] != 'TIP' and hb_two['acceptor_residue'][row][0:3] != 'TIP':
if hb_two['donor_residue'][row] == hb_two['acceptor_residue'][row]:
continue
else:
donor_residue.append(hb_two['donor_residue'][row])
acceptor_residue.append(hb_two['acceptor_residue'][row])
else:
if hb_two['donor_residue'][row] == hb_two['acceptor_residue'][row]:
continue
else:
donor_residue2.append(hb_two['donor_residue'][row])
acceptor_residue2.append(hb_two['acceptor_residue'][row])
return
def count_water_num(self, path, donor, accept, wat_num):
#print('Count number of water in paths')
for item in path:
donor_column = [item[0]]
accpt_column = []
count = 0
for r in range(1, len(item)):
if item[r][0:3] != 'TIP':
donor_column.append(item[r])
accpt_column.append(item[r])
wat_num.append(count)
count = 0
else:
count += 1
if len(donor_column) > len(accpt_column):
donor_column.pop()
else:
accpt_column.pop()
donor.extend(donor_column)
accept.extend(accpt_column)
return
#c = u.select_atoms("protein and prop z > 85 or around 3.0 protein and prop z > 85 ")
#c.write('/Users/zhangyingying/Dropbox (City College)/Yingying/large_file/new_trajectories_PSII_wt/cut_frame32_50_test.pdb')
def hb_analysis(self, frame, direct_connection, one_water_connection, two_water_connection, three_water_connection, four_water_connection):
chain = {}
graph = defaultdict(list)
pdb = MDAnalysis.Universe(frame)
self.get_chain(frame, chain)
df3 = self.MDtraj(pdb)
index_donor = []
index_accept = []
self.get_all_connection(df3, chain, index_donor, index_accept)
df3['donor_residue'] = index_donor
df3['acceptor_residue'] = index_accept
dic_hdonnor = {'ASP':['HD1', 'HD2'], 'ARG': ['HH11', 'HH12', 'HH21', 'HH22', 'HE'], 'GLU':['HE1', 'HE2'], 'HIS':['HD1', 'HE2'], 'HSD':['HD1', 'HE2'], 'HSE':['HD1', 'HE2'], 'HSP':['HD1', 'HE2'],
'SER':['HG'], 'THR':['HG1'], 'ASN':['HD21', 'HD22'], 'GLN':['HE21', 'HE22'], 'CYS':['HG'], 'TYR':['HH'], 'TRP':['HE1'], 'LYS':['HZ1', 'HZ2', 'HZ3'], 'TIP3':['H1', 'H2'], 'HOH':['1H', '2H']}
dic_accept = {'ASP':['OD1', 'OD2'], 'HCO': ['OC1', 'OC2'], 'ARG': ['NE', 'NH1', 'NH2'], 'GLU':['OE1', 'OE2'], 'HSD':['ND1', 'NE2'], 'HSE':['ND1', 'NE2'], 'HSP':['ND1', 'NE2'], 'HIS':['ND1', 'NE2'],
'SER':['OG'], 'THR':['OG1'], 'ASN':['OD1'], 'GLN':['OE1'], 'CYS':['SG'], 'TYR':['OH'], 'LYS':['NZ'], 'MET':['SD'], 'CLX':['CLX'], 'CLA':['CLA'], 'OX2':['OX2'], 'PL9':['O1', 'O2'], 'FX':['FX'], 'TIP3':['OH2'], 'HOH':['O'], 'MQ8':['O1', 'O2']}
donor_residue_pick = []
acceptor_residue_pick = []
for index, row in df3.iterrows():
if row['donor_resnm'] in dic_hdonnor.keys() and row['acceptor_resnm'] in dic_accept.keys():
if row['donor_atom'] in dic_hdonnor[row['donor_resnm']] and row['acceptor_atom'] in dic_accept[row['acceptor_resnm']]:
donor_residue_pick.append(row['donor_residue'])
acceptor_residue_pick.append(row['acceptor_residue'])
else:
continue
hb_two = pd.DataFrame({'donor_residue':donor_residue_pick, 'acceptor_residue':acceptor_residue_pick})
donor_residue = []
acceptor_residue = []
donor_residue2 = []
acceptor_residue2 = []
self.divide_networks(hb_two, donor_residue, acceptor_residue, donor_residue2, acceptor_residue2)
dire_con = pd.DataFrame({'donor_residue': donor_residue, 'acceptor_residue': acceptor_residue, 'wat_num': [0]*len(donor_residue)})
wat_con = pd.DataFrame({'donor_residue': donor_residue2, 'acceptor_residue': acceptor_residue2})
# connection via water
wat_con = wat_con.drop_duplicates()
wat_con.index = range(0, len(wat_con))
# direct connection
dire_con = dire_con.drop_duplicates()
dire_con.index = range(0, len(dire_con))
#wat_con.to_csv('/Users/zhangyingying/Dropbox (City College)/Yingying/PSII/quinone/hb_network/conncetion_hoh_frame32_50.csv')
#print('Generating graph')
for i in range(len(wat_con)):
self.addEdge(graph, wat_con['donor_residue'][i], wat_con['acceptor_residue'][i])
visited = []
path = []
#print('Finding all paths through water')
for res in range(len(wat_con)):
results = []
if wat_con['donor_residue'][res] not in visited and wat_con['donor_residue'][res][0:3] != 'TIP':
self.find_all_path(graph, wat_con['donor_residue'][res], [wat_con['donor_residue'][res]], results)
path = path + results
visited.append(wat_con['donor_residue'][res])
else:
continue
donor = []
accept = []
wat_num = []
self.count_water_num(path, donor, accept, wat_num)
# put all the connection together get the network
res_wat_res = pd.DataFrame({'donor_residue': donor, 'acceptor_residue': accept, 'wat_num': wat_num})
res_wat_res = res_wat_res.drop_duplicates()
hb_network = pd.concat([dire_con, res_wat_res])
hb_network.index = range(0, len(hb_network))
visited_1 = []
visited_2 = []
visited_3 = []
visited_4 = []
for i in range(0, len(hb_network)):
if hb_network['wat_num'][i] == 0:
new_row = pd.Series({'donor_residue': hb_network['donor_residue'][i], 'acceptor_residue': hb_network['acceptor_residue'][i]})
direct_connection = direct_connection.append(new_row, ignore_index=True)
if hb_network['wat_num'][i] <= 1 and [hb_network['donor_residue'][i], hb_network['acceptor_residue'][i]] not in visited_1:
visited_1.append([hb_network['donor_residue'][i], hb_network['acceptor_residue'][i]])
new_row = pd.Series({'donor_residue': hb_network['donor_residue'][i], 'acceptor_residue': hb_network['acceptor_residue'][i]})
one_water_connection = one_water_connection.append(new_row, ignore_index=True)
if hb_network['wat_num'][i] <= 2 and [hb_network['donor_residue'][i], hb_network['acceptor_residue'][i]] not in visited_2:
visited_2.append([hb_network['donor_residue'][i], hb_network['acceptor_residue'][i]])
new_row = pd.Series({'donor_residue': hb_network['donor_residue'][i], 'acceptor_residue': hb_network['acceptor_residue'][i]})
two_water_connection = two_water_connection.append(new_row, ignore_index=True)
if hb_network['wat_num'][i] <= 3 and [hb_network['donor_residue'][i], hb_network['acceptor_residue'][i]] not in visited_3:
visited_3.append([hb_network['donor_residue'][i], hb_network['acceptor_residue'][i]])
new_row = pd.Series({'donor_residue': hb_network['donor_residue'][i], 'acceptor_residue': hb_network['acceptor_residue'][i]})
three_water_connection = three_water_connection.append(new_row, ignore_index=True)
if hb_network['wat_num'][i] <= 4 and [hb_network['donor_residue'][i], hb_network['acceptor_residue'][i]] not in visited_4:
visited_4.append([hb_network['donor_residue'][i], hb_network['acceptor_residue'][i]])
new_row = | pd.Series({'donor_residue': hb_network['donor_residue'][i], 'acceptor_residue': hb_network['acceptor_residue'][i]}) | pandas.Series |
import gc
import pandas as pd
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
# 1. 读取数据
path = "./data/"
train_file = "train3.csv"
test_file = "test3.csv"
trainDf = pd.read_csv(path + train_file)
# testDf = pd.read_csv(path + train_file, nrows=1000, skiprows=range(1, 10000))
pos_trainDf = trainDf[trainDf['target'] == 1]
neg_trainDf = trainDf[trainDf['target'] == 0].sample(n=20000, random_state=2018)
trainDf = pd.concat([pos_trainDf, neg_trainDf], axis=0).sample(frac=1.0, random_state=2018)
del pos_trainDf; del neg_trainDf; gc.collect();
print(trainDf.shape, trainDf['target'].mean())
trainDf, testDf, _, _ = train_test_split(trainDf, trainDf['target'], test_size=0.25, random_state=2018)
print(trainDf['target'].mean(), trainDf.shape)
print(testDf['target'].mean(), testDf.shape)
"""
一共59个特征,包括id, target
bin特征17个;cat特征14个;连续特征26个;
Code:
columns = trainDf.columns.tolist()
bin_feats = []
cat_feats = []
con_feats = []
for col in columns:
if 'bin' in col:
bin_feats.append(col)
continue
if 'cat' in col:
cat_feats.append(col)
continue
if 'id' != col and 'target' != col:
con_feats.append(col)
print(len(bin_feats), bin_feats)
print(len(cat_feats), cat_feats)
print(len(con_feats), con_feats)
"""
bin_feats = ['ps_ind_06_bin', 'ps_ind_07_bin', 'ps_ind_08_bin', 'ps_ind_09_bin', 'ps_ind_10_bin', 'ps_ind_11_bin', 'ps_ind_12_bin', 'ps_ind_13_bin', 'ps_ind_16_bin', 'ps_ind_17_bin', 'ps_ind_18_bin', 'ps_calc_15_bin', 'ps_calc_16_bin', 'ps_calc_17_bin', 'ps_calc_18_bin', 'ps_calc_19_bin', 'ps_calc_20_bin']
cat_feats = ['ps_ind_02_cat', 'ps_ind_04_cat', 'ps_ind_05_cat', 'ps_car_01_cat', 'ps_car_02_cat', 'ps_car_03_cat', 'ps_car_04_cat', 'ps_car_05_cat', 'ps_car_06_cat', 'ps_car_07_cat', 'ps_car_08_cat', 'ps_car_09_cat', 'ps_car_10_cat', 'ps_car_11_cat']
con_feats = ['ps_ind_01', 'ps_ind_03', 'ps_ind_14', 'ps_ind_15', 'ps_reg_01', 'ps_reg_02', 'ps_reg_03', 'ps_car_11', 'ps_car_12', 'ps_car_13', 'ps_car_14', 'ps_car_15', 'ps_calc_01', 'ps_calc_02', 'ps_calc_03', 'ps_calc_04', 'ps_calc_05', 'ps_calc_06', 'ps_calc_07', 'ps_calc_08', 'ps_calc_09', 'ps_calc_10', 'ps_calc_11', 'ps_calc_12', 'ps_calc_13', 'ps_calc_14']
# 2. 特征处理
trainDf = trainDf.fillna(0)
testDf = testDf.fillna(0)
train_sz = trainDf.shape[0]
combineDf = pd.concat([trainDf, testDf], axis=0)
del trainDf
del testDf
gc.collect()
# 2.1 连续特征全部归一化
from sklearn.preprocessing import MinMaxScaler
for col in con_feats:
scaler = MinMaxScaler()
combineDf[col] = scaler.fit_transform(np.array(combineDf[col].values.tolist()).reshape(-1,1))
# 2.2 离散特征one-hot
for col in bin_feats + cat_feats:
onehotret = | pd.get_dummies(combineDf[col], prefix=col) | pandas.get_dummies |
import numpy as np
import pandas as pd
import pytest
from pandas.util import testing as tm
from ibis.pandas.aggcontext import window_agg_udf
@pytest.mark.parametrize(
'param',
[
(
pd.Series([True, True, True, True]),
pd.Series([1.0, 2.0, 2.0, 3.0]),
),
(
pd.Series([False, True, True, False]),
| pd.Series([np.NaN, 2.0, 2.0, np.NaN]) | pandas.Series |
"""
NAD Lab Tools
This program was written for the NAD Lab at the University of Arizona by <NAME>.
It processes intracellular calcium concentration and pH measurements (from the InCytim2 software)
as well as filters the data for outliers and spikes.
The experiment consists of placing fluorescent-stained cells under a microscope to measure either
calcium concentration or pH. Over a period of time, solutions are added to determine the response
of the quantities.
<NAME> 2019
"""
import sys
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tkinter as tk
from tkinter import ttk
from tkinter.filedialog import askopenfilenames, askdirectory
# File names for analysis
names = []
# Output directory
output = ''
# Names of drugs added to solution
events = []
# Beginning of baseline measurement
itime = 60
# End of baseline measurement
ftime = 200
# Lower threshold to exclude cells
lbase = 50
# Upper threshold to exclude cells
ubase = 150
# Mode to analyze data - can be either 'Calcium' or 'pH'
measure = 'Calcium'
def process_data(df):
"""
Takes in a pandas dataframe and calculates the mean Calcium/pH as well as ratios
between different wavelength measurements. Then formats the data into a CSV file.
Lastly, uses the user-defined thresholds to exclude outlier cells.
Arguments:
df {pd.DataFrame} -- a dataframe to process
Returns:
tuple -- a tuple of dataframes containing the processed dataframes, their outliers, and
graph data
"""
global itime, ftime, lbase, ubase, measure
# Adjusts parameters based on Calcium/pH mode
meas = ''
length1 = ''
length2 = ''
meanname = ''
if measure == 'Calcium':
meas = 'Ca++'
length1 = '340'
length2 = '380'
meanname = 'Mean Calcium (nM)'
elif measure == 'pH':
meas = 'pH'
length1 = '488'
length2 = '460'
meanname = 'Mean pH'
# Reads pertinent data from dataframe
times = df.iloc[:, 0].to_frame(name='Time (s)').astype(float)
calcium = df.filter(like=meas, axis=1).astype(float)
conc_340 = df.filter(like=length1, axis=1).astype(float)
conc_380 = df.filter(like=length2, axis=1).astype(float)
ratio = | pd.DataFrame() | pandas.DataFrame |
# License: Apache-2.0
import databricks.koalas as ks
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from gators.binning.bin_rare_events import BinRareEvents
ks.set_option("compute.default_index_type", "distributed-sequence")
@pytest.fixture
def data():
X = pd.DataFrame(
{
"A": ["w", "z", "q", "q", "q", "z"],
"B": ["x", "x", "w", "w", "w", "x"],
"C": ["c", "c", "e", "d", "d", "c"],
"D": [1, 2, 3, 4, 5, 6],
}
)
X_expected = pd.DataFrame(
{
"A": ["OTHERS", "OTHERS", "q", "q", "q", "OTHERS"],
"B": ["x", "x", "w", "w", "w", "x"],
"C": ["c", "c", "OTHERS", "OTHERS", "OTHERS", "c"],
"D": [1, 2, 3, 4, 5, 6],
}
)
obj = BinRareEvents(min_ratio=0.5).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_1object():
X = pd.DataFrame({"A": ["w", "z", "q", "q", "q", "z"], "D": [1, 2, 3, 4, 5, 6]})
X_expected = pd.DataFrame(
{"A": ["OTHERS", "OTHERS", "q", "q", "q", "OTHERS"], "D": [1, 2, 3, 4, 5, 6]}
)
obj = BinRareEvents(min_ratio=0.5).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_all_others():
X = pd.DataFrame(
{
"A": ["w", "z", "q", "q", "q", "z"],
"B": ["x", "x", "w", "w", "w", "x"],
"C": ["c", "c", "e", "d", "d", "c"],
"D": [1, 2, 3, 4, 5, 6],
}
)
X_expected = pd.DataFrame(
{
"A": ["OTHERS", "OTHERS", "OTHERS", "OTHERS", "OTHERS", "OTHERS"],
"B": ["OTHERS", "OTHERS", "OTHERS", "OTHERS", "OTHERS", "OTHERS"],
"C": ["OTHERS", "OTHERS", "OTHERS", "OTHERS", "OTHERS", "OTHERS"],
"D": [1, 2, 3, 4, 5, 6],
}
)
obj = BinRareEvents(min_ratio=1.0).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_no_other():
X = pd.DataFrame(
{
"A": ["w", "z", "q", "q", "q", "z"],
"B": ["x", "x", "w", "w", "w", "x"],
"C": ["c", "c", "e", "d", "d", "c"],
"D": [1, 2, 3, 4, 5, 6],
}
)
obj = BinRareEvents(min_ratio=0.0).fit(X)
obj = BinRareEvents(min_ratio=0.0).fit(X)
return obj, X, X.copy()
@pytest.fixture
def data_num():
X = pd.DataFrame({"A": [1, 2, 3, 4, 5, 6], "B": [1, 2, 3, 4, 5, 6]})
obj = BinRareEvents(min_ratio=1.0).fit(X)
return obj, X, X.copy()
@pytest.fixture
def data_ks():
X = ks.DataFrame(
{
"A": ["w", "z", "q", "q", "q", "z"],
"B": ["x", "x", "w", "w", "w", "x"],
"C": ["c", "c", "e", "d", "d", "c"],
"D": [1, 2, 3, 4, 5, 6],
}
)
X_expected = pd.DataFrame(
{
"A": ["OTHERS", "OTHERS", "q", "q", "q", "OTHERS"],
"B": ["x", "x", "w", "w", "w", "x"],
"C": ["c", "c", "OTHERS", "OTHERS", "OTHERS", "c"],
"D": [1, 2, 3, 4, 5, 6],
}
)
obj = BinRareEvents(min_ratio=0.5).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_1object_ks():
X = ks.DataFrame({"A": ["w", "z", "q", "q", "q", "z"], "D": [1, 2, 3, 4, 5, 6]})
X_expected = pd.DataFrame(
{"A": ["OTHERS", "OTHERS", "q", "q", "q", "OTHERS"], "D": [1, 2, 3, 4, 5, 6]}
)
obj = BinRareEvents(min_ratio=0.5).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_all_others_ks():
X = ks.DataFrame(
{
"A": ["w", "z", "q", "q", "q", "z"],
"B": ["x", "x", "w", "w", "w", "x"],
"C": ["c", "c", "e", "d", "d", "c"],
"D": [1, 2, 3, 4, 5, 6],
}
)
X_expected = pd.DataFrame(
{
"A": ["OTHERS", "OTHERS", "OTHERS", "OTHERS", "OTHERS", "OTHERS"],
"B": ["OTHERS", "OTHERS", "OTHERS", "OTHERS", "OTHERS", "OTHERS"],
"C": ["OTHERS", "OTHERS", "OTHERS", "OTHERS", "OTHERS", "OTHERS"],
"D": [1, 2, 3, 4, 5, 6],
}
)
obj = BinRareEvents(min_ratio=1.0).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_no_other_ks():
X = ks.DataFrame(
{
"A": ["w", "z", "q", "q", "q", "z"],
"B": ["x", "x", "w", "w", "w", "x"],
"C": ["c", "c", "e", "d", "d", "c"],
"D": [1, 2, 3, 4, 5, 6],
}
)
obj = BinRareEvents(min_ratio=0.0).fit(X)
obj = BinRareEvents(min_ratio=0.0).fit(X)
return obj, X, X.to_pandas().copy()
@pytest.fixture
def data_num_ks():
X = ks.DataFrame({"A": [1, 2, 3, 4, 5, 6], "B": [1, 2, 3, 4, 5, 6]})
obj = BinRareEvents(min_ratio=1.0).fit(X)
return obj, X, X.to_pandas().copy()
def test_pd(data):
obj, X, X_expected = data
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_ks(data_ks):
obj, X, X_expected = data_ks
X_new = obj.transform(X)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_pd_np(data):
obj, X, X_expected = data
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new, columns=X_expected.columns)
X_expected.index = X_new.index
assert_frame_equal(X_new, X_expected.astype(object))
@pytest.mark.koalas
def test_ks_np(data_ks):
obj, X, X_expected = data_ks
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = | pd.DataFrame(X_numpy_new, columns=X_expected.columns) | pandas.DataFrame |
# Copyright (c) ZenML GmbH 2021. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
import pandas as pd
import tensorflow as tf
from zenml.core.repo import Repository
from zenml.integrations.dash.visualizers.pipeline_run_lineage_visualizer import (
PipelineRunLineageVisualizer,
)
from zenml.pipelines import pipeline
from zenml.steps import Output, step
FEATURE_COLS = [
"CRIM",
"ZN",
"INDUS",
"CHAS",
"NOX",
"RM",
"AGE",
"DIS",
"RAD",
"TAX",
"PTRATIO",
"B",
"STAT",
]
TARGET_COL_NAME = "target"
def convert_np_to_pandas(X, y):
df = | pd.DataFrame(X, columns=FEATURE_COLS) | pandas.DataFrame |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
from pathlib import Path
from typing import Iterable, List
import gym
import pandas as pd
import yaml
from llvm_autotuning.autotuners import Autotuner
from llvm_autotuning.benchmarks import Benchmarks
from pydantic import BaseModel, Field
from compiler_gym import CompilerEnvStateWriter
from compiler_gym.util.executor import Executor
logger = logging.getLogger(__name__)
class Experiment(BaseModel):
"""The composition of a full autotuning experiment, comprising autotuner,
executor, and programs to tune.
"""
# === Start of fields list. ===
executor: Executor
"""The execution environment to use for training / testing jobs."""
autotuner: Autotuner
benchmarks: Benchmarks
"""The set of benchmarks to test on."""
working_directory: Path
"""The working directory where logs and other artifacts are written to."""
experiment: str = "unnamed_experiment"
"""A logical name for this experiment. This is used for naming RLlib
trials.
"""
num_replicas: int = Field(default=1, ge=1)
"""The number of duplicate jobs to run. E.g. for training, this will train
:code:`n` independent models in trials that share the same working
directory.
"""
seed: int = 0xCC
"""A numeric random seed."""
# === Start of public API. ===
def run(self) -> None:
"""Run the experiment."""
# The working directory may already have been created by hydra, so we
# will check for the config.json file below to see if this experiment
# has already run.
self.working_directory.mkdir(parents=True, exist_ok=True)
# Dump the parsed config to file.
assert not self.config_path.is_file(), (
f"Refusing to overwrite file: {self.config_path}. "
"Is the working directory clean?"
)
with open(self.config_path, "w") as f:
print(json.dumps(json.loads(self.json()), indent=2), file=f)
logger.info("Wrote %s", self.config_path)
results_num = 0
with self.executor.get_executor(
logs_dir=self.working_directory / "logs"
) as executor:
with gym.make("llvm-v0") as env:
for replica_num in range(self.num_replicas):
for benchmark in self.benchmarks.benchmark_uris_iterator(env):
results_num += 1
results_path = (
self.working_directory / f"results-{results_num}.csv"
)
executor.submit(
_experiment_worker,
autotuner=self.autotuner,
benchmark=benchmark,
results_path=results_path,
seed=self.seed + replica_num,
)
def yaml(self) -> str:
"""Serialize the model configuration to a YAML string."""
# We can't directly dump the dict() representation because we need to
# simplify the types first, so we go via JSON.
simplified_data = json.loads(self.json())
return yaml.dump(simplified_data)
@property
def config_path(self) -> Path:
return self.working_directory / "config.json"
@property
def results_paths(self) -> Iterable[Path]:
"""Return an iterator over results files."""
for path in self.working_directory.iterdir():
if path.is_file() and path.name.startswith("results-"):
yield path
@property
def configuration_number(self) -> str:
return self.working_directory.name.split("-")[-1]
@property
def timestamp(self) -> str:
return f"{self.working_directory.parent.parent.name}/{self.working_directory.parent.name}"
@property
def dataframe(self) -> pd.DataFrame:
"""Return the results as a dataframe."""
dfs = []
for path in self.results_paths:
dfs.append(pd.read_csv(path))
if not dfs:
return | pd.DataFrame() | pandas.DataFrame |
from time import perf_counter
from os import chdir, getcwd
import numpy as np
import pandas as pd
from plot import *
class del_then_inter:
def __init__(self, infile: str, has_imu: bool, conv_time: bool, plot_choice):
self.df, _ = setup(infile, has_imu, conv_time)
self.plot_choice = plot_choice
# Delete unimportant columns
#self.df.drop(self.df.loc[:,'IMU_AngVelX':'IMU_LinearAccZ'].columns, inplace=True, axis=1)
def delete_vals(self) -> None:
print('\n\tdeleting bad values...\n')
self.df = self.df.reset_index()
for i in range(len(self.df.GPS_Long)):
if self.df.SDn[i] > 0.005:
self.df.loc[i,'GPS_Long':'GPS_Alt'] = pd.NA
def interpolate(self) -> None:
print('\tinterpolating...\n')
# Force columns into numeric data types
self.df['GPS_Long'] = pd.to_numeric(self.df['GPS_Long'], errors='coerce')
self.df['GPS_Lat'] = pd.to_numeric(self.df['GPS_Lat'], errors='coerce')
self.df['GPS_Alt'] = pd.to_numeric(self.df['GPS_Alt'], errors='coerce')
# Interpolate all GNSS values in the df as floats
self.df.loc[:, 'GPS_Long':'GPS_Alt'] = self.df.loc[:, 'GPS_Long':'GPS_Alt'].interpolate(method='linear')
self.df['GPS_Status'] = self.df['GPS_Status'].interpolate(method='ffill')
# Remove previously deleted values
self.df = self.df[self.df['GPS_Long'].notna()]
def write_to_file(self, name: str):
# Change 'time' back to rospy.time[]
self.df.Time = self.df.Time.apply(lambda x: f'rospy.Time[{x:19d}]')
self.df.drop('index', axis=1, inplace=True)
# Save the file
self.df.to_csv(f'.\\results\\{name}.csv', index=False)
print(f'\nSaved new file to .\\results\\{name}.csv')
# Plot the desired plot
if self.plot_choice:
print(f'\nPlotting...\n')
choose_plot(self.df, self.plot_choice)
class fix_from_vel:
def __init__(self, infile, has_imu, conv_time):
self.gnss_df, _ = setup(infile, has_imu, conv_time)
self.gnss_df.loc[:, 'GPS_Long':'GPS_Lat'] = [geodetic_to_geocentric(*a) for a in tuple(zip(self.gnss_df['GPS_Long'], self.gnss_df['GPS_Lat'], self.gnss_df['GPS_Alt']))]
self.gnss_df = add_vectors(self.gnss_df)
def rem_vel_outlier(df) -> None:
'''
Status:
nulling values based on Std works, but not based on absolute velocity change
Values are still strecthed when compared to GPS_Long, GPS_Lat
This notably wasnt the case before I force converted the merged df to numeric
'''
df['Rolling_X'] = df.VelX.rolling(5).mean()
df['Rolling_Y'] = df.VelY.rolling(5).mean()
df['Rolling_Z'] = df.VelZ.rolling(5).mean()
#df.loc[df[df.SDn > 10].index, 'VelX':'VelZ'] = pd.NA
df.VelX.map(lambda x: pd.NA if abs(x-df.Rolling_X)/() > 1 else x.VelX)
class fix_from_imu:
def __init__(self, infile, has_imu):
self.gnss_df, self.imu_df = setup(infile, has_imu)
self.gnss_df.loc[:, 'GPS_Long':'GPS_Lat'] = [geodetic_to_geocentric(*a) for a in tuple(zip(self.gnss_df['GPS_Long'], self.gnss_df['GPS_Lat'], self.gnss_df['GPS_Alt']))]
self.gnss_df = add_vectors(self.gnss_df)
self.gnss_df = trim_df_vel(self.gnss_df, 10, 1)
self.df = merge_dfs(self.gnss_df, self.imu_df)
### PLOT FUNCTIONS
def choose_plot(df, plot_choice):
if plot_choice == 1:
plot_track(df)
elif plot_choice == 2:
pass
### MAIN FUNCTIONS
def setup(infile: str, has_imu: bool, conv_time: bool) -> pd.DataFrame:
t1 = perf_counter()
print('\n' + '#'*80 + '\n')
def ingest_file(infile) -> pd.DataFrame:
# Set and Get directory
chdir(r'C:\Users\mikeh\OneDrive\Documents\GitHub\ouster_localization')
dir = getcwd()
print(f'\ndirectory: {dir}\n\n')
print('\treading file...\n')
# Import the comma delimited .txt file as a pandas dataframe
df = | pd.read_csv(f'{dir}\\{infile}', delimiter=',') | pandas.read_csv |
from dataclasses import replace
import datetime as dt
from functools import partial
import inspect
from pathlib import Path
import re
import types
import uuid
import pandas as pd
from pandas.testing import assert_frame_equal
import pytest
from solarforecastarbiter import datamodel
from solarforecastarbiter.io import api, nwp, utils
from solarforecastarbiter.reference_forecasts import main, models
from solarforecastarbiter.conftest import default_forecast, default_observation
BASE_PATH = Path(nwp.__file__).resolve().parents[0] / 'tests/data'
@pytest.mark.parametrize('model', [
models.gfs_quarter_deg_hourly_to_hourly_mean,
models.gfs_quarter_deg_to_hourly_mean,
models.hrrr_subhourly_to_hourly_mean,
models.hrrr_subhourly_to_subhourly_instantaneous,
models.nam_12km_cloud_cover_to_hourly_mean,
models.nam_12km_hourly_to_hourly_instantaneous,
models.rap_cloud_cover_to_hourly_mean,
models.gefs_half_deg_to_hourly_mean
])
def test_run_nwp(model, site_powerplant_site_type, mocker):
""" to later patch the return value of load forecast, do something like
def load(*args, **kwargs):
return load_forecast_return_value
mocker.patch.object(inspect.unwrap(model), '__defaults__',
(partial(load),))
"""
mocker.patch.object(inspect.unwrap(model), '__defaults__',
(partial(nwp.load_forecast, base_path=BASE_PATH),))
mocker.patch(
'solarforecastarbiter.reference_forecasts.utils.get_init_time',
return_value=pd.Timestamp('20190515T0000Z'))
site, site_type = site_powerplant_site_type
fx = datamodel.Forecast('Test', dt.time(5), pd.Timedelta('1h'),
pd.Timedelta('1h'), pd.Timedelta('6h'),
'beginning', 'interval_mean', 'ghi', site)
run_time = pd.Timestamp('20190515T1100Z')
issue_time = pd.Timestamp('20190515T1100Z')
out = main.run_nwp(fx, model, run_time, issue_time)
for var in ('ghi', 'dni', 'dhi', 'air_temperature', 'wind_speed',
'ac_power'):
if site_type == 'site' and var == 'ac_power':
assert out.ac_power is None
else:
ser = getattr(out, var)
assert len(ser) >= 6
assert isinstance(ser, (pd.Series, pd.DataFrame))
assert ser.index[0] == pd.Timestamp('20190515T1200Z')
assert ser.index[-1] < pd.Timestamp('20190515T1800Z')
@pytest.fixture
def obs_5min_begin(site_metadata):
observation = default_observation(
site_metadata,
interval_length=pd.Timedelta('5min'), interval_label='beginning')
return observation
@pytest.fixture
def observation_values_text():
"""JSON text representation of test data"""
tz = 'UTC'
data_index = pd.date_range(
start='20190101', end='20190112', freq='5min', tz=tz, closed='left')
# each element of data is equal to the hour value of its label
data = pd.DataFrame({'value': data_index.hour, 'quality_flag': 0},
index=data_index)
text = utils.observation_df_to_json_payload(data)
return text.encode()
@pytest.fixture
def session(requests_mock, observation_values_text):
session = api.APISession('')
matcher = re.compile(f'{session.base_url}/observations/.*/values')
requests_mock.register_uri('GET', matcher, content=observation_values_text)
return session
@pytest.mark.parametrize('interval_label', ['beginning', 'ending'])
def test_run_persistence_scalar(session, site_metadata, obs_5min_begin,
interval_label, mocker):
run_time = pd.Timestamp('20190101T1945Z')
# intraday, index=False
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1h'),
interval_label=interval_label)
issue_time = pd.Timestamp('20190101T2300Z')
mocker.spy(main.persistence, 'persistence_scalar')
out = main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert isinstance(out, pd.Series)
assert len(out) == 1
assert main.persistence.persistence_scalar.call_count == 1
@pytest.mark.parametrize('interval_label', ['beginning', 'ending'])
def test_run_persistence_scalar_index(session, site_metadata, obs_5min_begin,
interval_label, mocker):
run_time = pd.Timestamp('20190101T1945Z')
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1h'),
interval_label=interval_label)
issue_time = pd.Timestamp('20190101T2300Z')
# intraday, index=True
mocker.spy(main.persistence, 'persistence_scalar_index')
out = main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time, index=True)
assert isinstance(out, pd.Series)
assert len(out) == 1
assert main.persistence.persistence_scalar_index.call_count == 1
def test_run_persistence_interval(session, site_metadata, obs_5min_begin,
mocker):
run_time = pd.Timestamp('20190102T1945Z')
# day ahead, index = False
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('24h'),
interval_label='beginning')
issue_time = pd.Timestamp('20190102T2300Z')
mocker.spy(main.persistence, 'persistence_interval')
out = main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert isinstance(out, pd.Series)
assert len(out) == 24
assert main.persistence.persistence_interval.call_count == 1
def test_run_persistence_weekahead(session, site_metadata, mocker):
variable = 'net_load'
observation = default_observation(
site_metadata, variable=variable,
interval_length=pd.Timedelta('5min'), interval_label='beginning')
run_time = pd.Timestamp('20190110T1945Z')
forecast = default_forecast(
site_metadata, variable=variable,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1d'),
interval_label='beginning')
issue_time = pd.Timestamp('20190111T2300Z')
mocker.spy(main.persistence, 'persistence_interval')
out = main.run_persistence(session, observation, forecast, run_time,
issue_time)
assert isinstance(out, pd.Series)
assert len(out) == 24
assert main.persistence.persistence_interval.call_count == 1
def test_run_persistence_interval_index(session, site_metadata,
obs_5min_begin):
# index=True not supported for day ahead
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('24h'),
interval_label='beginning')
issue_time = pd.Timestamp('20190423T2300Z')
run_time = pd.Timestamp('20190422T1945Z')
with pytest.raises(ValueError) as excinfo:
main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time, index=True)
assert 'index=True not supported' in str(excinfo.value)
def test_run_persistence_interval_too_long(session, site_metadata,
obs_5min_begin):
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('48h'), # too long
interval_label='beginning')
issue_time = pd.Timestamp('20190423T2300Z')
run_time = pd.Timestamp('20190422T1945Z')
with pytest.raises(ValueError) as excinfo:
main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert 'midnight to midnight' in str(excinfo.value)
def test_run_persistence_interval_not_midnight_to_midnight(session,
site_metadata,
obs_5min_begin):
# not midnight to midnight
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=22),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('24h'),
interval_label='beginning')
issue_time = pd.Timestamp('20190423T2200Z')
run_time = pd.Timestamp('20190422T1945Z')
with pytest.raises(ValueError) as excinfo:
main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert 'midnight to midnight' in str(excinfo.value)
def test_run_persistence_incompatible_issue(session, site_metadata,
obs_5min_begin):
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1h'),
interval_label='beginning')
issue_time = pd.Timestamp('20190423T2330Z')
run_time = pd.Timestamp('20190422T1945Z')
with pytest.raises(ValueError) as excinfo:
main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert 'incompatible' in str(excinfo.value).lower()
def test_run_persistence_fx_too_short(session, site_metadata,
obs_5min_begin):
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1min'),
run_length=pd.Timedelta('3min'),
interval_label='beginning')
issue_time = pd.Timestamp('20190423T2300Z')
run_time = pd.Timestamp('20190422T1945Z')
with pytest.raises(ValueError) as excinfo:
main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert 'requires observation.interval_length' in str(excinfo.value)
def test_run_persistence_incompatible_instant_fx(session, site_metadata,
obs_5min_begin):
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1h'),
interval_label='instantaneous')
issue_time = pd.Timestamp('20190423T2300Z')
run_time = pd.Timestamp('20190422T1945Z')
with pytest.raises(ValueError) as excinfo:
main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert 'instantaneous forecast' in str(excinfo.value).lower()
def test_run_persistence_incompatible_instant_interval(session, site_metadata,
obs_5min_begin):
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1h'),
interval_label='instantaneous')
obs = obs_5min_begin.replace(interval_label='instantaneous',
interval_length=pd.Timedelta('10min'))
issue_time = pd.Timestamp('20190423T2300Z')
run_time = pd.Timestamp('20190422T1945Z')
with pytest.raises(ValueError) as excinfo:
main.run_persistence(session, obs, forecast, run_time,
issue_time)
assert 'identical interval length' in str(excinfo.value)
def test_verify_nwp_forecasts_compatible(ac_power_forecast_metadata):
fx0 = ac_power_forecast_metadata
fx1 = replace(fx0, run_length=pd.Timedelta('10h'), interval_label='ending')
df = pd.DataFrame({'forecast': [fx0, fx1], 'model': ['a', 'b']})
errs = main._verify_nwp_forecasts_compatible(df)
assert set(errs) == {'model', 'run_length', 'interval_label'}
@pytest.mark.parametrize('string,expected', [
('{"is_reference_forecast": true}', True),
('{"is_reference_persistence_forecast": true}', False),
('{"is_reference_forecast": "True"}', True),
('{"is_reference_forecast":"True"}', True),
('is_reference_forecast" : "True"}', True),
('{"is_reference_forecast" : true, "otherkey": badjson, 9}', True),
('reference_forecast": true', False),
('{"is_reference_forecast": false}', False),
("is_reference_forecast", False)
])
def test_is_reference_forecast(string, expected):
assert main._is_reference_forecast(string) == expected
def test_find_reference_nwp_forecasts_json_err(ac_power_forecast_metadata,
mocker):
logger = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.logger')
extra_params = '{"model": "themodel", "is_reference_forecast": true}'
fxs = [replace(ac_power_forecast_metadata, extra_parameters=extra_params),
replace(ac_power_forecast_metadata,
extra_parameters='{"model": "yes"}'),
replace(ac_power_forecast_metadata, extra_parameters='{"is_reference_forecast": true'), # NOQA
replace(ac_power_forecast_metadata, extra_parameters='')]
out = main.find_reference_nwp_forecasts(fxs)
assert logger.warning.called
assert len(out) == 1
def test_find_reference_nwp_forecasts_no_model(ac_power_forecast_metadata,
mocker):
logger = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.logger')
fxs = [replace(ac_power_forecast_metadata, extra_parameters='{}',
forecast_id='0'),
replace(ac_power_forecast_metadata,
extra_parameters='{"piggyback_on": "0", "is_reference_forecast": true}', # NOQA
forecast_id='1')]
out = main.find_reference_nwp_forecasts(fxs)
assert len(out) == 0
assert logger.debug.called
assert logger.error.called
def test_find_reference_nwp_forecasts_no_init(ac_power_forecast_metadata):
fxs = [replace(ac_power_forecast_metadata,
extra_parameters='{"model": "am", "is_reference_forecast": true}', # NOQA
forecast_id='0'),
replace(ac_power_forecast_metadata,
extra_parameters='{"piggyback_on": "0", "model": "am", "is_reference_forecast": true}', # NOQA
forecast_id='1')]
out = main.find_reference_nwp_forecasts(fxs)
assert len(out) == 2
assert out.next_issue_time.unique() == [None]
assert out.piggyback_on.unique() == ['0']
def test_find_reference_nwp_forecasts(ac_power_forecast_metadata):
fxs = [replace(ac_power_forecast_metadata,
extra_parameters='{"model": "am", "is_reference_forecast": true}', # NOQA
forecast_id='0'),
replace(ac_power_forecast_metadata,
extra_parameters='{"piggyback_on": "0", "model": "am", "is_reference_forecast": true}', # NOQA
forecast_id='1')]
out = main.find_reference_nwp_forecasts(
fxs, pd.Timestamp('20190501T0000Z'))
assert len(out) == 2
assert out.next_issue_time.unique()[0] == pd.Timestamp('20190501T0500Z')
assert out.piggyback_on.unique() == ['0']
@pytest.fixture()
def forecast_list(ac_power_forecast_metadata):
model = 'nam_12km_cloud_cover_to_hourly_mean'
prob_dict = ac_power_forecast_metadata.to_dict()
prob_dict['constant_values'] = (0, 50, 100)
prob_dict['axis'] = 'y'
prob_dict['extra_parameters'] = '{"model": "gefs_half_deg_to_hourly_mean", "is_reference_forecast": true}' # NOQA
return [replace(ac_power_forecast_metadata,
extra_parameters=(
'{"model": "%s", "is_reference_forecast": true}'
% model),
forecast_id='0'),
replace(ac_power_forecast_metadata,
extra_parameters='{"model": "gfs_quarter_deg_hourly_to_hourly_mean", "is_reference_forecast": true}', # NOQA
forecast_id='1'),
replace(ac_power_forecast_metadata,
extra_parameters='{"piggyback_on": "0", "model": "%s", "is_reference_forecast": true}' % model, # NOQA
forecast_id='2',
variable='ghi'),
datamodel.ProbabilisticForecast.from_dict(prob_dict),
replace(ac_power_forecast_metadata,
extra_parameters='{"piggyback_on": "0", "model": "%s", "is_reference_forecast": true}' % model, # NOQA
forecast_id='3',
variable='dni',
provider='Organization 2'
),
replace(ac_power_forecast_metadata,
extra_parameters='{"piggyback_on": "0", "model": "badmodel", "is_reference_forecast": true}', # NOQA
forecast_id='4'),
replace(ac_power_forecast_metadata,
extra_parameters='{"piggyback_on": "6", "model": "%s", "is_reference_forecast": true}' % model, # NOQA
forecast_id='5',
variable='ghi'),
replace(ac_power_forecast_metadata,
extra_parameters='{"piggyback_on": "0", "model": "%s", "is_reference_forecast": false}' % model, # NOQA
forecast_id='7',
variable='ghi'),
]
def test_process_nwp_forecast_groups(mocker, forecast_list):
api = mocker.MagicMock()
run_nwp = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.run_nwp')
post_vals = mocker.patch(
'solarforecastarbiter.reference_forecasts.main._post_forecast_values')
class res:
ac_power = [0]
ghi = [0]
run_nwp.return_value = res
fxs = main.find_reference_nwp_forecasts(forecast_list[:-4])
logger = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.logger')
main.process_nwp_forecast_groups(api, pd.Timestamp('20190501T0000Z'), fxs)
assert not logger.error.called
assert not logger.warning.called
assert post_vals.call_count == 4
@pytest.mark.parametrize('run_time', [None, pd.Timestamp('20190501T0000Z')])
def test_process_nwp_forecast_groups_issue_time(mocker, forecast_list,
run_time):
api = mocker.MagicMock()
run_nwp = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.run_nwp')
post_vals = mocker.patch(
'solarforecastarbiter.reference_forecasts.main._post_forecast_values')
class res:
ac_power = [0]
ghi = [0]
run_nwp.return_value = res
fxs = main.find_reference_nwp_forecasts(forecast_list[:-4], run_time)
main.process_nwp_forecast_groups(api, pd.Timestamp('20190501T0000Z'), fxs)
assert post_vals.call_count == 4
run_nwp.assert_called_with(mocker.ANY, mocker.ANY, mocker.ANY,
pd.Timestamp('20190501T0500Z'))
def test_process_nwp_forecast_groups_missing_var(mocker, forecast_list):
api = mocker.MagicMock()
run_nwp = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.run_nwp')
post_vals = mocker.patch(
'solarforecastarbiter.reference_forecasts.main._post_forecast_values')
class res:
ac_power = [0]
ghi = [0]
dni = None
run_nwp.return_value = res
fxs = main.find_reference_nwp_forecasts(forecast_list[:-3])
logger = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.logger')
main.process_nwp_forecast_groups(api, pd.Timestamp('20190501T0000Z'), fxs)
assert not logger.error.called
assert logger.warning.called
assert post_vals.call_count == 4
def test_process_nwp_forecast_groups_bad_model(mocker, forecast_list):
api = mocker.MagicMock()
run_nwp = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.run_nwp')
post_vals = mocker.patch(
'solarforecastarbiter.reference_forecasts.main._post_forecast_values')
class res:
ac_power = [0]
ghi = [0]
dni = None
run_nwp.return_value = res
fxs = main.find_reference_nwp_forecasts(forecast_list[4:-1])
logger = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.logger')
main.process_nwp_forecast_groups(api, pd.Timestamp('20190501T0000Z'), fxs)
assert logger.error.called
assert not logger.warning.called
assert post_vals.call_count == 0
def test_process_nwp_forecast_groups_missing_runfor(mocker, forecast_list):
api = mocker.MagicMock()
run_nwp = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.run_nwp')
class res:
ac_power = [0]
ghi = [0]
dni = None
run_nwp.return_value = res
fxs = main.find_reference_nwp_forecasts(forecast_list[-2:])
logger = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.logger')
main.process_nwp_forecast_groups(api, pd.Timestamp('20190501T0000Z'), fxs)
assert logger.error.called
assert not logger.warning.called
assert api.post_forecast_values.call_count == 0
@pytest.mark.parametrize('ind', [0, 1, 2])
def test__post_forecast_values_regular(mocker, forecast_list, ind):
api = mocker.MagicMock()
fx = forecast_list[ind]
main._post_forecast_values(api, fx, [0], 'whatever')
assert api.post_forecast_values.call_count == 1
def test__post_forecast_values_cdf(mocker, forecast_list):
api = mocker.MagicMock()
fx = forecast_list[3]
ser = pd.Series([0, 1])
vals = pd.DataFrame({i: ser for i in range(21)})
main._post_forecast_values(api, fx, vals, 'gefs')
assert api.post_probabilistic_forecast_constant_value_values.call_count == 3 # NOQA
def test__post_forecast_values_cdf_not_gefs(mocker, forecast_list):
api = mocker.MagicMock()
fx = forecast_list[3]
ser = pd.Series([0, 1])
vals = pd.DataFrame({i: ser for i in range(21)})
with pytest.raises(ValueError):
main._post_forecast_values(api, fx, vals, 'gfs')
def test__post_forecast_values_cdf_less_cols(mocker, forecast_list):
api = mocker.MagicMock()
fx = forecast_list[3]
ser = pd.Series([0, 1])
vals = pd.DataFrame({i: ser for i in range(10)})
with pytest.raises(TypeError):
main._post_forecast_values(api, fx, vals, 'gefs')
def test__post_forecast_values_cdf_not_df(mocker, forecast_list):
api = mocker.MagicMock()
fx = forecast_list[3]
ser = pd.Series([0, 1])
with pytest.raises(TypeError):
main._post_forecast_values(api, fx, ser, 'gefs')
def test__post_forecast_values_cdf_no_cv_match(mocker, forecast_list):
api = mocker.MagicMock()
fx = replace(forecast_list[3], constant_values=(
replace(forecast_list[3].constant_values[0], constant_value=3.0
),))
ser = pd.Series([0, 1])
vals = pd.DataFrame({i: ser for i in range(21)})
with pytest.raises(KeyError):
main._post_forecast_values(api, fx, vals, 'gefs')
@pytest.mark.parametrize('issue_buffer,empty', [
(pd.Timedelta('10h'), False),
(pd.Timedelta('1h'), True),
(pd.Timedelta('5h'), False)
])
def test_make_latest_nwp_forecasts(forecast_list, mocker, issue_buffer, empty):
session = mocker.patch('solarforecastarbiter.io.api.APISession')
session.return_value.get_user_info.return_value = {'organization': ''}
session.return_value.list_forecasts.return_value = forecast_list[:-3]
session.return_value.list_probabilistic_forecasts.return_value = []
run_time = pd.Timestamp('20190501T0000Z')
# last fx has different org
fxdf = main.find_reference_nwp_forecasts(forecast_list[:-4], run_time)
process = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.process_nwp_forecast_groups') # NOQA
main.make_latest_nwp_forecasts('', run_time, issue_buffer)
if empty:
process.assert_not_called()
else:
assert_frame_equal(process.call_args[0][-1], fxdf)
@pytest.mark.parametrize('string,expected', [
('{"is_reference_forecast": true}', False),
('{"is_reference_persistence_forecast": true}', True),
('{"is_reference_persistence_forecast": "True"}', True),
('{"is_reference_persistence_forecast":"True"}', True),
('is_reference_persistence_forecast" : "True"}', True),
('{"is_reference_persistence_forecast" : true, "otherkey": badjson, 9}',
True),
('reference_persistence_forecast": true', False),
('{"is_reference_persistence_forecast": false}', False),
("is_reference_persistence_forecast", False)
])
def test_is_reference_persistence_forecast(string, expected):
assert main._is_reference_persistence_forecast(string) == expected
@pytest.fixture
def perst_fx_obs(mocker, ac_power_observation_metadata,
ac_power_forecast_metadata):
observations = [
ac_power_observation_metadata.replace(
observation_id=str(uuid.uuid1())
),
ac_power_observation_metadata.replace(
observation_id=str(uuid.uuid1())
),
ac_power_observation_metadata.replace(
observation_id=str(uuid.uuid1())
)
]
def make_extra(obs):
extra = (
'{"is_reference_persistence_forecast": true,'
f'"observation_id": "{obs.observation_id}"'
'}'
)
return extra
forecasts = [
ac_power_forecast_metadata.replace(
name='FX0',
extra_parameters=make_extra(observations[0]),
run_length=pd.Timedelta('1h'),
forecast_id=str(uuid.uuid1())
),
ac_power_forecast_metadata.replace(
name='FX no persist',
run_length=pd.Timedelta('1h'),
forecast_id=str(uuid.uuid1())
),
ac_power_forecast_metadata.replace(
name='FX bad js',
extra_parameters='is_reference_persistence_forecast": true other',
run_length=pd.Timedelta('1h'),
forecast_id=str(uuid.uuid1())
)
]
return forecasts, observations
def test_generate_reference_persistence_forecast_parameters(
mocker, perst_fx_obs):
forecasts, observations = perst_fx_obs
session = mocker.MagicMock()
session.get_user_info.return_value = {'organization': ''}
session.get_observation_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T15:33Z'))
session.get_forecast_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T14:00Z'))
max_run_time = pd.Timestamp('2020-05-20T16:00Z')
# one hour ahead forecast, so 14Z was made at 13Z
# enough data to do 14Z and 15Z issue times but not 16Z
param_gen = main.generate_reference_persistence_forecast_parameters(
session, forecasts, observations, max_run_time
)
assert isinstance(param_gen, types.GeneratorType)
param_list = list(param_gen)
assert len(param_list) == 2
assert param_list[0] == (
forecasts[0], observations[0],
pd.Timestamp('2020-05-20T14:00Z'),
False
)
assert param_list[1] == (
forecasts[0], observations[0],
pd.Timestamp('2020-05-20T15:00Z'),
False
)
def test_generate_reference_persistence_forecast_parameters_no_forecast_yet(
mocker, perst_fx_obs):
forecasts, observations = perst_fx_obs
session = mocker.MagicMock()
session.get_user_info.return_value = {'organization': ''}
session.get_observation_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T15:33Z'))
session.get_forecast_time_range.return_value = (
pd.NaT, pd.NaT)
max_run_time = pd.Timestamp('2020-05-20T16:00Z')
param_gen = main.generate_reference_persistence_forecast_parameters(
session, forecasts, observations, max_run_time
)
assert isinstance(param_gen, types.GeneratorType)
param_list = list(param_gen)
assert len(param_list) == 1
assert param_list[0] == (
forecasts[0], observations[0],
pd.Timestamp('2020-05-20T15:00Z'),
False
)
def test_generate_reference_persistence_forecast_parameters_no_data(
mocker, perst_fx_obs):
forecasts, observations = perst_fx_obs
session = mocker.MagicMock()
session.get_user_info.return_value = {'organization': ''}
session.get_observation_time_range.return_value = (
pd.NaT, pd.NaT)
session.get_forecast_time_range.return_value = (
pd.NaT, pd.NaT)
max_run_time = pd.Timestamp('2020-05-20T16:00Z')
param_gen = main.generate_reference_persistence_forecast_parameters(
session, forecasts, observations, max_run_time
)
assert isinstance(param_gen, types.GeneratorType)
param_list = list(param_gen)
assert len(param_list) == 0
def test_generate_reference_persistence_forecast_parameters_diff_org(
mocker, perst_fx_obs):
forecasts, observations = perst_fx_obs
session = mocker.MagicMock()
session.get_user_info.return_value = {'organization': 'a new one'}
session.get_observation_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T15:33Z'))
session.get_forecast_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T14:00Z'))
max_run_time = pd.Timestamp('2020-05-20T16:00Z')
param_gen = main.generate_reference_persistence_forecast_parameters(
session, forecasts, observations, max_run_time
)
assert isinstance(param_gen, types.GeneratorType)
param_list = list(param_gen)
assert len(param_list) == 0
def test_generate_reference_persistence_forecast_parameters_not_reference_fx(
mocker, perst_fx_obs):
forecasts, observations = perst_fx_obs
forecasts = [fx.replace(extra_parameters='') for fx in forecasts]
session = mocker.MagicMock()
session.get_user_info.return_value = {'organization': ''}
session.get_observation_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T15:33Z'))
session.get_forecast_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T14:00Z'))
max_run_time = pd.Timestamp('2020-05-20T16:00Z')
param_gen = main.generate_reference_persistence_forecast_parameters(
session, forecasts, observations, max_run_time
)
assert isinstance(param_gen, types.GeneratorType)
param_list = list(param_gen)
assert len(param_list) == 0
def test_generate_reference_persistence_forecast_parameters_no_obs_id(
mocker, perst_fx_obs):
forecasts, observations = perst_fx_obs
forecasts[0] = forecasts[0].replace(
extra_parameters='{"is_reference_persistence_forecast": true}')
forecasts[1] = forecasts[1].replace(
extra_parameters='{"is_reference_persistence_forecast": true, "observation_id": "idnotinobs"}') # NOQA
session = mocker.MagicMock()
session.get_user_info.return_value = {'organization': ''}
session.get_observation_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T15:33Z'))
session.get_forecast_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T14:00Z'))
max_run_time = pd.Timestamp('2020-05-20T16:00Z')
param_gen = main.generate_reference_persistence_forecast_parameters(
session, forecasts, observations, max_run_time
)
assert isinstance(param_gen, types.GeneratorType)
param_list = list(param_gen)
assert len(param_list) == 0
def test_generate_reference_persistence_forecast_parameters_ending_label(
mocker, perst_fx_obs):
forecasts, observations = perst_fx_obs
forecasts = [fx.replace(
interval_label='ending', lead_time_to_start=pd.Timedelta('0h'))
for fx in forecasts]
session = mocker.MagicMock()
session.get_user_info.return_value = {'organization': ''}
session.get_observation_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T16:00Z'))
session.get_forecast_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T14:00Z'))
max_run_time = pd.Timestamp('2020-05-20T16:00Z')
param_gen = main.generate_reference_persistence_forecast_parameters(
session, forecasts, observations, max_run_time
)
assert isinstance(param_gen, types.GeneratorType)
param_list = list(param_gen)
assert len(param_list) == 3
assert param_list == [
(
forecasts[0], observations[0],
pd.Timestamp('2020-05-20T14:00Z'),
False
),
(
forecasts[0], observations[0],
pd.Timestamp('2020-05-20T15:00Z'),
False
),
(
forecasts[0], observations[0],
pd.Timestamp('2020-05-20T16:00Z'),
False
)
]
def test_generate_reference_persistence_forecast_parameters_no_lead(
mocker, perst_fx_obs):
forecasts, observations = perst_fx_obs
forecasts = [fx.replace(
lead_time_to_start=pd.Timedelta('0h'))
for fx in forecasts]
session = mocker.MagicMock()
session.get_user_info.return_value = {'organization': ''}
session.get_observation_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T16:00Z'))
session.get_forecast_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T14:00Z'))
max_run_time = pd.Timestamp('2020-05-20T16:00Z')
param_gen = main.generate_reference_persistence_forecast_parameters(
session, forecasts, observations, max_run_time
)
assert isinstance(param_gen, types.GeneratorType)
param_list = list(param_gen)
assert len(param_list) == 2
assert param_list == [
(
forecasts[0], observations[0],
pd.Timestamp('2020-05-20T15:00Z'),
False
),
(
forecasts[0], observations[0],
pd.Timestamp('2020-05-20T16:00Z'),
False
)
]
def test_generate_reference_persistence_forecast_parameters_off_time(
mocker, perst_fx_obs):
forecasts, observations = perst_fx_obs
session = mocker.MagicMock()
session.get_user_info.return_value = {'organization': ''}
session.get_observation_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T15:33Z'))
session.get_forecast_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T14:10Z'))
max_run_time = pd.Timestamp('2020-05-20T16:00Z')
# one hour ahead forecast, so 14Z was made at 13Z
# enough data to do 14Z and 15Z issue times but not 16Z
param_gen = main.generate_reference_persistence_forecast_parameters(
session, forecasts, observations, max_run_time
)
assert isinstance(param_gen, types.GeneratorType)
param_list = list(param_gen)
assert len(param_list) == 2
assert param_list == [
(
forecasts[0], observations[0],
pd.Timestamp('2020-05-20T14:00Z'),
False
),
(
forecasts[0], observations[0],
pd.Timestamp('2020-05-20T15:00Z'),
False
),
]
def test_generate_reference_persistence_forecast_parameters_multiple(
mocker, perst_fx_obs):
forecasts, observations = perst_fx_obs
forecasts[0] = forecasts[0].replace(
extra_parameters=(forecasts[0].extra_parameters[:-1] +
', "index_persistence": true}')
)
forecasts[1] = forecasts[1].replace(
extra_parameters=(
'{"is_reference_persistence_forecast": true, "observation_id": "' +
observations[1].observation_id + '"}'))
session = mocker.MagicMock()
session.get_user_info.return_value = {'organization': ''}
session.get_observation_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T14:33Z'))
session.get_forecast_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T14:00Z'))
max_run_time = pd.Timestamp('2020-05-20T16:00Z')
param_gen = main.generate_reference_persistence_forecast_parameters(
session, forecasts, observations, max_run_time
)
assert isinstance(param_gen, types.GeneratorType)
param_list = list(param_gen)
assert len(param_list) == 2
assert param_list == [
(
forecasts[0], observations[0],
pd.Timestamp('2020-05-20T14:00Z'),
True
),
(
forecasts[1], observations[1],
pd.Timestamp('2020-05-20T14:00Z'),
False
)
]
def test_generate_reference_persistence_forecast_parameters_up_to_date(
mocker, perst_fx_obs):
forecasts, observations = perst_fx_obs
session = mocker.MagicMock()
session.get_user_info.return_value = {'organization': ''}
session.get_observation_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T13:59Z'))
session.get_forecast_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T14:00Z'))
# next would be at 14 and use data incl 13:59:59
max_run_time = pd.Timestamp('2020-05-20T16:00Z')
param_gen = main.generate_reference_persistence_forecast_parameters(
session, forecasts, observations, max_run_time
)
assert isinstance(param_gen, types.GeneratorType)
param_list = list(param_gen)
assert len(param_list) == 0
def test_make_latest_persistence_forecasts(mocker, perst_fx_obs):
forecasts, observations = perst_fx_obs
forecasts += [forecasts[0].replace(
extra_parameters=(forecasts[0].extra_parameters[:-1] +
', "index_persistence": true}'))]
session = mocker.MagicMock()
session.get_user_info.return_value = {'organization': ''}
session.get_observation_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T15:33Z'))
session.get_forecast_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T14:00Z'))
session.list_forecasts.return_value = forecasts
session.list_observations.return_value = observations
max_run_time = pd.Timestamp('2020-05-20T16:00Z')
mocker.patch(
'solarforecastarbiter.reference_forecasts.main.api.APISession',
return_value=session)
run_pers = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.run_persistence')
main.make_latest_persistence_forecasts('', max_run_time)
assert run_pers.call_count == 4
assert session.post_forecast_values.call_count == 4
assert [l[1]['index'] for l in run_pers.call_args_list] == [
False, False, True, True]
def test_make_latest_persistence_forecasts_some_errors(mocker, perst_fx_obs):
# test that some persistence forecast parameters are invalid for the
# observation and that no peristence values are posted
# and that the failure doesn't interrupt other posts
forecasts, observations = perst_fx_obs
forecasts += [forecasts[0].replace(
extra_parameters=(forecasts[0].extra_parameters[:-1] +
', "index_persistence": true}'))]
session = mocker.MagicMock()
session.get_user_info.return_value = {'organization': ''}
session.get_observation_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T15:33Z'))
session.get_forecast_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), | pd.Timestamp('2020-05-20T14:00Z') | pandas.Timestamp |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gps_building_blocks.ml.statistical_inference.inference."""
from unittest import mock
from absl.testing import parameterized
import numpy as np
import pandas as pd
from scipy import stats
from sklearn import datasets
from sklearn import model_selection
from absl.testing import absltest
from gps_building_blocks.ml.statistical_inference import data_preparation
class InferenceTest(parameterized.TestCase):
_missing_data = pd.DataFrame(
data=[[np.nan, 0.0000],
[0.6000, 0.0000],
[0.4000, 3.0000],
[0.2000, np.nan]],
columns=['first', 'second'])
def test_missing_value_emits_warning_twice(self):
with self.assertWarns(data_preparation.MissingValueWarning):
data_preparation.InferenceData(self._missing_data)
with self.assertWarns(data_preparation.MissingValueWarning):
data_preparation.InferenceData(self._missing_data)
def test_check_data_raises_exception_on_missing_data(self):
inference_data = data_preparation.InferenceData(self._missing_data)
with self.assertRaises(data_preparation.MissingValueError):
inference_data.data_check(raise_on_error=True)
def test_invalid_target_column_raise_exception(self):
with self.assertRaises(KeyError):
data_preparation.InferenceData(
initial_data=self._missing_data,
target_column='non_ci_sono')
def test_impute_missing_values_replaced_with_mean(self):
inference_data = data_preparation.InferenceData(self._missing_data)
expected_result = pd.DataFrame(
data=[[0.4000, 0.0000],
[0.6000, 0.0000],
[0.4000, 3.0000],
[0.2000, 1.0000]],
columns=['first', 'second'])
result = inference_data.impute_missing_values(strategy='mean')
pd.testing.assert_frame_equal(result, expected_result)
def test_fixed_effect_raise_exception_on_categorical_covariate(self):
data = pd.DataFrame(
data=[['0', 0.0, '1', 3.0],
['1', 0.0, '2', 2.0],
['1', 1.0, '3', 2.0],
['1', 1.0, '4', 1.0]],
columns=['control_1', 'control_2', 'variable_1', 'variable_2'],
index=['group1', 'group2', 'group3', 'group3'])
inference_data = data_preparation.InferenceData(data)
with self.assertRaises(data_preparation.CategoricalCovariateError):
inference_data.control_with_fixed_effect(
strategy='quick',
control_columns=['control_1', 'control_2'],
min_frequency=1)
def test_fixed_effect_demeaning_subtract_mean_in_groups(self):
data = pd.DataFrame(
data=[['0', 0.0, 1, 3.0],
['1', 0.0, 2, 2.0],
['1', 1.0, 3, 2.0],
['1', 1.0, 4, 1.0]],
columns=['control_1', 'control_2', 'variable_1', 'variable_2'],
index=['group1', 'group2', 'group3', 'group3'])
expected_result = pd.DataFrame(
data=[['0', 0.0, 2.5, 2.0],
['1', 0.0, 2.5, 2.0],
['1', 1.0, 2.0, 2.5],
['1', 1.0, 3.0, 1.5]],
columns=data.columns,
index=data.index).set_index(['control_1', 'control_2'], append=True)
inference_data = data_preparation.InferenceData(data)
result = inference_data.control_with_fixed_effect(
strategy='quick',
control_columns=['control_1', 'control_2'],
min_frequency=1)
pd.testing.assert_frame_equal(result, expected_result)
def test_address_low_variance_removes_column(self):
data = pd.DataFrame(
data=[[0.0, 1.0, 0.0, 10.0],
[0.0, 1.0, 0.0, 10.0],
[1.0, 1.0, 0.0, 5.00],
[1.0, 0.0, 0.0, 0.00]],
columns=['control', 'variable', 'variable_1', 'outcome'])
expected_result = pd.DataFrame(
data=[[0.0, 1.0, 10.0],
[0.0, 1.0, 10.0],
[1.0, 1.0, 5.00],
[1.0, 0.0, 0.00]],
columns=['control', 'variable', 'outcome'])
inference_data = data_preparation.InferenceData(
data, target_column='outcome')
result = inference_data.address_low_variance(drop=True)
pd.testing.assert_frame_equal(result, expected_result)
def test_vif_raises_error_on_singular_correlation_matrix(self):
singular_correlation_matrix_df = pd.DataFrame(
data=[[1.1, 2.1, 3.1, 4.1, 1.0],
[1.0, 2.0, 3.0, 4.0, 1.0],
[1.0, 2.0, 3.0, 4.0, 1.0],
[1.0, 2.0, 3.0, 4.0, 1.0]],
columns=['control', 'variable_1', 'variable_2', 'variable_3',
'outcome'])
inference_data = data_preparation.InferenceData(
singular_correlation_matrix_df, target_column='outcome')
with self.assertRaises(data_preparation.SingularDataError):
inference_data.address_collinearity_with_vif(
handle_singular_data_errors_automatically=False)
def test_vif_raises_error_on_ill_conditioned_correlation_matrix(self):
ill_conditioned_correlation_matrix_df = pd.DataFrame(
data=[[1.0, 2.0, 3.0, 4.0, 1.0],
[0.0, 2.0, 0.0, 1.0, 1.0],
[1.0, 1.0, 2.0, 5.0, 1.0],
[0.0, 2.0, 3.0, 0.0, 1.0]],
columns=['control', 'variable_1', 'variable_2', 'variable_3',
'outcome'])
inference_data = data_preparation.InferenceData(
ill_conditioned_correlation_matrix_df, target_column='outcome')
with self.assertRaises(data_preparation.SingularDataError):
inference_data.address_collinearity_with_vif(
handle_singular_data_errors_automatically=False)
def test_vif_error_has_correct_message(self):
ill_conditioned_correlation_matrix_df = pd.DataFrame(
data=[[1.0, 2.0, 3.0, 4.0, 1.0],
[0.0, 2.0, 0.0, 1.0, 1.0],
[1.0, 1.0, 2.0, 5.0, 1.0],
[0.0, 2.0, 3.0, 0.0, 1.0]],
columns=['control', 'variable_1', 'variable_2', 'variable_3',
'outcome'])
inference_data = data_preparation.InferenceData(
ill_conditioned_correlation_matrix_df, target_column='outcome')
expected_message = (
'Inference Data has a singular or nearly singular correlation matrix. '
'This could be caused by extremely correlated or collinear columns. '
'The three pairs of columns with the highest absolute correlation '
'coefficients are: (control,variable_3): 0.970, (variable_1,variable_3)'
': -0.700, (control,variable_1): -0.577. This could also be caused by '
'columns with extremiely low variance. Recommend running the '
'address_low_variance() method before VIF. Alternatively, consider '
'running address_collinearity_with_vif() with '
'use_correlation_matrix_inversion=False to avoid this error.'
)
with self.assertRaises(
data_preparation.SingularDataError, msg=expected_message):
inference_data.address_collinearity_with_vif(
handle_singular_data_errors_automatically=False)
def test_vif_noise_injection_catches_perfect_correlation(self):
iris = datasets.load_iris()
iris_data = pd.DataFrame(
data=np.c_[iris['data'], iris['target']],
columns=iris['feature_names'] + ['target'])
iris_data['perfectly_correlated_column'] = iris_data['petal length (cm)']
expected_result = iris_data.drop(
columns=['petal length (cm)', 'perfectly_correlated_column'])
inference_data = data_preparation.InferenceData(
iris_data, target_column='target')
result = inference_data.address_collinearity_with_vif(
vif_method='quick',
drop=True,
handle_singular_data_errors_automatically=True,
vif_threshold=50.0)
pd.testing.assert_frame_equal(result, expected_result)
def test_vif_noise_injection_catches_perfect_collinearity(self):
iris = datasets.load_iris()
iris_data = pd.DataFrame(
data=np.c_[iris['data'], iris['target']],
columns=iris['feature_names'] + ['target'])
iris_data['perfectly_collinear_column'] = iris_data[
'petal length (cm)'] + iris_data['petal width (cm)']
expected_result = iris_data.drop(columns=[
'petal length (cm)', 'petal width (cm)', 'perfectly_collinear_column'
])
inference_data = data_preparation.InferenceData(
iris_data, target_column='target')
result = inference_data.address_collinearity_with_vif(
vif_method='quick',
drop=True,
handle_singular_data_errors_automatically=True,
vif_threshold=50.0)
pd.testing.assert_frame_equal(result, expected_result)
def test_vif_noise_injection_fails_correctly_when_too_few_samples(self):
too_few_samples_df = pd.DataFrame(
data=[[1.0, 2.0, 3.0, 4.0, 1.0],
[0.0, 2.0, 0.0, 1.0, 1.0],
[1.0, 1.0, 2.0, 5.0, 1.0]],
columns=['control', 'variable_1', 'variable_2', 'variable_3',
'outcome'])
inference_data = data_preparation.InferenceData(
too_few_samples_df, target_column='outcome')
expected_regex = (
'Automatic attempt to resolve SingularDataError by '
'injecting artifical noise to the data has failed. This '
'probably means the dataset has too many features relative '
'to the number of samples.')
with self.assertRaisesRegex(data_preparation.SingularDataError,
expected_regex):
inference_data.address_collinearity_with_vif(
handle_singular_data_errors_automatically=True)
def test_vif_method_fails_correctly_with_unknown_value(self):
inference_data = data_preparation.InferenceData(self._missing_data)
with self.assertRaises(ValueError):
inference_data.address_collinearity_with_vif(
vif_method='incorrect_value')
@parameterized.named_parameters({
'testcase_name': 'scale_10',
'scaling': 10,
}, {
'testcase_name': 'scale_50',
'scaling': 50,
}, {
'testcase_name': 'scale_-50',
'scaling': -50,
})
def test_minmaxscaling_drops_appropriate_variables(self, scaling):
data = pd.DataFrame(
data=[[0.0, 1.0, 0.0, 10.0], [-0.5, 1.0, 0.0, 10.0],
[0.1, 1.0, 0.0, 5.00], [0.2, 0.0, 0.0, 0.00]],
columns=['variable_0', 'variable_1', 'variable_2', 'outcome'])
data = data * scaling
expected_result = data[['variable_1', 'outcome']]
inference_data = data_preparation.InferenceData(
data)
result = inference_data.address_low_variance(
threshold=.15,
drop=True,
minmax_scaling=True,
)
pd.testing.assert_frame_equal(result, expected_result)
def test_zscored_input_raises_warning(self):
data = pd.DataFrame(
data=[[0.0, 1.0, 0.0, 10.0], [-0.5, 1.0, 0.0, 10.0],
[0.1, 1.0, 0.0, 5.00], [0.2, 0.0, 0.0, 0.00]],
columns=['variable_0', 'variable_1', 'variable_2', 'variable_3'])
data = data.apply(stats.zscore).fillna(0)
inference_data = data_preparation.InferenceData(data)
with self.assertWarns(Warning):
_ = inference_data.address_low_variance()
def test_minmaxscaling_with_invalid_threshold_raises_warning(self):
data = pd.DataFrame(
data=[[0.0, 1.0, 0.0, 10.0], [-0.5, 1.0, 0.0, 10.0],
[0.1, 1.0, 0.0, 5.00], [0.2, 0.0, 0.0, 0.00]],
columns=['variable_0', 'variable_1', 'variable_2', 'variable_3'])
inference_data = data_preparation.InferenceData(data)
with self.assertWarns(Warning):
_ = inference_data.address_low_variance(minmax_scaling=True, threshold=.5)
def test_address_collinearity_with_vif_removes_column(self):
iris = datasets.load_iris()
iris_data = pd.DataFrame(
data=np.c_[iris['data'], iris['target']],
columns=iris['feature_names'] + ['target'])
expected_result = iris_data.drop(columns='petal length (cm)')
inference_data = data_preparation.InferenceData(
iris_data, target_column='target')
result = inference_data.address_collinearity_with_vif(
vif_method='sequential',
drop=True)
pd.testing.assert_frame_equal(result, expected_result)
def test_encode_categorical_covariate_dummy_variable_2(self):
data = pd.DataFrame(
data=[[0.0, 1.0, 'a', 10.0],
[0.0, 1.0, 'b', 10.0],
[1.0, 1.0, 'c', 5.00],
[1.0, 0.0, 'a', 0.00]],
columns=['control', 'variable_1', 'variable_2', 'outcome'])
expected_result = pd.DataFrame(
data=[[0.0, 1.0, 10.0, 1, 0, 0],
[0.0, 1.0, 10.0, 0, 1, 0],
[1.0, 1.0, 5.00, 0, 0, 1],
[1.0, 0.0, 0.00, 1, 0, 0]],
columns=[
'control', 'variable_1', 'outcome', 'variable_2_a', 'variable_2_b',
'variable_2_c'
])
inference_data = data_preparation.InferenceData(
data, target_column='outcome')
result = inference_data.encode_categorical_covariates(
columns=['variable_2'])
pd.testing.assert_frame_equal(result, expected_result)
@parameterized.named_parameters(
('single_selections', ['1', '2', '3'], ['1', '2', '3']),
('double_selection', ['1,2', '3'], ['1', '2', '3']),
('early_stopping', ['1', ''], ['1']),
('all_at_once', ['1,2,3'], ['1', '2', '3']),
)
def test_address_collinearity_with_vif_interactive(
self, user_inputs, expected_dropped):
dataframe = pd.DataFrame(
data=[[1.1, 2.1, 3.1, 4.1, 0],
[1.0, 2.0, 3.0, 4.0, 0],
[1.0, 2.0, 3.0, 4.0, 0],
[1.0, 2.0, 3.0, 4.0, 1]],
columns=['1', '2', '3', '4', 'target'])
data = data_preparation.InferenceData(dataframe, target_column='target')
with mock.patch.object(data_preparation, '_input_mock') as input_mock:
# Avoid Colab\Notebook prints in tests output
with mock.patch.object(data_preparation, '_print_mock') as _:
user_inputs = list(reversed(user_inputs))
input_mock.side_effect = lambda x: user_inputs.pop()
result = data.address_collinearity_with_vif(
vif_method='interactive',
drop=True,
use_correlation_matrix_inversion=False
)
pd.testing.assert_frame_equal(
result,
dataframe.drop(expected_dropped, axis=1))
@parameterized.named_parameters(
('onehot_returns_expected_bins', False, False, pd.DataFrame(
[[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 1]],
columns=['variable_(-0.02, 4.0]', 'variable_(4.0, 8.0]',
'variable_(8.0, 12.0]', 'variable_(12.0, 16.0]',
'variable_(16.0, 20.0]'])),
('equal_sized_onehot_returns_expected_bins', True, False, pd.DataFrame(
[[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1]],
columns=['variable_(-0.001, 2.0]', 'variable_(2.0, 4.0]',
'variable_(4.0, 6.0]', 'variable_(6.0, 8.0]',
'variable_(8.0, 20.0]'])),
('scalar_numeric_returns_expected_bins', False, True, pd.DataFrame(
[0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 4], columns=['variable'])),
('equal_sized_numeric_expected_bins', True, True, pd.DataFrame(
[0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4], columns=['variable'])),
)
def test_descretize(self, equal_sized_bins, numeric, expected_result):
data = data_preparation.InferenceData(pd.DataFrame(
data=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 20],
columns=['variable']))
result = data.discretize_numeric_covariate(
'variable', equal_sized_bins=equal_sized_bins, bins=5, numeric=numeric)
pd.testing.assert_frame_equal(result, expected_result, check_dtype=False)
@parameterized.named_parameters(
('with_groups_kfold_as_int',
3,
np.array([0, 0, 1, 1, 2, 2, 3, 3, 3, 3]),
[pd.DataFrame({'variable': [0, 1, 2, 3, 4, 5]}),
pd.DataFrame({'variable': [2, 3, 6, 7, 8, 9]},
index=[2, 3, 6, 7, 8, 9]),
pd.DataFrame({'variable': [0, 1, 4, 5, 6, 7, 8, 9]},
index=[0, 1, 4, 5, 6, 7, 8, 9])],
[pd.DataFrame({'variable': [6, 7, 8, 9]}, index=[6, 7, 8, 9]),
pd.DataFrame({'variable': [0, 1, 4, 5]}, index=[0, 1, 4, 5]),
pd.DataFrame({'variable': [2, 3]}, index=[2, 3])]),
('with_groups_kfold_as_object',
model_selection.GroupKFold(n_splits=3),
np.array([0, 0, 1, 1, 2, 2, 3, 3, 3, 3]),
[pd.DataFrame({'variable': [0, 1, 2, 3, 4, 5]}),
pd.DataFrame({'variable': [2, 3, 6, 7, 8, 9]},
index=[2, 3, 6, 7, 8, 9]),
pd.DataFrame({'variable': [0, 1, 4, 5, 6, 7, 8, 9]},
index=[0, 1, 4, 5, 6, 7, 8, 9])],
[pd.DataFrame({'variable': [6, 7, 8, 9]}, index=[6, 7, 8, 9]),
pd.DataFrame({'variable': [0, 1, 4, 5]}, index=[0, 1, 4, 5]),
pd.DataFrame({'variable': [2, 3]}, index=[2, 3])]),
)
def test_split_with_groups_yields_expected_folds_with_non_overlaping_groups(
self,
cross_validation,
groups,
expected_trains,
expected_tests):
data = data_preparation.InferenceData(
pd.DataFrame({
'variable': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
}))
iterator = zip(data.split(cross_validation=cross_validation, groups=groups),
expected_trains,
expected_tests)
for (train_data, test_data), expected_train, expected_test in iterator:
train_groups = set(groups[train_data.data.index.tolist()])
test_groups = set(groups[test_data.data.index.tolist()])
pd.testing.assert_frame_equal(
train_data.data, expected_train, check_dtype=False)
pd.testing.assert_frame_equal(
test_data.data, expected_test, check_dtype=False)
self.assertEmpty(train_groups.intersection(test_groups))
@parameterized.named_parameters(
('without_groups_kfold_as_int', 3,
[pd.DataFrame({'variable': [4, 5, 6, 7, 8, 9]},
index=[4, 5, 6, 7, 8, 9]),
pd.DataFrame({'variable': [0, 1, 2, 3, 7, 8, 9]},
index=[0, 1, 2, 3, 7, 8, 9]),
pd.DataFrame({'variable': [0, 1, 2, 3, 4, 5, 6]},
index=[0, 1, 2, 3, 4, 5, 6])],
[pd.DataFrame({'variable': [0, 1, 2, 3]}, index=[0, 1, 2, 3]),
pd.DataFrame({'variable': [4, 5, 6]}, index=[4, 5, 6]),
pd.DataFrame({'variable': [7, 8, 9]}, index=[7, 8, 9])]),
('without_groups_kfold_as_object',
model_selection.KFold(n_splits=3),
[pd.DataFrame({'variable': [4, 5, 6, 7, 8, 9]},
index=[4, 5, 6, 7, 8, 9]),
pd.DataFrame({'variable': [0, 1, 2, 3, 7, 8, 9]},
index=[0, 1, 2, 3, 7, 8, 9]),
pd.DataFrame({'variable': [0, 1, 2, 3, 4, 5, 6]},
index=[0, 1, 2, 3, 4, 5, 6])],
[pd.DataFrame({'variable': [0, 1, 2, 3]}, index=[0, 1, 2, 3]),
| pd.DataFrame({'variable': [4, 5, 6]}, index=[4, 5, 6]) | pandas.DataFrame |
from datetime import date, timedelta
import numpy as np
import pandas as pd
IDENT_BASE = "ABC"
def gen_identifiers(n=3000):
return [IDENT_BASE + str(x).zfill(6)
for x in range(n)]
def gen_dates(n=3000):
today = date.today()
start = today - timedelta(days=n)
return pd.date_range(start, today)
def gen_data():
ids = gen_identifiers()
dates = gen_dates()
data = np.random.random(len(ids))
results = []
for date in dates:
df = pd.DataFrame({
'identifier': ids,
'value': data
})
df['date'] = date
results.append(df)
return | pd.concat(results) | pandas.concat |
import pybullet as p
import time
import pybullet_data
import math as m
import random as r
import pandas as pd
physicsClient = p.connect(p.DIRECT)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.setGravity(0,0,-10)
planeId = p.loadURDF("../plane/plane.urdf")
robotStartPos = [0,0,0.2]
robotStartOrientation = p.getQuaternionFromEuler([0,0,0])
robotId = p.loadURDF("../../robot/PP/urdf/PP.urdf", robotStartPos, robotStartOrientation)
mode = p.POSITION_CONTROL
number_of_joints = p.getNumJoints(robotId)
print("Running...")
for i in range (100):
p.stepSimulation()
robotPos1, robotOrn1 = p.getBasePositionAndOrientation(robotId)
id_revolute_joints = [0, 3, 6, 9,
1, 4, 7, 10,
2, 5, 8, 11]
speeds = []
best_speeds = []
evals = []
s_max = 0
time_diff, t_0, t = 0, 0, 0
for i in range(1000):
# a = a_i + b_i*m.sin(i*w + c_i)
a_i_armpit, a_i_elbow, a_i_knee = r.uniform(0.01,0.4), r.uniform(0.01,0.4), r.uniform(0.01,0.4)
b_armpit = r.uniform(0.01, 0.6)
b_elbow = r.uniform(0.01, 1.2)
b_knee = r.uniform(0.01, 1)
c_i_armpit, c_i_elbow, c_i_knee = r.uniform(0.01,1), r.uniform(0.01,1), r.uniform(0.01,1)
w_i = r.uniform(0.001,0.1)
for x in range(1000):
p.stepSimulation()
armpit = a_i_armpit + b_armpit*m.sin(x*w_i + c_i_armpit)
elbow = a_i_elbow + b_elbow*m.sin(x*w_i + c_i_elbow)
knee = a_i_knee + b_knee*m.sin(x*w_i + c_i_knee)
p.setJointMotorControlArray(robotId, id_revolute_joints, controlMode=mode, targetPositions=[-armpit, -armpit, armpit, armpit, elbow, -elbow, -elbow, elbow, knee, -knee, -knee, knee])
if (x==0):
t_0 = time.time()
if (x==999):
t = time.time()
robotPos2, robotOrn2 = p.getBasePositionAndOrientation(robotId)
# Calculate distance travelled
x = robotPos2[0]-robotPos1[0]
y = robotPos2[1]-robotPos1[1]
dis = (x**2 + y**2)**0.5
# Calculate time passed
time_diff = t-t_0
# Calculate speed
s = dis/time_diff
print(s)
if (s>s_max):
s_max = s
best_speeds.append(s_max)
speeds.append(s_max)
evals.append(i)
print(a_i_armpit, a_i_elbow, a_i_knee, b_armpit, b_elbow, b_knee, c_i_armpit, c_i_elbow, c_i_knee, w_i)
else:
speeds.append(s)
best_speeds.append(s_max)
evals.append(i)
p.resetBasePositionAndOrientation(robotId, robotPos1, robotOrn1)
df = | pd.DataFrame({"Best Overall Speed" : best_speeds, "Number of Evaluations" : evals, "Speed": speeds}) | pandas.DataFrame |
# %%
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from catboost import CatBoostClassifier, CatBoostRegressor
from IPython.core.interactiveshell import InteractiveShell
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
# Setting styles
InteractiveShell.ast_node_interactivity = "all"
sns.set(style="whitegrid", color_codes=True, rc={"figure.figsize": (12.7, 9.27)})
random_state = 123
# %%
df = pd.read_csv(os.path.join("data", "processed", "train.csv"))
df = df.drop(columns=["train", "relativeposition", "spaceid"])
df_valid = pd.read_csv(os.path.join("data", "processed", "test.csv"))
df_valid = df_valid.drop(columns=["train", "relativeposition", "spaceid"])
# %%
X = df.drop(columns=["longitude", "latitude", "floor", "buildingid"])
y = pd.DataFrame(
{
"lon": df.longitude,
"lat": df.latitude,
"floor": df.floor,
"building": df.buildingid,
}
)
X_valid = df_valid.drop(columns=["longitude", "latitude", "floor", "buildingid"])
y_valid = pd.DataFrame(
{
"lon": df_valid.longitude,
"lat": df_valid.latitude,
"floor": df_valid.floor,
"building": df_valid.buildingid,
}
)
# %% level 1 models
catboost_lon_level1 = CatBoostRegressor(
loss_function="RMSE", eval_metric="RMSE", random_state=random_state
)
catboost_lat_level1 = CatBoostRegressor(
loss_function="RMSE", eval_metric="RMSE", random_state=random_state
)
catboost_building_level1 = CatBoostClassifier(
loss_function="MultiClass", eval_metric="MultiClass", random_state=random_state
)
catboost_floor_level1 = CatBoostClassifier(
loss_function="MultiClass", eval_metric="MultiClass", random_state=random_state
)
# %% Longitude
catboost_lon_level1.fit(X, y.lon)
# %% Latitude
catboost_lat_level1.fit(X, y.lat)
# %% Buildings
catboost_building_level1.fit(X, y.building)
# %% Floors
catboost_floor_level1.fit(X, y.floor)
# %% Predicting with models
pred_lon = catboost_lon_level1.predict(X)
pred_lat = catboost_lat_level1.predict(X)
pred_buildings = catboost_building_level1.predict_proba(X)
pred_floors = catboost_floor_level1.predict_proba(X)
# untangling predictions for different classes
# not sure which predictions refers to which building and floor
pred_building0 = pred_buildings[:, 0]
pred_building1 = pred_buildings[:, 1]
pred_building2 = pred_buildings[:, 2]
pred_floor0 = pred_floors[:, 0]
pred_floor1 = pred_floors[:, 1]
pred_floor2 = pred_floors[:, 2]
pred_floor3 = pred_floors[:, 3]
pred_floor4 = pred_floors[:, 4]
pred_valid_lon = catboost_lon_level1.predict(X_valid)
pred_valid_lat = catboost_lat_level1.predict(X_valid)
pred_valid_buildings = catboost_building_level1.predict_proba(X_valid)
pred_valid_floors = catboost_floor_level1.predict_proba(X_valid)
# untangling predictions for different classes
# not sure which predictions refers to which building and floor
pred_valid_building0 = pred_valid_buildings[:, 0]
pred_valid_building1 = pred_valid_buildings[:, 1]
pred_valid_building2 = pred_valid_buildings[:, 2]
pred_valid_floor0 = pred_valid_floors[:, 0]
pred_valid_floor1 = pred_valid_floors[:, 1]
pred_valid_floor2 = pred_valid_floors[:, 2]
pred_valid_floor3 = pred_valid_floors[:, 3]
pred_valid_floor4 = pred_valid_floors[:, 4]
# %% Creating a new training sets from the predictions
X_comb = pd.DataFrame(
{
"lon": pred_lon,
"lat": pred_lat,
"building0": pred_building0,
"building1": pred_building1,
"building2": pred_building2,
"floor0": pred_floor0,
"floor1": pred_floor1,
"floor2": pred_floor2,
"floor3": pred_floor3,
"floor4": pred_floor4,
}
)
# giving the second level models the predictions of other models
X_lon = pd.concat([X, X_comb], axis="columns").drop(columns=["lon"])
X_lat = | pd.concat([X, X_comb], axis="columns") | pandas.concat |
import glob
from pathlib import Path
from random import random
from typing import List
import matplotlib.pyplot as plt
import pandas as pd
from PIL import Image, ImageStat
def replace_white(im_file):
directory = im_file.split("/")[1]
file = im_file.split("/")[2]
im = Image.open(im_file)
width, height = im.size
pixels = im.load()
mean = 119
std = 20
# cnt = 0
# white_cnt = 0
# brightness = 140
#
# darkness_factor = 0.9
mode = im.mode
new_img = Image.new(mode, (width, height))
new_pixels = new_img.load()
print(pixels)
print(pixels[0, 0])
print(im_file)
for x in range(width):
for y in range(height):
(r, g, b, *a) = pixels[x, y]
l = luminocity(r, g, b)
if random() > 0.0:
if l < mean:
if len(a) == 1:
new_pixels[x, y] = (
r + r_component(std),
g + g_component(std),
b + b_component(std),
a[0]
)
else:
new_pixels[x, y] = (
r + r_component(std),
g + g_component(std),
b + b_component(std),
)
else:
if len(a) == 1:
new_pixels[x, y] = (
r - r_component(std),
g - g_component(std),
b - b_component(std),
a[0],
)
else:
new_pixels[x, y] = (
r - r_component(std),
g - g_component(std),
b - b_component(std),
)
else:
new_pixels[x, y] = pixels[x, y]
# if r >= 250 and g >= 250 and b >= 250:
# white_cnt += 1
# new_pixels[x, y] = (r, g, b, 0)
# if r > brightness or g > brightness or b > brightness and len(a) == 1 and a[0] != 0:
# # print("{} {}".format(x, y))
# cnt += 1
# new_pixels[x, y] = (int(r * darkness_factor), int(g * darkness_factor), int(b * darkness_factor), a[0])
# else:
# new_pixels[x, y] = pixels[x, y]
# print(white_cnt)
# stat = ImageStat.Stat(new_img)
# print("{},{}".format(directory + "/" + file, (stat.mean[0], stat.rms[0])))
Path("targets/" + directory + "/").mkdir(parents=True, exist_ok=True)
new_img.save("targets/" + directory + "/" + file)
# new_img.show()
# width, height = (1200, 800)
# mode = 'RGB'
# my_image = Image.new(mode, (width, height))
#
# # Load all the pixels.
# my_pixels = my_image.load()
#
# # Loop through all the pixels, and set each color randomly.
# for x in range(width):
# for y in range(height):
# r = randint(0, 255)
# g = randint(0, 255)
# b = randint(0, 255)
# pixel = (r, g, b)
# my_pixels[x, y] = pixel
#
# my_image.show()
def stats_report(files: List[str]):
list = []
for f in files:
data = distribution_of_luminocity(f)
data_df = pd.DataFrame(data)
dict = data_df.describe().to_dict()
dict = dict[0]
dict["name"] = f
list.append(pd.Series(dict))
df = pd.DataFrame(list)
df.to_csv("result.csv")
def replace_white_with_alpha(im_file):
directory = im_file.split("/")[1]
file = im_file.split("/")[2]
im = Image.open(im_file)
width, height = im.size
pixels = im.load()
image = Image.new("RGBA", im.size)
new_pixels = image.load()
for x in range(width):
for y in range(height):
(r, g, b, *a) = pixels[x, y]
if r >= 240 and g >= 240 and b >= 240:
new_pixels[x, y] = (255, 255, 255, 0)
else:
new_pixels[x, y] = pixels[x, y]
Path("targets/" + directory + "/").mkdir(parents=True, exist_ok=True)
image.save("targets/" + directory + "/" + file)
def replace_alpha_with_white(file):
directory = file.split("/")[1]
file_name = file.split("/")[2]
im = Image.open(file)
image = Image.new("RGB", im.size, "WHITE")
image.paste(im, (0, 0), im)
Path("targets/" + directory + "/").mkdir(parents=True, exist_ok=True)
image.save("targets/" + directory + "/" + file_name)
def luminocity(r: int, g: int, b: int) -> float:
return (0.21 * r) + (0.72 * g) + (0.07 * b)
def r_component(value: int) -> int:
# return int(0.21 * value)
return value
def g_component(value: int) -> int:
# return int(0.72 * value)
return value
def b_component(value: int) -> int:
# return int(0.07 * value)
return value
def distribution_for_files(files: List[str]):
total_lumi = []
for f in files:
total_lumi.extend(distribution_of_luminocity(f))
df = | pd.DataFrame(total_lumi) | pandas.DataFrame |
# Copyright 2020 (c) Cognizant Digital Business, Evolutionary AI. All rights reserved. Issued under the Apache 2.0 License.
import os
import argparse
import numpy as np
import pandas as pd
from copy import deepcopy
import neat
# Path to file containing neat prescriptors. Here we simply use a
# recent checkpoint of the population from train_prescriptor.py,
# but this is likely not the most complementary set of prescriptors.
# Many approaches can be taken to generate/collect more diverse sets.
# Note: this set can contain up to 10 prescriptors for evaluation.
from covid_xprize.examples.prescriptors.neat.utils import prepare_historical_df, CASES_COL, IP_COLS, IP_MAX_VALUES, \
add_geo_id, get_predictions, PRED_CASES_COL
PRESCRIPTORS_FILE = 'neat-checkpoint-0'
# Number of days the prescriptors look at in the past.
NB_LOOKBACK_DAYS = 14
def prescribe(start_date_str: str,
end_date_str: str,
path_to_prior_ips_file: str,
path_to_cost_file: str,
output_file_path) -> None:
start_date = | pd.to_datetime(start_date_str, format='%Y-%m-%d') | pandas.to_datetime |
# !/usr/bin/env python
# coding: utf-8
'''
@File : boost.py
@Time : 2020/04/13 13:46:21
@Author : <NAME>
@Version : 1.0
@Contact : <EMAIL>
'''
# This is used to build catboost model using extracted features
import argparse
import gc
import time
import os
import math
import catboost
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy import stats
from scipy.stats import spearmanr
from sklearn.cluster import KMeans
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
random_seed = 2020
num_class = 50
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
all_popularity_filepath = "/home/wangkai/ICIP/feature/label/popularity_TRAIN_20337.csv"
cluster_center_filepath = "/home/wangkai/ICIP/feature/label/cluster_center.csv"
cluser_label_filepath = "/home/wangkai/ICIP/feature/label/cluster_label_20337.csv"
# random
train_popularity_filepath = "/home/wangkai/ICIP/feature/label/train_label_random.csv"
validate_popularity_filepath = "/home/wangkai/ICIP/feature/label/validate_label_random.csv"
# # postdate
train_popularity_filepath="/home/wangkai/ICIP/feature/label/train_label_datetaken.csv"
validate_popularity_filepath="/home/wangkai/ICIP/feature/label/validate_label_datetaken.csv"
number_columns = ["PhotoCount", "MeanViews", "Contacts", "GroupsCount", "NumSets", "GroupsAvgPictures",
"GroupsAvgMembers", "Ispro", "HasStats", "AvgGroupsMemb", "AvgGroupPhotos", "NumGroups"] # 12
text_columns = ["Tags", "Title", "Description"] # 3
first_columns = ["FlickrId", "UserId"] # 2
train_feature_filepath = {
"original": "/home/wangkai/ICIP/feature/train/train_feature_20337.csv",
"fasttext": "/home/wangkai/ICIP/feature/train/FastText_tags+des_20337.csv",
"tfidf": "/home/wangkai/ICIP/feature/train/Tfidf_tags+des_20337.csv",
"lsa": "/home/wangkai/ICIP/feature/train/LSA_tags+title+des_20337.csv",
"lda": "/home/wangkai/ICIP/feature/train/LDA_tags+title+des_20337.csv",
"wordchar": "/home/wangkai/ICIP/feature/train/wordchar_tags+title+des_20337.csv",
"userid": "/home/wangkai/ICIP/feature/train/UserId256_20337.csv",
"image": "/home/wangkai/ICIP/feature/train/ResNext101_image_20337.csv"
}
test_feature_filepath = {
"original": "/home/wangkai/ICIP/feature/test/test_feature_7693.csv",
"fasttext": "/home/wangkai/ICIP/feature/test/FastText_tags+des_7693.csv",
"tfidf": "/home/wangkai/ICIP/feature/test/Tfidf_tags+des_7693.csv",
"lsa": "/home/wangkai/ICIP/feature/test/LSA_tags+title+des_7693.csv",
"lda": "/home/wangkai/ICIP/feature/test/LDA_tags+title+des_7693.csv",
"wordchar": "/home/wangkai/ICIP/feature/test/wordchar_tags+title+des_7693.csv",
"userid": "/home/wangkai/ICIP/feature/test/UserId256_7693.csv",
"image": "/home/wangkai/ICIP/feature/test/ResNext101_image_7693.csv"
}
def clutser(num_class=num_class):
df_popularity = pd.read_csv(all_popularity_filepath)
# 归一化
normalized_popularity = df_popularity.iloc[:, 1:].div(
df_popularity["Day30"], axis=0)
# 聚类的label
kmeans = KMeans(n_clusters=num_class, init="k-means++", n_init=100, max_iter=10000,
random_state=random_seed, n_jobs=-1, algorithm="auto").fit(normalized_popularity)
df_label = pd.DataFrame(
{"FlickrId": df_popularity["FlickrId"], "label": kmeans.labels_})
df_label.to_csv(cluser_label_filepath, index=False)
# 聚类中心
df_cluster_center = pd.DataFrame(kmeans.cluster_centers_)
df_cluster_center.columns = ["day"+str(i+1) for i in range(30)]
df_cluster_center.insert(0, column="label", value=np.arange(num_class))
df_cluster_center.to_csv(cluster_center_filepath, index=False)
def load_feature(feature_list, flag="train"):
feature_path = train_feature_filepath if flag == "train" else test_feature_filepath
for i, feature_name in enumerate(feature_list):
print("Loading {} ...".format(feature_name))
feature = pd.read_csv(feature_path[feature_name])
print("feature: {}, len:{}".format(
feature_name, len(feature.columns)-1))
if i == 0:
all_feature = feature
else:
all_feature = pd.merge(all_feature, feature)
useless = text_columns
all_feature.drop(useless, axis=1, inplace=True)
print(all_feature)
return all_feature
def calssify_catboost(train, validate):
cat_features = ["UserId"]
# cat_features=[]
train_data = catboost.Pool(
train.iloc[:, 1:-31], train["label"], cat_features=cat_features)
validata_data = catboost.Pool(
validate.iloc[:, 1:-31], validate["label"], cat_features=cat_features)
model = catboost.CatBoostClassifier(iterations=10000, learning_rate=0.01, depth=6, objective="MultiClass", classes_count=num_class, eval_metric="Accuracy", l2_leaf_reg=3.0,
min_data_in_leaf=1, boosting_type="Plain", use_best_model=False, thread_count=-1, task_type="GPU", devices="0", random_state=random_seed, verbose=300, early_stopping_rounds=1000)
model = model.fit(train_data, eval_set=validata_data, plot=False)
# predict label
preds = model.predict(validata_data)
preds = preds.flatten()
print("\nValidate\nACC: {}\tTotal right: {}".format(
np.sum(preds == validate["label"])/len(preds), np.sum(preds == validate["label"])))
# feature importance
df_important = pd.DataFrame(
{"feature_name": model.feature_names_, "importance": model.feature_importances_})
df_important = df_important.sort_values(by=["importance"], ascending=False)
print(df_important)
df_predict_label = pd.DataFrame(
{"FlickrId": validate["FlickrId"], "preds_label": preds})
return model, df_predict_label
def regression_catboost(train, validate):
cat_features = ["UserId"]
# cat_features=[]
p_train, p_validate = np.log(
train["Day30"]/4+1), np.log(validate["Day30"]/4+1)
# p_train,p_validate=train["Day30"],validate["Day30"]
train_data = catboost.Pool(
train.iloc[:, 1:-31], p_train, cat_features=cat_features)
validata_data = catboost.Pool(
validate.iloc[:, 1:-31], p_validate, cat_features=cat_features)
model = catboost.CatBoostRegressor(iterations=35000, learning_rate=0.003, depth=6, objective="MAPE", eval_metric="MAPE", custom_metric=["RMSE", "MAE", "MAPE"], l2_leaf_reg=3.0, min_data_in_leaf=1, boosting_type="Plain", use_best_model=True, thread_count=-1, task_type="GPU", devices="0", random_state=random_seed, verbose=300, early_stopping_rounds=1000, fold_permutation_block=1, bagging_temperature=0)
# model=catboost.CatBoostRegressor(iterations=100000, learning_rate=0.1, depth=6, objective="RMSE", eval_metric="RMSE",custom_metric=["RMSE","MAE","MAPE"], l2_leaf_reg=3.0, min_data_in_leaf=1, boosting_type="Plain", use_best_model=True, thread_count=-1, task_type="CPU",devices="0", random_state=random_seed, verbose=300, early_stopping_rounds=500)
model.fit(train_data, eval_set=validata_data, plot=False)
preds_p_validate = model.predict(validata_data)
preds_day30 = (np.exp(preds_p_validate)-1)*4
src, _ = spearmanr(validate["Day30"], preds_day30)
df_important = pd.DataFrame(
{"feature_name": model.feature_names_, "importance": model.feature_importances_})
df_important = df_important.sort_values(by=["importance"], ascending=False)
print(df_important)
df_predict_day30 = pd.DataFrame(
{"FlickrId": validate["FlickrId"], "Day30": validate["Day30"], "preds_day30": preds_day30})
return model, df_predict_day30
def train(classify_feature_list, regression_feature_list):
df_label = pd.read_csv(cluser_label_filepath)
df_train_popularity = pd.read_csv(train_popularity_filepath)
df_validate_popularity = pd.read_csv(validate_popularity_filepath)
train_label = pd.merge(df_label, df_train_popularity,
on="FlickrId", how="inner")
validate_label = pd.merge(
df_label, df_validate_popularity, on="FlickrId", how="inner")
# Classify
classify_feature = load_feature(classify_feature_list, flag="train")
train = pd.merge(classify_feature, train_label, on="FlickrId", how="inner")
validate = pd.merge(classify_feature, validate_label,
on="FlickrId", how="inner")
classify_model, df_predict_label = calssify_catboost(train, validate)
df_predict_label.to_csv(
"/home/wangkai/ICIP/predict_label.csv", index=False)
df_predict_label = pd.read_csv("/home/wangkai/ICIP/predict_label.csv")
regression_feature = load_feature(regression_feature_list, flag="train")
train = pd.merge(regression_feature, train_label,
on="FlickrId", how="inner")
validate = pd.merge(regression_feature, validate_label,
on="FlickrId", how="inner")
regression_model, df_predict_day30 = regression_catboost(train, validate)
df_predict_day30.to_csv("/home/wangkai/ICIP/temp/predict_day30.csv",index=False)
df_predict_day30=pd.read_csv("/home/wangkai/ICIP/temp/predict_day30.csv")
df_preds = pd.merge(df_predict_label, df_predict_day30,
on="FlickrId", how="inner")
df_cluster_center = pd.read_csv(cluster_center_filepath)
df_temp = pd.merge(df_preds, df_cluster_center, how="left",
left_on="preds_label", right_on="label")
# FlickrId,preds,回归对时,真相
df_preds_result = pd.concat([df_temp["FlickrId"], df_temp.iloc[:, -30:].mul(df_temp["preds_day30"], axis=0),
df_temp.iloc[:, -30:].mul(df_temp["Day30"], axis=0), validate.iloc[:, -30:]], axis=1)
columns = ["FlickrId"]+["preds_day"+str(i+1) for i in range(30)]+[
"regression_truth"+str(i+1) for i in range(30)]+["truth"+str(i+1) for i in range(30)]
df_preds_result.columns = columns
# analysis
y_preds = np.array(df_preds_result.iloc[:, 1:31])
y_regression_true = np.array(df_preds_result.iloc[:, 31:61])
y_true = np.array(df_preds_result.iloc[:, 61:])
# 对于预测结果
rmse_errors = np.sqrt([mean_squared_error(y_true[i], y_preds[i])
for i in range(y_true.shape[0])])
trmse = stats.trim_mean(rmse_errors, 0.25)
median_rmse = np.median(rmse_errors)
src, _ = spearmanr(y_true[:, -1], y_preds[:, -1])
print("\n Predict:")
print("RMSE(trimmed 0.25): {}".format(trmse))
print("RMSE(median): {}".format(median_rmse))
print("SRC: {}".format(src))
# 对于回归为真
rmse_errors = np.sqrt([mean_squared_error(
y_true[i], y_regression_true[i]) for i in range(y_true.shape[0])])
trmse = stats.trim_mean(rmse_errors, 0.25)
median_rmse = np.median(rmse_errors)
src, _ = spearmanr(y_true[:, -1], y_regression_true[:, -1])
print("\n for regression is true:")
print("RMSE(trimmed 0.25): {}".format(trmse))
print("RMSE(median): {}".format(median_rmse))
print("SRC: {}".format(src))
# classify_model = 1
return classify_model, regression_model
def parse_arguments():
parser = argparse.ArgumentParser(description=" ICIP Catboost model")
parser.add_argument("-classify_f", "--classify_feature", type=str,
choices=["original", "fasttext", "tfidf",
"lsa", "lda", "wordchar", "userid", "image"],
nargs="?",
const=["original"],
default=["original", "userid"],
help="which feature will be used for classify")
parser.add_argument("-reg_f", "--regression_feature", type=str,
choices=["original", "fasttext", "tfidf",
"lsa", "lda", "wordchar", "userid", "image"],
nargs="?",
const=["original"],
default=["original", "userid", "lda", "wordchar"],
help="which feature will be used for regression")
parser.add_argument("-output", "--submission_path", type=str,
default="/home/wangkai/ICIP/submission",
help="ICIP file(.csv) will be submit path")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_arguments()
# clutser(num_class=num_class)
calssify_model, regression_model = train(
args.classify_feature, args.regression_feature)
test_classify_feature=load_feature(args.classify_feature,flag="test")
predict_label=calssify_model.predict(test_classify_feature.iloc[:,1:])
df_predict_label=pd.DataFrame({"FlickrId":test_classify_feature["FlickrId"],"preds_label":predict_label.flatten()})
# predict Day30
test_regression_feature=load_feature(args.regression_feature,flag="test")
preds_p_test=regression_model.predict(test_regression_feature.iloc[:,1:])
preds_day30=(np.exp(preds_p_test)-1)*4
df_predict_day30=pd.DataFrame({"FlickrId":test_regression_feature["FlickrId"],"preds_day30":preds_day30})
df_preds=pd.merge(df_predict_label,df_predict_day30,on="FlickrId",how="inner")
df_cluster_center= | pd.read_csv(cluster_center_filepath) | pandas.read_csv |
from http.server import BaseHTTPRequestHandler, HTTPServer
import socketserver
import pickle
import urllib.request
import json
from pprint import pprint
from pandas.io.json import json_normalize
import pandas as pd
from sklearn import preprocessing
from sklearn.preprocessing import PolynomialFeatures
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import scale
from sklearn.preprocessing import PolynomialFeatures
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import Ridge
from math import sqrt
import os
import errno
from pymongo import MongoClient
import urllib.parse as urlparse
from influxdb import InfluxDBClient
from pymongo import MongoClient
import pandas as pd
from pandas.io.json import json_normalize
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import TheilSenRegressor
from sklearn.datasets import make_regression
class Terminus(BaseHTTPRequestHandler):
def getAllNodeNames(self,client):
queryResult = client.query("SHOW TAG VALUES FROM uptime WITH KEY=nodename;")
nodeNames_temp = list(queryResult.get_points())
dfnodeNames = pd.DataFrame(nodeNames_temp)
allNodeNames = dfnodeNames[:]["value"]
return allNodeNames
def getNamespaceNames(self,client,node):
nsQuery = client.query("SHOW TAG VALUES FROM uptime WITH KEY=namespace_name WHERE nodename = '"+node+"';")
nsQuery_temp = list(nsQuery.get_points())
dfnsNames = pd.DataFrame(nsQuery_temp)
allnsNames = dfnsNames[:]["value"]
return allnsNames
def getAllPodNames(self,client,node,ns_name):
queryResult = client.query("SHOW TAG VALUES FROM uptime WITH KEY = pod_name WHERE namespace_name = '"+ns_name+"' AND nodename = '"+node+"';")
podNames_temp = list(queryResult.get_points())
dfpodNames = pd.DataFrame(podNames_temp)
if dfpodNames.empty:
return dfpodNames
else:
allpodNames = dfpodNames[:]["value"]
return allpodNames
def getCPUUtilizationNode(self,client, node):
queryResult = client.query('SELECT * FROM "cpu/node_utilization" where nodename = \''+node+'\' AND type=\'node\';')
dfcpuUtilization = pd.DataFrame(queryResult['cpu/node_utilization'])
return dfcpuUtilization
def getCPUUtilizationPod(self,client, node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "cpu/usage_rate" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfcpuUtilization = pd.DataFrame(queryResult['cpu/usage_rate'])
return dfcpuUtilization
def getCPUUtilizationPodContainer(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "cpu/usage_rate" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod_container\';')
dfcpuUtilization = pd.DataFrame(queryResult['cpu/usage_rate'])
return dfcpuUtilization
def prepareCpuUtilization(self,client,node,ns_name, pod_name):
cpuUtilization = self.getCPUUtilizationNode(client,node)
podCpuUtilization = self.getCPUUtilizationPod(client,node,ns_name, pod_name)
containercpuUtilization = self.getCPUUtilizationPodContainer(client,node,ns_name, pod_name)
plt.plot(cpuUtilization.index, cpuUtilization['value'] *1000, 'r', label="node") # plotting t, a separately
plt.plot(podCpuUtilization.index, podCpuUtilization['value'], 'b', label="pod") # plotting t, b separately
plt.plot(containercpuUtilization.index, containercpuUtilization['value'], 'g', label="container") # plotting t, c separately
plt.legend(loc='upper left')
plt.show()
def getMemoryUtilizationNode(self,client,node):
queryResult = client.query('SELECT * FROM "memory/node_utilization" where nodename = \''+node+'\' AND type=\'node\';')
dfmemUtilization = pd.DataFrame(queryResult['memory/node_utilization'])
return dfmemUtilization
def getMemoryUtilizationPod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "memory/usage" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['memory/usage'])
return dfmemUtilization
def getMemoryUtilizationPodContainer(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "memory/usage" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod_container\';')
dfmemUtilization = pd.DataFrame(queryResult['memory/usage'])
return dfmemUtilization
def prepareMemoryUtilization(self,client,node,ns_name, pod_name):
memoryUtilization = self.getMemoryUtilizationNode(client,node)
podMemoryUtilization = self.getMemoryUtilizationPod(client,node,ns_name, pod_name)
containerMemoryUtilization = self.getMemoryUtilizationPodContainer(client,node,ns_name, pod_name)
plt.plot(memoryUtilization.index, memoryUtilization['value'], 'r', label="node") # plotting t, a separately
plt.plot(podMemoryUtilization.index, podMemoryUtilization['value'], 'b', label="pod") # plotting t, b separately
plt.plot(containerMemoryUtilization.index, containerMemoryUtilization['value'], 'g', label="container") # plotting t, c separately
plt.legend(loc='upper left')
plt.show()
def getNetworkTxRatePod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/tx_rate" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/tx_rate'])
return dfmemUtilization
def getNetworkTxPod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/tx" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/tx'])
return dfmemUtilization
def getNetworkTxErrorsPod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/tx_errors" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/tx_errors'])
return dfmemUtilization
def getNetworkTxErrorsRatePod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/tx_errors_rate" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/tx_errors_rate'])
return dfmemUtilization
def prepareNetworkTxRateUtilization(self,client,node,ns_name, pod_name):
podNetworTxRate = self.getNetworkTxRatePod(client,node,ns_name, pod_name)
podNetworTx = self.getNetworkTxPod(client,node,ns_name, pod_name)
podNetworkError = self.getNetworkTxErrorsPod(client,node,ns_name, pod_name)
podNetworkErrorRate = self.getNetworkTxErrorsRatePod(client,node,ns_name, pod_name)
plt.plot(podNetworTxRate.index, podNetworTxRate['value'], 'b') # plotting t, b separately
#plt.plot(podNetworTx.index, podNetworTx['value'], 'g') # plotting t, b separately
#plt.plot(podNetworkError.index, podNetworkError['value'], 'y') # plotting t, b separately
plt.plot(podNetworkErrorRate.index, podNetworkErrorRate['value'], 'r') # plotting t, b separately
plt.show()
def getNetworkRxRatePod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/rx_rate" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/rx_rate'])
return dfmemUtilization
def getNetworkRxPod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/rx" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/rx'])
return dfmemUtilization
def getNetworkRxErrorsPod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/rx_errors" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/rx_errors'])
return dfmemUtilization
def getNetworkRxErrorsRatePod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/rx_errors_rate" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/rx_errors_rate'])
return dfmemUtilization
def prepareNetworkRxRateUtilization(self,client,node,ns_name, pod_name):
podNetworRxRate = self.getNetworkRxRatePod(client,node,ns_name, pod_name)
podNetworRx = self.getNetworkRxPod(client,node,ns_name, pod_name)
podNetworkError = self.getNetworkRxErrorsPod(client,node,ns_name, pod_name)
podNetworkErrorRate = self.getNetworkRxErrorsRatePod(client,node,ns_name, pod_name)
plt.plot(podNetworRxRate.index, podNetworRxRate['value'], 'b') # plotting t, b separately
#plt.plot(podNetworRx.index, podNetworRx['value'], 'g') # plotting t, b separately
#plt.plot(podNetworkError.index, podNetworkError['value'], 'y') # plotting t, b separately
plt.plot(podNetworkErrorRate.index, podNetworkErrorRate['value'], 'r') # plotting t, b separately
plt.show()
def getRelevantNodeName(self,client,ns_name):
allNodeNames = self.getAllNodeNames(client)
#nsNames = getNamespaceNames(allNodeNames[0])
relevantNodes = []
for node in allNodeNames:
allPodNamesNode = self.getAllPodNames(client,node,'default')
if(not allPodNamesNode.empty):
relevantNodes.append(node)
return relevantNodes
def getNodeResourceUtilizationDf(self,client, nodeName):
Result_node_CPU = client.query("SELECT value from \"cpu/node_utilization\" where nodename = '"+nodeName+"' AND type = 'node' ")
Result_node_MEM = client.query("SELECT value from \"memory/node_utilization\" where nodename = '"+nodeName+"' AND type = 'node' ")
Result_node_CPU_Cores = client.query("SELECT mean(\"value\") FROM \"cpu/node_capacity\" where nodename = '"+nodeName+
"' AND type = 'node' GROUP BY time(1m)")
Result_node_mem_node = client.query("SELECT mean(\"value\")FROM \"memory/node_capacity\" where nodename = '"+
nodeName+"' AND type = 'node' GROUP BY time(1m)")
cpu_points = pd.DataFrame(Result_node_CPU.get_points())
cpu_points['time'] = pd.to_datetime(cpu_points['time'])
cpu_points = cpu_points.set_index('time')
cpu_points.columns = ['node_cpu_util']
mem_points = pd.DataFrame(Result_node_MEM.get_points())
mem_points['time'] = pd.to_datetime(mem_points['time'])
mem_points = mem_points.set_index('time')
mem_points.columns = ['node_mem_util']
cores_points = pd.DataFrame(Result_node_CPU_Cores.get_points())
cores_points['time'] = pd.to_datetime(cores_points['time'])
cores_points = cores_points.set_index('time')
cores_points.columns = ['node_cores']
mem_node_points = pd.DataFrame(Result_node_mem_node.get_points())
mem_node_points['time'] = pd.to_datetime(mem_node_points['time'])
mem_node_points = mem_node_points.set_index('time')
mem_node_points.columns = ['node_mem']
df_node =pd.concat([cpu_points, mem_points,cores_points,mem_node_points], axis=1)
return df_node
def getPodResourceUtilizationDf(self,client, node, ns_name, pod_name):
Result_Pod_CPU_usage = client.query('SELECT value FROM "cpu/usage_rate" where nodename = \''+node+
'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+
'\' AND type=\'pod\';')
Result_Pod_MEM_usage = client.query('SELECT value from \"memory/usage\" where nodename = \''+node+
'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+
'\' AND type=\'pod\';')
Result_Pod_CPU_limit = client.query('SELECT mean(\"value\") FROM "cpu/limit" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\' group by time(1m);')
Result_Pod_MEM_limit = client.query('SELECT mean(\"value\") from \"memory/limit\" where nodename = \''+node+
'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+
'\' AND type=\'pod\' group by time(1m);')
Result_Pod_CPU_requests = client.query('SELECT mean(\"value\") FROM "cpu/request" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\' group by time(1m);')
Result_Pod_MEM_requests = client.query('SELECT mean(\"value\") from \"memory/request\" where nodename = \''+node+
'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+
'\' AND type=\'pod\' group by time(1m);')
cpu_points_usage = pd.DataFrame(Result_Pod_CPU_usage.get_points())
cpu_points_usage['time'] = pd.to_datetime(cpu_points_usage['time'])
cpu_points_usage = cpu_points_usage.set_index('time')
cpu_points_usage.columns = ['pod_cpu_usage']
mem_points_usage = pd.DataFrame(Result_Pod_MEM_usage.get_points())
mem_points_usage['time'] = pd.to_datetime(mem_points_usage['time'])
mem_points_usage = mem_points_usage.set_index('time')
mem_points_usage.columns = ['pod_mem_usage']
cpu_points_limits = pd.DataFrame(Result_Pod_CPU_limit.get_points())
cpu_points_limits['time'] = pd.to_datetime(cpu_points_limits['time'])
cpu_points_limits = cpu_points_limits.set_index('time')
cpu_points_limits.columns = ['pod_cpu_limit']
mem_points_limits = pd.DataFrame(Result_Pod_MEM_limit.get_points())
mem_points_limits['time'] = pd.to_datetime(mem_points_limits['time'])
mem_points_limits = mem_points_limits.set_index('time')
mem_points_limits.columns = ['pod_mem_limit']
cpu_points_request = pd.DataFrame(Result_Pod_CPU_requests.get_points())
cpu_points_request['time'] = pd.to_datetime(cpu_points_request['time'])
cpu_points_request = cpu_points_request.set_index('time')
cpu_points_request.columns = ['pod_cpu_request']
mem_points_request = pd.DataFrame(Result_Pod_MEM_requests.get_points())
mem_points_request['time'] = pd.to_datetime(mem_points_request['time'])
mem_points_request = mem_points_request.set_index('time')
mem_points_request.columns = ['pod_mem_request']
df_pod =pd.concat([cpu_points_usage, mem_points_usage,cpu_points_limits,mem_points_limits,cpu_points_request,mem_points_request ], axis=1)
return df_pod
def getRequestsDf(self,clientK6):
queryResult = clientK6.query('SELECT sum("value") FROM "vus" group by time(1m);')
vus = pd.DataFrame(queryResult['vus'])
vus.columns = ['vus','time']
vus = vus.set_index('time')
queryResultReqs = clientK6.query('SELECT sum("value") FROM "http_reqs" group by time(1m);')
reqs = pd.DataFrame(queryResultReqs['http_reqs'])
reqs.columns = ['requests','time']
reqs = reqs.set_index('time')
queryResultReqsDuration95 = clientK6.query('SELECT percentile("value", 95) FROM "http_req_duration" group by time(1m) ;')
reqs_duration95 = pd.DataFrame(queryResultReqsDuration95['http_req_duration'])
reqs_duration95.columns = [ 'requests_duration_percentile_95','time']
reqs_duration95 = reqs_duration95.set_index('time')
queryResultReqsDuration90 = clientK6.query('SELECT percentile("value", 90) FROM "http_req_duration" group by time(1m) ;')
reqs_duration90 = pd.DataFrame(queryResultReqsDuration90['http_req_duration'])
reqs_duration90.columns = ['requests_duration_percentile_90','time']
reqs_duration90 = reqs_duration90.set_index('time')
queryResultMaxDuration = clientK6.query('SELECT max("value") FROM "http_req_duration" group by time(1m);')
reqs_duration_max = pd.DataFrame(queryResultMaxDuration['http_req_duration'])
reqs_duration_max.columns = ['requests_duration_max','time']
reqs_duration_max = reqs_duration_max.set_index('time')
queryResultMinDuration = clientK6.query('SELECT min("value") FROM "http_req_duration" group by time(1m);')
reqs_duration_min = pd.DataFrame(queryResultMinDuration['http_req_duration'])
reqs_duration_min.columns = ['requests_duration_min','time']
reqs_duration_min = reqs_duration_min.set_index('time')
queryResultMeanDuration = clientK6.query('SELECT mean("value") FROM "http_req_duration" group by time(1m);')
reqs_duration_mean = pd.DataFrame(queryResultMeanDuration['http_req_duration'])
reqs_duration_mean.columns = ['requests_duration_mean','time']
reqs_duration_mean = reqs_duration_mean.set_index('time')
queryResultMedianDuration = clientK6.query('SELECT median("value") FROM "http_req_duration" group by time(1m);')
reqs_duration_median = pd.DataFrame(queryResultMedianDuration['http_req_duration'])
reqs_duration_median.columns = ['requests_duration_median','time']
reqs_duration_median = reqs_duration_median.set_index('time')
finalDF = pd.merge(vus, reqs, left_index=True, right_index=True)
finalDF = | pd.merge(finalDF, reqs_duration95, left_index=True, right_index=True) | pandas.merge |
from pymatreader import read_mat
from scipy import sparse
import numpy as np
import os
from datetime import datetime, timedelta
import pandas
from amftrack.pipeline.functions.image_processing.extract_graph import from_sparse_to_graph, generate_nx_graph, sparse_to_doc
import cv2
import json
import pandas as pd
from amftrack.transfer.functions.transfer import download, zip_file,unzip_file,upload
from tqdm.autonotebook import tqdm
import dropbox
#test_remote
#zfjn
#fjzz
path_code = os.getenv('HOME')+"/pycode/MscThesis/"
# path_code = r'C:\Users\coren\Documents\PhD\Code\AMFtrack'
# plate_info = pandas.read_excel(path_code+r'/plate_info/SummaryAnalizedPlates.xlsx',engine='openpyxl',header=3,)
API = str(np.load(os.getenv('HOME')+'/pycode/API_drop.npy'))
target = os.getenv('HOME')+'/pycode/data_info.json'
def get_path(date, plate, skeleton, row=None, column=None, extension=".mat"):
def get_number(number):
if number < 10:
return f"0{number}"
else:
return str(number)
root_path = (
r"//sun.amolf.nl/shimizu-data/home-folder/oyartegalvez/Drive_AMFtopology/PRINCE"
)
date_plate = f"/2020{date}"
plate = f"_Plate{plate}"
if skeleton:
end = "/Analysis/Skeleton" + extension
else:
end = "/Img" + f"/Img_r{get_number(row)}_c{get_number(column)}.tif"
return root_path + date_plate + plate + end
def get_dates_datetime(directory, plate):
listdir = os.listdir(directory)
list_dir_interest = [
name
for name in listdir
if name.split("_")[-1] == f'Plate{0 if plate<10 else ""}{plate}'
]
ss = [name.split("_")[0] for name in list_dir_interest]
ff = [name.split("_")[1] for name in list_dir_interest]
dates_datetime = [
datetime(
year=int(ss[i][:4]),
month=int(ss[i][4:6]),
day=int(ss[i][6:8]),
hour=int(ff[i][0:2]),
minute=int(ff[i][2:4]),
)
for i in range(len(list_dir_interest))
]
dates_datetime.sort()
return dates_datetime
def get_dirname(date,plate):
return(f'{date.year}{0 if date.month<10 else ""}{date.month}{0 if date.day<10 else ""}{date.day}_{0 if date.hour<10 else ""}{date.hour}{0 if date.minute<10 else ""}{date.minute}_Plate{0 if plate<10 else ""}{plate}')
# def get_plate_number(position_number,date):
# for index,row in plate_info.loc[plate_info['Position #']==position_number].iterrows():
# if type(row['crossed date'])==datetime:
# date_crossed = row['crossed date']
# date_harvest = row['harvest date']+timedelta(days=1)
# else:
# date_crossed = datetime.strptime(row['crossed date'], "%d.%m.%Y")
# date_harvest = datetime.strptime(row['harvest date'], "%d.%m.%Y")+timedelta(days=1)
# if date>= date_crossed and date<= date_harvest:
# return(row['Plate #'])
# def get_postion_number(plate_number):
# for index,row in plate_info.loc[plate_info['Plate #']==plate_number].iterrows():
# return(row['Position #'])
# def get_begin_index(plate_number,directory):
# plate = get_postion_number(plate_number)
# dates_datetime = get_dates_datetime(directory,plate)
# plate_number_found = get_plate_number(plate,dates_datetime[0])
# print(0,plate_number)
# for i in range(len(dates_datetime)):
# new_plate_number = get_plate_number(plate,dates_datetime[i])
# if plate_number_found!=new_plate_number:
# plate_number_found=new_plate_number
# print(i,plate_number_found)
# if plate_number_found == plate_number:
# return(i,dates_datetime[i])
def shift_skeleton(skeleton, shift):
shifted_skeleton = sparse.dok_matrix(skeleton.shape, dtype=bool)
for pixel in skeleton.keys():
# print(pixel[0]+shift[0],pixel[1]+shift[1])
if (
skeleton.shape[0] > np.ceil(pixel[0] + shift[0]) > 0
and skeleton.shape[1] > np.ceil(pixel[1] + shift[1]) > 0
):
shifted_pixel = (
np.round(pixel[0] + shift[0]),
np.round(pixel[1] + shift[1]),
)
shifted_skeleton[shifted_pixel] = 1
return shifted_skeleton
def transform_skeleton_final_for_show(skeleton_doc,Rot,trans):
skeleton_transformed={}
transformed_keys = np.round(np.transpose(np.dot(Rot,np.transpose(np.array(list(skeleton_doc.keys())))))+trans).astype(np.int)
i=0
for pixel in list(transformed_keys):
i+=1
skeleton_transformed[(pixel[0],pixel[1])]=1
skeleton_transformed_sparse=sparse.lil_matrix((27000, 60000))
for pixel in list(skeleton_transformed.keys()):
i+=1
skeleton_transformed_sparse[(pixel[0],pixel[1])]=1
return(skeleton_transformed_sparse)
def get_skeleton(exp,boundaries,t,directory):
i = t
plate = exp.plate
listdir=os.listdir(directory)
dates = exp.dates
date =dates [i]
directory_name = get_dirname(date, plate)
path_snap=directory+directory_name
skel = read_mat(path_snap+'/Analysis/skeleton_pruned_realigned.mat')
skelet = skel['skeleton']
skelet = sparse_to_doc(skelet)
Rot= skel['R']
trans = skel['t']
skel_aligned = transform_skeleton_final_for_show(skelet,np.array([[1,0],[0,1]]),np.array([0,0]))
output = skel_aligned[boundaries[2]:boundaries[3],boundaries[0]:boundaries[1]].todense()
kernel = np.ones((5,5),np.uint8)
output = cv2.dilate(output.astype(np.uint8),kernel,iterations = 2)
return(output,Rot,trans)
def get_param(folder,directory): #Very ugly but because interfacing with Matlab so most elegant solution.
path_snap=directory+folder
file1 = open(path_snap + "/param.m", 'r')
Lines = file1.readlines()
ldict = {}
for line in Lines:
exec(line.split(';')[0],globals(),ldict)
files = [ '/Img/TileConfiguration.txt.registered', '/Analysis/skeleton_compressed.mat',
'/Analysis/skeleton_masked_compressed.mat',
'/Analysis/skeleton_pruned_compressed.mat', '/Analysis/transform.mat',
'/Analysis/transform_corrupt.mat',
'/Analysis/skeleton_realigned_compressed.mat','/Analysis/nx_graph_pruned.p', '/Analysis/nx_graph_pruned_width.p','/Analysis/nx_graph_pruned_labeled.p']
for file in files:
ldict[file] = os.path.isfile(path_snap + file)
return(ldict)
def update_plate_info(directory):
listdir = os.listdir(directory)
source = f'/data_info.json'
download(API,source,target,end='')
plate_info = json.load(open(target, 'r'))
with tqdm(total=len(listdir), desc="analysed") as pbar:
for folder in listdir:
path_snap=directory+folder
if os.path.isfile(path_snap + "/param.m"):
params = get_param(folder,directory)
ss = folder.split("_")[0]
ff = folder.split("_")[1]
date = datetime(
year=int(ss[:4]),
month=int(ss[4:6]),
day=int(ss[6:8]),
hour=int(ff[0:2]),
minute=int(ff[2:4]),
)
params['date'] = datetime.strftime(date, "%d.%m.%Y, %H:%M:")
params['folder'] = folder
total_path = directory+folder
plate_info[total_path] = params
pbar.update(1)
with open(target, 'w') as jsonf:
json.dump(plate_info, jsonf, indent=4)
upload(API,target,f"{source}", chunk_size=256 * 1024 * 1024,
)
def get_data_info():
source = f'/data_info.json'
download(API,source,target,end='')
data_info = pd.read_json(target,
convert_dates=True).transpose()
data_info.index.name = 'total_path'
data_info.reset_index(inplace=True)
return(data_info)
def get_current_folders(directory):
if directory == 'dropbox':
data = []
dbx = dropbox.Dropbox(API)
response = dbx.files_list_folder("",recursive = True)
# for fil in response.entries:
listfiles = [file for file in response.entries if file.name.split(".")[-1]=="zip"]
with tqdm(total=len(listfiles), desc="analysed") as pbar:
for file in listfiles:
source = (file.path_lower.split(".")[0])+"_info.json"
target = f'{os.getenv("TEMP")}{file.name.split(".")[0]}.json'
download(API,source,target)
data.append( | pd.read_json(target) | pandas.read_json |
"""
Copyright (c) 2018, <NAME>
All rights reserved.
Licensed under the Modified BSD License.
For full license terms see LICENSE.txt
"""
import pandas as pd
class MothurIO(object):
@staticmethod
def read_shared_file(filepath):
"""Reads in and formats mothur shared file."""
data = pd.read_table(filepath)
data = data.drop(['label', 'numOtus'], axis=1)
data = data.set_index('Group').transpose()
# format the index for better consistency
data.index = data.index.rename(None)
data = data.sort_index()
return data
@staticmethod
def read_count_file(filepath):
"""Reads in and formats mothur count_file."""
data = pd.read_table(filepath, index_col=0)
data = data.drop(['total'], axis=1)
# format the index for better consistency
data.index = data.index.rename(None)
data = data.sort_index()
return data
@staticmethod
def read_taxonomy_file(filepath):
"""Reads in and formats mothur taxonomy file."""
data = pd.read_table(filepath, names=['OTU', 'Taxonomy'])
classifications = data['Taxonomy']
classifications = classifications.str.split(';', expand=True).drop(6, axis=1)
classifications.columns = list(range(1, 7))
features_names = data['OTU']
data = pd.concat([features_names, classifications], axis=1)
data = data.set_index('OTU')
# format the index for better consistency
data.index = data.index.rename(None)
data = data.sort_index()
return data
@staticmethod
def read_constaxonomy_file(filepath):
"""Reads in and formats mothur cons.taxonomy file."""
data = pd.read_table(filepath)
classifications = data['Taxonomy']
classifications = classifications.str.split(';', expand=True).drop(6, axis=1)
classifications.columns = list(range(1, 7))
features_names = data['OTU']
data = pd.concat([features_names, classifications], axis=1)
data = data.set_index('OTU')
# format the index for better consistency
data.index = data.index.rename(None)
data = data.sort_index()
return data
@staticmethod
def read_fasta_file(filepath):
"""Reads in and formats mothur fasta file."""
# the data is in the fasta file format with alternating lines of sequence name and sequence data
# we can read the data in as a single column and reshape it to separate out names from the sequences
data = pd.read_table(filepath, header=None)
data = pd.DataFrame(data.values.reshape(len(data) // 2, 2))
data.columns = ['seqName', 'seq']
# sequence names are in the fasta format and preceeded with '>' which we must remove
data.index = data['seqName'].str.split('>', 1, expand=True)[1]
data = data.drop('seqName', axis=1)
data.index = data.index.rename(None)
return data
@staticmethod
def read_repfasta_file(filepath):
"""Reads in and formats mothur fasta file."""
# the data is in the fasta file format with alternating lines of sequence name and sequence data
# we can read the data in as a single column and reshape it to separate out names from the sequences
data = | pd.read_table(filepath, header=None, sep=',') | pandas.read_table |
""" test the scalar Timedelta """
import numpy as np
from datetime import timedelta
import pandas as pd
import pandas.util.testing as tm
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type as ct
from pandas import (Timedelta, TimedeltaIndex, timedelta_range, Series,
to_timedelta, compat, isnull)
from pandas._libs.tslib import iNaT, NaTType
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
pass
def test_construction(self):
expected = np.timedelta64(10, 'D').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta(10, unit='d').value, expected)
self.assertEqual(Timedelta(10.0, unit='d').value, expected)
self.assertEqual(Timedelta('10 days').value, expected)
self.assertEqual(Timedelta(days=10).value, expected)
self.assertEqual(Timedelta(days=10.0).value, expected)
expected += np.timedelta64(10, 's').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta('10 days 00:00:10').value, expected)
self.assertEqual(Timedelta(days=10, seconds=10).value, expected)
self.assertEqual(
Timedelta(days=10, milliseconds=10 * 1000).value, expected)
self.assertEqual(
Timedelta(days=10, microseconds=10 * 1000 * 1000).value, expected)
# test construction with np dtypes
# GH 8757
timedelta_kwargs = {'days': 'D',
'seconds': 's',
'microseconds': 'us',
'milliseconds': 'ms',
'minutes': 'm',
'hours': 'h',
'weeks': 'W'}
npdtypes = [np.int64, np.int32, np.int16, np.float64, np.float32,
np.float16]
for npdtype in npdtypes:
for pykwarg, npkwarg in timedelta_kwargs.items():
expected = np.timedelta64(1,
npkwarg).astype('m8[ns]').view('i8')
self.assertEqual(
Timedelta(**{pykwarg: npdtype(1)}).value, expected)
# rounding cases
self.assertEqual(Timedelta(82739999850000).value, 82739999850000)
self.assertTrue('0 days 22:58:59.999850' in str(Timedelta(
82739999850000)))
self.assertEqual(Timedelta(123072001000000).value, 123072001000000)
self.assertTrue('1 days 10:11:12.001' in str(Timedelta(
123072001000000)))
# string conversion with/without leading zero
# GH 9570
self.assertEqual(Timedelta('0:00:00'), timedelta(hours=0))
self.assertEqual(Timedelta('00:00:00'), timedelta(hours=0))
self.assertEqual(Timedelta('-1:00:00'), -timedelta(hours=1))
self.assertEqual(Timedelta('-01:00:00'), -timedelta(hours=1))
# more strings & abbrevs
# GH 8190
self.assertEqual(Timedelta('1 h'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hour'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hr'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hours'), timedelta(hours=1))
self.assertEqual(Timedelta('-1 hours'), -timedelta(hours=1))
self.assertEqual(Timedelta('1 m'), timedelta(minutes=1))
self.assertEqual(Timedelta('1.5 m'), timedelta(seconds=90))
self.assertEqual(Timedelta('1 minute'), timedelta(minutes=1))
self.assertEqual(Timedelta('1 minutes'), timedelta(minutes=1))
self.assertEqual(Timedelta('1 s'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 second'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 seconds'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 ms'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 milli'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 millisecond'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 us'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1 micros'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1 microsecond'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1.5 microsecond'),
Timedelta('00:00:00.000001500'))
self.assertEqual(Timedelta('1 ns'), Timedelta('00:00:00.000000001'))
self.assertEqual(Timedelta('1 nano'), Timedelta('00:00:00.000000001'))
self.assertEqual(Timedelta('1 nanosecond'),
Timedelta('00:00:00.000000001'))
# combos
self.assertEqual(Timedelta('10 days 1 hour'),
timedelta(days=10, hours=1))
self.assertEqual(Timedelta('10 days 1 h'), timedelta(days=10, hours=1))
self.assertEqual(Timedelta('10 days 1 h 1m 1s'), timedelta(
days=10, hours=1, minutes=1, seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s'), -
timedelta(days=10, hours=1, minutes=1, seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s'), -
timedelta(days=10, hours=1, minutes=1, seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s 3us'), -
timedelta(days=10, hours=1, minutes=1,
seconds=1, microseconds=3))
self.assertEqual(Timedelta('-10 days 1 h 1.5m 1s 3us'), -
timedelta(days=10, hours=1, minutes=1,
seconds=31, microseconds=3))
# currently invalid as it has a - on the hhmmdd part (only allowed on
# the days)
self.assertRaises(ValueError,
lambda: Timedelta('-10 days -1 h 1.5m 1s 3us'))
# only leading neg signs are allowed
self.assertRaises(ValueError,
lambda: Timedelta('10 days -1 h 1.5m 1s 3us'))
# no units specified
self.assertRaises(ValueError, lambda: Timedelta('3.1415'))
# invalid construction
tm.assertRaisesRegexp(ValueError, "cannot construct a Timedelta",
lambda: Timedelta())
tm.assertRaisesRegexp(ValueError, "unit abbreviation w/o a number",
lambda: Timedelta('foo'))
tm.assertRaisesRegexp(ValueError,
"cannot construct a Timedelta from the passed "
"arguments, allowed keywords are ",
lambda: Timedelta(day=10))
# roundtripping both for string and value
for v in ['1s', '-1s', '1us', '-1us', '1 day', '-1 day',
'-23:59:59.999999', '-1 days +23:59:59.999999', '-1ns',
'1ns', '-23:59:59.999999999']:
td = Timedelta(v)
self.assertEqual(Timedelta(td.value), td)
# str does not normally display nanos
if not td.nanoseconds:
self.assertEqual(Timedelta(str(td)), td)
self.assertEqual(Timedelta(td._repr_base(format='all')), td)
# floats
expected = np.timedelta64(
10, 's').astype('m8[ns]').view('i8') + np.timedelta64(
500, 'ms').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta(10.5, unit='s').value, expected)
# nat
self.assertEqual(Timedelta('').value, iNaT)
self.assertEqual(Timedelta('nat').value, iNaT)
self.assertEqual(Timedelta('NAT').value, iNaT)
self.assertEqual(Timedelta(None).value, iNaT)
self.assertEqual(Timedelta(np.nan).value, iNaT)
self.assertTrue(isnull(Timedelta('nat')))
# offset
self.assertEqual(to_timedelta(pd.offsets.Hour(2)),
Timedelta('0 days, 02:00:00'))
self.assertEqual(Timedelta(pd.offsets.Hour(2)),
Timedelta('0 days, 02:00:00'))
self.assertEqual(Timedelta(pd.offsets.Second(2)),
Timedelta('0 days, 00:00:02'))
# unicode
# GH 11995
expected = Timedelta('1H')
result = pd.Timedelta(u'1H')
self.assertEqual(result, expected)
self.assertEqual(to_timedelta(pd.offsets.Hour(2)),
Timedelta(u'0 days, 02:00:00'))
self.assertRaises(ValueError, lambda: Timedelta(u'foo bar'))
def test_overflow_on_construction(self):
# xref https://github.com/statsmodels/statsmodels/issues/3374
value = pd.Timedelta('1day').value * 20169940
self.assertRaises(OverflowError, pd.Timedelta, value)
def test_total_seconds_scalar(self):
# GH 10939
rng = Timedelta('1 days, 10:11:12.100123456')
expt = 1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456. / 1e9
tm.assert_almost_equal(rng.total_seconds(), expt)
rng = Timedelta(np.nan)
self.assertTrue(np.isnan(rng.total_seconds()))
def test_repr(self):
self.assertEqual(repr(Timedelta(10, unit='d')),
"Timedelta('10 days 00:00:00')")
self.assertEqual(repr(Timedelta(10, unit='s')),
"Timedelta('0 days 00:00:10')")
self.assertEqual(repr(Timedelta(10, unit='ms')),
"Timedelta('0 days 00:00:00.010000')")
self.assertEqual(repr(Timedelta(-10, unit='ms')),
"Timedelta('-1 days +23:59:59.990000')")
def test_conversion(self):
for td in [Timedelta(10, unit='d'),
Timedelta('1 days, 10:11:12.012345')]:
pydt = td.to_pytimedelta()
self.assertTrue(td == Timedelta(pydt))
self.assertEqual(td, pydt)
self.assertTrue(isinstance(pydt, timedelta) and not isinstance(
pydt, Timedelta))
self.assertEqual(td, np.timedelta64(td.value, 'ns'))
td64 = td.to_timedelta64()
self.assertEqual(td64, np.timedelta64(td.value, 'ns'))
self.assertEqual(td, td64)
self.assertTrue(isinstance(td64, np.timedelta64))
# this is NOT equal and cannot be roundtriped (because of the nanos)
td = Timedelta('1 days, 10:11:12.012345678')
self.assertTrue(td != td.to_pytimedelta())
def test_freq_conversion(self):
td = Timedelta('1 days 2 hours 3 ns')
result = td / np.timedelta64(1, 'D')
self.assertEqual(result, td.value / float(86400 * 1e9))
result = td / np.timedelta64(1, 's')
self.assertEqual(result, td.value / float(1e9))
result = td / np.timedelta64(1, 'ns')
self.assertEqual(result, td.value)
def test_fields(self):
def check(value):
# that we are int/long like
self.assertTrue(isinstance(value, (int, compat.long)))
# compat to datetime.timedelta
rng = to_timedelta('1 days, 10:11:12')
self.assertEqual(rng.days, 1)
self.assertEqual(rng.seconds, 10 * 3600 + 11 * 60 + 12)
self.assertEqual(rng.microseconds, 0)
self.assertEqual(rng.nanoseconds, 0)
self.assertRaises(AttributeError, lambda: rng.hours)
self.assertRaises(AttributeError, lambda: rng.minutes)
self.assertRaises(AttributeError, lambda: rng.milliseconds)
# GH 10050
check(rng.days)
check(rng.seconds)
check(rng.microseconds)
check(rng.nanoseconds)
td = Timedelta('-1 days, 10:11:12')
self.assertEqual(abs(td), Timedelta('13:48:48'))
self.assertTrue(str(td) == "-1 days +10:11:12")
self.assertEqual(-td, Timedelta('0 days 13:48:48'))
self.assertEqual(-Timedelta('-1 days, 10:11:12').value, 49728000000000)
self.assertEqual(Timedelta('-1 days, 10:11:12').value, -49728000000000)
rng = to_timedelta('-1 days, 10:11:12.100123456')
self.assertEqual(rng.days, -1)
self.assertEqual(rng.seconds, 10 * 3600 + 11 * 60 + 12)
self.assertEqual(rng.microseconds, 100 * 1000 + 123)
self.assertEqual(rng.nanoseconds, 456)
self.assertRaises(AttributeError, lambda: rng.hours)
self.assertRaises(AttributeError, lambda: rng.minutes)
self.assertRaises(AttributeError, lambda: rng.milliseconds)
# components
tup = pd.to_timedelta(-1, 'us').components
self.assertEqual(tup.days, -1)
self.assertEqual(tup.hours, 23)
self.assertEqual(tup.minutes, 59)
self.assertEqual(tup.seconds, 59)
self.assertEqual(tup.milliseconds, 999)
self.assertEqual(tup.microseconds, 999)
self.assertEqual(tup.nanoseconds, 0)
# GH 10050
check(tup.days)
check(tup.hours)
check(tup.minutes)
check(tup.seconds)
check(tup.milliseconds)
check(tup.microseconds)
check(tup.nanoseconds)
tup = Timedelta('-1 days 1 us').components
self.assertEqual(tup.days, -2)
self.assertEqual(tup.hours, 23)
self.assertEqual(tup.minutes, 59)
self.assertEqual(tup.seconds, 59)
self.assertEqual(tup.milliseconds, 999)
self.assertEqual(tup.microseconds, 999)
self.assertEqual(tup.nanoseconds, 0)
def test_nat_converters(self):
self.assertEqual(to_timedelta(
'nat', box=False).astype('int64'), iNaT)
self.assertEqual(to_timedelta(
'nan', box=False).astype('int64'), iNaT)
def testit(unit, transform):
# array
result = to_timedelta(np.arange(5), unit=unit)
expected = TimedeltaIndex([np.timedelta64(i, transform(unit))
for i in np.arange(5).tolist()])
tm.assert_index_equal(result, expected)
# scalar
result = to_timedelta(2, unit=unit)
expected = Timedelta(np.timedelta64(2, transform(unit)).astype(
'timedelta64[ns]'))
self.assertEqual(result, expected)
# validate all units
# GH 6855
for unit in ['Y', 'M', 'W', 'D', 'y', 'w', 'd']:
testit(unit, lambda x: x.upper())
for unit in ['days', 'day', 'Day', 'Days']:
testit(unit, lambda x: 'D')
for unit in ['h', 'm', 's', 'ms', 'us', 'ns', 'H', 'S', 'MS', 'US',
'NS']:
testit(unit, lambda x: x.lower())
# offsets
# m
testit('T', lambda x: 'm')
# ms
testit('L', lambda x: 'ms')
def test_numeric_conversions(self):
self.assertEqual(ct(0), np.timedelta64(0, 'ns'))
self.assertEqual(ct(10), np.timedelta64(10, 'ns'))
self.assertEqual(ct(10, unit='ns'), np.timedelta64(
10, 'ns').astype('m8[ns]'))
self.assertEqual(ct(10, unit='us'), np.timedelta64(
10, 'us').astype('m8[ns]'))
self.assertEqual(ct(10, unit='ms'), np.timedelta64(
10, 'ms').astype('m8[ns]'))
self.assertEqual(ct(10, unit='s'), np.timedelta64(
10, 's').astype('m8[ns]'))
self.assertEqual(ct(10, unit='d'), np.timedelta64(
10, 'D').astype('m8[ns]'))
def test_timedelta_conversions(self):
self.assertEqual(ct(timedelta(seconds=1)),
np.timedelta64(1, 's').astype('m8[ns]'))
self.assertEqual(ct(timedelta(microseconds=1)),
np.timedelta64(1, 'us').astype('m8[ns]'))
self.assertEqual(ct(timedelta(days=1)),
np.timedelta64(1, 'D').astype('m8[ns]'))
def test_round(self):
t1 = Timedelta('1 days 02:34:56.789123456')
t2 = Timedelta('-1 days 02:34:56.789123456')
for (freq, s1, s2) in [('N', t1, t2),
('U', Timedelta('1 days 02:34:56.789123000'),
Timedelta('-1 days 02:34:56.789123000')),
('L', Timedelta('1 days 02:34:56.789000000'),
Timedelta('-1 days 02:34:56.789000000')),
('S', Timedelta('1 days 02:34:57'),
Timedelta('-1 days 02:34:57')),
('2S', Timedelta('1 days 02:34:56'),
Timedelta('-1 days 02:34:56')),
('5S', Timedelta('1 days 02:34:55'),
Timedelta('-1 days 02:34:55')),
('T', Timedelta('1 days 02:35:00'),
Timedelta('-1 days 02:35:00')),
('12T', Timedelta('1 days 02:36:00'),
Timedelta('-1 days 02:36:00')),
('H', Timedelta('1 days 03:00:00'),
Timedelta('-1 days 03:00:00')),
('d', Timedelta('1 days'),
Timedelta('-1 days'))]:
r1 = t1.round(freq)
self.assertEqual(r1, s1)
r2 = t2.round(freq)
self.assertEqual(r2, s2)
# invalid
for freq in ['Y', 'M', 'foobar']:
self.assertRaises(ValueError, lambda: t1.round(freq))
t1 = timedelta_range('1 days', periods=3, freq='1 min 2 s 3 us')
t2 = -1 * t1
t1a = timedelta_range('1 days', periods=3, freq='1 min 2 s')
t1c = pd.TimedeltaIndex([1, 1, 1], unit='D')
# note that negative times round DOWN! so don't give whole numbers
for (freq, s1, s2) in [('N', t1, t2),
('U', t1, t2),
('L', t1a,
TimedeltaIndex(['-1 days +00:00:00',
'-2 days +23:58:58',
'-2 days +23:57:56'],
dtype='timedelta64[ns]',
freq=None)
),
('S', t1a,
TimedeltaIndex(['-1 days +00:00:00',
'-2 days +23:58:58',
'-2 days +23:57:56'],
dtype='timedelta64[ns]',
freq=None)
),
('12T', t1c,
TimedeltaIndex(['-1 days',
'-1 days',
'-1 days'],
dtype='timedelta64[ns]',
freq=None)
),
('H', t1c,
TimedeltaIndex(['-1 days',
'-1 days',
'-1 days'],
dtype='timedelta64[ns]',
freq=None)
),
('d', t1c,
pd.TimedeltaIndex([-1, -1, -1], unit='D')
)]:
r1 = t1.round(freq)
tm.assert_index_equal(r1, s1)
r2 = t2.round(freq)
tm.assert_index_equal(r2, s2)
# invalid
for freq in ['Y', 'M', 'foobar']:
self.assertRaises(ValueError, lambda: t1.round(freq))
def test_contains(self):
# Checking for any NaT-like objects
# GH 13603
td = to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
for v in [pd.NaT, None, float('nan'), np.nan]:
self.assertFalse((v in td))
td = to_timedelta([pd.NaT])
for v in [pd.NaT, None, float('nan'), np.nan]:
self.assertTrue((v in td))
def test_identity(self):
td = Timedelta(10, unit='d')
self.assertTrue(isinstance(td, Timedelta))
self.assertTrue(isinstance(td, timedelta))
def test_short_format_converters(self):
def conv(v):
return v.astype('m8[ns]')
self.assertEqual(ct('10'), np.timedelta64(10, 'ns'))
self.assertEqual(ct('10ns'), np.timedelta64(10, 'ns'))
self.assertEqual(ct('100'), np.timedelta64(100, 'ns'))
self.assertEqual(ct('100ns'), np.timedelta64(100, 'ns'))
self.assertEqual(ct('1000'), np.timedelta64(1000, 'ns'))
self.assertEqual(ct('1000ns'), np.timedelta64(1000, 'ns'))
self.assertEqual(ct('1000NS'), np.timedelta64(1000, 'ns'))
self.assertEqual(ct('10us'), np.timedelta64(10000, 'ns'))
self.assertEqual(ct('100us'), np.timedelta64(100000, 'ns'))
self.assertEqual(ct('1000us'), np.timedelta64(1000000, 'ns'))
self.assertEqual(ct('1000Us'), np.timedelta64(1000000, 'ns'))
self.assertEqual(ct('1000uS'), np.timedelta64(1000000, 'ns'))
self.assertEqual(ct('1ms'), np.timedelta64(1000000, 'ns'))
self.assertEqual(ct('10ms'), np.timedelta64(10000000, 'ns'))
self.assertEqual(ct('100ms'), np.timedelta64(100000000, 'ns'))
self.assertEqual(ct('1000ms'), np.timedelta64(1000000000, 'ns'))
self.assertEqual(ct('-1s'), -np.timedelta64(1000000000, 'ns'))
self.assertEqual(ct('1s'), np.timedelta64(1000000000, 'ns'))
self.assertEqual(ct('10s'), np.timedelta64(10000000000, 'ns'))
self.assertEqual(ct('100s'), np.timedelta64(100000000000, 'ns'))
self.assertEqual(ct('1000s'), np.timedelta64(1000000000000, 'ns'))
self.assertEqual(ct('1d'), conv(np.timedelta64(1, 'D')))
self.assertEqual(ct('-1d'), -conv(np.timedelta64(1, 'D')))
self.assertEqual(ct('1D'), conv(np.timedelta64(1, 'D')))
self.assertEqual(ct('10D'), conv(np.timedelta64(10, 'D')))
self.assertEqual(ct('100D'), conv(np.timedelta64(100, 'D')))
self.assertEqual(ct('1000D'), conv(np.timedelta64(1000, 'D')))
self.assertEqual(ct('10000D'), conv(np.timedelta64(10000, 'D')))
# space
self.assertEqual(ct(' 10000D '), conv(np.timedelta64(10000, 'D')))
self.assertEqual(ct(' - 10000D '), -conv(np.timedelta64(10000, 'D')))
# invalid
self.assertRaises(ValueError, ct, '1foo')
self.assertRaises(ValueError, ct, 'foo')
def test_full_format_converters(self):
def conv(v):
return v.astype('m8[ns]')
d1 = np.timedelta64(1, 'D')
self.assertEqual(ct('1days'), conv(d1))
self.assertEqual(ct('1days,'), conv(d1))
self.assertEqual(ct('- 1days,'), -conv(d1))
self.assertEqual(ct('00:00:01'), conv(np.timedelta64(1, 's')))
self.assertEqual(ct('06:00:01'), conv(
np.timedelta64(6 * 3600 + 1, 's')))
self.assertEqual(ct('06:00:01.0'), conv(
np.timedelta64(6 * 3600 + 1, 's')))
self.assertEqual(ct('06:00:01.01'), conv(
np.timedelta64(1000 * (6 * 3600 + 1) + 10, 'ms')))
self.assertEqual(ct('- 1days, 00:00:01'),
conv(-d1 + np.timedelta64(1, 's')))
self.assertEqual(ct('1days, 06:00:01'), conv(
d1 + np.timedelta64(6 * 3600 + 1, 's')))
self.assertEqual(ct('1days, 06:00:01.01'), conv(
d1 + np.timedelta64(1000 * (6 * 3600 + 1) + 10, 'ms')))
# invalid
self.assertRaises(ValueError, ct, '- 1days, 00')
def test_overflow(self):
# GH 9442
s = Series(pd.date_range('20130101', periods=100000, freq='H'))
s[0] += pd.Timedelta('1s 1ms')
# mean
result = (s - s.min()).mean()
expected = pd.Timedelta((pd.DatetimeIndex((s - s.min())).asi8 / len(s)
).sum())
# the computation is converted to float so might be some loss of
# precision
self.assertTrue(np.allclose(result.value / 1000, expected.value /
1000))
# sum
self.assertRaises(ValueError, lambda: (s - s.min()).sum())
s1 = s[0:10000]
self.assertRaises(ValueError, lambda: (s1 - s1.min()).sum())
s2 = s[0:1000]
result = (s2 - s2.min()).sum()
def test_pickle(self):
v = Timedelta('1 days 10:11:12.0123456')
v_p = self.round_trip_pickle(v)
self.assertEqual(v, v_p)
def test_timedelta_hash_equality(self):
# GH 11129
v = Timedelta(1, 'D')
td = timedelta(days=1)
self.assertEqual(hash(v), hash(td))
d = {td: 2}
self.assertEqual(d[v], 2)
tds = timedelta_range('1 second', periods=20)
self.assertTrue(all(hash(td) == hash(td.to_pytimedelta()) for td in
tds))
# python timedeltas drop ns resolution
ns_td = Timedelta(1, 'ns')
self.assertNotEqual(hash(ns_td), hash(ns_td.to_pytimedelta()))
def test_implementation_limits(self):
min_td = Timedelta(Timedelta.min)
max_td = Timedelta(Timedelta.max)
# GH 12727
# timedelta limits correspond to int64 boundaries
self.assertTrue(min_td.value == np.iinfo(np.int64).min + 1)
self.assertTrue(max_td.value == np.iinfo(np.int64).max)
# Beyond lower limit, a NAT before the Overflow
self.assertIsInstance(min_td - Timedelta(1, 'ns'),
NaTType)
with tm.assertRaises(OverflowError):
min_td - Timedelta(2, 'ns')
with tm.assertRaises(OverflowError):
max_td + Timedelta(1, 'ns')
# Same tests using the internal nanosecond values
td = Timedelta(min_td.value - 1, 'ns')
self.assertIsInstance(td, NaTType)
with tm.assertRaises(OverflowError):
Timedelta(min_td.value - 2, 'ns')
with tm.assertRaises(OverflowError):
Timedelta(max_td.value + 1, 'ns')
def test_timedelta_arithmetic(self):
data = pd.Series(['nat', '32 days'], dtype='timedelta64[ns]')
deltas = [timedelta(days=1), Timedelta(1, unit='D')]
for delta in deltas:
result_method = data.add(delta)
result_operator = data + delta
expected = | pd.Series(['nat', '33 days'], dtype='timedelta64[ns]') | pandas.Series |
import os
import pickle
import pandas as pd
import prettytable as pt
from tqdm import tqdm
from .baseloader import BaseLoader
from ..lut import LookUpTable
from ..vocabulary import Vocabulary
class EVENTKG240KLoader(BaseLoader):
def __init__(self, dataset_path, download=False):
super().__init__(dataset_path, download,
raw_data_path="EVENTKG240K/raw_data",
processed_data_path="EVENTKG240K/processed_data",
train_name="eventkg240k_train.txt",
valid_name="eventkg240k_valid.txt",
test_name="eventkg240k_test.txt",
data_name="EVENTKG240K")
self.time_vocab = Vocabulary()
self.entity_lut_name = "eventkg240k_entities_lut.json"
self.event_lut_name = "eventkg240k_events_lut.json"
self.relation_lut_name = "eventkg240k_relations_lut.json"
def _load_data(self, path, data_type):
return BaseLoader._load_data(self, path=path, data_type=data_type,
column_names=["head", "relation", "tail", "start", "end"])
def download_action(self):
self.downloader.EVENTKG240K()
def _build_vocabs(self, train_data, valid_data, test_data):
BaseLoader._build_vocabs(self, train_data, valid_data, test_data)
self.time_vocab.buildVocab(train_data['start'].tolist(), train_data['end'].tolist(),
valid_data['start'].tolist(), valid_data['end'].tolist(),
test_data['start'].tolist(), test_data['end'].tolist())
def load_all_vocabs(self, ):
return self.node_vocab, self.relation_vocab, self.time_vocab
def save_vocabs_to_pickle(self, file_name):
with open(file_name, "wb") as file:
pickle.dump([self.node_vocab, self.relation_vocab, self.time_vocab], file, pickle.HIGHEST_PROTOCOL)
def load_vocabs_from_pickle(self, file_name):
with open(file_name, "rb") as file:
self.node_vocab, self.relation_vocab, self.time_vocab = pickle.load(file)
def _load_lut(self, path):
total_path = os.path.join(self.raw_data_path, path)
lut = LookUpTable()
lut.read_json(total_path)
lut.transpose()
return lut
def load_node_lut(self):
preprocessed_file = os.path.join(self.processed_data_path, "node_lut.pkl")
if os.path.exists(preprocessed_file):
node_lut = LookUpTable()
node_lut.read_from_pickle(preprocessed_file)
else:
entity_lut = self._load_lut(self.entity_lut_name)
entity_lut.add_column(['entity'] * len(entity_lut.data), "node_type")
event_lut = self._load_lut(self.event_lut_name)
event_lut.add_column(['event'] * len(event_lut.data), "node_type")
node_lut = entity_lut.append(event_lut)
node_lut.add_vocab(self.node_vocab)
df = pd.DataFrame([self.node_vocab.word2idx]).T
df = df.rename({0: "name_id"}, axis=1)
node_lut.data = pd.merge(df, node_lut.data, left_index=True, right_index=True, how='outer')
node_lut.data = node_lut.data.sort_values(by="name_id")
node_lut.save_to_pickle(preprocessed_file)
return node_lut
def load_relation_lut(self):
preprocessed_file = os.path.join(self.processed_data_path, "relation_lut.pkl")
if os.path.exists(preprocessed_file):
relation_lut = LookUpTable()
relation_lut.read_from_pickle(preprocessed_file)
else:
relation_lut = self._load_lut(self.relation_lut_name)
relation_lut.add_vocab(self.relation_vocab)
df = pd.DataFrame([self.relation_vocab.word2idx]).T
df = df.rename({0: "name_id"}, axis=1)
relation_lut.data = | pd.merge(df, relation_lut.data, left_index=True, right_index=True, how='outer') | pandas.merge |
import requests
import json
import pandas as pd
from pandas import json_normalize
import matplotlib.pyplot as plt
import seaborn as sns
import networkx as nx
from bokeh.io import show, output_file
from bokeh.models import Plot, Range1d, MultiLine, Circle, HoverTool, TapTool, BoxSelectTool, WheelZoomTool
from bokeh.models.graphs import from_networkx, NodesAndLinkedEdges, EdgesAndLinkedNodes
from bokeh.palettes import Spectral4
import warnings
warnings.filterwarnings('ignore')
class Issues:
def __init__(self, repos):
self.repos = repos
token = 'my<PASSWORD>'
self.headers = {'Authorization': f'token {token}'}
self.configure_pandas()
self.df = self.init_df()
def init_df(self):
try:
dfs = []
for repo in self.repos:
url = f'https://api.github.com/repos/filetrust/{repo}/issues'
res = requests.get(url, headers=self.headers, params={'state': 'all'}).json()
data = json_normalize(res, max_level=1)
temp_df = pd.DataFrame(data)
temp_df['repo'] = repo
dfs.append(temp_df)
df = pd.concat(dfs, ignore_index=True)
return df
except requests.exceptions.RequestException as e:
raise SystemExit(e)
def get_df(self):
return self.df
def configure_pandas(self):
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
| pd.set_option('display.max_colwidth', None) | pandas.set_option |
import sys
import pandas as pd
import os
import argparse
seeds = [1, 2, 3, 4, 5]
#langs = ['ar', 'bg', 'de', 'el', 'en', 'es', 'fr', 'hi', 'ru', 'tr', 'ur', 'vi', 'zh']
def tag_results(filename):
seeded_results = []
for seed in seeds:
results = {}
c_f = filename + "_s{}/test_results.txt".format(seed)
if os.path.exists(c_f):
with open(c_f, 'r') as f:
for line in f:
line = line.strip()
if line.startswith("language"):
lan = line.split("=")[1]
elif line.startswith("f1"):
f1 = line.split(" = ")[1]
results[lan] = float(f1) * 100
#print(results)
#print(sum(results.values()) / len(results))
seeded_results.append(results)
df = pd.DataFrame(seeded_results)
print(df)
print(df.mean(axis=0))
print("ave over random seeds:", df.mean(axis=1).mean())
#print(df.mean(axis=1))
#print(pd.DataFrame(df, columns=langs).mean(axis=0).mean())
#df.to_csv(filename + ".csv")
def classify_results(filename):
seeded_results = []
for seed in seeds:
results = {}
c_f = filename + "_s{}/test_results.txt".format(seed)
if os.path.exists(c_f):
with open(c_f, 'r') as f:
for i, line in enumerate(f):
line = line.strip()
if i == 0: continue
if line.startswith("total") or line.startswith("=="):
continue
else:
toks = line.split("=")
lan = toks[0]
acc = toks[1]
results[lan] = float(acc) * 100
#print(results)
#print(sum(results.values()) / len(results))
seeded_results.append(results)
df = pd.DataFrame(seeded_results)
print(df)
print(df.mean(axis=1))
print("ave over random seeds:", df.mean(axis=1).mean())
#print(df.mean(axis=1))
#print(pd.DataFrame(df, columns=langs).mean(axis=0).mean())
df.to_csv(filename + ".csv")
def qa_results(filename, do_final=1):
seeded_results = []
seeded_exact_results = []
for seed in seeds:
results = {}
exact_results = {}
c_f = filename + "_s{}/predictions/xquad/xquad_test_results_{}.txt".format(
seed, do_final)
if os.path.exists(c_f):
with open(c_f, 'r') as f:
for line in f:
line = line.strip()
if line.startswith("language"):
lan = line.split(" = ")[1]
elif line.startswith("f1"):
f1 = line.split(" = ")[1]
results[lan] = float(f1)
elif line.startswith("exact"):
f1 = line.split(" = ")[1]
exact_results[lan] = float(f1)
#print(results)
#print(sum(results.values()) / len(results))
seeded_results.append(results)
seeded_exact_results.append(exact_results)
df = pd.DataFrame(seeded_results)
exact_df = pd.DataFrame(seeded_exact_results)
combined_df = | pd.concat([df, exact_df], keys=('f1', 'exact f1')) | pandas.concat |
#T3-T4.py
#!/usr/bin/python3
import pandas as pd
#Lataa Titanic csv tiedostot pythoniin pandas dataframeen (Titanic_data.csv, Titanic_names.csv)
df_data = pd.read_csv('./Titanic_data.csv', header=0)
df_names = pd.read_csv('./Titanic_names.csv', header=0)
#Tulosta ipyhon consoliin dataframien info ja describe. Tee histogrammi df_titanic_data ( bins=4) dataframesta.
#https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.info.html?highlight=info#pandas.DataFrame.info
#https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.describe.html?highlight=describe#pandas.DataFrame.describe
#https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.hist.html?highlight=hist#pandas.DataFrame.hist
#Yhdistä dataframet (df_titanic_data, df_titanic_names) uudeksi dataframeksi df. Käytä how='inner' on='id'.
df = | pd.merge(df_data, df_names, how='inner', on='id') | pandas.merge |
#!/usr/bin/env python
import os, numpy as np
import plot_utils
class Attack():
"""
TODO: Write Comment
"""
def __init__(self, args):
"""
TODO: Write Comment
"""
self.args = args
self.VERBOSE = self.args.verbose
self.FAMILY_DATASET = self.args.family_dataset
self.NUM_MODEL = self.args.model
self.NUM_SAMPLES = self.args.samples
self.TARGETED = self.args.targeted
self.PLOT_IMAGE = self.args.plot_image
self.set_attack_name()
self.model = self.set_model()
self.columns = ['attack', 'model',
'threshold', 'image',
'true', 'predicted',
'success', 'cdiff',
'prior_probs', 'predicted_probs',
'perturbation', 'attacked_image',
'l2_distance']
def set_model(self):
"""
TODO: Write Comment
"""
if self.FAMILY_DATASET == 0:
from networks import mnist
if self.NUM_MODEL == 0: model = mnist.mlp.MLP(self.args)
elif self.NUM_MODEL == 1: model = mnist.conv.Conv(self.args)
elif self.FAMILY_DATASET == 1:
from networks import cifar
if self.NUM_MODEL == 0: model = cifar.custom_model.Custom(self.args)
elif self.NUM_MODEL == 1: model = cifar.lenet.LeNet(self.args)
elif self.NUM_MODEL == 2: model = cifar.all_conv.AllConv(self.args)
elif self.NUM_MODEL == 3: model = cifar.network_in_network.NetworkInNetwork(self.args)
elif self.NUM_MODEL == 4: model = cifar.resnet.ResNet(self.args)
elif self.NUM_MODEL == 5: model = cifar.densenet.DenseNet(self.args)
elif self.NUM_MODEL == 6: model = cifar.wide_resnet.WideResNet(self.args)
elif self.NUM_MODEL == 7: model = cifar.vgg.VGG16(self.args)
elif self.NUM_MODEL == 8: model = cifar.capsnet.CapsNet(self.args)
elif self.NUM_MODEL == 9: model = cifar.vgg.VGG19(self.args)
elif self.FAMILY_DATASET == 2:
from networks import imagenet
if self.NUM_MODEL == 0: model = imagenet.keras_applications.InceptionV3(self.args)
elif self.NUM_MODEL == 1: model = imagenet.keras_applications.InceptionResNetV2(self.args)
elif self.NUM_MODEL == 2: model = imagenet.keras_applications.Xception(self.args)
elif self.NUM_MODEL == 3: model = imagenet.keras_applications.ResNet50(self.args)
elif self.NUM_MODEL == 4: model = imagenet.keras_applications.ResNet101(self.args)
elif self.NUM_MODEL == 5: model = imagenet.keras_applications.Resnet152(self.args)
elif self.NUM_MODEL == 6: model = imagenet.keras_applications.ResnetV250(self.args)
elif self.NUM_MODEL == 7: model = imagenet.keras_applications.ResNetV2101(self.args)
elif self.NUM_MODEL == 8: model = imagenet.keras_applications.ResnetV2152(self.args)
elif self.NUM_MODEL == 9: model = imagenet.keras_applications.DenseNet121(self.args)
elif self.NUM_MODEL == 10: model = imagenet.keras_applications.DenseNet169(self.args)
elif self.NUM_MODEL == 11: model = imagenet.keras_applications.DenseNet201(self.args)
elif self.NUM_MODEL == 12: model = imagenet.keras_applications.MobileNet(self.args)
elif self.NUM_MODEL == 13: model = imagenet.keras_applications.MobileNetV2(self.args)
elif self.NUM_MODEL == 14: model = imagenet.keras_applications.NASNetMobile(self.args)
elif self.NUM_MODEL == 15: model = imagenet.keras_applications.NASNetLarge(self.args)
elif self.NUM_MODEL == 16: model = imagenet.keras_applications.VGG16(self.args)
elif self.NUM_MODEL == 17: model = imagenet.keras_applications.VGG19(self.args)
model.load()
return model
def start_attack(self, target_class, limit=0):
"""
TODO: Write Comment
"""
attack_result = self.attack(target_class, limit)
original_image = self.x
attacked_image = self.perturb_image(attack_result)[0]
prior_probs = self.model.predict(original_image)[0]
predicted_probs = self.model.predict(attacked_image)[0]
actual_class = self.y # Or, np.argmax(prior_probs)
predicted_class = np.argmax(predicted_probs)
success = predicted_class != actual_class
cdiff = prior_probs[actual_class] - predicted_probs[actual_class]
l2_distance = np.linalg.norm(original_image.astype(np.float64)-attacked_image.astype(np.float64))
if self.PLOT_IMAGE:
if not os.path.exists(f"./logs/images/{self.dir_path}"): os.makedirs(f"./logs/images/{self.dir_path}")
plot_utils.plot_image(f"./logs/images/{self.dir_path}", self.img, attacked_image, self.x, self.model.class_names[actual_class], self.model.class_names[predicted_class], limit, l2_distance)
return [[self.attack_name, self.model.name, limit, self.img, actual_class, predicted_class, success, cdiff, prior_probs, predicted_probs, attack_result, attacked_image, l2_distance]], success
def start(self):
"""
TODO: Write Comment
"""
# return None
import os, pickle, pandas as pd
self.dir_path = f"{self.attack_name}/{self.model.dataset_name}/{self.model.name}"
if not os.path.exists(f"./logs/results/{self.dir_path}"): os.makedirs(f"./logs/results/{self.dir_path}")
image_results = []
imgs, xs, ys = self.model.get(self.NUM_SAMPLES)
targets = [None] if not self.TARGETED else range(self.dataset_label_size)
for i in range(self.NUM_SAMPLES):
self.img, self.x, self.y = imgs[i], xs[i], ys[i]
if self.VERBOSE == True: print(f"[#]Attacking {self.model.name} with {self.attack_name} -- image {self.img}- {i+1}/{self.NUM_SAMPLES}")
for target in targets:
if (self.TARGETED) and (target == self.y): continue
target_class = target if self.TARGETED else self.y
image_results += self.attack_image(target_class)
with open(f"./logs/results/{self.dir_path}/results.pkl", 'wb') as file: pickle.dump(image_results, file)
return | pd.DataFrame(image_results, columns=self.columns) | pandas.DataFrame |
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import pandas as pd
import numpy as np
from mars.tests.core import TestBase, parameterized, ExecutorForTest
from mars.dataframe.datasource.series import from_pandas as from_pandas_series
from mars.dataframe.datasource.dataframe import from_pandas as from_pandas_df
reduction_functions = dict(
sum=dict(func_name='sum', has_min_count=True),
prod=dict(func_name='prod', has_min_count=True),
min=dict(func_name='min', has_min_count=False),
max=dict(func_name='max', has_min_count=False),
mean=dict(func_name='mean', has_min_count=False),
var=dict(func_name='var', has_min_count=False),
std=dict(func_name='std', has_min_count=False),
)
@parameterized(**reduction_functions)
class TestReduction(TestBase):
def setUp(self):
self.executor = ExecutorForTest()
def compute(self, data, **kwargs):
return getattr(data, self.func_name)(**kwargs)
def testSeriesReduction(self):
data = pd.Series(np.random.randint(0, 8, (10,)), index=[str(i) for i in range(10)], name='a')
reduction_df1 = self.compute(from_pandas_series(data))
self.assertEqual(
self.compute(data), self.executor.execute_dataframe(reduction_df1, concat=True)[0])
reduction_df2 = self.compute(from_pandas_series(data, chunk_size=6))
self.assertAlmostEqual(
self.compute(data), self.executor.execute_dataframe(reduction_df2, concat=True)[0])
reduction_df3 = self.compute(from_pandas_series(data, chunk_size=3))
self.assertAlmostEqual(
self.compute(data), self.executor.execute_dataframe(reduction_df3, concat=True)[0])
reduction_df4 = self.compute(from_pandas_series(data, chunk_size=4), axis='index')
self.assertAlmostEqual(
self.compute(data, axis='index'), self.executor.execute_dataframe(reduction_df4, concat=True)[0])
data = pd.Series(np.random.rand(20), name='a')
data[0] = 0.1 # make sure not all elements are NAN
data[data > 0.5] = np.nan
reduction_df1 = self.compute(from_pandas_series(data, chunk_size=3))
self.assertAlmostEqual(
self.compute(data), self.executor.execute_dataframe(reduction_df1, concat=True)[0])
reduction_df2 = self.compute(from_pandas_series(data, chunk_size=3), skipna=False)
self.assertTrue(
np.isnan(self.executor.execute_dataframe(reduction_df2, concat=True)[0]))
if self.has_min_count:
reduction_df3 = self.compute(from_pandas_series(data, chunk_size=3), skipna=False, min_count=2)
self.assertTrue(
np.isnan(self.executor.execute_dataframe(reduction_df3, concat=True)[0]))
reduction_df4 = self.compute(from_pandas_series(data, chunk_size=3), min_count=1)
self.assertAlmostEqual(
self.compute(data, min_count=1),
self.executor.execute_dataframe(reduction_df4, concat=True)[0])
reduction_df5 = self.compute(from_pandas_series(data, chunk_size=3), min_count=21)
self.assertTrue(
np.isnan(self.executor.execute_dataframe(reduction_df5, concat=True)[0]))
def testDataFrameReduction(self):
data = pd.DataFrame(np.random.rand(20, 10))
reduction_df1 = self.compute(from_pandas_df(data))
pd.testing.assert_series_equal(
self.compute(data), self.executor.execute_dataframe(reduction_df1, concat=True)[0])
reduction_df2 = self.compute(from_pandas_df(data, chunk_size=3))
pd.testing.assert_series_equal(
self.compute(data), self.executor.execute_dataframe(reduction_df2, concat=True)[0])
reduction_df3 = self.compute(from_pandas_df(data, chunk_size=6), axis='index', numeric_only=True)
pd.testing.assert_series_equal(
self.compute(data, axis='index', numeric_only=True),
self.executor.execute_dataframe(reduction_df3, concat=True)[0])
reduction_df4 = self.compute(from_pandas_df(data, chunk_size=3), axis=1)
pd.testing.assert_series_equal(
self.compute(data, axis=1),
self.executor.execute_dataframe(reduction_df4, concat=True)[0])
# test null
np_data = np.random.rand(20, 10)
np_data[np_data > 0.6] = np.nan
data = pd.DataFrame(np_data)
reduction_df1 = self.compute(from_pandas_df(data, chunk_size=3))
pd.testing.assert_series_equal(
self.compute(data), self.executor.execute_dataframe(reduction_df1, concat=True)[0])
reduction_df2 = self.compute(from_pandas_df(data, chunk_size=3), skipna=False)
pd.testing.assert_series_equal(
self.compute(data, skipna=False), self.executor.execute_dataframe(reduction_df2, concat=True)[0])
reduction_df2 = self.compute(from_pandas_df(data, chunk_size=3), skipna=False)
pd.testing.assert_series_equal(
self.compute(data, skipna=False), self.executor.execute_dataframe(reduction_df2, concat=True)[0])
if self.has_min_count:
reduction_df3 = self.compute(from_pandas_df(data, chunk_size=3), min_count=15)
pd.testing.assert_series_equal(
self.compute(data, min_count=15),
self.executor.execute_dataframe(reduction_df3, concat=True)[0])
reduction_df4 = self.compute(from_pandas_df(data, chunk_size=3), min_count=3)
pd.testing.assert_series_equal(
self.compute(data, min_count=3),
self.executor.execute_dataframe(reduction_df4, concat=True)[0])
reduction_df5 = self.compute(from_pandas_df(data, chunk_size=3), axis=1, min_count=3)
pd.testing.assert_series_equal(
self.compute(data, axis=1, min_count=3),
self.executor.execute_dataframe(reduction_df5, concat=True)[0])
reduction_df5 = self.compute(from_pandas_df(data, chunk_size=3), axis=1, min_count=8)
pd.testing.assert_series_equal(
self.compute(data, axis=1, min_count=8),
self.executor.execute_dataframe(reduction_df5, concat=True)[0])
# test numeric_only
data = pd.DataFrame(np.random.rand(10, 10), index=np.random.randint(-100, 100, size=(10,)),
columns=[np.random.bytes(10) for _ in range(10)])
reduction_df1 = self.compute(from_pandas_df(data, chunk_size=2))
pd.testing.assert_series_equal(
self.compute(data), self.executor.execute_dataframe(reduction_df1, concat=True)[0])
reduction_df2 = self.compute(from_pandas_df(data, chunk_size=6), axis='index', numeric_only=True)
pd.testing.assert_series_equal(
self.compute(data, axis='index', numeric_only=True),
self.executor.execute_dataframe(reduction_df2, concat=True)[0])
reduction_df3 = self.compute(from_pandas_df(data, chunk_size=3), axis='columns')
pd.testing.assert_series_equal(
self.compute(data, axis='columns'),
self.executor.execute_dataframe(reduction_df3, concat=True)[0])
data_dict = dict((str(i), np.random.rand(10)) for i in range(10))
data_dict['string'] = [str(i) for i in range(10)]
data_dict['bool'] = np.random.choice([True, False], (10,))
data = pd.DataFrame(data_dict)
reduction_df = self.compute(from_pandas_df(data, chunk_size=3), axis='index', numeric_only=True)
pd.testing.assert_series_equal(
self.compute(data, axis='index', numeric_only=True),
self.executor.execute_dataframe(reduction_df, concat=True)[0])
class TestCount(TestBase):
def setUp(self):
self.executor = ExecutorForTest()
def testSeriesCount(self):
array = np.random.rand(10)
array[[2, 7, 9]] = np.nan
data = | pd.Series(array) | pandas.Series |
from os import replace
import re
from numpy import product
from numpy.linalg import norm
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import roc_curve, roc_auc_score, accuracy_score
from shared.data_utility import *
from matplotlib.pyplot import *
from itertools import product
data = pd.read_csv('data/study/movie_reviews.tsv', sep='\t')
data = data.drop('id', axis=1)
train_data, test_data = build_train_test_data(data, factor=0.8)
target_train_data = train_data.sentiment
target_test_data = test_data.sentiment
train_data = train_data.drop('sentiment', axis=1)
test_data = test_data.drop('sentiment', axis=1)
def build_nb_model(max_features=None, min_df=1, nb_alpha=1.0, vectorizer_type='Counter', return_pred=True):
vectorizer = CountVectorizer(max_features=max_features, min_df=min_df)
if vectorizer_type == 'Tfidf': vectorizer = TfidfVectorizer(max_features=max_features, min_df=min_df)
features = vectorizer.fit_transform(train_data.review)
test_features = vectorizer.transform(test_data.review)
model = MultinomialNB(alpha=nb_alpha)
model.fit(features, target_train_data)
pred = model.predict_proba(test_features)
return model, vectorizer, {
'max_features': max_features,
'min_df': min_df,
'nb_alpha': nb_alpha,
'auc': roc_auc_score(target_test_data, pred[:,1]),
'pred': pred if return_pred else None,
}
def build_rf_model(max_features=None, min_df=1):
vectorizer = TfidfVectorizer(stop_words='english', strip_accents='unicode',
min_df=min_df, max_features=max_features, norm='l2')
features = vectorizer.fit_transform(train_data.review)
test_features = vectorizer.transform(test_data.review)
model = RandomForestClassifier(n_estimators=100, n_jobs=-1, verbose=True)
model.fit(features, target_train_data)
pred = model.predict_proba(test_features)
params = {
'max_features': max_features,
'min_df': min_df
}
return model, vectorizer, pred, params
param_values = {
'max_features': [None],
'min_df': [1,2,3],
'nb_alpha': [0.01, 0.1, 1.0],
'vectorizer_type': ['Counter', 'Tfidf']
}
#The best setup found by bruteforce
#29 NaN 1 1.0 0.933601
def bruteforce_hyperparams():
results = []
for p in product(*param_values.values()):
params = zip(param_values.keys(), p)
params = dict(params)
res = build_nb_model(**params)
results.append(res)
print(res)
return | pd.DataFrame(results) | pandas.DataFrame |
import unittest
import import_ipynb
import pandas as pd
import pandas.testing as pd_testing
import numpy.testing as np_testing
from sklearn.cluster import KMeans
class Test(unittest.TestCase):
def setUp(self):
import Exercise12_04
self.exercises = Exercise12_04
self.file_url = 'https://raw.githubusercontent.com/PacktWorkshops/The-Data-Science-Workshop/master/Chapter12/Dataset/ames_iowa_housing.csv'
self.df = pd.read_csv(self.file_url)
self.df_agg = self.df.groupby(['Neighborhood', 'YrSold']).agg({'SalePrice': 'max'}).reset_index()
self.df_agg.columns = ['Neighborhood', 'YrSold', 'SalePriceMax']
self.df_new = pd.merge(self.df, self.df_agg, how='left', on=['Neighborhood', 'YrSold'])
self.df_new['SalePriceRatio'] = self.df_new['SalePrice'] / self.df_new['SalePriceMax']
self.df_agg2 = self.df.groupby(['Neighborhood', 'YrSold']).agg({'LotArea': 'max'}).reset_index()
self.df_agg2.columns = ['Neighborhood', 'YrSold', 'LotAreaMax']
self.df_final = pd.merge(self.df_new, self.df_agg2, how='left', on=['Neighborhood', 'YrSold'])
self.df_final['LotAreaRatio'] = self.df_final['LotArea'] / self.df_final['LotAreaMax']
def test_file_url(self):
self.assertEqual(self.exercises.file_url, self.file_url)
def test_df(self):
pd_testing.assert_frame_equal(self.exercises.df, self.df)
def test_df_agg(self):
| pd_testing.assert_frame_equal(self.exercises.df_agg, self.df_agg) | pandas.testing.assert_frame_equal |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 6 15:08:25 2021
@author: ccamargo
"""
import pandas as pd
import xarray as xr
import numpy as np
#%% find closest points of our TG, make sure that they are water
def harvedist(lats,lons,qlats,qlons):
# https://www.movable-type.co.uk/scripts/latlong.html
#calculate angular distances between query coordinates (qlats,qlons) and data array grid (lats,lons)
lat0,lat = np.meshgrid(np.radians(lats),np.radians(qlats))
lon0,lon = np.meshgrid(np.radians(lons),np.radians(qlons))
# lat0,lat = np.meshgrid(lats,qlats)
# lon0,lon = np.meshgrid(lons,qlons)
lat=np.radians(lat);lat0=np.radians(lat0)
lon=np.radians(lon);lon0=np.radians(lon0)
delta_lat = np.array(lat-lat0)
delta_lon = np.array(lon-lon0)
R =6373 # km
a = np.sin(delta_lat / 2)**2 + np.cos(lat) * np.cos(lat0) * np.sin(delta_lon / 2)**2
c = 2 * np.arctan2(np.sqrt(a),np.sqrt(1-a))
# d = R * c
# print(d)
return(np.degrees(c))
# def find_nearest(lats,lons,qlats,qlons):
# #finds nearest coordinates to query latitude
# #and longitude pairs based on minimum angular distance [deg]
# #calculate angular distances between query coordinates and data array grid
# dists,dist_km =harvedist(lats,lons,qlats,qlons)
# min_dists = np.nanmin(dists,axis=1) #find minimum angular distances in grid to query points
# min_idx = np.nanargmin(dists,axis=1) #indices
# # min_dist_km = dist_km[min_idx]
# out_lat = lats.flatten()[min_idx]
# out_lon = lons.flatten()[min_idx]
# return min_dists, min_idx, out_lat, out_lon
#% %
def angdist(lats,lons,qlats,qlons):
lat0,lat = np.meshgrid(np.radians(lats),np.radians(qlats))
lon0,lon = np.meshgrid(np.radians(lons),np.radians(qlons))
temp = np.arctan2(np.sqrt((np.cos(lat)*np.sin(lon-lon0))**2 + (np.cos(lat0)*np.sin(lat) - np.sin(lat0)*np.cos(lat) * np.cos(lon-lon0))**2),
(np.sin(lat0)*np.sin(lat) + np.cos(lat0)*np.cos(lat)*np.cos(lon-lon0)))
return(np.degrees(temp))
def find_nearest(da,qlats,qlons):
#finds nearest unmasked ocean grid cell in xarray dataarray to query latitude
#and longitude pairs based on minimum angular distance [deg]
#fetch coordinate names
lonname = np.array(da.coords)[['lon' in x for x in da.coords]][0]
latname = np.array(da.coords)[['lat' in x for x in da.coords]][0]
#get lats & lons
lats = np.array(da[latname])
lons = np.array(da[lonname])
if lats.shape!=lons.shape:
lats,lons = np.meshgrid(lats,lons)
#calculate angular distances between query coordinates and data array grid
dists=angdist(lats,lons,qlats,qlons)
#mask land out
if 'time' in da.dims:
dists[0,~np.isfinite(da.isel(time=0).values.flatten())] = np.nan
else:
dists[0,~np.isfinite(da.values.flatten())] = np.nan
min_dists = np.nanmin(dists,axis=1) #find minimum angular distances in grid to query points
min_idx = np.nanargmin(dists,axis=1) #indices
out_lat = lats.flatten()[min_idx]
out_lon = lons.flatten()[min_idx]
#potentially build in a filter here if unreasonably large distances
return min_dists, min_idx, out_lat, out_lon
#% % sel 10 coastal location
cat = | pd.read_pickle('/Volumes/LaCie_NIOZ/data/barystatic/coastal_locations.p') | pandas.read_pickle |
import random
import pandas as pd
import math
import copy
from util import order,weight,maze,check_maze
def insert(block, map1, flag, count, area, df):
minsize = 50
maxsize = 255
ran_num = random.randint(minsize, maxsize) # 블록 색깔
no_insert = 0
# print(block)
# print("반입 함수")
pos_loc = [] # 가능 위치
h_yard = block.width # 가로 길이
v_yard = block.height # 세로 길이
num = block.block_number
s = maze.Maze(map1.map, h_yard, v_yard)
start = s.find_start(flag)
for i in start:
pos_loc.extend(s.bfs(i))
if len(pos_loc) == 0: # 들어갈 공간 X ,pos_loc 과 start
count += 1
area += h_yard * v_yard
df.loc[df.block_number == num, 'position_x'] = None
df.loc[df.block_number == num, 'position_y'] = None
return count, area
# print(pos_loc)
# print(len(pos_loc))
insert_loc = random.choice(pos_loc)
df.loc[df.block_number == num, 'position_x'] = insert_loc[1]
df.loc[df.block_number == num, 'position_y'] = insert_loc[0]
map1.map[insert_loc[0]:insert_loc[0]+v_yard, insert_loc[1]:insert_loc[1]+h_yard] = 1
map1.map_color[insert_loc[0]:insert_loc[0]+v_yard, insert_loc[1]:insert_loc[1]+h_yard] = ran_num
block_data = block.to_dict() # 블록 데이터 가공
block_data['position_x'] = insert_loc[1]
block_data['position_y'] = insert_loc[0]
map1.block_data(block_data) # 맵 객체에 블록 데이터 추가
return count, area
def out(block, map1, count, flag, df, curr):
test_map = copy.deepcopy(map1)
if math.isnan(block.position_x):
# print("문제 발생!!!")
# print("반입 금지로 인한 반출X")
# print(df)
# print(block)
# input()
return count, df
curr_block_index = None
num_map = test_map.block_num_map()
for index, j in enumerate(test_map.data): # 블록 데이터 pop
if j['block_number'] == block.block_number:
curr_block_index = index
# print(block)
# print(num_map)
# print(df)
map1.data.pop(curr_block_index)
test_map.data.pop(curr_block_index) # 밑에 계산 때문에 잠시 제외
# width, height, x, y = trans_data(block)
no_out = out_check(block, test_map, flag, 1)
# 비용증가
if no_out:
# print("드가자 ")
# print(num_map)
# before = test_map.cv2_map()
# before = cv2.resize(before, (600, 600), interpolation=cv2.INTER_NEAREST)
# cv2.namedWindow('before', cv2.WINDOW_NORMAL)
# cv2.imshow('before', before)
# cv2.waitKey(0)
obstruct_block_index = None
obstruct_block = find_out(test_map.data, block, flag, test_map, num_map)
if block.block_number in obstruct_block:
obstruct_block.remove(block.block_number)
# TODO 간섭 블록 어떻게 할지만 정해주면 됨
for x in obstruct_block:
# # 데이터 프레임 추가
# print('현재 인덱스{}'.format(curr))
# print('간섭블록{}'.format(x))
# print(df)
# temp = df.loc[df.block_number == x]
# print(temp)
# temp = temp.iloc[-1]
# temp['date'] = df.loc[curr]['date']
# temp['type'] = 1
# temp1 = df[df.index <= curr]
# temp2 = df[df.index > curr]
# df = temp1.append(temp, ignore_index=True).append(temp2, ignore_index=True)
# df.loc[curr + 1] = temp
# print(df)
# print("자 드가자", x)
# print('현재 인덱스{}'.format(curr))
# print('간섭블록{}'.format(x))
# order.order(df)
# 데이터 삭제
for index, j in enumerate(map1.data): # 블록 데이터 pop
if j['block_number'] == x:
obstruct_block_index = index
# print('현재 인덱스{}'.format(curr))
# print('간섭블록{}'.format(x))
temp = pd.DataFrame(map1.data[obstruct_block_index], index=[0])
erase = | pd.Series(map1.data[obstruct_block_index]) | pandas.Series |
import sys
import h5py
import numpy as np
import pandas as pd
import numba as nb
def readDataset(group, datasetname):
ds = group[datasetname][:]
if ds.shape[1] == 1:
ds = ds.flatten()
return ds
def create_reference(f):
# Gather all index information
cols = ['run','subrun','cycle','batch']
dic = {col:readDataset(f['rec.hdr'],col) for col in cols}
assert dic['run'].shape[0]
# Group together to just the index information
df = | pd.DataFrame(dic) | pandas.DataFrame |
import os
import numpy
import pandas as pd
import scipy.stats as st
os.chdir('/Users/jarvis/Dropbox/Apps/HypertensionOutputs')
def summary_cost(int_details,ctrl_m,ctrl_f,trt_m,trt_f, text):
int_dwc = 1 / (1 + discount_rate) ** numpy.array(range(time_horizon))
int_c = numpy.array([[prog_cost] * time_horizon for i in range(1)])
int_cost = numpy.sum(numpy.dot(int_c, int_dwc))
female_pop = 188340000
male_pop = 196604000
pop = female_pop + male_pop
f_prop = female_pop / pop
m_prop = male_pop / pop
samples = ctrl_m.shape[0]
cs = 0
nq = 0
ic = [0.00 for i in range(samples)]
q_gained = [0.00 for i in range(samples)]
q_inc_percent = [0.00 for i in range(samples)]
htn_cost = [0.00 for i in range(samples)]
cvd_cost = [0.00 for i in range(samples)]
net_cost = [0.00 for i in range(samples)]
exp_inc_per = [0.00 for i in range(samples)]
for i in range(samples):
q_gained[i] = (((ctrl_m.loc[i, "Average DALYs"] - trt_m.loc[i, "Average DALYs"])* m_prop) + ((ctrl_f.loc[i, "Average DALYs"] - trt_f.loc[i, "Average DALYs"])* f_prop))
q_inc_percent[i] = q_gained[i] * 100/((ctrl_m.loc[i, "Average DALYs"] * m_prop) + (ctrl_f.loc[i, "Average DALYs"] *f_prop))
htn_cost[i] = int_cost + ((trt_m.loc[i, "Average HTN Cost"] - ctrl_m.loc[i, "Average HTN Cost"]) * m_prop) + ((trt_f.loc[i, "Average HTN Cost"] - ctrl_f.loc[i, "Average HTN Cost"]) * f_prop)
cvd_cost[i] = ((trt_m.loc[i, "Average CVD Cost"] - ctrl_m.loc[i, "Average CVD Cost"] + trt_m.loc[i, "Average Chronic Cost"] - ctrl_m.loc[i, "Average Chronic Cost"]) * m_prop) + ((trt_f.loc[i, "Average CVD Cost"] - ctrl_f.loc[i, "Average CVD Cost"] + trt_f.loc[i, "Average Chronic Cost"] - ctrl_f.loc[i, "Average Chronic Cost"]) * f_prop)
exp_inc_per[i] = (((trt_m.loc[i, "Average Cost"] - ctrl_m.loc[i, "Average Cost"] + int_cost) * m_prop) + ((trt_f.loc[i, "Average Cost"] - ctrl_f.loc[i, "Average Cost"] + int_cost) * f_prop)) * 100 / ((ctrl_m.loc[i, "Average Cost"] * m_prop ) + (ctrl_f.loc[i, "Average Cost"] * f_prop))
net_cost[i] = htn_cost[i] + cvd_cost[i]
ic[i] = net_cost[i] / q_gained[i]
if net_cost[i] < 0:
cs = cs + 1
if q_gained[i] < 0:
nq = nq + 1
budget_impact = numpy.mean(net_cost) * pop / time_horizon
htn_percap = numpy.mean(htn_cost) / time_horizon
cvd_percap = numpy.mean(cvd_cost) / time_horizon
htn_annual = numpy.mean(htn_cost) * pop / time_horizon
cvd_annual = numpy.mean(cvd_cost) * pop / time_horizon
cost_inc = numpy.mean(exp_inc_per)
ICER = numpy.mean(ic)
QALY = numpy.mean(q_inc_percent)
HTN = numpy.mean(htn_cost)
CVD = numpy.mean(cvd_cost)
icer_95 = st.t.interval(0.95, samples - 1, loc=numpy.mean(ic), scale=st.sem(ic))
qaly_95 = st.t.interval(0.95, samples - 1, loc=numpy.mean(q_inc_percent), scale=st.sem(q_inc_percent))
htn = st.t.interval(0.95, samples - 1, loc=numpy.mean(htn_cost), scale=st.sem(htn_cost))
cvd = st.t.interval(0.95, samples - 1, loc=numpy.mean(cvd_cost), scale=st.sem(cvd_cost))
cost_inc_95 = st.t.interval(0.95, samples - 1, loc=numpy.mean(exp_inc_per), scale=st.sem(exp_inc_per))
if budget_impact < 0:
m_icer = 'Cost Saving'
s_icer = 'CS'
else:
m_icer = numpy.mean(net_cost) / numpy.mean(q_gained)
s_icer = str(numpy.round(m_icer,1))
m_daly = str(numpy.round(QALY,3)) + "\n(" + str(numpy.round(qaly_95[0],3)) + " to " + str(numpy.round(qaly_95[1],3)) + ")"
m_htn = str(numpy.round(HTN,2)) + "\n(" + str(numpy.round(htn[0],2)) + " to " + str(numpy.round(htn[1],2)) + ")"
m_cvd = str(numpy.round(CVD,2)) + "\n(" + str(numpy.round(cvd[0],2)) + " to " + str(numpy.round(cvd[1],2)) + ")"
m_costinc = str(numpy.round(cost_inc, 2)) + "\n(" + str(numpy.round(cost_inc_95[0], 2)) + " to " + str(numpy.round(cost_inc_95[1], 2)) + ")"
m_budget = str(numpy.round(budget_impact,0)/1000)
err_cost = 1.96 * st.sem(exp_inc_per)
err_daly = 1.96 * st.sem(q_inc_percent)
str_icer = text + " (" + s_icer + ")"
detailed = [int_details[2], int_details[0], int_details[1], int_details[3], int_details[4], ICER, icer_95[0],icer_95[1], QALY, qaly_95[0], qaly_95[1], htn[0], htn[1], cvd[0], cvd[1], budget_impact, htn_annual, cvd_annual, htn_percap, cvd_percap, cs, nq]
manuscript = [int_details[2], int_details[0], int_details[1], int_details[3], int_details[4], m_icer, m_daly, m_costinc, m_htn, m_cvd, m_budget, cs]
plot = [text, str_icer, cost_inc, QALY, err_cost, err_daly]
return detailed, manuscript, plot
summary_output = []
appendix_output = []
plot_output = []
'''Analysis 0: Baseline'''
time_horizon = 20
prog_cost = 0.13
discount_rate = 0.03
os.chdir('/Users/jarvis/Dropbox/Apps/HypertensionOutputs/15Aug_AWS3')
fname = [0.4, 0.3, 0, 0.8, 0.6]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
control_m = pd.read_csv(file_name_m)
control_f = pd.read_csv(file_name_f)
fname = [0.7, 0.7, 1, 0.8, 0.8]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
treatment_f = pd.read_csv(file_name_f)
treatment_m = pd.read_csv(file_name_m)
res = summary_cost(fname, control_m, control_f, treatment_m, treatment_f,'Base Case')
summary_output.append(res[0])
appendix_output.append(res[1])
plot_output.append(res[2])
'''Analysis 1: Doubled Medication Cost'''
time_horizon = 20
prog_cost = 0.13
discount_rate = 0.03
os.chdir('/Users/jarvis/Dropbox/Apps/HypertensionOutputs/PSAFinal')
fname = [0.4, 0.3, 0, 0.8, 0.6, 2, 0, 20]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
control_m = pd.read_csv(file_name_m)
control_f = pd.read_csv(file_name_f)
fname = [0.7, 0.7, 1, 0.8, 0.8, 2, 0, 20]
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
treatment_f = pd.read_csv(file_name_f)
treatment_m = pd.read_csv(file_name_m)
res = summary_cost(fname, control_m, control_f, treatment_m, treatment_f,'2X Medication Cost')
summary_output.append(res[0])
appendix_output.append(res[1])
plot_output.append(res[2])
'''Analysis 2: Increased Programmatic Cost'''
time_horizon = 20
prog_cost = 0.13*4
discount_rate = 0.03
os.chdir('/Users/jarvis/Dropbox/Apps/HypertensionOutputs/15Aug_AWS3')
fname = [0.4, 0.3, 0, 0.8, 0.6]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
control_m = pd.read_csv(file_name_m)
control_f = pd.read_csv(file_name_f)
fname = [0.7, 0.7, 1, 0.8, 0.8]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
treatment_f = pd.read_csv(file_name_f)
treatment_m = pd.read_csv(file_name_m)
res = summary_cost(fname, control_m, control_f, treatment_m, treatment_f,'4X Programmatic Cost')
summary_output.append(res[0])
appendix_output.append(res[1])
plot_output.append(res[2])
'''Analysis 3: 20% reduction in baseline CVD risk'''
time_horizon = 20
prog_cost = 0.13
discount_rate = 0.03
os.chdir('/Users/jarvis/Dropbox/Apps/HypertensionOutputs/PSAFinal')
fname = [0.4, 0.3, 0, 0.8, 0.6, 1, 0.2, 20]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
control_m = pd.read_csv(file_name_m)
control_f = pd.read_csv(file_name_f)
fname = [0.7, 0.7, 1, 0.8, 0.8, 1, 0.2, 20]
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
treatment_f = | pd.read_csv(file_name_f) | pandas.read_csv |
#!/usr/bin/env python
r"""Test :py:class:`~solarwindpy.core.vector.Vector` and :py:class:`~solarwindpy.core.tensor.Tensor`.
"""
import pdb
# import re as re
import numpy as np
import pandas as pd
import unittest
import sys
import pandas.testing as pdt
from unittest import TestCase
from abc import ABC, abstractproperty
from scipy import constants
# import test_base as base
from solarwindpy.tests import test_base as base
from solarwindpy import vector
from solarwindpy import tensor
pd.set_option("mode.chained_assignment", "raise")
class QuantityTestBase(ABC):
def test_data(self):
data = self.data
if isinstance(data, pd.Series):
pdt.assert_series_equal(data, self.object_testing.data)
else:
pdt.assert_frame_equal(data, self.object_testing.data)
def test_eq(self):
print_inline_debug = False
object_testing = self.object_testing
# ID should be equal.
self.assertEqual(object_testing, object_testing)
# Data and type should be equal.
new_object = object_testing.__class__(object_testing.data)
if print_inline_debug:
print(
"<Test>",
"<object_testing>",
type(object_testing),
object_testing,
object_testing.data,
"<new_object>",
type(new_object),
new_object,
new_object.data,
"",
sep="\n",
)
self.assertEqual(object_testing, new_object)
def test_neq(self):
object_testing = self.object_testing
# Data isn't equal
self.assertNotEqual(
object_testing, object_testing.__class__(object_testing.data * 4)
)
# Type isn't equal
for other in (
[],
tuple(),
np.array([]),
pd.Series(dtype=np.float64),
pd.DataFrame(dtype=np.float64),
):
self.assertNotEqual(object_testing, other)
def test_empty_data_catch(self):
with self.assertRaisesRegex(
ValueError, "You can't set an object with empty data."
):
self.object_testing.__class__(pd.DataFrame())
#####
# Vectors
#####
class VectorTestBase(QuantityTestBase):
def test_components(self):
# print("test_components")
# print(self.data.iloc[:, :7], flush=True)
v = self.data
# print(v, file=sys.stdout)
pdt.assert_series_equal(v.x, self.object_testing.x)
pdt.assert_series_equal(v.y, self.object_testing.y)
pdt.assert_series_equal(v.z, self.object_testing.z)
def test_mag(self):
# print("test_mag")
# print(self.data.iloc[:, :7], flush=True)
x = self.data.x
y = self.data.y
z = self.data.z
# print(v, file=sys.stdout)
mag = np.sqrt(x.pow(2) + y.pow(2) + z.pow(2))
# mag = self.data.loc[:, ["x", "y", "z"]].pow(2).sum(axis=1).pipe(np.sqrt)
mag.name = "mag"
# print("", self.data, mag, self.object_testing.mag, sep="\n")
pdt.assert_series_equal(mag, self.object_testing.mag)
pdt.assert_series_equal(mag, self.object_testing.magnitude)
pdt.assert_series_equal(self.object_testing.mag, self.object_testing.magnitude)
def test_rho(self):
# print("test_rho")
x = self.data.x
y = self.data.y
rho = np.sqrt(x.pow(2) + y.pow(2))
rho.name = "rho"
pdt.assert_series_equal(rho, self.object_testing.rho)
def test_colat(self):
# print("test_colat")
x = self.data.x
y = self.data.y
z = self.data.z
colat = np.arctan2(z, np.sqrt(x.pow(2) + y.pow(2)))
colat = np.rad2deg(colat)
colat.name = "colat"
pdt.assert_series_equal(colat, self.object_testing.colat)
def test_longitude(self):
# print("test_longitude")
x = self.data.x
y = self.data.y
lon = np.arctan2(y, x)
lon = np.rad2deg(lon)
lon.name = "longitude"
pdt.assert_series_equal(lon, self.object_testing.lon)
pdt.assert_series_equal(lon, self.object_testing.longitude)
pdt.assert_series_equal(self.object_testing.lon, self.object_testing.longitude)
def test_r(self):
# print("test_r")
x = self.data.x
y = self.data.y
z = self.data.z
r = np.sqrt(x.pow(2) + y.pow(2) + z.pow(2))
r.name = "r"
pdt.assert_series_equal(r, self.object_testing.r)
pdt.assert_series_equal(r, self.object_testing.mag, check_names=False)
pdt.assert_series_equal(
self.object_testing.r, self.object_testing.mag, check_names=False
)
def test_cartesian(self):
v = self.data.loc[:, ["x", "y", "z"]]
pdt.assert_frame_equal(v, self.object_testing.cartesian)
def test_unit_vector(self):
v = self.data.loc[:, ["x", "y", "z"]]
mag = v.pow(2).sum(axis=1).pipe(np.sqrt)
uv = v.divide(mag, axis=0)
uv.name = "uv"
uv = vector.Vector(uv)
pdt.assert_frame_equal(uv.data, self.object_testing.unit_vector.data)
pdt.assert_frame_equal(uv.data, self.object_testing.uv.data)
pdt.assert_frame_equal(
self.object_testing.uv.data, self.object_testing.unit_vector.data
)
self.assertEqual(uv, self.object_testing.unit_vector)
self.assertEqual(uv, self.object_testing.uv)
self.assertEqual(self.object_testing.unit_vector, self.object_testing.uv)
def test_project(self):
b = (
base.TestData()
.plasma_data.xs("b", axis=1, level="M")
.xs("", axis=1, level="S")
.loc[:, ["x", "y", "z"]]
)
# b.setUpClass()
# b = (
# b.data.b.loc[:, ["x", "y", "z"]]
# .xs("", axis=1, level="S")
# .xs("", axis=1, level="N")
# )
bmag = b.pow(2).sum(axis=1).pipe(np.sqrt)
buv = b.divide(bmag, axis=0)
v = self.data.loc[:, ["x", "y", "z"]]
vmag = v.pow(2).sum(axis=1).pipe(np.sqrt)
# vuv = v.divide(vmag, axis=0)
par = v.multiply(buv, axis=1).sum(axis=1)
per = (
v.subtract(buv.multiply(par, axis=0), axis=1)
.pow(2)
.sum(axis=1)
.pipe(np.sqrt)
)
projected = pd.concat([par, per], axis=1, keys=["par", "per"], sort=True)
# print("",
# "<Test>",
# "<buv>", type(buv), buv,
# "<v>", type(v), v,
# "<vmag>", type(vmag), vmag,
# "<vuv>", type(vuv), vuv,
# "<projected>", type(projected), projected,
# "",
# sep="\n")
b = vector.Vector(b)
pdt.assert_frame_equal(projected, self.object_testing.project(b))
# Projecting a thing onto itself should return 1 for parallel
# and 0 for perp.
per = pd.Series(0.0, index=per.index)
projected = pd.concat([vmag, per], axis=1, keys=["par", "per"], sort=True)
pdt.assert_frame_equal(
projected, self.object_testing.project(self.object_testing)
)
msg = "`project` method needs"
with self.assertRaisesRegex(NotImplementedError, msg):
self.object_testing.project(b.data)
def test_cos_theta(self):
# b = base.TestData()
# b.setUpClass()
# b = (
# b.data.b.loc[:, ["x", "y", "z"]]
# .xs("", axis=1, level="S")
# .xs("", axis=1, level="N")
# )
b = (
base.TestData()
.plasma_data.xs("b", axis=1, level="M")
.xs("", axis=1, level="S")
.loc[:, ["x", "y", "z"]]
)
bmag = b.pow(2).sum(axis=1).pipe(np.sqrt)
buv = b.divide(bmag, axis=0)
v = self.data.loc[:, ["x", "y", "z"]]
vmag = v.pow(2).sum(axis=1).pipe(np.sqrt)
vuv = v.divide(vmag, axis=0)
cos_theta = vuv.multiply(buv, axis=1).sum(axis=1)
# print("",
# "<Test>",
# "<buv>", type(buv), buv,
# "<v>", type(v), v,
# "<vmag>", type(vmag), vmag,
# "<vuv>", type(vuv), vuv,
# "<cos_theta>", type(cos_theta), cos_theta,
# "",
# sep="\n")
b = vector.BField(b)
pdt.assert_series_equal(cos_theta, self.object_testing.cos_theta(b))
# Projecting a thing onto itself should return 1 for parallel
# and 0 for perp.
v = vector.Vector(v)
vuv = vector.Vector(vuv)
par = pd.Series(1.0, index=vmag.index)
pdt.assert_series_equal(par, self.object_testing.cos_theta(v))
pdt.assert_series_equal(par, self.object_testing.cos_theta(vuv))
msg = "`project` method needs"
with self.assertRaisesRegex(NotImplementedError, msg):
self.object_testing.project(b.data)
# class TestGSE(VectorTestBase, base.SWEData):
# @classmethod
# def set_object_testing(cls):
# # print("TestGSE.set_object_testing", flush=True)
# data = cls.data.gse.xs("", axis=1, level="S")
# gse = vector.Vector(data)
# cls.object_testing = gse
# cls.data = data
# # print("Done with TestGSE.set_object_testing", flush=True)
class TestBField(VectorTestBase, base.SWEData):
@classmethod
def set_object_testing(cls):
# print("BField.set_object_testing", flush=True)
data = cls.data.b.xs("", axis=1, level="S")
# b = vector.Vector(data)
b = vector.BField(data)
cls.object_testing = b
cls.data = data
# print("Done with BField.set_object_testing", flush=True)
def test_pressure(self):
print_inline_debug = False
bsq = self.data.loc[:, ["x", "y", "z"]].pow(2.0).sum(axis=1)
const = 1e-18 / (2.0 * constants.mu_0 * 1e-12) # ([b]**2 / 2.0 * \mu_0 * [p])
pb = bsq * const
pb.name = "pb"
# ot = self.object_testing
# pdb.set_trace()
if print_inline_debug:
print(
"",
"<Test>",
"<bsq>",
type(bsq),
bsq,
"<const>: %s" % const,
"<pb>",
type(pb),
pb,
sep="\n",
end="\n\n",
)
print(
"<Module>",
"<object testing>",
type(self.object_testing),
self.object_testing,
"<dir(ot)>",
*dir(self.object_testing),
sep="\n",
end="\n\n"
)
pdt.assert_series_equal(pb, self.object_testing.pressure)
pdt.assert_series_equal(pb, self.object_testing.pb)
| pdt.assert_series_equal(self.object_testing.pressure, self.object_testing.pb) | pandas.testing.assert_series_equal |
#primer
'''
__author__ = "<NAME>"
__Copyright__ "Copyright September 2019, <NAME>"
__License__ = "GPL"
__email__ "<EMAIL>"
'''
import os, glob
import numpy as np
import pandas as pd
import Bio
from Bio.Seq import MutableSeq, Seq
from Bio import SeqIO
from Bio.SeqUtils import GC
from typing import Tuple
def degenerate_primer(primer:'Bio.Seq.MutableSeq') -> str:
forward = (str(primer),)
for i in range(0, len(primer)):
for j in range(0, len(forward)):
primer = MutableSeq(forward[j])
if (primer[i] == 'A') or (primer[i] == 'C') or (primer[i] == 'G') or (primer[i] == 'T'):
pass
else:
forward = degenerate_primer_list(forward, primer, i, primer[i])
return forward
def degenerate_primer_list(forward:'str', primer:'Bio.Seq.MutableSeq', i:'int', letter:'str') -> str:
R = ['A', 'G', 'R']
M = ['A', 'C', 'M']
S = ['C', 'G', 'S']
B = ['C', 'G', 'T', 'B']
H = ['A', 'C', 'T', 'H']
N = ['A', 'C', 'G', 'T', 'N']
Y = ['C', 'T', 'Y']
K = ['G', 'T', 'K']
W = ['A', 'T', 'W']
D = ['A', 'G', 'T', 'D']
V = ['A', 'C', 'G', 'V']
mixed_nucleotides = [R, M, S, B, H, N, Y, K, W, D, V]
mixed_strings = ['R', 'M', 'S', 'B', 'H', 'N', 'Y', 'K', 'W', 'D', 'V']
k = 0
for string in mixed_strings:
if letter == string:
break
else:
k = k+1
for basepair in mixed_nucleotides[k]:
primer[i] = basepair
forward = forward + (str(primer),)
return forward
def forward_primer_search(species:'str', forward_primer:'tuple') -> Tuple[str, str, str]:
primer_match_query = []
fwd_primer_set = []
init_len = len(species)
for i in range(0,len(forward_primer)):
primer_match_query.append(species.find(forward_primer[i]))
fwd_primer_set.append(forward_primer[i])
if all(item == -1 for item in primer_match_query):
return str(''), str('N/a'), str('N/a')
else:
for k in range(0, len(primer_match_query)):
if primer_match_query[k] != -1:
forward_amplicon_segment = species[primer_match_query[k]:len(species)]
fwd_primer_used = forward_primer[k]
foward_primer_position = len(species) - len(forward_amplicon_segment)
else:
pass
return forward_amplicon_segment, fwd_primer_used, foward_primer_position
def reverse_primer_search(species:'str', reverse_primer_set:'tuple') -> Tuple[str, str, str]:
primer_match_query = []
rev_primer_set = []
for i in range(0,len(reverse_primer_set)):
reverse_primer = Seq(reverse_primer_set[i])
reverse_primer_complement = str(reverse_primer.reverse_complement())
primer_match_query.append(species.find(reverse_primer_complement))
rev_primer_set.append(reverse_primer_complement)
if all(item == -1 for item in primer_match_query):
return str(''), str('N/a'), str('N/a')
else:
for j in range(0,len(primer_match_query)):
if primer_match_query[j] != -1:
amplicon_segment = species[0:primer_match_query[j]+len(reverse_primer_complement)]
rev_primer_used = rev_primer_set[j]
reverse_primer_position = len(amplicon_segment)-len(reverse_primer_complement)
else:
pass
return amplicon_segment, rev_primer_used, reverse_primer_position
def create_PCR_amplicon(core_data:'pd.DataFrame', rev_tup:'tuple', fwd_tup:'tuple') -> pd.DataFrame:
add_on_data = []
all_sequnces = []
for item in core_data['Record id']:
[item_rev, rev_primer_used, reverse_primer_position] = reverse_primer_search(core_data.loc[(core_data['Record id'] == item)]['16S Sequence'].item(), rev_tup)
[item_amplicon, fwd_primer_used, forward_primer_position] = forward_primer_search(item_rev, fwd_tup)
add_on_data.append([core_data.loc[(core_data['Record id'] == item)]['Species'].item(),
item,
fwd_primer_used,
forward_primer_position,
rev_primer_used, reverse_primer_position,
round(GC(item_amplicon), 1),
len(item_amplicon),
item_amplicon])
columns = ['Species', 'Record id', 'Forward Primer', 'forward_primer_position', 'Reverse Primer', 'reverse_primer_position', 'GC Content', 'Length of Amplicon', 'Amplicon',]
calculated_data = | pd.DataFrame(add_on_data, columns=columns) | pandas.DataFrame |
import glob
import pandas as pd
import os
import folium
import dataretrieval.nwis as nwis
import requests
import geopandas as gpd
import rioxarray
def catalogue_csv(input_fold):
os.chdir(input_fold)
band=input_fold.split("/")[len(input_fold.split("/"))-1]
path_parent = os.path.dirname(input_fold)
csv_fold=path_parent+'/coastcorr_csv'
res_out=csv_fold+'/'+band+"_combined_csv.csv"
if not os.path.exists(csv_fold): os.makedirs(csv_fold)
if not os.path.exists(res_out):
print('combined csv not located, combining csv files')
#create list of files in folder
extension = 'csv'
all_filenames = [i for i in glob.glob('*.{}'.format(extension))]
#combine all files in the list
combined_csv = pd.concat([pd.read_csv(f) for f in all_filenames ])
res = combined_csv.pivot_table(index=['Date'], columns='geometry',
values='data', aggfunc='first').reset_index()
print('csv files combined')
res.to_csv(res_out)
print('saved to file')
print('Done')
def get_data(envin,station_name,csv_folder):
if envin == 'River Discharge':
collector='USGS'
site_path=os.path.dirname(os.path.dirname(os.path.dirname(csv_folder)))
env_fold=site_path+'/environmental_data_csv'
collec_fold=env_fold+'/'+collector
file_out= collec_fold+"/"+station_name+'.csv'
print('checking to see if data already downloaded')
if not os.path.exists(env_fold): os.makedirs(env_fold)
if not os.path.exists(collec_fold): os.makedirs(collec_fold)
if not os.path.exists(file_out):
print('data not downloaded, downloading data')
files = os.listdir(csv_folder)
sorted_files = sorted(files)
start=sorted_files[0].split(".")[0]
end=sorted_files[(len(sorted_files)-1)].split(".")[0]
env_df=nwis.get_record(sites=station_name, service='iv', start=start, end=end)
env_df= env_df.reset_index()
env_df.to_csv(file_out)
print('USGS Gauge:', station_name, 'Data Pulled')
if envin == 'Modeled Wave Energy':
collector='CDIP'
site_path=os.path.dirname(os.path.dirname(os.path.dirname(csv_folder)))
env_fold=site_path+'/environmental_data_csv'
collec_fold=env_fold+'/'+collector
file_out= collec_fold+"/"+station_name+'.csv'
print('checking to see if data already downloaded')
if not os.path.exists(env_fold): os.makedirs(env_fold)
if not os.path.exists(collec_fold): os.makedirs(collec_fold)
if not os.path.exists(file_out):
print('data not downloaded, downloading data')
files = os.listdir(csv_folder)
sorted_files = sorted(files)
start=sorted_files[0]
end=sorted_files[(len(sorted_files)-1)]
env_df=nwis.get_record(sites=station_name, service='iv', start=start, end=end)
env_df= env_df.reset_index()
env_df.to_csv(file_out)
print('USGS Gauge:', station_name, 'Data Pulled')
def data_binner(env_df, sr_df, binval, station_name, csv_folder, sat, envin):
if envin == 'River Discharge':
if sat in ['MODIS Aqua','MODIS Terra']:
freq=binval+'h'
collector='USGS'
site_path=os.path.dirname(os.path.dirname(os.path.dirname(csv_folder)))
env_fold=site_path+'/environmental_data_csv'
collec_fold=env_fold+'/'+collector
file_out= collec_fold+"/"+station_name+'_'+freq+'.csv'
if not os.path.exists(file_out):
df_sr=sr_df
df_env=env_df
df_env['datetime']=pd.to_datetime(df_env['datetime'], utc=True)
#convert usgs data from pdt and pst to utc
df_env['datetime']=df_env['datetime'].dt.tz_convert('ETC/UTC')
df_env['datetime'] = pd.to_datetime(df_env['datetime'], errors='coerce', format='%Y-%m-%d %H:%M:%S')
df_env['Time'] = pd.to_datetime(df_env['datetime'], format='%H:%M:%S')
df_env['Hour'] = df_env['Time'].dt.hour
print('Data Time Zone Converted')
#convert discharge to cms
df_env['Q']=df_env['00060']*0.028316847
df_env=df_env.drop(columns='Time')
df_env=df_env.drop(columns='00060')
df_env=df_env.drop(columns='00060_cd')
df_env=df_env.drop(columns='site_no')
df_env=df_env.drop(columns='00065')
df_env=df_env.drop(columns='00065_cd')
df_env.index=df_env['datetime']
df_env=df_env.resample('H').mean() #convert to hourly averages
df_env=df_env.resample('H').mean()
df_env=df_env.groupby(pd.Grouper(level=0, base=20, freq=freq)).mean()
df_env=df_env.reset_index()
df_env['Date']=df_env['datetime'].dt.date
df_env=df_env.drop(columns='datetime')
df_env['Q'] = df_env.Q.shift(1)
print('Data Binned to Overpass Time')
lwrbnd=df_env['Q'].quantile(float(int0.value))
upperbnd=df_env['Q'].quantile(float(int1.value))
#subset environmental dataframe with percentiles
df_env=df_env[df_env['Q']>lwrbnd]
df_env=df_env[df_env['Q']<=upperbnd]
#merge sr dataframe and environmental dataframe
df_sr['Date']=pd.to_datetime(df_sr['Date'], errors='coerce', format='%Y-%m-%d')
df_env['Date']= | pd.to_datetime(df_sr['Date'], errors='coerce', format='%Y-%m-%d') | pandas.to_datetime |
import pandas as pd
import numpy as np
import math
import os
from scipy.interpolate import interp1d
import time
from sklearn.ensemble import RandomForestRegressor
import xgboost as xgb
from lightgbm import LGBMRegressor
from catboost import CatBoostRegressor
from information_measures import *
from joblib import Parallel, delayed
#from arch import arch_model
def rmspe(y_true, y_pred):
return (np.sqrt(np.mean(np.square((y_true - y_pred) / y_true))))
def log_return(list_stock_prices): # Stock prices are estimated through wap values
return np.log(list_stock_prices).diff()
def realized_volatility(series_log_return):
return np.sqrt(np.sum(series_log_return**2))
def compute_wap(book_pd):
wap = (book_pd['bid_price1'] * book_pd['ask_size1'] + book_pd['ask_price1'] * book_pd['bid_size1']) / (book_pd['bid_size1']+ book_pd['ask_size1'])
return wap
def realized_volatility_from_book_pd(book_stock_time):
wap = compute_wap(book_stock_time)
returns = log_return(wap)
volatility = realized_volatility(returns)
return volatility
def realized_volatility_per_time_id(file_path, prediction_column_name):
df_book_data = pd.read_parquet(file_path)
# Estimate stock price per time point
df_book_data['wap'] = compute_wap(df_book_data)
# Compute log return from wap values per time_id
df_book_data['log_return'] = df_book_data.groupby(['time_id'])['wap'].apply(log_return)
df_book_data = df_book_data[~df_book_data['log_return'].isnull()]
# Compute the square root of the sum of log return squared to get realized volatility
df_realized_vol_per_stock = pd.DataFrame(df_book_data.groupby(['time_id'])['log_return'].agg(realized_volatility)).reset_index()
# Formatting
df_realized_vol_per_stock = df_realized_vol_per_stock.rename(columns = {'log_return':prediction_column_name})
stock_id = file_path.split('=')[1]
df_realized_vol_per_stock['row_id'] = df_realized_vol_per_stock['time_id'].apply(lambda x:f'{stock_id}-{x}')
return df_realized_vol_per_stock[['row_id',prediction_column_name]]
def past_realized_volatility_per_stock(list_file,prediction_column_name):
df_past_realized = pd.DataFrame()
for file in list_file:
df_past_realized = pd.concat([df_past_realized,
realized_volatility_per_time_id(file,prediction_column_name)])
return df_past_realized
def stupidForestPrediction(book_path_train,prediction_column_name,train_targets_pd,book_path_test):
naive_predictions_train = past_realized_volatility_per_stock(list_file=book_path_train,prediction_column_name=prediction_column_name)
df_joined_train = train_targets_pd.merge(naive_predictions_train[['row_id','pred']], on = ['row_id'], how = 'left')
X = np.array(df_joined_train['pred']).reshape(-1,1)
y = np.array(df_joined_train['target']).reshape(-1,)
regr = RandomForestRegressor(random_state=0)
regr.fit(X, y)
naive_predictions_test = past_realized_volatility_per_stock(list_file=book_path_test,prediction_column_name='target')
yhat = regr.predict(np.array(naive_predictions_test['target']).reshape(-1,1))
updated_predictions = naive_predictions_test.copy()
updated_predictions['target'] = yhat
return updated_predictions
def garch_fit_predict_volatility(returns_series, N=10000):
model = arch_model(returns_series * N, p=1, q=1)
model_fit = model.fit(update_freq=0, disp='off')
yhat = model_fit.forecast(horizon=600, reindex=False)
pred_volatility = np.sqrt(np.sum(yhat.variance.values)) / N
return pred_volatility
def garch_volatility_per_time_id(file_path, prediction_column_name):
# read the data
df_book_data = pd.read_parquet(file_path)
# calculate the midprice (not the WAP)
df_book_data['midprice'] =(df_book_data['bid_price1'] + df_book_data['ask_price1'])/2
# leave only WAP for now
df_book_data = df_book_data[['time_id', 'seconds_in_bucket', 'midprice']]
df_book_data = df_book_data.sort_values('seconds_in_bucket')
# make the book updates evenly spaced
df_book_data_evenly = pd.DataFrame({'time_id':np.repeat(df_book_data['time_id'].unique(), 600),
'second':np.tile(range(0,600), df_book_data['time_id'].nunique())})
df_book_data_evenly['second'] = df_book_data_evenly['second'].astype(np.int16)
df_book_data_evenly = df_book_data_evenly.sort_values('second')
df_book_data_evenly = pd.merge_asof(df_book_data_evenly,
df_book_data,
left_on='second',right_on='seconds_in_bucket',
by = 'time_id')
# Ordering for easier use
df_book_data_evenly = df_book_data_evenly[['time_id', 'second', 'midprice']]
df_book_data_evenly = df_book_data_evenly.sort_values(['time_id','second']).reset_index(drop=True)
# calculate log returns
df_book_data_evenly['log_return'] = df_book_data_evenly.groupby(['time_id'])['midprice'].apply(log_return)
df_book_data_evenly = df_book_data_evenly[~df_book_data_evenly['log_return'].isnull()]
# fit GARCH(1, 1) and predict the volatility of returns
df_garch_vol_per_stock = \
pd.DataFrame(df_book_data_evenly.groupby(['time_id'])['log_return'].agg(garch_fit_predict_volatility)).reset_index()
df_garch_vol_per_stock = df_garch_vol_per_stock.rename(columns = {'log_return':prediction_column_name})
# add row_id column to the data
stock_id = file_path.split('=')[1]
df_garch_vol_per_stock['row_id'] = df_garch_vol_per_stock['time_id'].apply(lambda x:f'{stock_id}-{x}')
# return the result
return df_garch_vol_per_stock[['row_id', prediction_column_name]]
def garch_volatility_per_stock(list_file, prediction_column_name):
df_garch_predicted = pd.DataFrame()
for file in list_file:
df_garch_predicted = pd.concat([df_garch_predicted,
garch_volatility_per_time_id(file, prediction_column_name)])
return df_garch_predicted
def entropy_from_book(book_stock_time,last_min):
if last_min < 10:
book_stock_time = book_stock_time[book_stock_time['seconds_in_bucket'] >= (600-last_min*60)]
if book_stock_time.empty == True or book_stock_time.shape[0] < 3:
return 0
wap = compute_wap(book_stock_time)
t_init = book_stock_time['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, wap, kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
def entropy_from_wap(wap,seconds,last_seconds):
if last_seconds < 600:
idx = np.where(seconds >= last_seconds)[0]
if len(idx) < 3:
return 0
else:
wap = wap[idx]
seconds = seconds[idx]
# Closest neighbour interpolation (no changes in wap between lines)
t_new = np.arange(np.min(seconds),np.max(seconds))
nearest = interp1d(seconds, wap, kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
# sampleEntropy = ApEn_new(resampled_wap,3,0.001)
return sampleEntropy
def linearFit(book_stock_time, last_min):
if last_min < 10:
book_stock_time = book_stock_time[book_stock_time['seconds_in_bucket'] >= (600-last_min*60)]
if book_stock_time.empty == True or book_stock_time.shape[0] < 2:
return 0
wap = np.array(compute_wap(book_stock_time))
t_init = book_stock_time['seconds_in_bucket']
return (wap[-1] - wap[0])/(np.max(t_init) - np.min(t_init))
def wapStat(book_stock_time, last_min):
if last_min < 10:
book_stock_time = book_stock_time[book_stock_time['seconds_in_bucket'] >= (600-last_min*60)]
if book_stock_time.empty == True or book_stock_time.shape[0] < 2:
return 0
wap = compute_wap(book_stock_time)
t_init = book_stock_time['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, wap, kind='nearest')
resampled_wap = nearest(t_new)
return np.std(resampled_wap)
def entropy_Prediction(book_path_train,prediction_column_name,train_targets_pd,book_path_test,all_stocks_ids,test_file):
# Compute features
book_features_encoded_test = computeFeatures_1(book_path_test,'test',test_file,all_stocks_ids)
book_features_encoded_train = computeFeatures_1(book_path_train,'train',train_targets_pd,all_stocks_ids)
X = book_features_encoded_train.drop(['row_id','target','stock_id'],axis=1)
y = book_features_encoded_train['target']
# Modeling
catboost_default = CatBoostRegressor(verbose=0)
catboost_default.fit(X,y)
# Predict
X_test = book_features_encoded_test.drop(['row_id','stock_id'],axis=1)
yhat = catboost_default.predict(X_test)
# Formatting
yhat_pd = pd.DataFrame(yhat,columns=['target'])
predictions = pd.concat([test_file,yhat_pd],axis=1)
return predictions
def computeFeatures_1(book_path,prediction_column_name,train_targets_pd,all_stocks_ids):
book_all_features = pd.DataFrame()
encoder = np.eye(len(all_stocks_ids))
stocks_id_list, row_id_list = [], []
volatility_list, entropy2_list = [], []
linearFit_list, linearFit5_list, linearFit2_list = [], [], []
wap_std_list, wap_std5_list, wap_std2_list = [], [], []
for file in book_path:
start = time.time()
book_stock = pd.read_parquet(file)
stock_id = file.split('=')[1]
print('stock id computing = ' + str(stock_id))
stock_time_ids = book_stock['time_id'].unique()
for time_id in stock_time_ids:
# Access book data at this time + stock
book_stock_time = book_stock[book_stock['time_id'] == time_id]
# Create feature matrix
stocks_id_list.append(stock_id)
row_id_list.append(str(f'{stock_id}-{time_id}'))
volatility_list.append(realized_volatility_from_book_pd(book_stock_time=book_stock_time))
entropy2_list.append(entropy_from_book(book_stock_time=book_stock_time,last_min=2))
linearFit_list.append(linearFit(book_stock_time=book_stock_time,last_min=10))
linearFit5_list.append(linearFit(book_stock_time=book_stock_time,last_min=5))
linearFit2_list.append(linearFit(book_stock_time=book_stock_time,last_min=2))
wap_std_list.append(wapStat(book_stock_time=book_stock_time,last_min=10))
wap_std5_list.append(wapStat(book_stock_time=book_stock_time,last_min=5))
wap_std2_list.append(wapStat(book_stock_time=book_stock_time,last_min=2))
print('Computing one stock entropy took', time.time() - start, 'seconds for stock ', stock_id)
# Merge targets
stocks_id_pd = pd.DataFrame(stocks_id_list,columns=['stock_id'])
row_id_pd = pd.DataFrame(row_id_list,columns=['row_id'])
volatility_pd = pd.DataFrame(volatility_list,columns=['volatility'])
entropy2_pd = pd.DataFrame(entropy2_list,columns=['entropy2'])
linearFit_pd = pd.DataFrame(linearFit_list,columns=['linearFit_coef'])
linearFit5_pd = pd.DataFrame(linearFit5_list,columns=['linearFit_coef5'])
linearFit2_pd = pd.DataFrame(linearFit2_list,columns=['linearFit_coef2'])
wap_std_pd = pd.DataFrame(wap_std_list,columns=['wap_std'])
wap_std5_pd = pd.DataFrame(wap_std5_list,columns=['wap_std5'])
wap_std2_pd = pd.DataFrame(wap_std2_list,columns=['wap_std2'])
book_all_features = pd.concat([stocks_id_pd,row_id_pd,volatility_pd,entropy2_pd,linearFit_pd,linearFit5_pd,linearFit2_pd,
wap_std_pd,wap_std5_pd,wap_std2_pd],axis=1)
# This line makes sure the predictions are aligned with the row_id in the submission file
book_all_features = train_targets_pd.merge(book_all_features, on = ['row_id'])
# Add encoded stock
encoded = list()
for i in range(book_all_features.shape[0]):
stock_id = book_all_features['stock_id'][i]
encoded_stock = encoder[np.where(all_stocks_ids == int(stock_id))[0],:]
encoded.append(encoded_stock)
encoded_pd = pd.DataFrame(np.array(encoded).reshape(book_all_features.shape[0],np.array(all_stocks_ids).shape[0]))
book_all_features_encoded = pd.concat([book_all_features, encoded_pd],axis=1)
return book_all_features_encoded
def calc_wap(df):
return (df['bid_price1'] * df['ask_size1'] + df['ask_price1'] * df['bid_size1']) / (df['bid_size1'] + df['ask_size1'])
def calc_wap2(df):
return (df['bid_price2'] * df['ask_size2'] + df['ask_price2'] * df['bid_size2']) / (df['bid_size2'] + df['ask_size2'])
def calc_wap3(df):
return (df['bid_price2'] * df['bid_size2'] + df['ask_price2'] * df['ask_size2']) / (df['bid_size2'] + df['ask_size2'])
def calc_wap4(df):
return (df['bid_price1'] * df['bid_size1'] + df['ask_price1'] * df['ask_size1']) / (df['bid_size1'] + df['ask_size1'])
def mid_price(df):
return df['bid_price1'] /2 + df['ask_price1'] / 2
def calc_rv_from_wap_numba(values, index):
log_return = np.diff(np.log(values))
realized_vol = np.sqrt(np.sum(np.square(log_return[1:])))
return realized_vol
def load_book_data_by_id(stock_id,datapath,train_test):
file_to_read = os.path.join(datapath,'book_' + str(train_test) + str('.parquet'),'stock_id=' + str(stock_id))
df = pd.read_parquet(file_to_read)
return df
def load_trades_data_by_id(stock_id,datapath,train_test):
file_to_read = os.path.join(datapath,'trade_' + str(train_test) + str('.parquet'),'stock_id=' + str(stock_id))
df = pd.read_parquet(file_to_read)
return df
def entropy_from_df(df):
if df.shape[0] < 3:
return 0
t_init = df['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, df['wap'], kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
def entropy_from_df2(df):
if df.shape[0] < 3:
return 0
t_init = df['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, df['wap2'], kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
def entropy_from_df3(df):
if df.shape[0] < 3:
return 0
t_init = df['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, df['wap3'], kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
def financial_metrics(df):
wap_imbalance = np.mean(df['wap'] - df['wap2'])
price_spread = np.mean((df['ask_price1'] - df['bid_price1']) / ((df['ask_price1'] + df['bid_price1'])/2))
bid_spread = np.mean(df['bid_price1'] - df['bid_price2'])
ask_spread = np.mean(df['ask_price1'] - df['ask_price2']) # Abs to take
total_volume = np.mean((df['ask_size1'] + df['ask_size2']) + (df['bid_size1'] + df['bid_size2']))
volume_imbalance = np.mean(abs((df['ask_size1'] + df['ask_size2']) - (df['bid_size1'] + df['bid_size2'])))
return [wap_imbalance,price_spread,bid_spread,ask_spread,total_volume,volume_imbalance]
def financial_metrics_2(df):
wap_imbalance = df['wap'] - df['wap2']
price_spread = (df['ask_price1'] - df['bid_price1']) / ((df['ask_price1'] + df['bid_price1'])/2)
bid_spread = df['bid_price1'] - df['bid_price2']
ask_spread = df['ask_price1'] - df['ask_price2'] # Abs to take
total_volume = (df['ask_size1'] + df['ask_size2']) + (df['bid_size1'] + df['bid_size2'])
volume_imbalance = abs((df['ask_size1'] + df['ask_size2']) - (df['bid_size1'] + df['bid_size2']))
# New features here
wap_imbalance_mean = np.mean(wap_imbalance)
wap_imbalance_sum = np.sum(wap_imbalance)
wap_imbalance_std = np.std(wap_imbalance)
wap_imbalance_max = np.max(wap_imbalance)
wap_imbalance_min = np.min(wap_imbalance)
price_spread_mean = np.mean(price_spread)
price_spread_sum = np.sum(price_spread)
price_spread_std = np.std(price_spread)
price_spread_max = np.max(price_spread)
price_spread_min = np.min(price_spread)
bid_spread_mean = np.mean(bid_spread)
bid_spread_sum = np.sum(bid_spread)
bid_spread_std = np.std(bid_spread)
bid_spread_max = np.max(bid_spread)
bid_spread_min = np.min(bid_spread)
ask_spread_mean = np.mean(ask_spread)
ask_spread_sum = np.sum(ask_spread)
ask_spread_std = np.std(ask_spread)
ask_spread_max = np.max(ask_spread)
ask_spread_min = np.min(ask_spread)
total_volume_mean = np.mean(total_volume)
total_volume_sum = np.sum(total_volume)
total_volume_std = np.std(total_volume)
total_volume_max = np.max(total_volume)
total_volume_min = np.min(total_volume)
volume_imbalance_mean = np.mean(volume_imbalance)
volume_imbalance_sum = np.sum(volume_imbalance)
volume_imbalance_std = np.std(volume_imbalance)
volume_imbalance_max = np.max(volume_imbalance)
volume_imbalance_min = np.min(volume_imbalance)
return [wap_imbalance_mean,price_spread_mean,bid_spread_mean,ask_spread_mean,total_volume_mean,volume_imbalance_mean, wap_imbalance_sum,price_spread_sum,bid_spread_sum,ask_spread_sum,total_volume_sum,volume_imbalance_sum, wap_imbalance_std,price_spread_std,bid_spread_std,ask_spread_std,total_volume_std,volume_imbalance_std, wap_imbalance_max,price_spread_max,bid_spread_max,ask_spread_max,total_volume_max,volume_imbalance_max, wap_imbalance_min,price_spread_min,bid_spread_min,ask_spread_min,total_volume_min,volume_imbalance_min]
def other_metrics(df):
if df.shape[0] < 2:
linearFit = 0
linearFit2 = 0
linearFit3 = 0
std_1 = 0
std_2 = 0
std_3 = 0
else:
linearFit = (df['wap'].iloc[-1] - df['wap'].iloc[0]) / ((np.max(df['seconds_in_bucket']) - np.min(df['seconds_in_bucket'])))
linearFit2 = (df['wap2'].iloc[-1] - df['wap2'].iloc[0]) / ((np.max(df['seconds_in_bucket']) - np.min(df['seconds_in_bucket'])))
linearFit3 = (df['wap3'].iloc[-1] - df['wap3'].iloc[0]) / ((np.max(df['seconds_in_bucket']) - np.min(df['seconds_in_bucket'])))
# Resampling
t_init = df['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, df['wap'], kind='nearest')
nearest2 = interp1d(t_init, df['wap2'], kind='nearest')
nearest3 = interp1d(t_init, df['wap3'], kind='nearest')
std_1 = np.std(nearest(t_new))
std_2 = np.std(nearest2(t_new))
std_3 = np.std(nearest3(t_new))
return [linearFit, linearFit2, linearFit3, std_1, std_2, std_3]
def load_book_data_by_id_kaggle(stock_id,train_test):
df = pd.read_parquet(f'../input/optiver-realized-volatility-prediction/book_{train_test}.parquet/stock_id={stock_id}')
return df
def load_trades_data_by_id_kaggle(stock_id,train_test):
df = pd.read_parquet(f'../input/optiver-realized-volatility-prediction/trade_{train_test}.parquet/stock_id={stock_id}')
return df
def computeFeatures_wEntropy(machine, dataset, all_stocks_ids, datapath):
list_rv, list_rv2, list_rv3 = [], [], []
list_ent, list_fin, list_fin2 = [], [], []
list_others, list_others2, list_others3 = [], [], []
for stock_id in range(127):
start = time.time()
if machine == 'local':
try:
book_stock = load_book_data_by_id(stock_id,datapath,dataset)
except:
continue
elif machine == 'kaggle':
try:
book_stock = load_book_data_by_id_kaggle(stock_id,dataset)
except:
continue
# Useful
all_time_ids_byStock = book_stock['time_id'].unique()
# Calculate wap for the book
book_stock['wap'] = calc_wap(book_stock)
book_stock['wap2'] = calc_wap2(book_stock)
book_stock['wap3'] = calc_wap3(book_stock)
# Calculate realized volatility
df_sub = book_stock.groupby('time_id')['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2 = book_stock.groupby('time_id')['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3 = book_stock.groupby('time_id')['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub['time_id']]
df_sub = pd.concat([df_sub,df_sub2['wap2'],df_sub3['wap3']],axis=1)
df_sub = df_sub.rename(columns={'time_id':'row_id','wap': 'rv', 'wap2': 'rv2', 'wap3': 'rv3'})
# Calculate realized volatility last 5 min
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_sub_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_5['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_5['time_id']]
df_sub_5 = pd.concat([df_sub_5,df_sub2_5['wap2'],df_sub3_5['wap3']],axis=1)
df_sub_5 = df_sub_5.rename(columns={'time_id':'row_id','wap': 'rv_5', 'wap2': 'rv2_5', 'wap3': 'rv3_5'})
else: # 0 volatility
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
zero_rv = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv_5'])
zero_rv2 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv2_5'])
zero_rv3 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv3_5'])
df_sub_5 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3],axis=1)
# Calculate realized volatility last 2 min
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_sub_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_2['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_2['time_id']]
df_sub_2 = pd.concat([df_sub_2,df_sub2_2['wap2'],df_sub3_2['wap3']],axis=1)
df_sub_2 = df_sub_2.rename(columns={'time_id':'row_id','wap': 'rv_2', 'wap2': 'rv2_2', 'wap3': 'rv3_2'})
else: # 0 volatility
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
zero_rv = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv_2'])
zero_rv2 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv2_2'])
zero_rv3 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv3_2'])
df_sub_2 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3],axis=1)
list_rv.append(df_sub)
list_rv2.append(df_sub_5)
list_rv3.append(df_sub_2)
# Calculate other financial metrics from book
df_sub_book_feats = book_stock.groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats = df_sub_book_feats.rename(columns={0:'embedding'})
df_sub_book_feats[['wap_imbalance','price_spread','bid_spread','ask_spread','total_vol','vol_imbalance']] = pd.DataFrame(df_sub_book_feats.embedding.tolist(), index=df_sub_book_feats.index)
df_sub_book_feats['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats['time_id']]
df_sub_book_feats = df_sub_book_feats.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_sub_book_feats5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats5 = df_sub_book_feats5.rename(columns={0:'embedding'})
df_sub_book_feats5[['wap_imbalance5','price_spread5','bid_spread5','ask_spread5','total_vol5','vol_imbalance5']] = pd.DataFrame(df_sub_book_feats5.embedding.tolist(), index=df_sub_book_feats5.index)
df_sub_book_feats5['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats5['time_id']]
df_sub_book_feats5 = df_sub_book_feats5.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['wap_imbalance5'])
temp2 = pd.DataFrame([0],columns=['price_spread5'])
temp3 = pd.DataFrame([0],columns=['bid_spread5'])
temp4 = pd.DataFrame([0],columns=['ask_spread5'])
temp5 = pd.DataFrame([0],columns=['total_vol5'])
temp6 = pd.DataFrame([0],columns=['vol_imbalance5'])
df_sub_book_feats5 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_fin.append(df_sub_book_feats)
list_fin2.append(df_sub_book_feats5)
# Compute entropy
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_ent = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(entropy_from_df).to_frame().reset_index().fillna(0)
df_ent2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(entropy_from_df2).to_frame().reset_index().fillna(0)
df_ent3 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(entropy_from_df3).to_frame().reset_index().fillna(0)
df_ent['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_ent['time_id']]
df_ent = df_ent.rename(columns={'time_id':'row_id',0:'entropy'})
df_ent2 = df_ent2.rename(columns={0:'entropy2'}).drop(['time_id'],axis=1)
df_ent3 = df_ent3.rename(columns={0:'entropy3'}).drop(['time_id'],axis=1)
df_ent = pd.concat([df_ent,df_ent2,df_ent3],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['entropy'])
temp2 = pd.DataFrame([0],columns=['entropy2'])
temp3 = pd.DataFrame([0],columns=['entropy3'])
df_ent = pd.concat([times_pd,temp,temp2,temp3],axis=1)
list_ent.append(df_ent)
# Compute other metrics
df_others = book_stock.groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others = df_others.rename(columns={0:'embedding'})
df_others[['linearFit1_1','linearFit1_2','linearFit1_3','wap_std1_1','wap_std1_2','wap_std1_3']] = pd.DataFrame(df_others.embedding.tolist(), index=df_others.index)
df_others['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others['time_id']]
df_others = df_others.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
list_others.append(df_others)
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_others2 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others2 = df_others2.rename(columns={0:'embedding'})
df_others2[['linearFit2_1','linearFit2_2','linearFit2_3','wap_std2_1','wap_std2_2','wap_std2_3']] = pd.DataFrame(df_others2.embedding.tolist(), index=df_others2.index)
df_others2['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others2['time_id']]
df_others2 = df_others2.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['linearFit2_1'])
temp2 = pd.DataFrame([0],columns=['linearFit2_2'])
temp3 = pd.DataFrame([0],columns=['linearFit2_3'])
temp4 = pd.DataFrame([0],columns=['wap_std2_1'])
temp5 = pd.DataFrame([0],columns=['wap_std2_2'])
temp6 = pd.DataFrame([0],columns=['wap_std2_3'])
df_others2 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_others2.append(df_others2)
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_others3 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others3 = df_others3.rename(columns={0:'embedding'})
df_others3[['linearFit3_1','linearFit3_2','linearFit3_3','wap_std3_1','wap_std3_2','wap_std3_3']] = pd.DataFrame(df_others3.embedding.tolist(), index=df_others3.index)
df_others3['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others3['time_id']]
df_others3 = df_others3.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['linearFit3_1'])
temp2 = pd.DataFrame([0],columns=['linearFit3_2'])
temp3 = pd.DataFrame([0],columns=['linearFit3_3'])
temp4 = pd.DataFrame([0],columns=['wap_std3_1'])
temp5 = pd.DataFrame([0],columns=['wap_std3_2'])
temp6 = pd.DataFrame([0],columns=['wap_std3_3'])
df_others3 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_others3.append(df_others3)
print('Computing one stock took', time.time() - start, 'seconds for stock ', stock_id)
# Create features dataframe
df_submission = pd.concat(list_rv)
df_submission2 = pd.concat(list_rv2)
df_submission3 = pd.concat(list_rv3)
df_ent_concat = pd.concat(list_ent)
df_fin_concat = pd.concat(list_fin)
df_fin2_concat = pd.concat(list_fin2)
df_others = pd.concat(list_others)
df_others2 = pd.concat(list_others2)
df_others3 = pd.concat(list_others3)
df_book_features = df_submission.merge(df_submission2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_submission3, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_ent_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin2_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others3, on = ['row_id'], how='left').fillna(0)
# Add encoded stock
encoder = np.eye(len(all_stocks_ids))
encoded = list()
for i in range(df_book_features.shape[0]):
stock_id = int(df_book_features['row_id'][i].split('-')[0])
encoded_stock = encoder[np.where(all_stocks_ids == int(stock_id))[0],:]
encoded.append(encoded_stock)
encoded_pd = pd.DataFrame(np.array(encoded).reshape(df_book_features.shape[0],np.array(all_stocks_ids).shape[0]))
df_book_features_encoded = pd.concat([df_book_features, encoded_pd],axis=1)
return df_book_features_encoded
def computeFeatures_july(machine, dataset, all_stocks_ids, datapath):
list_rv, list_rv2, list_rv3 = [], [], []
list_ent, list_fin, list_fin2 = [], [], []
list_others, list_others2, list_others3 = [], [], []
for stock_id in range(127):
start = time.time()
if machine == 'local':
try:
book_stock = load_book_data_by_id(stock_id,datapath,dataset)
except:
continue
elif machine == 'kaggle':
try:
book_stock = load_book_data_by_id_kaggle(stock_id,dataset)
except:
continue
# Useful
all_time_ids_byStock = book_stock['time_id'].unique()
# Calculate wap for the book
book_stock['wap'] = calc_wap(book_stock)
book_stock['wap2'] = calc_wap2(book_stock)
book_stock['wap3'] = calc_wap3(book_stock)
# Calculate realized volatility
df_sub = book_stock.groupby('time_id')['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2 = book_stock.groupby('time_id')['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3 = book_stock.groupby('time_id')['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub['time_id']]
df_sub = pd.concat([df_sub,df_sub2['wap2'],df_sub3['wap3']],axis=1)
df_sub = df_sub.rename(columns={'time_id':'row_id','wap': 'rv', 'wap2': 'rv2', 'wap3': 'rv3'})
# Calculate realized volatility last 5 min
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_sub_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_5['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_5['time_id']]
df_sub_5 = pd.concat([df_sub_5,df_sub2_5['wap2'],df_sub3_5['wap3']],axis=1)
df_sub_5 = df_sub_5.rename(columns={'time_id':'row_id','wap': 'rv_5', 'wap2': 'rv2_5', 'wap3': 'rv3_5'})
else: # 0 volatility
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
zero_rv = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv_5'])
zero_rv2 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv2_5'])
zero_rv3 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv3_5'])
df_sub_5 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3],axis=1)
# Calculate realized volatility last 2 min
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_sub_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_2['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_2['time_id']]
df_sub_2 = pd.concat([df_sub_2,df_sub2_2['wap2'],df_sub3_2['wap3']],axis=1)
df_sub_2 = df_sub_2.rename(columns={'time_id':'row_id','wap': 'rv_2', 'wap2': 'rv2_2', 'wap3': 'rv3_2'})
else: # 0 volatility
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
zero_rv = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv_2'])
zero_rv2 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv2_2'])
zero_rv3 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv3_2'])
df_sub_2 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3],axis=1)
list_rv.append(df_sub)
list_rv2.append(df_sub_5)
list_rv3.append(df_sub_2)
# Calculate other financial metrics from book
df_sub_book_feats = book_stock.groupby(['time_id']).apply(financial_metrics_2).to_frame().reset_index()
df_sub_book_feats = df_sub_book_feats.rename(columns={0:'embedding'})
df_sub_book_feats[['wap_imbalance','price_spread','bid_spread','ask_spread','total_vol','vol_imbalance']] = pd.DataFrame(df_sub_book_feats.embedding.tolist(), index=df_sub_book_feats.index)
df_sub_book_feats['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats['time_id']]
df_sub_book_feats = df_sub_book_feats.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_sub_book_feats5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(financial_metrics_2).to_frame().reset_index()
df_sub_book_feats5 = df_sub_book_feats5.rename(columns={0:'embedding'})
df_sub_book_feats5[['wap_imbalance5','price_spread5','bid_spread5','ask_spread5','total_vol5','vol_imbalance5']] = pd.DataFrame(df_sub_book_feats5.embedding.tolist(), index=df_sub_book_feats5.index)
df_sub_book_feats5['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats5['time_id']]
df_sub_book_feats5 = df_sub_book_feats5.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['wap_imbalance5'])
temp2 = pd.DataFrame([0],columns=['price_spread5'])
temp3 = pd.DataFrame([0],columns=['bid_spread5'])
temp4 = pd.DataFrame([0],columns=['ask_spread5'])
temp5 = pd.DataFrame([0],columns=['total_vol5'])
temp6 = pd.DataFrame([0],columns=['vol_imbalance5'])
df_sub_book_feats5 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_fin.append(df_sub_book_feats)
list_fin2.append(df_sub_book_feats5)
# Compute other metrics
df_others = book_stock.groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others = df_others.rename(columns={0:'embedding'})
df_others[['linearFit1_1','linearFit1_2','linearFit1_3','wap_std1_1','wap_std1_2','wap_std1_3']] = pd.DataFrame(df_others.embedding.tolist(), index=df_others.index)
df_others['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others['time_id']]
df_others = df_others.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
list_others.append(df_others)
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_others2 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others2 = df_others2.rename(columns={0:'embedding'})
df_others2[['linearFit2_1','linearFit2_2','linearFit2_3','wap_std2_1','wap_std2_2','wap_std2_3']] = pd.DataFrame(df_others2.embedding.tolist(), index=df_others2.index)
df_others2['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others2['time_id']]
df_others2 = df_others2.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['linearFit2_1'])
temp2 = pd.DataFrame([0],columns=['linearFit2_2'])
temp3 = pd.DataFrame([0],columns=['linearFit2_3'])
temp4 = pd.DataFrame([0],columns=['wap_std2_1'])
temp5 = pd.DataFrame([0],columns=['wap_std2_2'])
temp6 = pd.DataFrame([0],columns=['wap_std2_3'])
df_others2 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_others2.append(df_others2)
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_others3 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others3 = df_others3.rename(columns={0:'embedding'})
df_others3[['linearFit3_1','linearFit3_2','linearFit3_3','wap_std3_1','wap_std3_2','wap_std3_3']] = pd.DataFrame(df_others3.embedding.tolist(), index=df_others3.index)
df_others3['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others3['time_id']]
df_others3 = df_others3.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['linearFit3_1'])
temp2 = pd.DataFrame([0],columns=['linearFit3_2'])
temp3 = pd.DataFrame([0],columns=['linearFit3_3'])
temp4 = pd.DataFrame([0],columns=['wap_std3_1'])
temp5 = pd.DataFrame([0],columns=['wap_std3_2'])
temp6 = pd.DataFrame([0],columns=['wap_std3_3'])
df_others3 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_others3.append(df_others3)
print('Computing one stock took', time.time() - start, 'seconds for stock ', stock_id)
# Create features dataframe
df_submission = pd.concat(list_rv)
df_submission2 = pd.concat(list_rv2)
df_submission3 = pd.concat(list_rv3)
df_ent_concat = pd.concat(list_ent)
df_fin_concat = pd.concat(list_fin)
df_fin2_concat = pd.concat(list_fin2)
df_others = pd.concat(list_others)
df_others2 = pd.concat(list_others2)
df_others3 = pd.concat(list_others3)
df_book_features = df_submission.merge(df_submission2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_submission3, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_ent_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin2_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others3, on = ['row_id'], how='left').fillna(0)
# Add encoded stock
encoder = np.eye(len(all_stocks_ids))
encoded = list()
for i in range(df_book_features.shape[0]):
stock_id = int(df_book_features['row_id'][i].split('-')[0])
encoded_stock = encoder[np.where(all_stocks_ids == int(stock_id))[0],:]
encoded.append(encoded_stock)
encoded_pd = pd.DataFrame(np.array(encoded).reshape(df_book_features.shape[0],np.array(all_stocks_ids).shape[0]))
df_book_features_encoded = pd.concat([df_book_features, encoded_pd],axis=1)
return df_book_features_encoded
def computeFeatures_newTest_Laurent(machine, dataset, all_stocks_ids, datapath):
list_rv, list_rv2, list_rv3 = [], [], []
list_ent, list_fin, list_fin2 = [], [], []
list_others, list_others2, list_others3 = [], [], []
for stock_id in range(127):
start = time.time()
if machine == 'local':
try:
book_stock = load_book_data_by_id(stock_id,datapath,dataset)
except:
continue
elif machine == 'kaggle':
try:
book_stock = load_book_data_by_id_kaggle(stock_id,dataset)
except:
continue
# Useful
all_time_ids_byStock = book_stock['time_id'].unique()
# Calculate wap for the entire book
book_stock['wap'] = calc_wap(book_stock)
book_stock['wap2'] = calc_wap2(book_stock)
book_stock['wap3'] = calc_wap3(book_stock)
book_stock['wap4'] = calc_wap2(book_stock)
book_stock['mid_price'] = calc_wap3(book_stock)
# Calculate past realized volatility per time_id
df_sub = book_stock.groupby('time_id')['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2 = book_stock.groupby('time_id')['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3 = book_stock.groupby('time_id')['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4 = book_stock.groupby('time_id')['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5 = book_stock.groupby('time_id')['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub['time_id']]
df_sub = df_sub.rename(columns={'time_id':'row_id'})
df_sub = pd.concat([df_sub,df_sub2['wap2'],df_sub3['wap3'], df_sub4['wap4'], df_sub5['mid_price']],axis=1)
df_sub = df_sub.rename(columns={'wap': 'rv', 'wap2': 'rv2', 'wap3': 'rv3', 'wap4':'rv4','mid_price':'rv5'})
list_rv.append(df_sub)
# Query segments
bucketQuery480 = book_stock.query(f'seconds_in_bucket >= 480')
isEmpty480 = bucketQuery480.empty
bucketQuery300 = book_stock.query(f'seconds_in_bucket >= 300')
isEmpty300 = bucketQuery300.empty
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
# Calculate past realized volatility per time_id and query subset
if isEmpty300 == False:
df_sub_300 = bucketQuery300.groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_300 = bucketQuery300.groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_300 = bucketQuery300.groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4_300 = bucketQuery300.groupby(['time_id'])['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5_300 = bucketQuery300.groupby(['time_id'])['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_300 = pd.concat([times_pd,df_sub_300['wap'],df_sub2_300['wap2'],df_sub3_300['wap3'],df_sub4_300['wap4'],df_sub5_300['mid_price']],axis=1)
df_sub_300 = df_sub_300.rename(columns={'wap': 'rv_300', 'wap2_300': 'rv2', 'wap3_300': 'rv3', 'wap4':'rv4_300','mid_price':'rv5_300'})
else: # 0 volatility
zero_rv = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv_300'])
zero_rv2 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv2_300'])
zero_rv3 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv3_300'])
zero_rv4 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv4_300'])
zero_rv5 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv5_300'])
df_sub_300 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3,zero_rv4,zero_rv5],axis=1)
list_rv2.append(df_sub_300)
# Calculate realized volatility last 2 min
if isEmpty480 == False:
df_sub_480 = bucketQuery480.groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_480 = bucketQuery480.groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_480 = bucketQuery480.groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4_480 = bucketQuery480.groupby(['time_id'])['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5_480 = bucketQuery480.groupby(['time_id'])['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_480 = pd.concat([times_pd,df_sub_480['wap'],df_sub2_480['wap2'],df_sub3_480['wap3'],df_sub4_480['wap4'],df_sub5_480['mid_price']],axis=1)
df_sub_480 = df_sub_480.rename(columns={'wap': 'rv_480', 'wap2_480': 'rv2', 'wap3_480': 'rv3', 'wap4':'rv4_480','mid_price':'rv5_480'})
else: # 0 volatility
zero_rv = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv_480'])
zero_rv2 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv2_480'])
zero_rv3 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv3_480'])
zero_rv4 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv4_480'])
zero_rv5 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv5_480'])
df_sub_480 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3,zero_rv4,zero_rv5],axis=1)
list_rv3.append(df_sub_480)
# Calculate other financial metrics from book
df_sub_book_feats = book_stock.groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats = df_sub_book_feats.rename(columns={0:'embedding'})
df_sub_book_feats[['wap_imbalance','price_spread','bid_spread','ask_spread','total_vol','vol_imbalance']] = pd.DataFrame(df_sub_book_feats.embedding.tolist(), index=df_sub_book_feats.index)
df_sub_book_feats['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats['time_id']]
df_sub_book_feats = df_sub_book_feats.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
list_fin.append(df_sub_book_feats)
if isEmpty300 == False:
df_sub_book_feats_300 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats_300 = df_sub_book_feats_300.rename(columns={0:'embedding'})
df_sub_book_feats_300[['wap_imbalance5','price_spread5','bid_spread5','ask_spread5','total_vol5','vol_imbalance5']] = pd.DataFrame(df_sub_book_feats_300.embedding.tolist(), index=df_sub_book_feats_300.index)
df_sub_book_feats_300['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats_300['time_id']]
df_sub_book_feats_300 = df_sub_book_feats_300.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['wap_imbalance5'])
temp2 = pd.DataFrame([0],columns=['price_spread5'])
temp3 = pd.DataFrame([0],columns=['bid_spread5'])
temp4 = pd.DataFrame([0],columns=['ask_spread5'])
temp5 = pd.DataFrame([0],columns=['total_vol5'])
temp6 = pd.DataFrame([0],columns=['vol_imbalance5'])
df_sub_book_feats_300 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_fin2.append(df_sub_book_feats_300)
print('Computing one stock took', time.time() - start, 'seconds for stock ', stock_id)
# Create features dataframe
df_submission = pd.concat(list_rv)
df_submission2 = pd.concat(list_rv2)
df_submission3 = pd.concat(list_rv3)
df_fin_concat = pd.concat(list_fin)
df_fin2_concat = pd.concat(list_fin2)
df_book_features = df_submission.merge(df_submission2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_submission3, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin2_concat, on = ['row_id'], how='left').fillna(0)
# Add encoded stock
encoder = np.eye(len(all_stocks_ids))
encoded = list()
for i in range(df_book_features.shape[0]):
stock_id = int(df_book_features['row_id'][i].split('-')[0])
encoded_stock = encoder[np.where(all_stocks_ids == int(stock_id))[0],:]
encoded.append(encoded_stock)
encoded_pd = pd.DataFrame(np.array(encoded).reshape(df_book_features.shape[0],np.array(all_stocks_ids).shape[0]))
df_book_features_encoded = pd.concat([df_book_features, encoded_pd],axis=1)
return df_book_features_encoded
def computeFeatures_newTest_Laurent_noCode(machine, dataset, all_stocks_ids, datapath):
list_rv, list_rv2, list_rv3 = [], [], []
list_ent, list_fin, list_fin2 = [], [], []
list_others, list_others2, list_others3 = [], [], []
for stock_id in range(127):
start = time.time()
if machine == 'local':
try:
book_stock = load_book_data_by_id(stock_id,datapath,dataset)
except:
continue
elif machine == 'kaggle':
try:
book_stock = load_book_data_by_id_kaggle(stock_id,dataset)
except:
continue
# Useful
all_time_ids_byStock = book_stock['time_id'].unique()
# Calculate wap for the entire book
book_stock['wap'] = calc_wap(book_stock)
book_stock['wap2'] = calc_wap2(book_stock)
book_stock['wap3'] = calc_wap3(book_stock)
book_stock['wap4'] = calc_wap2(book_stock)
book_stock['mid_price'] = calc_wap3(book_stock)
# Calculate past realized volatility per time_id
df_sub = book_stock.groupby('time_id')['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2 = book_stock.groupby('time_id')['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3 = book_stock.groupby('time_id')['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4 = book_stock.groupby('time_id')['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5 = book_stock.groupby('time_id')['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub['time_id']]
df_sub = df_sub.rename(columns={'time_id':'row_id'})
df_sub = pd.concat([df_sub,df_sub2['wap2'],df_sub3['wap3'], df_sub4['wap4'], df_sub5['mid_price']],axis=1)
df_sub = df_sub.rename(columns={'wap': 'rv', 'wap2': 'rv2', 'wap3': 'rv3', 'wap4':'rv4','mid_price':'rv5'})
list_rv.append(df_sub)
# Query segments
bucketQuery480 = book_stock.query(f'seconds_in_bucket >= 480')
isEmpty480 = bucketQuery480.empty
bucketQuery300 = book_stock.query(f'seconds_in_bucket >= 300')
isEmpty300 = bucketQuery300.empty
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
# Calculate past realized volatility per time_id and query subset
if isEmpty300 == False:
df_sub_300 = bucketQuery300.groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_300 = bucketQuery300.groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_300 = bucketQuery300.groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4_300 = bucketQuery300.groupby(['time_id'])['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5_300 = bucketQuery300.groupby(['time_id'])['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_300 = pd.concat([times_pd,df_sub_300['wap'],df_sub2_300['wap2'],df_sub3_300['wap3'],df_sub4_300['wap4'],df_sub5_300['mid_price']],axis=1)
df_sub_300 = df_sub_300.rename(columns={'wap': 'rv_300', 'wap2_300': 'rv2', 'wap3_300': 'rv3', 'wap4':'rv4_300','mid_price':'rv5_300'})
else: # 0 volatility
zero_rv = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv_300'])
zero_rv2 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv2_300'])
zero_rv3 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv3_300'])
zero_rv4 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv4_300'])
zero_rv5 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv5_300'])
df_sub_300 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3,zero_rv4,zero_rv5],axis=1)
list_rv2.append(df_sub_300)
# Calculate realized volatility last 2 min
if isEmpty480 == False:
df_sub_480 = bucketQuery480.groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_480 = bucketQuery480.groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_480 = bucketQuery480.groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4_480 = bucketQuery480.groupby(['time_id'])['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5_480 = bucketQuery480.groupby(['time_id'])['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_480 = pd.concat([times_pd,df_sub_480['wap'],df_sub2_480['wap2'],df_sub3_480['wap3'],df_sub4_480['wap4'],df_sub5_480['mid_price']],axis=1)
df_sub_480 = df_sub_480.rename(columns={'wap': 'rv_480', 'wap2_480': 'rv2', 'wap3_480': 'rv3', 'wap4':'rv4_480','mid_price':'rv5_480'})
else: # 0 volatility
zero_rv = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv_480'])
zero_rv2 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv2_480'])
zero_rv3 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv3_480'])
zero_rv4 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv4_480'])
zero_rv5 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv5_480'])
df_sub_480 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3,zero_rv4,zero_rv5],axis=1)
list_rv3.append(df_sub_480)
# Calculate other financial metrics from book
df_sub_book_feats = book_stock.groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats = df_sub_book_feats.rename(columns={0:'embedding'})
df_sub_book_feats[['wap_imbalance','price_spread','bid_spread','ask_spread','total_vol','vol_imbalance']] = pd.DataFrame(df_sub_book_feats.embedding.tolist(), index=df_sub_book_feats.index)
df_sub_book_feats['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats['time_id']]
df_sub_book_feats = df_sub_book_feats.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
list_fin.append(df_sub_book_feats)
if isEmpty300 == False:
df_sub_book_feats_300 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats_300 = df_sub_book_feats_300.rename(columns={0:'embedding'})
df_sub_book_feats_300[['wap_imbalance5','price_spread5','bid_spread5','ask_spread5','total_vol5','vol_imbalance5']] = pd.DataFrame(df_sub_book_feats_300.embedding.tolist(), index=df_sub_book_feats_300.index)
df_sub_book_feats_300['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats_300['time_id']]
df_sub_book_feats_300 = df_sub_book_feats_300.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['wap_imbalance5'])
temp2 = pd.DataFrame([0],columns=['price_spread5'])
temp3 = pd.DataFrame([0],columns=['bid_spread5'])
temp4 = pd.DataFrame([0],columns=['ask_spread5'])
temp5 = pd.DataFrame([0],columns=['total_vol5'])
temp6 = pd.DataFrame([0],columns=['vol_imbalance5'])
df_sub_book_feats_300 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_fin2.append(df_sub_book_feats_300)
print('Computing one stock took', time.time() - start, 'seconds for stock ', stock_id)
# Create features dataframe
df_submission = pd.concat(list_rv)
df_submission2 = pd.concat(list_rv2)
df_submission3 = pd.concat(list_rv3)
df_fin_concat = pd.concat(list_fin)
df_fin2_concat = pd.concat(list_fin2)
df_book_features = df_submission.merge(df_submission2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_submission3, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin2_concat, on = ['row_id'], how='left').fillna(0)
return df_book_features
def computeFeatures_newTest_Laurent_wTrades(machine, dataset, all_stocks_ids, datapath):
list_rv, list_rv2, list_rv3 = [], [], []
list_ent, list_fin, list_fin2 = [], [], []
list_others, list_others2, list_others3 = [], [], []
list_trades1, list_trades2 = [], []
list_vlad_book, list_vlad_trades = [], []
for stock_id in range(127):
start = time.time()
if machine == 'local':
try:
book_stock = load_book_data_by_id(stock_id,datapath,dataset)
trades_stock = load_trades_data_by_id(stock_id,datapath,dataset)
except:
continue
elif machine == 'kaggle':
try:
book_stock = load_book_data_by_id_kaggle(stock_id,dataset)
trades_stock = load_trades_data_by_id_kaggle(stock_id,dataset)
except:
continue
# Useful
all_time_ids_byStock = book_stock['time_id'].unique()
# Calculate wap for the entire book
book_stock['wap'] = calc_wap(book_stock)
book_stock['wap2'] = calc_wap2(book_stock)
book_stock['wap3'] = calc_wap3(book_stock)
book_stock['wap4'] = calc_wap4(book_stock)
book_stock['mid_price'] = mid_price(book_stock)
# Calculate past realized volatility per time_id
df_sub = book_stock.groupby('time_id')['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2 = book_stock.groupby('time_id')['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3 = book_stock.groupby('time_id')['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4 = book_stock.groupby('time_id')['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5 = book_stock.groupby('time_id')['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub['time_id']]
df_sub = df_sub.rename(columns={'time_id':'row_id'})
df_sub = pd.concat([df_sub,df_sub2['wap2'],df_sub3['wap3'], df_sub4['wap4'], df_sub5['mid_price']],axis=1)
df_sub = df_sub.rename(columns={'wap': 'rv', 'wap2': 'rv2', 'wap3': 'rv3', 'wap4':'rv4','mid_price':'rv5'})
list_rv.append(df_sub)
# Query segments
bucketQuery480 = book_stock.query(f'seconds_in_bucket >= 480')
isEmpty480 = bucketQuery480.empty
bucketQuery300 = book_stock.query(f'seconds_in_bucket >= 300')
isEmpty300 = bucketQuery300.empty
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
# Calculate past realized volatility per time_id and query subset
if isEmpty300 == False:
df_sub_300 = bucketQuery300.groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_300 = bucketQuery300.groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_300 = bucketQuery300.groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4_300 = bucketQuery300.groupby(['time_id'])['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5_300 = bucketQuery300.groupby(['time_id'])['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_300 = pd.concat([times_pd,df_sub_300['wap'],df_sub2_300['wap2'],df_sub3_300['wap3'],df_sub4_300['wap4'],df_sub5_300['mid_price']],axis=1)
df_sub_300 = df_sub_300.rename(columns={'wap': 'rv_300', 'wap2': 'rv2_300', 'wap3': 'rv3_300', 'wap4':'rv4_300','mid_price':'rv5_300'})
else: # 0 volatility
zero_rv = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv_300'])
zero_rv2 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv2_300'])
zero_rv3 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv3_300'])
zero_rv4 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv4_300'])
zero_rv5 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv5_300'])
df_sub_300 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3,zero_rv4,zero_rv5],axis=1)
list_rv2.append(df_sub_300)
# Calculate realized volatility last 2 min
if isEmpty480 == False:
df_sub_480 = bucketQuery480.groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_480 = bucketQuery480.groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_480 = bucketQuery480.groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4_480 = bucketQuery480.groupby(['time_id'])['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5_480 = bucketQuery480.groupby(['time_id'])['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_480 = pd.concat([times_pd,df_sub_480['wap'],df_sub2_480['wap2'],df_sub3_480['wap3'],df_sub4_480['wap4'],df_sub5_480['mid_price']],axis=1)
df_sub_480 = df_sub_480.rename(columns={'wap': 'rv_480', 'wap2': 'rv2_480', 'wap3': 'rv3_480', 'wap4':'rv4_480','mid_price':'rv5_480'})
else: # 0 volatility
zero_rv = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv_480'])
zero_rv2 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv2_480'])
zero_rv3 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv3_480'])
zero_rv4 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv4_480'])
zero_rv5 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv5_480'])
df_sub_480 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3,zero_rv4,zero_rv5],axis=1)
list_rv3.append(df_sub_480)
# Calculate other financial metrics from book
df_sub_book_feats = book_stock.groupby(['time_id']).apply(financial_metrics_2).to_frame().reset_index()
df_sub_book_feats = df_sub_book_feats.rename(columns={0:'embedding'})
df_sub_book_feats[['wap_imbalance_mean','price_spread_mean','bid_spread_mean','ask_spread_mean','total_vol_mean','vol_imbalance_mean','wap_imbalance_sum','price_spread_sum','bid_spread_sum','ask_spread_sum','total_vol_sum','vol_imbalance_sum','wap_imbalance_std','price_spread_std','bid_spread_std','ask_spread_std','total_vol_std','vol_imbalance_std','wap_imbalance_max','price_spread_max','bid_spread_max','ask_spread_max','total_vol_max','vol_imbalance_max','wap_imbalance_min','price_spread_min','bid_spread_min','ask_spread_min','total_vol_min','vol_imbalance_min']] = pd.DataFrame(df_sub_book_feats.embedding.tolist(), index=df_sub_book_feats.index)
df_sub_book_feats['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats['time_id']]
df_sub_book_feats = df_sub_book_feats.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
list_fin.append(df_sub_book_feats)
if isEmpty300 == False:
df_sub_book_feats_300 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(financial_metrics_2).to_frame().reset_index()
df_sub_book_feats_300 = df_sub_book_feats_300.rename(columns={0:'embedding'})
df_sub_book_feats_300[['wap_imbalance_mean','price_spread_mean','bid_spread_mean','ask_spread_mean','total_vol_mean','vol_imbalance_mean','wap_imbalance_sum','price_spread_sum','bid_spread_sum','ask_spread_sum','total_vol_sum','vol_imbalance_sum','wap_imbalance_std','price_spread_std','bid_spread_std','ask_spread_std','total_vol_std','vol_imbalance_std','wap_imbalance_max','price_spread_max','bid_spread_max','ask_spread_max','total_vol_max','vol_imbalance_max','wap_imbalance_min','price_spread_min','bid_spread_min','ask_spread_min','total_vol_min','vol_imbalance_min']] = pd.DataFrame(df_sub_book_feats_300.embedding.tolist(), index=df_sub_book_feats_300.index)
df_sub_book_feats_300['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats_300['time_id']]
df_sub_book_feats_300 = df_sub_book_feats_300.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
df_sub_book_feats_300 = df_sub_book_feats.copy()
for col in df_sub_book_feats_300.columns:
df_sub_book_feats_300[col].values[:] = 0
list_fin2.append(df_sub_book_feats_300)
# Trades features (sum, mean, std, max, min)
df_sub_trades_feats = trades_stock.groupby(['time_id'])['price','size','order_count'].agg(['sum','mean','std','max','min']).reset_index()
df_sub_trades_feats['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_trades_feats['time_id']]
df_sub_trades_feats = df_sub_trades_feats.rename(columns={'time_id':'row_id'})
list_trades1.append(df_sub_trades_feats)
# Query segments
bucketQuery300_trades = trades_stock.query(f'seconds_in_bucket >= 300')
isEmpty300_trades = bucketQuery300_trades.empty
if isEmpty300_trades == False:
df_sub_trades_300 = bucketQuery300_trades.groupby(['time_id'])['price','size','order_count'].agg(['sum','mean','std','max','min']).reset_index()
df_sub_trades_300['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_trades_300['time_id']]
df_sub_trades_300 = df_sub_trades_300.rename(columns={'time_id':'row_id'})
else:
df_sub_trades_300 = df_sub_trades_feats.copy()
for col in df_sub_trades_300.columns:
df_sub_trades_300[col].values[:] = 0
list_trades2.append(df_sub_trades_300)
# Fin metrics book
df_fin_metrics_book = book_stock.groupby(['time_id']).apply(fin_metrics_book_data).to_frame().reset_index()
df_fin_metrics_book = df_fin_metrics_book.rename(columns={0:'embedding'})
df_fin_metrics_book[['spread','depth_imb']] = pd.DataFrame(df_fin_metrics_book.embedding.tolist(), index=df_fin_metrics_book.index)
df_fin_metrics_book['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_fin_metrics_book['time_id']]
df_fin_metrics_book = df_fin_metrics_book.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
list_vlad_book.append(df_fin_metrics_book)
# Fin metrics trades
df_fin_metrics_trades = trades_stock.groupby(['time_id']).apply(fin_metrics_trades_data).to_frame().reset_index()
df_fin_metrics_trades = df_fin_metrics_trades.rename(columns={0:'embedding'})
df_fin_metrics_trades[['roll_measure', 'roll_impact', 'mkt_impact', 'amihud']] = pd.DataFrame(df_fin_metrics_trades.embedding.tolist(), index=df_fin_metrics_trades.index)
df_fin_metrics_trades['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_fin_metrics_trades['time_id']]
df_fin_metrics_trades = df_fin_metrics_trades.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
list_vlad_trades.append(df_fin_metrics_trades)
print('Computing one stock took', time.time() - start, 'seconds for stock ', stock_id)
# Create features dataframe
df_submission = pd.concat(list_rv)
df_submission2 = | pd.concat(list_rv2) | pandas.concat |
import requests
import pandas as pd
import json
from string import Template
from messari.defillama import DeFiLlama
dl = DeFiLlama()
CHAIN_URL = Template('https://defillama.com/_next/data/mDaYGJz3iJDOSw9H7xWVG/chain/$chain.json')
###### specify chain and number of top N protocols on chain
chain = 'Solana'
N = 3
endpoint_url = CHAIN_URL.substitute(chain=chain)
###### ohhhh boy this is goooood
# this gets a tvl breakdown by protocol on specific chain
# data is localized to chain
page_props = requests.get(endpoint_url).json()['pageProps']
protocols_df = | pd.DataFrame(page_props['filteredProtocols']) | pandas.DataFrame |
import numpy as np
import pandas as pd
import scanpy as sc
from termcolor import colored
import time
import matplotlib
import matplotlib.pyplot as plt
from sklearn.metrics.pairwise import euclidean_distances
import umap
import phate
import seaborn as sns
from pyVIA.core import *
def cellrank_Human(ncomps=80, knn=30, v0_random_seed=7):
import scvelo as scv
dict_abb = {'Basophils': 'BASO1', 'CD4+ Effector Memory': 'TCEL7', 'Colony Forming Unit-Granulocytes': 'GRAN1',
'Colony Forming Unit-Megakaryocytic': 'MEGA1', 'Colony Forming Unit-Monocytes': 'MONO1',
'Common myeloid progenitors': "CMP", 'Early B cells': "PRE_B2", 'Eosinophils': "EOS2",
'Erythroid_CD34- CD71+ GlyA-': "ERY2", 'Erythroid_CD34- CD71+ GlyA+': "ERY3",
'Erythroid_CD34+ CD71+ GlyA-': "ERY1", 'Erythroid_CD34- CD71lo GlyA+': 'ERY4',
'Granulocyte/monocyte progenitors': "GMP", 'Hematopoietic stem cells_CD133+ CD34dim': "HSC1",
'Hematopoietic stem cells_CD38- CD34+': "HSC2",
'Mature B cells class able to switch': "B_a2", 'Mature B cells class switched': "B_a4",
'Mature NK cells_CD56- CD16- CD3-': "Nka3", 'Monocytes': "MONO2",
'Megakaryocyte/erythroid progenitors': "MEP", 'Myeloid Dendritic Cells': 'mDC', 'Naïve B cells': "B_a1",
'Plasmacytoid Dendritic Cells': "pDC", 'Pro B cells': 'PRE_B3'}
string_ = 'ncomp =' + str(ncomps) + ' knn=' + str(knn) + ' randseed=' + str(v0_random_seed)
# print('ncomp =', ncomps, ' knn=', knn, ' randseed=', v0_random_seed)
print(colored(string_, 'blue'))
nover_labels = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/Nover_Cor_PredFine_notLogNorm.csv')[
'x'].values.tolist()
nover_labels = [dict_abb[i] for i in nover_labels]
for i in list(set(nover_labels)):
print('the population of ', i, 'is ', nover_labels.count(i))
ad = scv.read_loom('/home/shobi/Downloads/Human Hematopoietic Profiling homo_sapiens 2019-11-08 16.12.loom')
print(ad)
# ad = sc.read('/home/shobi/Trajectory/Datasets/HumanCD34/human_cd34_bm_rep1.h5ad')
# ad.obs['nover_label'] = nover_labels
print('start cellrank pipeline', time.ctime())
# scv.utils.show_proportions(ad)
scv.pl.proportions(ad)
scv.pp.filter_and_normalize(ad, min_shared_counts=20, n_top_genes=2000)
sc.tl.pca(ad, n_comps=ncomps)
n_pcs = ncomps
print('npcs', n_pcs, 'knn', knn)
sc.pp.neighbors(ad, n_pcs=n_pcs, n_neighbors=knn)
sc.tl.louvain(ad, key_added='clusters', resolution=1)
scv.pp.moments(ad, n_pcs=n_pcs, n_neighbors=knn)
scv.tl.velocity(ad)
scv.tl.velocity_graph(ad)
scv.pl.velocity_embedding_stream(ad, basis='umap', color='nover_label')
def adata_preprocess(adata, n_top_genes=1000, log=True):
# this is a lot like the steps for scvelo.pp.filter_and_normalize() which also allows selection of top genes (see Pancreas)
sc.pp.filter_genes(adata, min_counts=1) # only consider genes with more than 1 count#1
# print(adata)
sc.pp.normalize_per_cell( # normalize with total UMI count per cell #same as normalize_total()
adata, key_n_counts='n_counts_all'
)
# select highly-variable genes
filter_result = sc.pp.filter_genes_dispersion(adata.X, flavor='cell_ranger', n_top_genes=n_top_genes, log=False)
adata = adata[:, filter_result.gene_subset] # subset the genes
sc.pp.normalize_per_cell(adata) # renormalize after filtering
if log: sc.pp.log1p(adata) # log transform: adata.X = log(adata.X + 1)
'''
total = adata.X
total = total.sum(axis=0).transpose()
total = pd.DataFrame(total.transpose())
print('total')
print(total.shape)
#total = total.sum(axis=0).transpose()
total.columns = [i for i in adata.var_names]
print(total)
total.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/library_counts_500hvg.csv')
sc.pp.scale(adata, max_value=10)
from sklearn.decomposition import PCA
pca = PCA(n_components=499) # estimate only 2 PCs
X_new = pca.fit_transform(adata.X)
print('variance explained')
print(pca.explained_variance_ratio_)
print('pca.components_ shape ncomp x nfeat')
print()
df = pd.DataFrame(abs(pca.components_))
df.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/pca_components_importance_500hvg.csv')
print('done saving')
'''
# sc.pp.scale(adata, max_value=10)zheng scales after the log, but this doesnt work well and is also not used in scvelo.pp.filter_and_normalize
return adata
def main_Human(ncomps=80, knn=30, v0_random_seed=7, run_palantir_func=False):
'''
df = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/pca_components_importance_500hvg.csv')
print(df)
df = df.set_index('Unnamed: 0')
print(df)
df = df.sort_values(by='totals', axis=1, ascending = False)
df.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/pca_components_importance_sorted_500hvg.csv')
print('saved')
'''
import random
random.seed(100)
dict_abb = {'Basophils': 'BASO1', 'CD4+ Effector Memory': 'TCEL7', 'Colony Forming Unit-Granulocytes': 'GRAN1',
'Colony Forming Unit-Megakaryocytic': 'MEGA1', 'Colony Forming Unit-Monocytes': 'MONO1',
'Common myeloid progenitors': "CMP", 'Early B cells': "PRE_B2", 'Eosinophils': "EOS2",
'Erythroid_CD34- CD71+ GlyA-': "ERY2", 'Erythroid_CD34- CD71+ GlyA+': "ERY3",
'Erythroid_CD34+ CD71+ GlyA-': "ERY1", 'Erythroid_CD34- CD71lo GlyA+': 'ERY4',
'Granulocyte/monocyte progenitors': "GMP", 'Hematopoietic stem cells_CD133+ CD34dim': "HSC1",
'Hematopoietic stem cells_CD38- CD34+': "HSC2",
'Mature B cells class able to switch': "B_a2", 'Mature B cells class switched': "B_a4",
'Mature NK cells_CD56- CD16- CD3-': "Nka3", 'Monocytes': "MONO2",
'Megakaryocyte/erythroid progenitors': "MEP", 'Myeloid Dendritic Cells': 'mDC (cDC)',
'Naïve B cells': "B_a1",
'Plasmacytoid Dendritic Cells': "pDC", 'Pro B cells': 'PRE_B3'}
# NOTE: Myeloid DCs are now called Conventional Dendritic Cells cDCs
string_ = 'ncomp =' + str(ncomps) + ' knn=' + str(knn) + ' randseed=' + str(v0_random_seed)
# print('ncomp =', ncomps, ' knn=', knn, ' randseed=', v0_random_seed)
print(colored(string_, 'blue'))
nover_labels = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/Nover_Cor_PredFine_notLogNorm.csv')[
'x'].values.tolist()
nover_labels = [dict_abb[i] for i in nover_labels]
df_nover = pd.DataFrame(nover_labels)
# df_nover.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/noverLabelsforMonocle.csv')
print('save nover')
for i in list(set(nover_labels)):
print('the population of ', i, 'is ', nover_labels.count(i))
parc53_labels = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/Nover_Cor_Parc53_set1.csv')[
'x'].values.tolist()
parclabels_all = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/parclabels_all_set1.csv')[
'parc'].values.tolist()
parc_dict_nover = {}
for i, c in enumerate(parc53_labels):
parc_dict_nover[i] = dict_abb[c]
parclabels_all = [parc_dict_nover[ll] for ll in parclabels_all]
# print('all', len(parclabels_all))
ad = sc.read(
'/home/shobi/Trajectory/Datasets/HumanCD34/human_cd34_bm_rep1.h5ad')
# 5780 cells x 14651 genes Human Replicate 1. Male african american, 38 years
print('h5ad ad size', ad)
colors = pd.Series(ad.uns['cluster_colors'])
colors['10'] = '#0b128f'
ct_colors = pd.Series(ad.uns['ct_colors'])
list_var_names = ad.var_names
# print(list_var_names)
ad.uns['iroot'] = np.flatnonzero(ad.obs_names == ad.obs['palantir_pseudotime'].idxmin())[0]
print('iroot', np.flatnonzero(ad.obs_names == ad.obs['palantir_pseudotime'].idxmin())[0])
tsne = pd.DataFrame(ad.obsm['tsne'], index=ad.obs_names, columns=['x', 'y'])
tsnem = ad.obsm['tsne']
palantir_tsne_df = pd.DataFrame(tsnem)
# palantir_tsne_df.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/palantir_tsne.csv')
revised_clus = ad.obs['clusters'].values.tolist().copy()
loc_DCs = [i for i in range(5780) if ad.obs['clusters'].values.tolist()[i] == '7']
for loc_i in loc_DCs:
if ad.obsm['palantir_branch_probs'][loc_i, 5] > ad.obsm['palantir_branch_probs'][
loc_i, 2]: # if prob that cDC > pDC, then relabel as cDC
revised_clus[loc_i] = '10'
revised_clus = [int(i) for i in revised_clus]
# magic_df = ad.obsm['MAGIC_imputed_data']
# ad.X: Filtered, normalized and log transformed count matrix
# ad.raw.X: Filtered raw count matrix
# print('before extra filtering' ,ad.shape)
# sc.pp.filter_genes(ad, min_cells=10)
# print('after extra filtering', ad.shape)
adata_counts = sc.AnnData(ad.X)
print(ad.raw.X.shape)
# df_X = pd.DataFrame(ad.raw.X.todense(), columns = ad.var_names)
# df_X.columns = [i for i in ad.var_names]
# print('starting to save .X')
# df_X.to_csv("/home/shobi/Trajectory/Datasets/HumanCD34/expression_matrix_raw.csv")
print('finished save .X')
# (ad.X) # ad.X is filtered, lognormalized,scaled// ad.raw.X is the filtered but not pre-processed
adata_counts.obs_names = ad.obs_names
adata_counts.var_names = ad.var_names
adata_counts_raw = sc.AnnData(ad.raw.X)
adata_counts_raw.var_names = [i for i in ad.var_names]
# adata_counts_raw = adata_preprocess(adata_counts_raw, n_top_genes=500, log=True) # when using HVG and no PCA
# sc.tl.pca(adata_counts_raw,svd_solver='arpack', n_comps=ncomps)
sc.tl.pca(adata_counts, svd_solver='arpack', n_comps=ncomps)
marker = ['x', '+', (5, 0), '>', 'o', (5, 2)]
import colorcet as cc
# tsnem = TSNE().fit_transform(adata_counts.obsm['X_pca'])
'''
f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True)
line = np.linspace(0, 1, len(set(revised_clus)))
for color, group in zip(line, set(revised_clus)):
where = np.where(np.array(revised_clus) == group)[0]
ax1.scatter(tsnem[where, 0], tsnem[where, 1], label=group, c=np.asarray(plt.cm.jet(color)).reshape(-1, 4))
ax1.legend()
ax1.set_title('Palantir Phenograph Labels')
import colorcet as cc
marker = ['x', '+', (5, 0), '>', 'o', (5, 2)]
line_nover = np.linspace(0, 1, len(set(nover_labels)))
col_i = 0
for color, group in zip(line_nover, set(nover_labels)):
where = np.where(np.array(nover_labels) == group)[0]
marker_x = marker[random.randint(0, 5)]
# ax2.scatter(tsnem[where, 0],tsnem[where, 1], label=group, c=plt.cm.nipy_spectral(color), marker = marker_x, alpha=0.5)
ax2.scatter(tsnem[where, 0], tsnem[where, 1], label=group, c=cc.glasbey_dark[col_i], marker=marker_x,
alpha=0.5)
col_i = col_i + 1
ax2.legend(fontsize=6)
ax2.set_title('Novershtern Corr. Labels')
line = np.linspace(0, 1, len(set(parclabels_all)))
col_i = 0
for color, group in zip(line, set(parclabels_all)):
where = np.where(np.array(parclabels_all) == group)[0]
ax3.scatter(tsnem[where, 0], tsnem[where, 1], label=group, c=cc.glasbey_dark[col_i], alpha=0.5)
col_i = col_i + 1
ax3.legend()
ax3.set_title('Parc53 Nover Labels')
# plt.show()
'''
'''
plt.figure(figsize=[5, 5])
plt.title('palantir, ncomps = ' + str(ncomps) + ' knn' + str(knn))
for group in set(revised_clus):
loc_group = np.where(np.asarray(revised_clus) == group)[0]
plt.scatter(tsnem[loc_group, 0], tsnem[loc_group, 1], s=5, color=colors[group], label=group)
ax = plt.gca()
ax.set_axis_off()
ax.legend(fontsize=6)
'''
gene_list = [
'ITGAX'] # ['GATA1', 'GATA2', 'ITGA2B', 'CSF1R', 'MPO', 'CD79B', 'SPI1', 'IRF8', 'CD34', 'IL3RA', 'ITGAX', 'IGHD',
# 'CD27', 'CD14', 'CD22', 'ITGAM', 'CLC', 'MS4A3', 'FCGR3A', 'CSF1R']
true_label = nover_labels # revised_clus
root_user = [4823]
print('v0 random seed', v0_random_seed)
# df_temp_write = pd.DataFrame(adata_counts.obsm['X_pca'][:, 0:200])
# df_temp_write.to_csv("/home/shobi/Trajectory/Datasets/HumanCD34/Human_CD34_200PCA.csv")
Xin = adata_counts.obsm['X_pca'][:, 0:ncomps]
# Xin = adata_counts_raw.obsm['X_pca'][:, 0:ncomps]
# Xin = adata_counts_raw.X.todense()
print(time.ctime())
print(time.ctime())
v0 = VIA(Xin, true_label, jac_std_global=0.15, dist_std_local=1, knn=knn,
too_big_factor=0.2,
root_user=root_user, dataset='humanCD34', preserve_disconnected=True, random_seed=v0_random_seed,
do_impute_bool=True, is_coarse=True, pseudotime_threshold_TS=10,
neighboring_terminal_states_threshold=3) # *.4 root=1,
v0.run_VIA()
v0.make_JSON(filename='scRNA_Hema_temp.js')
super_labels = v0.labels
print('starting to save selected genes')
genes_save = ['ITGAX', 'GATA1', 'GATA2', 'ITGA2B', 'CSF1R', 'MPO', 'CD79B', 'SPI1', 'IRF8', 'CD34', 'IL3RA',
'ITGAX', 'IGHD',
'CD27', 'CD14', 'CD22', 'ITGAM', 'CLC', 'MS4A3', 'FCGR3A', 'CSF1R']
df_selected_genes = pd.DataFrame(adata_counts.X, columns=[cc for cc in adata_counts.var_names])
df_selected_genes = df_selected_genes[genes_save]
# df_selected_genes.to_csv("/home/shobi/Trajectory/Datasets/HumanCD34/selected_genes.csv")
df_ = pd.DataFrame(ad.X)
df_.columns = [i for i in ad.var_names]
print('start magic')
gene_list_magic = ['IL3RA', 'IRF8', 'GATA1', 'GATA2', 'ITGA2B', 'MPO', 'CD79B', 'SPI1', 'CD34', 'CSF1R', 'ITGAX']
df_magic = v0.do_impute(df_, magic_steps=3, gene_list=gene_list_magic)
df_magic_cluster = df_magic.copy()
df_magic_cluster['parc'] = v0.labels
df_magic_cluster = df_magic_cluster.groupby('parc', as_index=True).mean()
print('end magic', df_magic.shape)
f, ((ax, ax1)) = plt.subplots(1, 2, sharey=True)
v0.draw_piechart_graph(ax, ax1, type_pt='gene', gene_exp=df_magic_cluster['GATA1'].values, title='GATA1')
plt.show()
super_clus_ds_PCA_loc = sc_loc_ofsuperCluster_PCAspace(v0, v0, np.arange(0, len(true_label)))
draw_trajectory_gams(tsnem, super_clus_ds_PCA_loc, super_labels, super_labels, v0.edgelist_maxout,
v0.x_lazy, v0.alpha_teleport, v0.single_cell_pt_markov, true_label, knn=v0.knn,
final_super_terminal=v0.terminal_clusters,
sub_terminal_clusters=v0.terminal_clusters,
title_str='Markov Hitting Times (Gams)', ncomp=str(Xin.shape[1]))
plt.show()
print('super labels', set(super_labels))
ad.obs['via0_label'] = [str(i) for i in super_labels]
magic_ad = ad.obsm['MAGIC_imputed_data']
magic_ad = sc.AnnData(magic_ad)
magic_ad.obs_names = ad.obs_names
magic_ad.var_names = ad.var_names
magic_ad.obs['via0_label'] = [str(i) for i in super_labels]
marker_genes = {"ERY": ['GATA1', 'GATA2', 'ITGA2B'], "BCell": ['IGHD', 'CD22'],
"DC": ['IRF8', 'IL3RA', 'IRF4', 'CSF2RA', 'ITGAX'],
"MONO": ['CD14', 'SPI1', 'MPO', 'IL12RB1', 'IL13RA1', 'C3AR1', 'FCGR3A'], 'HSC': ['CD34']}
sc.pl.matrixplot(magic_ad, marker_genes, groupby='via0_label', dendrogram=True)
'''
sc.tl.rank_genes_groups(ad, groupby='via0_label', use_raw=True,
method='wilcoxon', n_genes=10) # compute differential expression
sc.pl.rank_genes_groups_heatmap(ad, n_genes=10, groupby="via0_label", show_gene_labels=True, use_raw=False)
sc.pl.rank_genes_groups_tracksplot(ad, groupby='via0_label', n_genes = 3) # plot the result
'''
loaded_magic_df = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/MAGIC_palantir_knn30ncomp100_subset.csv')
# loaded_magic_df.head()
for gene_name in ['ITGA2B', 'IL3RA',
'IRF8',
'MPO', 'CSF1R', 'GATA2', 'CD79B',
'CD34', 'GATA1', 'IL3RA']: # ,'SPI1', 'CD34','CSF1R','IL3RA','IRF4', 'CSF2RA','ITGAX']:
print('gene name', gene_name)
# DC markers https://www.cell.com/pb-assets/products/nucleus/nucleus-phagocytes/rnd-systems-dendritic-cells-br.pdf
gene_name_dict = {'GATA1': 'GATA1', 'GATA2': 'GATA2', 'ITGA2B': 'CD41 (Mega)', 'MPO': 'MPO (Mono)',
'CD79B': 'CD79B (B)', 'IRF8': 'IRF8 (DC)', 'SPI1': 'PU.1', 'CD34': 'CD34',
'CSF1R': 'CSF1R (cDC Up. Up then Down in pDC)', 'IL3RA': 'CD123 (pDC)', 'IRF4': 'IRF4 (pDC)',
'ITGAX': 'ITGAX (cDCs)', 'CSF2RA': 'CSF2RA (cDC)'}
loc_gata = np.where(np.asarray(ad.var_names) == gene_name)[0][0]
magic_ad = ad.obsm['MAGIC_imputed_data'][:, loc_gata]
magic_ad = loaded_magic_df[gene_name]
# subset_ = magic_ad
subset_ = df_magic[gene_name].values
print(subset_.shape)
# print('shapes of magic_ad 1 and 2', magic_ad.shape,subset_.shape)
# v1.get_gene_expression(magic_ad,title_gene = gene_name_dict[gene_name])
v0.get_gene_expression(subset_, title_gene=gene_name_dict[gene_name])
plt.show()
super_edges = v0.edgelist_maxout # v0.edgelist
tsi_list = get_loc_terminal_states(v0, Xin)
v1 = VIA(Xin, true_label, jac_std_global=0.15, dist_std_local=1, knn=knn,
too_big_factor=0.05, super_cluster_labels=super_labels, super_node_degree_list=v0.node_degree_list,
super_terminal_cells=tsi_list, root_user=root_user,
x_lazy=0.95, alpha_teleport=0.99, dataset='humanCD34', preserve_disconnected=True,
super_terminal_clusters=v0.terminal_clusters, is_coarse=False, full_neighbor_array=v0.full_neighbor_array,
ig_full_graph=v0.ig_full_graph, full_distance_array=v0.full_distance_array,
csr_array_locally_pruned=v0.csr_array_locally_pruned,
random_seed=v0_random_seed, pseudotime_threshold_TS=10) # *.4super_terminal_cells = tsi_list #3root=1,
v1.run_VIA()
labels = v1.labels
v1.make_JSON(filename='scRNA_Hema_via1_temp.js')
df_magic_cluster = df_magic.copy()
df_magic_cluster['via1'] = v1.labels
df_magic_cluster = df_magic_cluster.groupby('via1', as_index=True).mean()
# print('df_magic_cluster', df_magic_cluster)
'''
#Get the clustsergraph gene expression on topology
for gene_i in gene_list_magic:
f, ((ax, ax1)) = plt.subplots(1, 2, sharey=True)
v1.draw_piechart_graph(ax,ax1,type_pt='gene', gene_exp = df_magic_cluster[gene_i].values, title = gene_i)
plt.show()
'''
ad.obs['parc1_label'] = [str(i) for i in labels]
'''
tsi_list = [] # find the single-cell which is nearest to the average-location of a terminal cluster
for tsi in v1.revised_super_terminal_clusters:
loc_i = np.where(super_labels == tsi)[0]
temp = np.mean(adata_counts.obsm['X_pca'][:, 0:ncomps][loc_i], axis=0)
labelsq, distances = v0.knn_struct.knn_query(temp, k=1)
tsi_list.append(labelsq[0][0])
sc.tl.rank_genes_groups(ad, groupby='parc1_label', use_raw=True,
method='wilcoxon', n_genes=10) # compute differential expression
sc.pl.matrixplot(ad, marker_genes, groupby='parc1_label', use_raw=False)
sc.pl.rank_genes_groups_heatmap(ad, n_genes=3, groupby="parc1_label", show_gene_labels=True, use_raw=False)
'''
label_df = pd.DataFrame(labels, columns=['parc'])
# label_df.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/parclabels.csv', index=False)
gene_ids = adata_counts.var_names
obs = ad.raw.X.toarray()
print('shape obs', obs.shape)
obs = pd.DataFrame(obs, columns=gene_ids)
# obs['parc']=v1.labels
obs['louvain'] = revised_clus
# obs_average = obs.groupby('parc', as_index=True).mean()
obs_average = obs.groupby('louvain', as_index=True).mean()
# print(obs_average.head())
# obs_average.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/louvain_palantir_average.csv', index=False)
ad_obs = sc.AnnData(obs_average)
ad_obs.var_names = gene_ids
ad_obs.obs['parc'] = [i for i in range(len(set(revised_clus)))] # v1.labels instaed of revised_clus
# sc.write('/home/shobi/Trajectory/Datasets/HumanCD34/louvain_palantir_average.h5ad',ad_obs)
# fig_0, ax_0 = plt.subplots()
loaded_magic_df = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/MAGIC_palantir_knn30ncomp100_subset.csv')
# loaded_magic_df.head()
for gene_name in ['ITGA2B', 'IL3RA',
'IRF8',
'MPO', 'CSF1R', 'GATA2', 'CD79B',
'CD34']: # ['GATA1', 'GATA2', 'ITGA2B', 'MPO', 'CD79B','IRF8','SPI1', 'CD34','CSF1R','IL3RA','IRF4', 'CSF2RA','ITGAX']:
print('gene name', gene_name)
# DC markers https://www.cell.com/pb-assets/products/nucleus/nucleus-phagocytes/rnd-systems-dendritic-cells-br.pdf
gene_name_dict = {'GATA1': 'GATA1', 'GATA2': 'GATA2', 'ITGA2B': 'CD41 (Mega)', 'MPO': 'MPO (Mono)',
'CD79B': 'CD79B (B)', 'IRF8': 'IRF8 (DC)', 'SPI1': 'PU.1', 'CD34': 'CD34',
'CSF1R': 'CSF1R (cDC Up. Up then Down in pDC)', 'IL3RA': 'CD123 (pDC)', 'IRF4': 'IRF4 (pDC)',
'ITGAX': 'ITGAX (cDCs)', 'CSF2RA': 'CSF2RA (cDC)'}
loc_gata = np.where(np.asarray(ad.var_names) == gene_name)[0][0]
magic_ad = ad.obsm['MAGIC_imputed_data'][:, loc_gata]
magic_ad = loaded_magic_df[gene_name]
# subset_ = magic_ad
subset_ = df_magic[gene_name].values
print(subset_.shape)
# print('shapes of magic_ad 1 and 2', magic_ad.shape,subset_.shape)
# v1.get_gene_expression(magic_ad,title_gene = gene_name_dict[gene_name])
v1.get_gene_expression(subset_, title_gene=gene_name_dict[gene_name])
# v0.get_gene_expression(subset_, title_gene=gene_name_dict[gene_name] + 'VIA MAGIC')
print('start tsne')
n_downsample = 4000
if len(labels) > n_downsample:
# idx = np.random.randint(len(labels), size=4000)
np.random.seed(2357)
idx = np.random.choice(a=np.arange(0, len(labels)), size=5780, replace=False, p=None)
super_labels = np.asarray(super_labels)[idx]
labels = list(np.asarray(labels)[idx])
print('labels p1', len(labels), set(labels))
true_label = list(np.asarray(true_label)[idx])
sc_pt_markov = list(np.asarray(v1.single_cell_pt_markov)[idx])
# graph_hnsw = v0.knngraph_visual()
embedding = tsnem[idx, :] # TSNE().fit_transform(adata_counts.obsm['X_pca'][idx, :])
# phate_op = phate.PHATE()
# embedding = phate_op.fit_transform(adata_counts.obsm['X_pca'][:, 0:20])
# embedding = embedding[idx, :]
# embedding = umap.UMAP().fit_transform(adata_counts.obsm['X_pca'][idx, 0:20])
print('size of downsampled embedding', embedding.shape)
else:
embedding = tsnem # umap.UMAP().fit_transform(adata_counts.obsm['X_pca'][:,0:20])
idx = np.random.randint(len(labels), size=len(labels))
print('end tsne')
super_clus_ds_PCA_loc = sc_loc_ofsuperCluster_PCAspace(v0, v1, idx)
draw_trajectory_gams(embedding, super_clus_ds_PCA_loc, labels, super_labels, super_edges,
v1.x_lazy, v1.alpha_teleport, sc_pt_markov, true_label, knn=v0.knn,
final_super_terminal=v1.revised_super_terminal_clusters,
sub_terminal_clusters=v1.terminal_clusters,
title_str='Hitting times: Markov Simulation on biased edges', ncomp=ncomps)
plt.show()
# DRAW EVOLUTION PATHS
knn_hnsw = make_knn_embeddedspace(embedding)
draw_sc_evolution_trajectory_dijkstra(v1, embedding, knn_hnsw, v0.full_graph_shortpath, idx)
plt.show()
def main_Toy_comparisons(ncomps=10, knn=30, random_seed=42, dataset='Toy3', root_user='M1',
foldername="/home/shobi/Trajectory/Datasets/Toy3/"):
print('dataset, ncomps, knn, seed', dataset, ncomps, knn, random_seed)
# root_user = ["T1_M1", "T2_M1"] # "M1" # #'M1' # "T1_M1", "T2_M1"] #"T1_M1"
if dataset == "Toy3":
print('dataset Toy3')
df_counts = pd.read_csv(foldername + "toy_multifurcating_M8_n1000d1000.csv",
delimiter=",")
#df_counts = pd.read_csv(foldername + "Toy3_noise_100genes_thinfactor8.csv", delimiter=",")
df_ids = pd.read_csv(foldername + "toy_multifurcating_M8_n1000d1000_ids_with_truetime.csv",
delimiter=",")
#counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/Toy3/toy_multifurcating_M8_n1000d1000.csv")
print('df_ids', df_ids.columns)
root_user = ['M1']
paga_root = "M1"
palantir_root = 'C107'
if dataset == "Toy4": # 2 disconnected components
df_counts = pd.read_csv(foldername + "toy_disconnected_M9_n1000d1000.csv",
delimiter=",")
# df_counts = pd.read_csv(foldername + "Toy4_noise_500genes.csv", delimiter=",")
df_ids = pd.read_csv(foldername + "toy_disconnected_M9_n1000d1000_ids_with_truetime.csv", delimiter=",")
#counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/Toy4/toy_disconnected_M9_n1000d1000.csv")
print(df_counts.shape, 'df_counts shape')
root_user = ['T1_M1', 'T2_M1'] # 'T1_M1'
paga_root = 'T2_M1'
palantir_root = 'C107'
if dataset == "Connected":
df_counts = pd.read_csv(foldername + "ToyConnected_M9_n2000d1000.csv", delimiter=",")
# df_counts = pd.read_csv(foldername + "ToyConnected_noise_500genes.csv", delimiter=",")
df_ids = pd.read_csv(foldername + "ToyConnected_M9_n2000d1000_ids_with_truetime.csv",
delimiter=",")
#counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/ToyConnected/ToyConnected_M9_n2000d1000.csv")
root_user = ['M1']
paga_root = "M1"
palantir_root = 'C1'
if dataset == "Connected2":
df_counts = pd.read_csv(foldername + "Connected2_n1000d1000.csv",
delimiter=",")
# df_counts = pd.read_csv(foldername + "ToyConnected2_noise_500genes.csv", 'rt',delimiter=",")
df_ids = pd.read_csv(foldername + "Connected2_n1000d1000_ids_with_truetime.csv",
delimiter=",")
#counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/ToyConnected2/Connected2_n1000d1000.csv")
root_user = ['M1']
paga_root = "M1"
palantir_root = 'C11'
# suggest to use visual jaccard pruning of 1 (this doesnt alter underlying graph, just the display. can also use "M2" as the starting root,
if dataset == "ToyMultiM11":
df_counts = pd.read_csv(foldername + "Toymulti_M11_n3000d1000.csv",
delimiter=",")
# df_counts = pd.read_csv(foldername + "ToyMulti_M11_noised.csv", delimiter=",")
df_ids = pd.read_csv(foldername + "Toymulti_M11_n3000d1000_ids_with_truetime.csv",
delimiter=",")
#counts = palantir.io.from_csv( "/home/shobi/Trajectory/Datasets/ToyMultifurcating_M11/Toymulti_M11_n3000d1000.csv")
root_user = ['M1']
paga_root = "M1"
palantir_root = 'C1005'
if dataset == "Cyclic": # 4 milestones
df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyCyclic/ToyCyclic_M4_n1000d1000.csv",
delimiter=",")
df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyCyclic/ToyCyclic_noise_100genes_thinfactor3.csv",
delimiter=",")
df_ids = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyCyclic/ToyCyclic_M4_n1000d1000_ids_with_truetime.csv",
delimiter=",")
#counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/ToyCyclic/ToyCyclic_M4_n1000d1000.csv")
root_user = ['M1'] # 'T1_M1'
paga_root = 'M1'
palantir_root = 'C1'
if dataset == "Cyclic2": # 4 milestones
df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyCyclic2/Cyclic2_n1000d1000.csv",
delimiter=",")
# df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyCyclic2/ToyCyclic2_noise_500genes.csv", delimiter=",")
df_ids = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyCyclic2/Cyclic2_n1000d1000_ids_with_truetime.csv",
delimiter=",")
#counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/ToyCyclic2/Cyclic2_n1000d1000.csv")
root_user = ['M1'] # 'T1_M1'
paga_root = 'M1'
palantir_root = 'C107'
if dataset == 'Bifurc2':
df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyBifurcating2/Bifurc2_M4_n2000d1000.csv",
delimiter=",")
df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyBifurcating2/ToyBifurc2_noised.csv", delimiter=",")
df_ids = pd.read_csv( "/home/shobi/Trajectory/Datasets/ToyBifurcating2/Bifurc2_M4_n2000d1000_ids_with_truetime.csv",delimiter=",")
#counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/ToyBifurcating2/Bifurc2_M4_n2000d1000.csv")
root_user = ['M1'] # 'T1_M1'
paga_root = 'M1'
palantir_root = 'C1006'
if dataset == 'Disconnected2':
df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyDisconnected2/Disconnected2_n1000d1000.csv",
delimiter=",")
df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyDisconnected2/ToyDisconnected2_noise_500genes.csv",
delimiter=",")
df_ids = pd.read_csv(
"/home/shobi/Trajectory/Datasets/ToyDisconnected2/Disconnected2_n1000d1000_ids_with_truetime.csv",
delimiter=",")
#counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/ToyDisconnected2/Disconnected2_n1000d1000.csv")
root_user = ['T1_M1', 'T1_M2', 'T1_M3'] # 'T1_M1'
paga_root = 'T1_M1'
palantir_root = 'C125'
df_ids['cell_id_num'] = [int(s[1::]) for s in df_ids['cell_id']]
print("shape", df_counts.shape, df_ids.shape)
df_counts = df_counts.drop('Unnamed: 0', 1)
df_ids = df_ids.sort_values(by=['cell_id_num'])
df_ids = df_ids.reset_index(drop=True)
# df_ids.to_csv("/home/shobi/Trajectory/Datasets/ToyConnected2/Connected2_n1000d1000_ids_sorted_with_truetime.csv")
# df_counts['group_id'] = df_ids['group_id']#to split Toy4
# df_counts['main_Traj'] = [i[0:2] for i in df_ids['group_id']]#to split Toy4
# df_ids['main_Traj'] = [i[0:2] for i in df_ids['group_id']]#to split Toy4
# df_counts = df_counts[df_counts['main_Traj']=='T2']#to split Toy4
# df_ids = df_ids[df_ids['main_Traj'] == 'T2']#to split Toy4
#true_time = df_ids['true_time']
true_label = df_ids['group_id'].tolist()
# df_counts = df_counts.drop('main_Traj', 1)#to split Toy4
# df_counts = df_counts.drop('group_id', 1)#to split Toy4
# df_ids = df_ids.reset_index(drop=True)#to split Toy4
# df_counts = df_counts.reset_index(drop=True)#to split Toy4
# true_label = df_ids['group_id'] #to split Toy4
print("shape", df_counts.index, df_ids.index)
adata_counts = sc.AnnData(df_counts, obs=df_ids)
sc.tl.pca(adata_counts, svd_solver='arpack', n_comps=ncomps)
# comparisons
adata_counts.uns['iroot'] = np.flatnonzero(adata_counts.obs['group_id'] == paga_root)[0] # 'T1_M1'#'M1'
do_paga = False #
do_palantir = False #
# comparisons
if do_paga == True:
sc.pp.neighbors(adata_counts, n_neighbors=knn, use_rep='X', ) # n_pcs=ncomps) # 4
sc.tl.draw_graph(adata_counts)
# sc.pl.draw_graph(adata_counts, color='group_id', legend_loc='on data') # force-directed layout
start_dfmap = time.time()
# sc.tl.diffmap(adata_counts, n_comps=ncomps)
sc.tl.diffmap(adata_counts, n_comps=200) # default retains n_comps = 15
print('time taken to get diffmap given knn', time.time() - start_dfmap)
sc.pp.neighbors(adata_counts, n_neighbors=knn, use_rep='X_diffmap') # 4
sc.tl.draw_graph(adata_counts)
sc.pl.draw_graph(adata_counts, color='group_id', legend_loc='on data')
sc.tl.leiden(adata_counts, resolution=1.0, random_state=10)
sc.tl.paga(adata_counts, groups='leiden')
# sc.pl.paga(adata_counts, color=['leiden','group_id'])
sc.tl.dpt(adata_counts, n_dcs=ncomps)
df_paga = pd.DataFrame()
df_paga['paga_dpt'] = adata_counts.obs['dpt_pseudotime'].values
correlation = df_paga['paga_dpt'].corr(df_ids['true_time'])
print('corr paga knn', knn, correlation)
sc.pl.paga(adata_counts, color=['leiden', 'group_id', 'dpt_pseudotime'],
title=['leiden (knn:' + str(knn) + ' ncomps:' + str(ncomps) + ')',
'group_id (ncomps:' + str(ncomps) + ')', 'pseudotime (ncomps:' + str(ncomps) + ')'])
# X = df_counts.values
'''
# palantir
if do_palantir == True:
print(palantir.__file__) # location of palantir source code
str_true_label = true_label.tolist()
str_true_label = [(i[1:]) for i in str_true_label]
str_true_label = pd.Series(str_true_label, index=counts.index)
norm_df = counts # palantir.preprocess.normalize_counts(counts)
# pca_projections, _ = palantir.utils.run_pca(norm_df, n_components=ncomps) #normally use
pca_projections = counts
dm_res = palantir.utils.run_diffusion_maps(pca_projections, knn=knn,
n_components=300) ## n_components=ncomps, knn=knn)
ms_data = palantir.utils.determine_multiscale_space(dm_res) # n_eigs is determined using eigengap
tsne = palantir.utils.run_tsne(ms_data)
palantir.plot.plot_cell_clusters(tsne, str_true_label)
# C108 for M12 connected' #M8n1000d1000 start - c107 #c1001 for bifurc n2000d1000 #disconnected n1000 c108, "C1 for M10 connected" # c10 for bifurcating_m4_n2000d1000
# c107 for T1_M1, C42 for T2_M1 disconnected
# C1 for M8_connected, C1005 for multi_M11 , 'C1006 for bifurc2'
pr_res = palantir.core.run_palantir(ms_data, early_cell=palantir_root, num_waypoints=500, knn=knn)
df_palantir = pd.read_csv(
'/home/shobi/Trajectory/Datasets/Toy3/palantir_pt.csv') # /home/shobi/anaconda3/envs/ViaEnv/lib/python3.7/site-packages/palantir
pt = df_palantir['pt']
correlation = pt.corr(true_time)
print('corr Palantir', correlation)
print('')
palantir.plot.plot_palantir_results(pr_res, tsne, n_knn=knn, n_comps=pca_projections.shape[1])
plt.show()
'''
# from sklearn.decomposition import PCA
# pca = PCA(n_components=ncomps)
# pc = pca.fit_transform(df_counts)
Xin = adata_counts.obsm['X_pca'][:, 0:ncomps]
# Xin = adata_counts.X
if dataset == 'Toy4':
jac_std_global = .15 # .15
else:
jac_std_global = 0.15 # .15#0.15 #bruge 1 til cyclic2, ellers 0.15
#
v0 = VIA(Xin, true_label, jac_std_global=jac_std_global, dist_std_local=1,
knn=knn,
too_big_factor=0.3, root_user=root_user, preserve_disconnected=True, dataset='toy',
visual_cluster_graph_pruning=1, max_visual_outgoing_edges=2,
random_seed=random_seed) # *.4 root=2,
v0.run_VIA()
super_labels = v0.labels
df_ids['pt'] = v0.single_cell_pt_markov
correlation = df_ids['pt'].corr(df_ids['true_time'])
print('corr via knn', knn, correlation)
super_edges = v0.edgelist
# v0.make_JSON(filename = 'Toy3_ViaOut_temp.js')
p = hnswlib.Index(space='l2', dim=adata_counts.obsm['X_pca'][:, 0:ncomps].shape[1])
p.init_index(max_elements=adata_counts.obsm['X_pca'][:, 0:ncomps].shape[0], ef_construction=200, M=16)
p.add_items(adata_counts.obsm['X_pca'][:, 0:ncomps])
p.set_ef(50)
tsi_list = [] # find the single-cell which is nearest to the average-location of a terminal cluster in PCA space (
for tsi in v0.terminal_clusters:
loc_i = np.where(np.asarray(v0.labels) == tsi)[0]
val_pt = [v0.single_cell_pt_markov[i] for i in loc_i]
th_pt = np.percentile(val_pt, 50) # 50
loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] >= th_pt]
temp = np.mean(adata_counts.obsm['X_pca'][:, 0:ncomps][loc_i], axis=0)
labelsq, distances = p.knn_query(temp, k=1)
tsi_list.append(labelsq[0][0])
print('Granular VIA iteration')
v1 = VIA(Xin, true_label, jac_std_global=jac_std_global, dist_std_local=1,
knn=knn,
too_big_factor=0.1,
super_cluster_labels=super_labels, super_node_degree_list=v0.node_degree_list,
super_terminal_cells=tsi_list, root_user=root_user, is_coarse=False,
x_lazy=0.95, alpha_teleport=0.99, preserve_disconnected=True, dataset='toy',
visual_cluster_graph_pruning=1, max_visual_outgoing_edges=2,
super_terminal_clusters=v0.terminal_clusters,
full_neighbor_array=v0.full_neighbor_array,
ig_full_graph=v0.ig_full_graph, full_distance_array=v0.full_distance_array,
csr_array_locally_pruned=v0.csr_array_locally_pruned, random_seed=random_seed) # root=1,
v1.run_VIA()
df_ids['pt1'] = v1.single_cell_pt_markov
correlation = df_ids['pt1'].corr(df_ids['true_time'])
print('corr via1 knn', knn, correlation)
labels = v1.labels
# v1 = PARC(adata_counts.obsm['X_pca'], true_label, jac_std_global=1, knn=5, too_big_factor=0.05, anndata= adata_counts, small_pop=2)
# v1.run_VIA()
# labels = v1.labels
print('start tsne')
n_downsample = 500
if len(labels) > n_downsample:
# idx = np.random.randint(len(labels), size=900)
np.random.seed(2357)
idx = np.random.choice(a=np.arange(0, len(labels)), size=len(labels), replace=False, p=None)
print('len idx', len(idx))
super_labels = np.asarray(super_labels)[idx]
labels = list(np.asarray(labels)[idx])
true_label = list(np.asarray(true_label)[idx])
sc_pt_markov = list(np.asarray(v1.single_cell_pt_markov[idx]))
# embedding = v0.run_umap_hnsw(adata_counts.obsm['X_pca'][idx, :], graph)
embedding = adata_counts.obsm['X_pca'][idx,
0:2] # umap.UMAP().fit_transform(adata_counts.obsm['X_pca'][idx, 0:5])
# embedding = TSNE().fit_transform(adata_counts.obsm['X_pca'][idx, :])
print('tsne downsampled size', embedding.shape)
else:
embedding = umap.UMAP().fit_transform(Xin) # (adata_counts.obsm['X_pca'])
print('tsne input size', adata_counts.obsm['X_pca'].shape)
# embedding = umap.UMAP().fit_transform(adata_counts.obsm['X_pca'])
idx = np.random.randint(len(labels), size=len(labels))
print('end tsne')
super_clus_ds_PCA_loc = sc_loc_ofsuperCluster_PCAspace(v0, v1, idx)
print('super terminal and sub terminal', v0.super_terminal_cells, v1.terminal_clusters)
knn_hnsw, ci_list = sc_loc_ofsuperCluster_embeddedspace(embedding, v0, v1, idx)
draw_trajectory_gams(embedding, super_clus_ds_PCA_loc, labels, super_labels, super_edges,
v1.x_lazy, v1.alpha_teleport, sc_pt_markov, true_label, knn=v0.knn,
final_super_terminal=v1.revised_super_terminal_clusters,
sub_terminal_clusters=v1.terminal_clusters,
title_str='Hitting times: Markov Simulation on biased edges', ncomp=ncomps)
plt.show()
'''
draw_trajectory_dimred(embedding, ci_list, labels, super_labels, super_edges,
v1.x_lazy, v1.alpha_teleport, sc_pt_markov, true_label, knn=v0.knn,
final_super_terminal=v0.terminal_clusters,
title_str='Hitting times: Markov Simulation on biased edges', ncomp=ncomps)
'''
plt.show()
num_group = len(set(true_label))
line = np.linspace(0, 1, num_group)
f, (ax1, ax3) = plt.subplots(1, 2, sharey=True)
for color, group in zip(line, set(true_label)):
where = np.where(np.asarray(true_label) == group)[0]
ax1.scatter(embedding[where, 0], embedding[where, 1], label=group,
c=np.asarray(plt.cm.jet(color)).reshape(-1, 4))
ax1.legend(fontsize=6)
ax1.set_title('true labels')
ax3.set_title("Markov Sim PT ncomps:" + str(Xin.shape[1]) + '. knn:' + str(knn))
ax3.scatter(embedding[:, 0], embedding[:, 1], c=sc_pt_markov, cmap='viridis_r')
plt.show()
df_subset = pd.DataFrame(adata_counts.obsm['X_pca'][:, 0:5], columns=['Gene0', 'Gene1', 'Gene2', 'Gene3', 'Gene4'])
for gene_i in ['Gene0', 'Gene1', 'Gene2']: # , 'Gene3', 'Gene4']:
subset_ = df_subset[gene_i].values
print(subset_.shape)
# print('shapes of magic_ad 1 and 2', magic_ad.shape,subset_.shape)
# v1.get_gene_expression(magic_ad,title_gene = gene_name_dict[gene_name])
v1.get_gene_expression(subset_, title_gene=gene_i)
# knn_hnsw = make_knn_embeddedspace(embedding)
draw_sc_evolution_trajectory_dijkstra(v1, embedding, knn_hnsw, v0.full_graph_shortpath, idx,
adata_counts.obsm['X_pca'][:, 0:ncomps])
plt.show()
def main_Toy(ncomps=10, knn=30, random_seed=41, dataset='Toy3', root_user=['M1'],
cluster_graph_pruning_std=1., foldername="/home/shobi/Trajectory/Datasets/"):
print('dataset, ncomps, knn, seed', dataset, ncomps, knn, random_seed)
if dataset == "Toy3":
df_counts = pd.read_csv(foldername + "toy_multifurcating_M8_n1000d1000.csv", delimiter=",")
df_ids = pd.read_csv(foldername + "toy_multifurcating_M8_n1000d1000_ids_with_truetime.csv", delimiter=",")
root_user = ['M1']
paga_root = "M1"
if dataset == "Toy4": # 2 disconnected components
print('inside toy4')
df_counts = pd.read_csv(foldername + "toy_disconnected_M9_n1000d1000.csv", delimiter=",")
df_ids = pd.read_csv(foldername + "toy_disconnected_M9_n1000d1000_ids_with_truetime.csv", delimiter=",")
root_user = ['T1_M1', 'T2_M1'] # 'T1_M1'
paga_root = 'T1_M1'
df_ids['cell_id_num'] = [int(s[1::]) for s in df_ids['cell_id']]
# print("shape", df_counts.shape, df_ids.shape)
df_counts = df_counts.drop('Unnamed: 0', 1)
df_ids = df_ids.sort_values(by=['cell_id_num'])
df_ids = df_ids.reset_index(drop=True)
true_label = df_ids['group_id'].tolist()
#true_time = df_ids['true_time']
adata_counts = sc.AnnData(df_counts, obs=df_ids)
sc.tl.pca(adata_counts, svd_solver='arpack', n_comps=ncomps)
# true_label =['a' for i in true_label] #testing dummy true_label
adata_counts.uns['iroot'] = np.flatnonzero(adata_counts.obs['group_id'] == paga_root)[0] # 'T1_M1'#'M1'
# via_wrapper(adata_counts, true_label, embedding= adata_counts.obsm['X_pca'][:,0:2], root=[1], knn=30, ncomps=10,cluster_graph_pruning_std = 1)
# print('starting via wrapper disconn')
# via_wrapper_disconnected(adata_counts, true_label, embedding=adata_counts.obsm['X_pca'][:, 0:2], root=[23,902], preserve_disconnected=True, knn=10, ncomps=10, cluster_graph_pruning_std=1 ,random_seed=41)
# print('end via wrapper disconn')
if dataset == 'Toy4':
jac_std_global = 0.15 # 1
else:
jac_std_global = 0.15
import umap
embedding = umap.UMAP().fit_transform(adata_counts.obsm['X_pca'][:, 0:10]) # 50
# embedding = adata_counts.obsm['X_pca'][:, 0:2]
# plt.scatter(embedding[:,0],embedding[:,1])
# plt.show()
print('root user', root_user)
v0 = VIA(adata_counts.obsm['X_pca'][:, 0:ncomps], true_label, jac_std_global=jac_std_global, dist_std_local=1,
knn=knn,
cluster_graph_pruning_std=cluster_graph_pruning_std,
too_big_factor=0.3, root_user=root_user, preserve_disconnected=True, dataset='toy',
visual_cluster_graph_pruning=1, max_visual_outgoing_edges=2,
random_seed=random_seed, piegraph_arrow_head_width=0.4,
piegraph_edgeweight_scalingfactor=1.0) # *.4 root=2,
v0.run_VIA()
super_labels = v0.labels
print('super labels', type(super_labels))
df_ids['pt'] = v0.single_cell_pt_markov
correlation = df_ids['pt'].corr(df_ids['true_time'])
print('corr via knn', knn, correlation)
super_edges = v0.edgelist
# v0.make_JSON(filename = 'Toy3_ViaOut_temp.js')
p = hnswlib.Index(space='l2', dim=adata_counts.obsm['X_pca'][:, 0:ncomps].shape[1])
p.init_index(max_elements=adata_counts.obsm['X_pca'][:, 0:ncomps].shape[0], ef_construction=200, M=16)
p.add_items(adata_counts.obsm['X_pca'][:, 0:ncomps])
p.set_ef(50)
tsi_list = [] # find the single-cell which is nearest to the average-location of a terminal cluster in PCA space (
for tsi in v0.terminal_clusters:
loc_i = np.where(np.asarray(v0.labels) == tsi)[0]
val_pt = [v0.single_cell_pt_markov[i] for i in loc_i]
th_pt = np.percentile(val_pt, 50) # 50
loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] >= th_pt]
temp = np.mean(adata_counts.obsm['X_pca'][:, 0:ncomps][loc_i], axis=0)
labelsq, distances = p.knn_query(temp, k=1)
tsi_list.append(labelsq[0][0])
print('Granular VIA iteration')
v1 = VIA(adata_counts.obsm['X_pca'][:, 0:ncomps], true_label, jac_std_global=jac_std_global, dist_std_local=1,
knn=knn,
too_big_factor=0.1,
cluster_graph_pruning_std=cluster_graph_pruning_std,
super_cluster_labels=super_labels, super_node_degree_list=v0.node_degree_list,
super_terminal_cells=tsi_list, root_user=root_user, is_coarse=False,
x_lazy=0.95, alpha_teleport=0.99, preserve_disconnected=True, dataset='toy',
visual_cluster_graph_pruning=1, max_visual_outgoing_edges=2,
super_terminal_clusters=v0.terminal_clusters,
full_neighbor_array=v0.full_neighbor_array,
ig_full_graph=v0.ig_full_graph, full_distance_array=v0.full_distance_array,
csr_array_locally_pruned=v0.csr_array_locally_pruned, random_seed=random_seed) # root=1,
v1.run_VIA()
labels = v1.labels
df_ids['pt1'] = v1.single_cell_pt_markov
correlation = df_ids['pt1'].corr(df_ids['true_time'])
print('corr via knn', knn, correlation)
n_downsample = 50
if len(labels) > n_downsample: # just testing the downsampling and indices. Not actually downsampling
# idx = np.random.randint(len(labels), size=900)
np.random.seed(2357)
idx = np.random.choice(a=np.arange(0, len(labels)), size=len(labels), replace=False, p=None)
print('len idx', len(idx))
super_labels = np.asarray(super_labels)[idx]
labels = list(np.asarray(labels)[idx])
true_label = list(np.asarray(true_label)[idx])
sc_pt_markov = list(np.asarray(v1.single_cell_pt_markov[idx]))
embedding = embedding[idx, :]
# embedding = v0.run_umap_hnsw(adata_counts.obsm['X_pca'][idx, :], graph)
# embedding = adata_counts.obsm['X_pca'][idx, 0:2] # umap.UMAP().fit_transform(adata_counts.obsm['X_pca'][idx, 0:5])
super_clus_ds_PCA_loc = sc_loc_ofsuperCluster_PCAspace(v0, v1, idx)
print('super terminal and sub terminal', v0.super_terminal_cells, v1.terminal_clusters)
draw_trajectory_gams(embedding, super_clus_ds_PCA_loc, labels, super_labels, super_edges,
v1.x_lazy, v1.alpha_teleport, sc_pt_markov, true_label, knn=v0.knn,
final_super_terminal=v1.revised_super_terminal_clusters,
sub_terminal_clusters=v1.terminal_clusters,
title_str='Hitting times: Markov Simulation on biased edges', ncomp=ncomps)
plt.show()
num_group = len(set(true_label))
line = np.linspace(0, 1, num_group)
'''
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
for color, group in zip(line, set(true_label)):
where = np.where(np.asarray(true_label) == group)[0]
ax1.scatter(embedding[where, 0], embedding[where, 1], label=group,
c=np.asarray(plt.cm.jet(color)).reshape(-1, 4))
ax1.legend(fontsize=6)
ax1.set_title('true labels')
ax2.set_title("Markov Sim PT ncomps:" + str(ncomps) + '. knn:' + str(knn))
ax2.scatter(embedding[:, 0], embedding[:, 1], c=sc_pt_markov, cmap='viridis_r')
plt.show()
'''
df_subset = pd.DataFrame(adata_counts.obsm['X_pca'][:, 0:5], columns=['Gene0', 'Gene1', 'Gene2', 'Gene3', 'Gene4'])
for gene_i in ['Gene0', 'Gene1', 'Gene2']: # , 'Gene3', 'Gene4']:
subset_ = df_subset[gene_i].values
v1.get_gene_expression(subset_, title_gene=gene_i)
knn_hnsw = make_knn_embeddedspace(embedding)
draw_sc_evolution_trajectory_dijkstra(v1, embedding, knn_hnsw, v0.full_graph_shortpath, idx)
plt.show()
def main_Bcell(ncomps=50, knn=20, random_seed=0, cluster_graph_pruning_std=.15,path='/home/shobi/Trajectory/Datasets/Bcell/'):
print('Input params: ncomp, knn, random seed', ncomps, knn, random_seed)
# https://github.com/STATegraData/STATegraData
def run_zheng_Bcell(adata, min_counts=3, n_top_genes=500, do_HVG=True):
sc.pp.filter_genes(adata, min_counts=min_counts)
# sc.pp.filter_genes(adata, min_cells=3)# only consider genes with more than 1 count
'''
sc.pp.normalize_per_cell( # normalize with total UMI count per cell
adata, key_n_counts='n_counts_all')
'''
sc.pp.normalize_total(adata, target_sum=1e4)
if do_HVG == True:
sc.pp.log1p(adata)
'''
filter_result = sc.pp.filter_genes_dispersion( # select highly-variable genes
adata.X, flavor='cell_ranger', n_top_genes=n_top_genes, log=False )
adata = adata[:, filter_result.gene_subset] # subset the genes
'''
sc.pp.highly_variable_genes(adata, n_top_genes=n_top_genes, min_mean=0.0125, max_mean=3,
min_disp=0.5) # this function expects logarithmized data
print('len hvg ', sum(adata.var.highly_variable))
adata = adata[:, adata.var.highly_variable]
sc.pp.normalize_per_cell(adata) # renormalize after filtering
# if do_log: sc.pp.log1p(adata) # log transform: adata.X = log(adata.X + 1)
if do_HVG == False: sc.pp.log1p(adata)
sc.pp.scale(adata, max_value=10) # scale to unit variance and shift to zero mean
return adata
'''
def run_palantir_func_Bcell(ad1, ncomps, knn, tsne_X, true_label):
ad = ad1.copy()
tsne = pd.DataFrame(tsne_X, index=ad.obs_names, columns=['x', 'y'])
norm_df_pal = pd.DataFrame(ad.X)
new = ['c' + str(i) for i in norm_df_pal.index]
norm_df_pal.columns = [i for i in ad.var_names]
# print('norm df', norm_df_pal)
norm_df_pal.index = new
pca_projections, _ = palantir.utils.run_pca(norm_df_pal, n_components=ncomps)
sc.tl.pca(ad, svd_solver='arpack')
dm_res = palantir.utils.run_diffusion_maps(pca_projections, n_components=ncomps, knn=knn)
ms_data = palantir.utils.determine_multiscale_space(dm_res) # n_eigs is determined using eigengap
print('ms data shape: determined using eigengap', ms_data.shape)
# tsne = pd.DataFrame(tsnem)#palantir.utils.run_tsne(ms_data)
tsne.index = new
# print(type(tsne))
str_true_label = pd.Series(true_label, index=norm_df_pal.index)
palantir.plot.plot_cell_clusters(tsne, str_true_label)
start_cell = 'c42' # '#C108 for M12 connected' #M8n1000d1000 start - c107 #c1001 for bifurc n2000d1000 #disconnected n1000 c108, "C1 for M10 connected" # c10 for bifurcating_m4_n2000d1000
pr_res = palantir.core.run_palantir(ms_data, early_cell=start_cell, num_waypoints=1200, knn=knn)
palantir.plot.plot_palantir_results(pr_res, tsne, n_knn=knn, n_comps=ncomps)
imp_df = palantir.utils.run_magic_imputation(norm_df_pal, dm_res)
Bcell_marker_gene_list = ['Igll1', 'Myc', 'Ldha', 'Foxo1', 'Lig4'] # , 'Slc7a5']#,'Slc7a5']#,'Sp7','Zfp629']
gene_trends = palantir.presults.compute_gene_trends(pr_res, imp_df.loc[:, Bcell_marker_gene_list])
palantir.plot.plot_gene_trends(gene_trends)
plt.show()
'''
def run_paga_func_Bcell(adata_counts1, ncomps, knn, embedding):
# print('npwhere',np.where(np.asarray(adata_counts.obs['group_id']) == '0')[0][0])
adata_counts = adata_counts1.copy()
sc.tl.pca(adata_counts, svd_solver='arpack', n_comps=ncomps)
adata_counts.uns['iroot'] = 33 # np.where(np.asarray(adata_counts.obs['group_id']) == '0')[0][0]
sc.pp.neighbors(adata_counts, n_neighbors=knn, n_pcs=ncomps) # 4
sc.tl.draw_graph(adata_counts)
sc.pl.draw_graph(adata_counts, color='group_id', legend_loc='on data') # force-directed layout
start_dfmap = time.time()
sc.tl.diffmap(adata_counts, n_comps=ncomps)
print('time taken to get diffmap given knn', time.time() - start_dfmap)
sc.pp.neighbors(adata_counts, n_neighbors=knn, use_rep='X_diffmap') # 4
sc.tl.draw_graph(adata_counts)
sc.pl.draw_graph(adata_counts, color='group_id', legend_loc='on data')
sc.tl.leiden(adata_counts, resolution=1.0)
sc.tl.paga(adata_counts, groups='leiden')
# sc.pl.paga(adata_counts, color=['louvain','group_id'])
sc.tl.dpt(adata_counts, n_dcs=ncomps)
sc.pl.paga(adata_counts, color=['leiden', 'group_id', 'dpt_pseudotime'],
title=['leiden (knn:' + str(knn) + ' ncomps:' + str(ncomps) + ')',
'group_id (ncomps:' + str(ncomps) + ')', 'pseudotime (ncomps:' + str(ncomps) + ')'])
sc.pl.draw_graph(adata_counts, color='dpt_pseudotime', legend_loc='on data')
print('dpt format', adata_counts.obs['dpt_pseudotime'])
plt.scatter(embedding[:, 0], embedding[:, 1], c=adata_counts.obs['dpt_pseudotime'].values, cmap='viridis')
plt.title('PAGA DPT')
plt.show()
def find_time_Bcell(s):
start = s.find("Ik") + len("Ik")
end = s.find("h")
return int(s[start:end])
def find_cellID_Bcell(s):
start = s.find("h") + len("h")
end = s.find("_")
return s[start:end]
Bcell = pd.read_csv(path + 'genes_count_table.txt', sep='\t')
gene_name = pd.read_csv(path + 'genes_attr_table.txt', sep='\t')
Bcell_columns = [i for i in Bcell.columns]
adata_counts = sc.AnnData(Bcell.values[:, 1:].T)
Bcell_columns.remove('tracking_id')
print(gene_name.shape, gene_name.columns)
Bcell['gene_short_name'] = gene_name['gene_short_name']
adata_counts.var_names = gene_name['gene_short_name']
adata_counts.obs['TimeCellID'] = Bcell_columns
time_list = [find_time_Bcell(s) for s in Bcell_columns]
print('time list set', set(time_list))
adata_counts.obs['TimeStamp'] = [str(tt) for tt in time_list]
ID_list = [find_cellID_Bcell(s) for s in Bcell_columns]
adata_counts.obs['group_id'] = [str(i) for i in time_list]
ID_dict = {}
color_dict = {}
for j, i in enumerate(list(set(ID_list))):
ID_dict.update({i: j})
print('timelist', list(set(time_list)))
for j, i in enumerate(list(set(time_list))):
color_dict.update({i: j})
print('shape of raw data', adata_counts.shape)
adata_counts_unfiltered = adata_counts.copy()
Bcell_marker_gene_list = ['Myc', 'Igll1', 'Slc7a5', 'Ldha', 'Foxo1', 'Lig4']
small_large_gene_list = ['Kit', 'Pcna', 'Ptprc', 'Il2ra', 'Vpreb1', 'Cd24a', 'Igll1', 'Cd79a', 'Cd79b', 'Mme',
'Spn']
list_var_names = [s for s in adata_counts_unfiltered.var_names]
matching = [s for s in list_var_names if "IgG" in s]
for gene_name in Bcell_marker_gene_list:
print('gene name', gene_name)
loc_gata = np.where(np.asarray(adata_counts_unfiltered.var_names) == gene_name)[0][0]
for gene_name in small_large_gene_list:
print('looking at small-big list')
print('gene name', gene_name)
loc_gata = np.where(np.asarray(adata_counts_unfiltered.var_names) == gene_name)[0][0]
# diff_list = [i for i in diff_list if i in list_var_names] #based on paper STable1 https://journals.plos.org/plosbiology/article?id=10.1371/journal.pbio.2006506#pbio.2006506.s007
# adata_counts = adata_counts[:,diff_list] #if using these, then set do-HVG to False
print('adata counts difflisted', adata_counts.shape)
adata_counts = run_zheng_Bcell(adata_counts, n_top_genes=5000, min_counts=30,
do_HVG=True) # 5000 for better ordering
print('adata counts shape', adata_counts.shape)
# sc.pp.recipe_zheng17(adata_counts)
# (ncomp=50, knn=20 gives nice results. use 10PCs for visualizing)
marker_genes = {"small": ['Rag2', 'Rag1', 'Pcna', 'Myc', 'Ccnd2', 'Cdkn1a', 'Smad4', 'Smad3', 'Cdkn2a'],
# B220 = Ptprc, PCNA negative for non cycling
"large": ['Ighm', 'Kit', 'Ptprc', 'Cd19', 'Il2ra', 'Vpreb1', 'Cd24a', 'Igll1', 'Cd79a', 'Cd79b'],
"Pre-B2": ['Mme', 'Spn']} # 'Cd19','Cxcl13',,'Kit'
print('make the v0 matrix plot')
mplot_adata = adata_counts_unfiltered.copy() # mplot_adata is for heatmaps so that we keep all genes
mplot_adata = run_zheng_Bcell(mplot_adata, n_top_genes=25000, min_counts=1, do_HVG=False)
# mplot_adata.X[mplot_adata.X>10] =10
# mplot_adata.X[mplot_adata.X< -1] = -1
# sc.pl.matrixplot(mplot_adata, marker_genes, groupby='TimeStamp', dendrogram=True)
sc.tl.pca(adata_counts, svd_solver='arpack', n_comps=200) # ncomps
# df_bcell_pc = pd.DataFrame(adata_counts.obsm['X_pca'])
# print('df_bcell_pc.shape',df_bcell_pc.shape)
# df_bcell_pc['time'] = [str(i) for i in time_list]
# df_bcell_pc.to_csv('/home/shobi/Trajectory/Datasets/Bcell/Bcell_200PCs.csv')
# sc.pl.pca_variance_ratio(adata_counts, log=True)
jet = cm.get_cmap('viridis', len(set(time_list)))
cmap_ = jet(range(len(set(time_list))))
jet2 = cm.get_cmap('jet', len(set(ID_list)))
cmap2_ = jet2(range(len(set(ID_list))))
# color_dict = {"0": [0], "2": [1], "6": [2], "12": [3], "18": [4], "24": [5]}
# sc.pl.heatmap(mplot_adata, var_names = small_large_gene_list,groupby = 'TimeStamp', dendrogram = True)
embedding = umap.UMAP(random_state=42, n_neighbors=15, init='random').fit_transform(
adata_counts.obsm['X_pca'][:, 0:5])
df_umap = pd.DataFrame(embedding)
# df_umap.to_csv('/home/shobi/Trajectory/Datasets/Bcell/Bcell_umap.csv')
f, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, sharey=True)
for i in list(set(time_list)):
loc = np.where(np.asarray(time_list) == i)[0]
ax4.scatter(embedding[loc, 0], embedding[loc, 1], c=cmap_[color_dict[i]], alpha=1, label=str(i))
if i == 0:
for xx in range(len(loc)):
poss = loc[xx]
ax4.text(embedding[poss, 0], embedding[poss, 1], 'c' + str(xx))
ax4.legend()
ax1.scatter(embedding[:, 0], embedding[:, 1], c=mplot_adata[:, 'Pcna'].X.flatten(), alpha=1)
ax1.set_title('Pcna, cycling')
ax2.scatter(embedding[:, 0], embedding[:, 1], c=mplot_adata[:, 'Vpreb1'].X.flatten(), alpha=1)
ax2.set_title('Vpreb1')
ax3.scatter(embedding[:, 0], embedding[:, 1], c=mplot_adata[:, 'Cd24a'].X.flatten(), alpha=1)
ax3.set_title('Cd24a')
# ax2.text(embedding[i, 0], embedding[i, 1], str(i))
'''
for i, j in enumerate(list(set(ID_list))):
loc = np.where(np.asarray(ID_list) == j)
if 'r'in j: ax2.scatter(embedding[loc, 0], embedding[loc, 1], c=cmap2_[i], alpha=1, label=str(j), edgecolors = 'black' )
else: ax2.scatter(embedding[loc, 0], embedding[loc, 1], c=cmap2_[i], alpha=1, label=str(j))
'''
# plt.show()
true_label = time_list
# run_paga_func_Bcell(adata_counts, ncomps, knn, embedding)
#run_palantir_func_Bcell(adata_counts, ncomps, knn, embedding, true_label)
print('input has shape', adata_counts.obsm['X_pca'].shape)
input_via = adata_counts.obsm['X_pca'][:, 0:ncomps]
df_input = pd.DataFrame(adata_counts.obsm['X_pca'][:, 0:200])
df_annot = pd.DataFrame(['t' + str(i) for i in true_label])
# df_input.to_csv('/home/shobi/Trajectory/Datasets/Bcell/Bcell_200PC_5000HVG.csv')
# df_annot.to_csv('/home/shobi/Trajectory/Datasets/Bcell/Bcell_annots.csv')
root_user = [42]
v0 = VIA(input_via, true_label, jac_std_global=0.15, dist_std_local=1, knn=knn,
too_big_factor=0.3, dataset='bcell',
cluster_graph_pruning_std=cluster_graph_pruning_std,
root_user=root_user, preserve_disconnected=True, random_seed=random_seed,
do_impute_bool=True) # *.4#root_user = 34
v0.run_VIA()
super_labels = v0.labels
tsi_list = get_loc_terminal_states(via0=v0, X_input=adata_counts.obsm['X_pca'][:, 0:ncomps])
v1 = VIA(adata_counts.obsm['X_pca'][:, 0:ncomps], true_label, jac_std_global=0.15, dist_std_local=1, knn=knn,
too_big_factor=0.05, is_coarse=False,
cluster_graph_pruning_std=cluster_graph_pruning_std,
super_cluster_labels=super_labels, super_node_degree_list=v0.node_degree_list,
super_terminal_cells=tsi_list, root_user=root_user, full_neighbor_array=v0.full_neighbor_array,
full_distance_array=v0.full_distance_array, ig_full_graph=v0.ig_full_graph,
csr_array_locally_pruned=v0.csr_array_locally_pruned,
x_lazy=0.99, alpha_teleport=0.99, preserve_disconnected=True, dataset='bcell',
super_terminal_clusters=v0.terminal_clusters, random_seed=random_seed)
v1.run_VIA()
labels = v1.labels
super_edges = v0.edgelist
# plot gene expression vs. pseudotime
Bcell_marker_gene_list = ['Igll1', 'Myc', 'Slc7a5', 'Ldha', 'Foxo1', 'Lig4', 'Sp7', 'Zfp629'] # irf4 down-up
df_ = pd.DataFrame(adata_counts_unfiltered.X) # no normalization, or scaling of the gene count values
df_.columns = [i for i in adata_counts_unfiltered.var_names]
df_Bcell_marker = df_[Bcell_marker_gene_list]
print(df_Bcell_marker.shape, 'df_Bcell_marker.shape')
df_Bcell_marker.to_csv('/home/shobi/Trajectory/Datasets/Bcell/Bcell_markergenes.csv')
# v0 is run with "do_impute" = true, hence it stores the full graph (in subsequent iterations we dont recompute and store the full unpruned knn graph)
df_magic = v0.do_impute(df_, magic_steps=3, gene_list=Bcell_marker_gene_list)
for gene_name in Bcell_marker_gene_list:
# loc_gata = np.where(np.asarray(adata_counts_unfiltered.var_names) == gene_name)[0][0]
subset_ = df_magic[gene_name].values
v1.get_gene_expression(subset_, title_gene=gene_name)
# magic_ad = adata_counts_unfiltered.X[:, loc_gata]
# v1.get_gene_expression(magic_ad, gene_name)
n_downsample = 100
if len(labels) > n_downsample:
# idx = np.random.randint(len(labels), size=900)
np.random.seed(2357)
# idx = np.random.choice(a=np.arange(0, len(labels)), size=len(labels), replace=False, p=None)
idx = np.arange(0, len(labels))
super_labels = np.asarray(super_labels)[idx]
labels = list(np.asarray(labels)[idx])
true_label = list((np.asarray(true_label)[idx]))
sc_pt_markov = list(np.asarray(v1.single_cell_pt_markov[idx]))
# embedding = TSNE().fit_transform(adata_counts.obsm['X_pca'][idx, :])
graph_embedding = v0.knngraph_visual(input_via[idx, 0:5], knn_umap=10, downsampled=True)
embedding_hnsw = v0.run_umap_hnsw(input_via[idx, 0:5], graph_embedding)
# embedding = embedding_hnsw
# loc0 = np.where(np.asarray(true_label)==0)[0]
# for item in loc0:
# print(item, 'at', embedding[item,:])
embedding = embedding[idx, :]
print('tsne downsampled size', embedding.shape)
else:
# embedding = TSNE().fit_transform(adata_counts.obsm['X_pca'][:,0:5]) # (adata_counts.obsm['X_pca'])
print('tsne input size', adata_counts.obsm['X_pca'].shape)
# embedding = umap.UMAP().fit_transform(adata_counts.obsm['X_pca'])
idx = np.arange(0, len(labels)) # np.random.randint(len(labels), size=len(labels))
sc_pt_markov = v1.single_cell_pt_markov
# embedding = umap.UMAP(random_state=42, n_neighbors=15, init=umap_init).fit_transform( adata_counts.obsm['X_pca'][:, 0:5])
super_clus_ds_PCA_loc = sc_loc_ofsuperCluster_PCAspace(v0, v1, idx)
draw_trajectory_gams(embedding, super_clus_ds_PCA_loc, labels, super_labels, super_edges,
v1.x_lazy, v1.alpha_teleport, sc_pt_markov, true_label, knn=v0.knn,
final_super_terminal=v1.revised_super_terminal_clusters,
sub_terminal_clusters=v1.terminal_clusters,
title_str='Markov Hitting Times (Gams)', ncomp=ncomps)
plt.show()
knn_hnsw = make_knn_embeddedspace(embedding)
draw_sc_evolution_trajectory_dijkstra(v1, embedding, knn_hnsw, v0.full_graph_shortpath, idx,
adata_counts.obsm['X_pca'][:, 0:ncomps])
plt.show()
def plot_EB():
# genes along lineage cluster path
df_groupby_p1 = pd.read_csv(
'/home/shobi/Trajectory/Datasets/EB_Phate/df_groupbyParc1_knn20_pc100_seed20_allgenes.csv')
path_clusters = [43, 38, 42, 56, 7,
3] # NC[43,41,16,2,3,6]#SMP[43,41,16,14,11,18]#C[43,41,16,14,12,15]#NS3[43,38,42,56,7,3]
target = "NS 3" # 'NC 6' #'SMP 18'#' Cardiac 15'
marker_genes_dict = {'Hermang': ['TAL1', 'HOXB4', 'SOX17', 'CD34', 'PECAM1'],
'NP': ['NES', 'MAP2'], 'NS': ['LHX2', 'NR2F1', 'DMRT3', 'LMX1A',
# 'KLF7', 'ISL1', 'DLX1', 'ONECUT1', 'ONECUT2', 'OLIG1','PAX6', 'ZBTB16','NPAS1', 'SOX1'
'NKX2-8', 'EN2'], 'NC': ['PAX3', 'FOXD3', 'SOX9', 'SOX10'],
'PostEn': ['CDX2', 'ASCL2', 'KLF5', 'NKX2-1'],
'EN': ['ARID3A', 'GATA3', 'SATB1', 'SOX15', 'SOX17', 'FOXA2'],
'Pre-NE': ['POU5F1', 'OTX2'], 'SMP': ['TBX18', 'SIX2', 'TBX15', 'PDGFRA'],
'Cardiac': ['TNNT2', 'HAND1', 'F3', 'CD82', 'LIFR'],
'EpiCard': ['WT1', 'TBX5', 'HOXD9', 'MYC', 'LOX'],
'PS/ME': ['T', 'EOMES', 'MIXL1', 'CER1', 'SATB1'],
'NE': ['GBX2', 'GLI3', 'LHX2', 'LHX5', 'SIX3', 'SIX6'],
# 'OLIG3','HOXD1', 'ZIC2', 'ZIC5','HOXA2','HOXB2'
'ESC': ['NANOG', 'POU5F1'], 'Pre-NE': ['POU5F1', 'OTX2'], 'Lat-ME': ['TBX5', 'HOXD9', 'MYC']}
relevant_genes = []
relevant_keys = ['ESC', 'Pre-NE', 'NE', 'NP',
'NS'] # NC['ESC', 'Pre-NE', 'NE', 'NC']#SMP['ESC','PS/ME','Lat-ME','SMP']#NS['ESC', 'Pre-NE', 'NE', 'NP', 'NS']
dict_subset = {key: value for key, value in marker_genes_dict.items() if key in relevant_keys}
print('dict subset', dict_subset)
for key in relevant_keys:
relevant_genes.append(marker_genes_dict[key])
relevant_genes = [item for sublist in relevant_genes for item in sublist]
print(relevant_genes)
df_groupby_p1 = df_groupby_p1.set_index('parc1')
df_groupby_p1 = df_groupby_p1.loc[path_clusters]
df_groupby_p1 = df_groupby_p1[relevant_genes]
df_groupby_p1 = df_groupby_p1.transpose()
# print( df_groupby_p1.head)
# print(df_groupby_p1)
ax = sns.heatmap(df_groupby_p1, vmin=-1, vmax=1, yticklabels=True)
ax.set_title('target ' + str(target))
plt.show()
# df_groupby_p1 = pd.concat([df_groupby_p1,df_groupby_p1])
# adata = sc.AnnData(df_groupby_p1)
# adata.var_names = df_groupby_p1.columns
# print(adata.var_names)
# adata.obs['parc1'] = ['43','38','42','56','7','3','43','38','42','56','7','3']
# print(adata.obs['parc1'])
# sc.pl.matrixplot(adata, dict_subset, groupby='parc1', vmax=1, vmin=-1, dendrogram=False)
def main_EB_clean(ncomps=30, knn=20, v0_random_seed=24, cluster_graph_pruning_std=.15,
foldername='/home/shobi/Trajectory/Datasets/EB_Phate/'):
marker_genes_dict = {'Hermang': ['TAL1', 'HOXB4', 'SOX17', 'CD34', 'PECAM1'],
'NP': ['NES', 'MAP2'],
'NS': ['KLF7', 'ISL1', 'DLX1', 'ONECUT1', 'ONECUT2', 'OLIG1', 'NPAS1', 'LHX2', 'NR2F1',
'NPAS1', 'DMRT3', 'LMX1A',
'NKX2-8', 'EN2', 'SOX1', 'PAX6', 'ZBTB16'], 'NC': ['PAX3', 'FOXD3', 'SOX9', 'SOX10'],
'PostEn': ['CDX2', 'ASCL2', 'KLF5', 'NKX2-1'],
'EN': ['ARID3A', 'GATA3', 'SATB1', 'SOX15', 'SOX17', 'FOXA2'], 'Pre-NE': ['POU5F1', 'OTX2'],
'SMP': ['TBX18', 'SIX2', 'TBX15', 'PDGFRA'],
'Cardiac': ['TNNT2', 'HAND1', 'F3', 'CD82', 'LIFR'],
'EpiCard': ['WT1', 'TBX5', 'HOXD9', 'MYC', 'LOX'],
'PS/ME': ['T', 'EOMES', 'MIXL1', 'CER1', 'SATB1'],
'NE': ['GBX2', 'OLIG3', 'HOXD1', 'ZIC2', 'ZIC5', 'GLI3', 'LHX2', 'LHX5', 'SIX3', 'SIX6',
'HOXA2', 'HOXB2'], 'ESC': ['NANOG', 'POU5F1', 'OTX2'], 'Pre-NE': ['POU5F1', 'OTX2']}
marker_genes_list = []
for key in marker_genes_dict:
for item in marker_genes_dict[key]:
marker_genes_list.append(item)
v0_too_big = 0.3
v1_too_big = 0.05
n_var_genes = 'no filtering for HVG' # 15000
print('ncomps, knn, n_var_genes, v0big, p1big, randomseed, time', ncomps, knn, n_var_genes, v0_too_big, v1_too_big,
v0_random_seed, time.ctime())
# TI_pcs = pd.read_csv(foldername+'PCA_TI_200_final.csv')
# TI_pcs is PCA run on data that has been: filtered (remove cells with too large or small library count - can directly use all cells in EBdata.mat), library normed, sqrt transform, scaled to unit variance/zero mean
# TI_pcs = TI_pcs.values[:, 1:]
from scipy.io import loadmat
annots = loadmat(
foldername + 'EBdata.mat') # has been filtered but not yet normed (by library size) nor other subsequent pre-processing steps
# print('annots', annots)
data = annots['data'].toarray() # (16825, 17580) (cells and genes have been filtered)
# print('data min max', np.max(data), np.min(data), data[1, 0:20], data[5, 250:270], data[1000, 15000:15050])
loc_ = np.where((data < 1) & (data > 0))
temp = data[(data < 1) & (data > 0)]
# print('temp non int', temp)
time_labels = annots['cells'].flatten().tolist()
# df_timelabels = pd.DataFrame(time_labels, columns=['true_time_labels'])
# df_timelabels.to_csv(foldername+'EB_true_time_labels.csv')
gene_names_raw = annots['EBgenes_name'] # (17580, 1) genes
adata = sc.AnnData(data)
gene_names = []
for i in gene_names_raw:
gene_names.append(i[0][0])
adata.var_names = gene_names
adata.obs['time'] = ['Day' + str(i) for i in time_labels]
adata.X = sc.pp.normalize_total(adata, inplace=False)['X'] # normalize by library after filtering
adata.X = np.sqrt(adata.X) # follow Phate paper which doesnt take log1() but instead does sqrt() transformation
Y_phate = pd.read_csv(foldername + 'EB_phate_embedding.csv')
Y_phate = Y_phate.values
# phate_operator = phate.PHATE(n_jobs=-1)
# Y_phate = phate_operator.fit_transform(adata.X) # before scaling. as done in PHATE
scale = False # scaling mostly improves the cluster-graph heatmap of genes vs clusters. doesnt sway VIA performance
if scale == True: # we scale before VIA. scaling not needed for PHATE
print('pp scaled')
adata.X = (adata.X - np.mean(adata.X, axis=0)) / np.std(adata.X, axis=0)
print('data max min after SCALED', np.max(adata.X), np.min(adata.X))
else:
print('not pp scaled')
sc.tl.pca(adata, svd_solver='arpack', n_comps=200, random_state=0)
# adata.obsm['X_pca'] = TI_pcs
input_data = adata.obsm['X_pca'][:, 0:ncomps]
print('do v0')
root_user = [1]
v0 = VIA(input_data, time_labels, jac_std_global=0.15, dist_std_local=1, knn=knn,
cluster_graph_pruning_std=cluster_graph_pruning_std,
too_big_factor=v0_too_big, root_user=root_user, dataset='EB', random_seed=v0_random_seed,
do_impute_bool=True, is_coarse=True, preserve_disconnected=True) # *.4 root=1,
v0.run_VIA()
tsi_list = get_loc_terminal_states(v0, input_data)
v1 = VIA(input_data, time_labels, jac_std_global=0.15, dist_std_local=1, knn=knn,
cluster_graph_pruning_std=cluster_graph_pruning_std,
too_big_factor=v1_too_big, super_cluster_labels=v0.labels, super_node_degree_list=v0.node_degree_list,
super_terminal_cells=tsi_list, root_user=root_user, is_coarse=False,
full_neighbor_array=v0.full_neighbor_array,
full_distance_array=v0.full_distance_array, ig_full_graph=v0.ig_full_graph,
csr_array_locally_pruned=v0.csr_array_locally_pruned,
x_lazy=0.95, alpha_teleport=0.99, preserve_disconnected=True, dataset='EB',
super_terminal_clusters=v0.terminal_clusters, random_seed=21)
v1.run_VIA()
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
ax1.scatter(Y_phate[:, 0], Y_phate[:, 1], c=time_labels, s=5, cmap='viridis', alpha=0.5)
ax2.scatter(Y_phate[:, 0], Y_phate[:, 1], c=v1.single_cell_pt_markov, s=5, cmap='viridis', alpha=0.5)
ax1.set_title('Embryoid: Annotated Days')
ax2.set_title('Embryoid VIA Pseudotime (Randomseed' + str(v0_random_seed) + ')')
plt.show()
super_clus_ds_PCA_loc = sc_loc_ofsuperCluster_PCAspace(v0, v1, np.arange(0, len(v1.labels)))
draw_trajectory_gams(Y_phate, super_clus_ds_PCA_loc, v1.labels, v0.labels, v0.edgelist_maxout,
v1.x_lazy, v1.alpha_teleport, v1.single_cell_pt_markov, time_labels, knn=v0.knn,
final_super_terminal=v1.revised_super_terminal_clusters,
sub_terminal_clusters=v1.terminal_clusters,
title_str='Markov Hitting Times and Paths', ncomp=ncomps)
knn_hnsw = make_knn_embeddedspace(Y_phate)
draw_sc_evolution_trajectory_dijkstra(v1, Y_phate, knn_hnsw, v0.full_graph_shortpath,
idx=np.arange(0, input_data.shape[0]))
plt.show()
adata.obs['via0'] = [str(i) for i in v0.labels]
adata.obs['parc1'] = [str(i) for i in v1.labels]
adata.obs['terminal_state'] = ['True' if i in v1.terminal_clusters else 'False' for i in v1.labels]
adata.X = (adata.X - np.mean(adata.X, axis=0)) / np.std(adata.X,
axis=0) # to improve scale of the matrix plot we will scale
sc.pl.matrixplot(adata, marker_genes_dict, groupby='parc1', vmax=1, vmin=-1, dendrogram=True, figsize=[20, 10])
def main_EB(ncomps=30, knn=20, v0_random_seed=24):
marker_genes_dict = {'Hermang': ['TAL1', 'HOXB4', 'SOX17', 'CD34', 'PECAM1'],
'NP': ['NES', 'MAP2'],
'NS': ['KLF7', 'ISL1', 'DLX1', 'ONECUT1', 'ONECUT2', 'OLIG1', 'NPAS1', 'LHX2', 'NR2F1',
'NPAS1', 'DMRT3', 'LMX1A',
'NKX2-8', 'EN2', 'SOX1', 'PAX6', 'ZBTB16'], 'NC': ['PAX3', 'FOXD3', 'SOX9', 'SOX10'],
'PostEn': ['CDX2', 'ASCL2', 'KLF5', 'NKX2-1'],
'EN': ['ARID3A', 'GATA3', 'SATB1', 'SOX15', 'SOX17', 'FOXA2'], 'Pre-NE': ['POU5F1', 'OTX2'],
'SMP': ['TBX18', 'SIX2', 'TBX15', 'PDGFRA'],
'Cardiac': ['TNNT2', 'HAND1', 'F3', 'CD82', 'LIFR'],
'EpiCard': ['WT1', 'TBX5', 'HOXD9', 'MYC', 'LOX'],
'PS/ME': ['T', 'EOMES', 'MIXL1', 'CER1', 'SATB1'],
'NE': ['GBX2', 'OLIG3', 'HOXD1', 'ZIC2', 'ZIC5', 'GLI3', 'LHX2', 'LHX5', 'SIX3', 'SIX6',
'HOXA2', 'HOXB2'], 'ESC': ['NANOG', 'POU5F1', 'OTX2'], 'Pre-NE': ['POU5F1', 'OTX2']}
marker_genes_list = []
for key in marker_genes_dict:
for item in marker_genes_dict[key]:
marker_genes_list.append(item)
v0_too_big = 0.3
v1_too_big = 0.05
root_user = 1
n_var_genes = 'no filtering for HVG' # 15000
print('ncomps, knn, n_var_genes, v0big, p1big, randomseed, time', ncomps, knn, n_var_genes, v0_too_big, v1_too_big,
v0_random_seed, time.ctime())
# data = data.drop(['Unnamed: 0'], axis=1)
TI_pcs = pd.read_csv(
'/home/shobi/Trajectory/Datasets/EB_Phate/PCA_TI_200_final.csv') # filtered, library normed, sqrt transform, scaled to unit variance/zero mean
TI_pcs = TI_pcs.values[:, 1:]
umap_pcs = pd.read_csv('/home/shobi/Trajectory/Datasets/EB_Phate/PCA_umap_200_TuesAM.csv')
umap_pcs = umap_pcs.values[:, 1:]
# print('TI PC shape', TI_pcs.shape)
from scipy.io import loadmat
annots = loadmat(
'/home/shobi/Trajectory/Datasets/EB_Phate/EBdata.mat') # has been filtered but not yet normed (by library s
data = annots['data'].toarray() # (16825, 17580) (cells and genes have been filtered)
# print('data min max', np.max(data), np.min(data), data[1, 0:20], data[5, 250:270], data[1000, 15000:15050])
# loc_ = np.where((data < 1) & (data > 0))
temp = data[(data < 1) & (data > 0)]
# print('temp non int', temp)
time_labels = annots['cells'].flatten().tolist()
import scprep
dict_labels = {'Day 00-03': 0, 'Day 06-09': 2, 'Day 12-15': 4, 'Day 18-21': 6, 'Day 24-27': 8}
# print(annots.keys()) # (['__header__', '__version__', '__globals__', 'EBgenes_name', 'cells', 'data'])
gene_names_raw = annots['EBgenes_name'] # (17580, 1) genes
print(data.shape)
adata = sc.AnnData(data)
# time_labels = pd.read_csv('/home/shobi/Trajectory/Datasets/EB_Phate/labels_1.csv')
# time_labels = time_labels.drop(['Unnamed: 0'], axis=1)
# time_labels = time_labels['time']
# adata.obs['time'] = [str(i) for i in time_labels]
gene_names = []
for i in gene_names_raw:
gene_names.append(i[0][0])
adata.var_names = gene_names
adata.obs['time'] = [str(i) for i in time_labels]
# filter_result = sc.pp.filter_genes_dispersion(adata.X, flavor='cell_ranger', n_top_genes=5000, log=False) #dont take log
adata_umap = adata.copy()
# adata = adata[:, filter_result.gene_subset] # subset the genes
# sc.pp.normalize_per_cell(adata, min_counts=2) # renormalize after filtering
print('data max min BEFORE NORM', np.max(adata.X), np.min(adata.X), adata.X[1, 0:20])
rowsums = adata.X.sum(axis=1)
# adata.X = adata.X / rowsums[:, np.newaxis]
# adata.X = sc.pp.normalize_total(adata, exclude_highly_expressed=True, max_fraction=0.05, inplace=False)['X'] #normalize after filtering
adata.X = sc.pp.normalize_total(adata, inplace=False)['X'] # normalize after filtering
print('data max min after NORM', np.max(adata.X), np.min(adata.X), adata.X[1, 0:20])
adata.X = np.sqrt(adata.X) # follow Phate paper which doesnt take log1() but instead does sqrt() transformation
adata_umap.X = np.sqrt(adata_umap.X)
print('data max min after SQRT', np.max(adata.X), np.min(adata.X), adata.X[1, 0:20])
# sc.pp.log1p(adata) # log transform: adata.X = log(adata.X + 1)
'''
phate_operator = phate.PHATE(n_jobs=-1)
Y_phate = phate_operator.fit_transform(adata.X)
scprep.plot.scatter2d(Y_phate, c=time_labels, figsize=(12, 8), cmap="Spectral",
ticks=False, label_prefix="PHATE")
plt.show()
'''
Y_phate = pd.read_csv('/home/shobi/Trajectory/Datasets/EB_Phate/EB_phate_embedding.csv')
Y_phate = Y_phate.values
scale = True
if scale == True:
print('pp scaled')
# sc.pp.scale(adata)
adata.X = (adata.X - np.mean(adata.X, axis=0)) / np.std(adata.X, axis=0)
sc.pp.scale(adata_umap)
print('data max min after SCALED', np.max(adata.X), np.min(adata.X))
else:
print('not pp scaled')
print('sqrt transformed')
# sc.pp.recipe_zheng17(adata, n_top_genes=15000) #expects non-log data
# g = sc.tl.rank_genes_groups(adata, groupby='time', use_raw=True, n_genes=10)#method='t-test_overestim_var'
# sc.pl.rank_genes_groups_heatmap(adata, n_genes=3, standard_scale='var')
'''
pcs = pd.read_csv('/home/shobi/Trajectory/Datasets/EB_Phate/umap_200_matlab.csv')
pcs = pcs.drop(['Unnamed: 0'], axis=1)
pcs = pcs.values
print(time.ctime())
ncomps = 50
input_data =pcs[:, 0:ncomps]
'''
print('v0_toobig, p1_toobig, v0randomseed', v0_too_big, v1_too_big, v0_random_seed)
print('do pca')
# sc.tl.pca(adata, svd_solver='arpack', n_comps=200, random_state = 0)
# sc.tl.pca(adata_umap, svd_solver='arpack', n_comps=200)
# df_pca_TI_200 = pd.DataFrame(adata.obsm['X_pca'])
# df_pca_TI_200.to_csv('/home/shobi/Trajectory/Datasets/EB_Phate/PCA_TI_200_TuesAM.csv')
# df_pca_umap_200 = pd.DataFrame(adata_umap.obsm['X_pca'])
# df_pca_umap_200.to_csv('/home/shobi/Trajectory/Datasets/EB_Phate/PCA_umap_200_TuesAM.csv')
adata.obsm['X_pca'] = TI_pcs
adata_umap.obsm['X_pca'] = umap_pcs
input_data = adata.obsm['X_pca'][:, 0:ncomps]
'''
#plot genes vs clusters for each trajectory
df_plot_gene = pd.DataFrame(adata.X, columns=[i for i in adata.var_names])
df_plot_gene = df_plot_gene[marker_genes_list]
previous_p1_labels = pd.read_csv('/home/shobi/Trajectory/Datasets/EB_Phate/df_labels_knn20_pc100_seed20.csv')
title_str = 'Terminal state 27 (Cardiac)'
gene_groups = ['ESC', 'PS/ME','EN','Cardiac']
clusters = [43,41,16,14,12,27]
'''
u_knn = 15
repulsion_strength = 1
n_pcs = 10
print('knn and repel', u_knn, repulsion_strength)
U = pd.read_csv('/home/shobi/Trajectory/Datasets/EB_Phate/umap_pc10_knn15.csv')
U = U.values[:, 1:]
U = Y_phate
# U = umap.UMAP(n_neighbors=u_knn, random_state=1, repulsion_strength=repulsion_strength).fit_transform(adata_umap.obsm['X_pca'][:, 0:n_pcs])
#print('start palantir', time.ctime())
# run_palantir_EB(adata, knn=knn, ncomps=ncomps, tsne=U, str_true_label=[str(i) for i in time_labels])
#print('end palantir', time.ctime())
# df_U = pd.DataFrame(U)
# df_U.to_csv('/home/shobi/Trajectory/Datasets/EB_Phate/umap_pc10_knn15.csv')
print('do v0')
v0 = VIA(input_data, time_labels, jac_std_global=0.15, dist_std_local=1, knn=knn,
too_big_factor=v0_too_big, root_user=root_user, dataset='EB', random_seed=v0_random_seed,
do_impute_bool=True, is_coarse=True, preserve_disconnected=True) # *.4 root=1,
v0.run_VIA()
super_labels = v0.labels
v0_labels_df = pd.DataFrame(super_labels, columns=['v0_labels'])
v0_labels_df.to_csv('/home/shobi/Trajectory/Datasets/EB_Phate/p0_labels.csv')
adata.obs['via0'] = [str(i) for i in super_labels]
'''
df_temp1 = pd.DataFrame(adata.X, columns = [i for i in adata.var_names])
df_temp1 = df_temp1[marker_genes_list]
df_temp1['via0']=[str(i) for i in super_labels]
df_temp1 = df_temp1.groupby('via0').mean()
'''
# sns.clustermap(df_temp1, vmin=-1, vmax=1,xticklabels=True, yticklabels=True, row_cluster= False, col_cluster=True)
# sc.pl.matrixplot(adata, marker_genes_dict, groupby='via0', vmax=1, vmin =-1, dendrogram=True)
'''
sc.tl.rank_genes_groups(adata, groupby='via0', use_raw=True,
method='t-test_overestim_var', n_genes=5) # compute differential expression
sc.pl.rank_genes_groups_heatmap(adata, groupby='via0',vmin=-3, vmax=3) # plot the result
'''
p = hnswlib.Index(space='l2', dim=input_data.shape[1])
p.init_index(max_elements=input_data.shape[0], ef_construction=100, M=16)
p.add_items(input_data)
p.set_ef(30)
tsi_list = get_loc_terminal_states(v0, input_data)
v1 = VIA(input_data, time_labels, jac_std_global=0.15, dist_std_local=1, knn=knn,
too_big_factor=v1_too_big, is_coarse=False,
super_cluster_labels=super_labels, super_node_degree_list=v0.node_degree_list,
super_terminal_cells=tsi_list, root_user=root_user, ig_full_graph=v0.ig_full_graph,
csr_array_locally_pruned=v0.csr_array_locally_pruned, full_distance_array=v0.full_distance_array,
full_neighbor_array=v0.full_neighbor_array,
x_lazy=0.95, alpha_teleport=0.99, preserve_disconnected=True, dataset='EB',
super_terminal_clusters=v0.terminal_clusters, random_seed=v0_random_seed)
v1.run_VIA()
# adata.obs['parc1'] = [str(i) for i in v1.labels]
# sc.pl.matrixplot(adata, marker_genes, groupby='parc1', dendrogram=True)
labels = v1.labels
'''
df_labels = pd.DataFrame({'v0_labels':v0.labels,'p1_labels':v1.labels})
df_labels['sub_TS'] = [1 if i in v1.terminal_clusters else 0 for i in v1.labels]
df_labels['super_TS'] = [1 if i in v0.terminal_clusters else 0 for i in v0.labels]
df_labels.to_csv('/home/shobi/Trajectory/Datasets/EB_Phate/df_labels_knn20_pc100_seed20.csv')
df_temp2 = pd.DataFrame(adata.X, columns=[i for i in adata.var_names])
df_temp2 = df_temp2[marker_genes_list]
df_temp2['parc1'] = [str(i) for i in labels]
df_temp2 = df_temp2.groupby('parc1').mean()
df_temp2.to_csv('/home/shobi/Trajectory/Datasets/EB_Phate/df_groupbyParc1_knn20_pc100_seed20.csv')
'''
adata.obs['parc1'] = [str(i) for i in labels]
# df_ts = pd.DataFrame(adata.X, columns = [i for i in adata.var_names])
# df_ts = df_ts[marker_genes_list]
# df_ts['parc1'] = [str(i) for i in labels]
adata.obs['terminal_state'] = ['True' if i in v1.terminal_clusters else 'False' for i in labels]
# df_ts = df_ts[df_ts['terminal_state']=='True']
adata_TS = adata[adata.obs['terminal_state'] == 'True']
# sns.clustermap(df_temp1, vmin=-1, vmax=1, xticklabels=True, yticklabels=True, row_cluster=False, col_cluster=True)
sc.pl.matrixplot(adata, marker_genes_dict, groupby='parc1', vmax=1, vmin=-1, dendrogram=True)
# sc.pl.matrixplot(adata_TS, marker_genes_dict, groupby='parc1', vmax=1, vmin=-1, dendrogram=True)
# U = umap.UMAP(n_neighbors=10, random_state=0, repulsion_strength=repulsion_strength).fit_transform(input_data[:, 0:n_pcs])
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
ax1.scatter(U[:, 0], U[:, 1], c=time_labels, s=5, cmap='viridis', alpha=0.5)
ax2.scatter(U[:, 0], U[:, 1], c=v1.single_cell_pt_markov, s=5, cmap='viridis', alpha=0.5)
plt.title('repulsion and knn and pcs ' + str(repulsion_strength) + ' ' + str(u_knn) + ' ' + str(
n_pcs) + ' randseed' + str(v0_random_seed))
plt.show()
super_clus_ds_PCA_loc = sc_loc_ofsuperCluster_PCAspace(v0, v1, np.arange(0, len(labels)))
draw_trajectory_gams(U, super_clus_ds_PCA_loc, labels, super_labels, v0.edgelist_maxout,
v1.x_lazy, v1.alpha_teleport, v1.single_cell_pt_markov, time_labels, knn=v0.knn,
final_super_terminal=v1.revised_super_terminal_clusters,
sub_terminal_clusters=v1.terminal_clusters,
title_str='Markov Hitting Times (Gams)', ncomp=ncomps)
plt.show()
knn_hnsw = make_knn_embeddedspace(U)
draw_sc_evolution_trajectory_dijkstra(v1, U, knn_hnsw, v0.full_graph_shortpath,
idx=np.arange(0, input_data.shape[0]))
plt.show()
def main_mESC(knn=30, v0_random_seed=42, cluster_graph_pruning_std=.0, run_palantir_func=False):
import random
rand_str = 950 # random.randint(1, 999)
print('rand string', rand_str)
print('knn', knn)
data_random_seed = 20
root = '0.0'
type_germ = 'Meso'
normalize = True
data = pd.read_csv('/home/shobi/Trajectory/Datasets/mESC/mESC_' + type_germ + '_markers.csv')
print('counts', data.groupby('day').count())
# print(data.head())
print(data.shape)
n_sub = 7000
print('type,', type_germ, 'nelements', n_sub, 'v0 randseed', v0_random_seed)
title_string = 'randstr:' + str(rand_str) + ' Knn' + str(knn) + ' nelements:' + str(n_sub) + ' ' + 'meso'
# data = data[data['day']!=0]
v0_too_big = 0.3
p1_too_big = 0.15 # .15
print('v0 and p1 too big', v0_too_big, p1_too_big)
data_sub = data[data['day'] == 0.0]
np.random.seed(data_random_seed)
idx_sub = np.random.choice(a=np.arange(0, data_sub.shape[0]), size=min(n_sub, data_sub.shape[0]), replace=False,
p=None) # len(true_label)
data_sub = data_sub.values[idx_sub, :]
data_sub = pd.DataFrame(data_sub, columns=data.columns)
for i in [1.0, 2, 2.5, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0]:
sub = data[data['day'] == i]
print(sub.shape[0])
np.random.seed(data_random_seed)
idx_sub = np.random.choice(a=np.arange(0, sub.shape[0]), size=min(n_sub, sub.shape[0]), replace=False,
p=None) # len(true_label)
sub = sub.values[idx_sub, :]
print('size of sub', sub.shape)
sub = pd.DataFrame(sub, columns=data.columns)
data_sub = pd.concat([data_sub, sub], axis=0, ignore_index=True, sort=True)
true_label = data_sub['day']
true_type = data_sub['type']
data = data_sub.drop(['day', 'Unnamed: 0', 'type'], axis=1)
# print('after subbing', data.head)
cols = ['Sca-1', 'CD41', 'Nestin', 'Desmin',
'CD24', 'FoxA2', 'Oct4', 'CD45', 'Ki67', 'Vimentin',
'Nanog', 'pStat3-705', 'Sox2', 'Flk-1', 'Tuj1',
'H3K9ac', 'Lin28', 'PDGFRa', 'EpCAM', 'CD44',
'GATA4', 'Klf4', 'CCR9', 'p53', 'SSEA1', 'IdU', 'Cdx2'] # 'bCatenin'
meso_good = ['CD24', 'FoxA2', 'Oct4', 'CD45', 'Ki67', 'Vimentin', 'Cdx2', 'CD54', 'pStat3-705', 'Sox2', 'Flk-1',
'Tuj1', 'SSEA1', 'H3K9ac', 'Lin28', 'PDGFRa', 'bCatenin', 'EpCAM', 'CD44', 'GATA4', 'Klf4', 'CCR9',
'p53']
marker_genes_ecto = ['Oct4', 'Nestin', 'CD45', 'Vimentin', 'Cdx2', 'Flk-1', 'PDGFRa', 'CD44',
'GATA4', 'CCR9', 'CD54', 'CD24', 'CD41', 'Tuji']
marker_genes_meso_paper_sub = ['Oct4', 'CD54', 'SSEA1', 'Lin28', 'Cdx2', 'CD45', 'Nanog', 'Sox2', 'Flk-1', 'Tuj1',
'PDGFRa', 'EpCAM', 'CD44', 'CCR9', 'GATA4']
marker_genes_meso_paper = ['Nestin', 'FoxA2', 'Oct4', 'CD45', 'Sox2', 'Flk-1', 'Tuj1', 'PDGFRa', 'EpCAM', 'CD44',
'GATA4', 'CCR9', 'Nanog', 'Cdx2', 'Vimentin'] # 'Nanog''Cdx2','Vimentin'
marker_genes_endo = ['Sca-1''Nestin', 'CD45', 'Vimentin', 'Cdx2', 'Flk-1', 'PDGFRa', 'CD44',
'GATA4', 'CCR9', 'CD54', 'CD24', 'CD41', 'Oct4']
marker_genes_meso = ['Sca-1', 'CD41', 'Nestin', 'Desmin', 'CD24', 'FoxA2', 'Oct4', 'CD45', 'Ki67', 'Vimentin',
'Cdx2', 'Nanog', 'pStat3-705', 'Sox2', 'Flk-1', 'Tuj1', 'H3K9ac', 'Lin28', 'PDGFRa', 'EpCAM',
'CD44', 'GATA4', 'Klf4', 'CCR9', 'p53', 'SSEA1', 'bCatenin', 'IdU'] # ,'c-Myc'
marker_dict = {'Ecto': marker_genes_ecto, 'Meso': marker_genes_meso, 'Endo': marker_genes_meso}
marker_genes = marker_dict[type_germ]
data = data[marker_genes]
print('marker genes ', marker_genes)
pre_fac_scale = [4, 1,
1] # 4,1,1 (4,1,1 is used in the paper but actually no scaling factor is really required, the results are unperturbed
pre_fac_scale_genes = ['H3K9ac', 'Lin28', 'Oct4']
for pre_scale_i, pre_gene_i in zip(pre_fac_scale, pre_fac_scale_genes):
data[pre_gene_i] = data[pre_gene_i] / pre_scale_i
print('prescaled gene', pre_gene_i, 'by factor', pre_scale_i)
scale_arcsinh = 5
raw = data.values
raw = raw.astype(np.float)
raw_df = pd.DataFrame(raw, columns=data.columns)
raw = raw / scale_arcsinh
raw = np.arcsinh(raw)
# print(data.shape, raw.shape)
adata = sc.AnnData(raw)
adata.var_names = data.columns
# print(adata.shape, len(data.columns))
true_label_int = [i for i in true_label]
adata.obs['day'] = ['0' + str(i) if i < 10 else str(i) for i in true_label_int]
true_label_str = [str(i) for i in
true_label_int] # the way find_root works is to match any part of root-user to majority truth
print(adata.obs['day'])
if normalize == True:
sc.pp.scale(adata, max_value=5)
print(colored('normalized', 'blue'))
else:
print(colored('NOT normalized', 'blue'))
print('adata', adata.shape)
# ncomps = 30
# sc.tl.pca(adata, svd_solver='arpack', n_comps=ncomps)
n_umap = adata.shape[0]
np.random.seed(data_random_seed)
udata = adata.X[:, :][0:n_umap]
# U = umap.UMAP().fit_transform(udata)
# U_df = pd.DataFrame(U, columns=['x', 'y'])
# U_df.to_csv('/home/shobi/Trajectory/Datasets/mESC/umap_89782cells_meso.csv')
idx = np.arange(0, adata.shape[
0]) # np.random.choice(a=np.arange(0, adata.shape[0]), size=adata.shape[0], replace=False, p=None) # len(true_label)
# idx=np.arange(0, len(true_label_int))
U = pd.read_csv(
'/home/shobi/Trajectory/Datasets/mESC/umap_89782cells_meso.csv') # umap_89782cells_7000each_Randseed20_meso.csv')
# U = pd.read_csv('/home/shobi/Trajectory/Datasets/mESC/phate_89782cells_mESC.csv')
U = U.values[0:len(true_label), 1:]
plt.scatter(U[:, 0], U[:, 1], c=true_label, cmap='jet', s=4, alpha=0.7)
plt.show()
'''
for gene_i in ['CD44', 'GATA4', 'PDGFRa', 'EpCAM']:
# subset = adata[[gene_i]].values #scale is not great so hard to visualize on the raw data expression
subset = adata[:, gene_i].X.flatten()
plt.scatter(U[:, 0], U[:, 1], c=subset, cmap='viridis', s=4, alpha=0.7)
plt.title(gene_i)
plt.show()
'''
print(U.shape)
# U_df = pd.read_csv('/home/shobi/Trajectory/Datasets/mESC/phate_89782cells_mESC.csv')
# U = U_df.drop('Unnamed: 0', 1)
U = U[idx, :]
# subsample start
n_subsample = len(true_label_int) # 50000 #palantir wont scale
U = U[0:n_subsample, :]
# phate_operator = phate.PHATE(n_jobs=-1)
# Y_phate = phate_operator.fit_transform(adata.X)
# phate_df = pd.DataFrame(Y_phate)
# phate_df.to_csv('/home/shobi/Trajectory/Datasets/mESC/phate_89782cells_mESC.csv')
true_label_int0 = list(np.asarray(true_label_int))
# Start Slingshot data prep
'''
slingshot_annots = true_label_int0[0:n_umap]
slingshot_annots = [int(i) for i in slingshot_annots]
Slingshot_annots = pd.DataFrame(slingshot_annots,columns = ['label'])
Slingshot_annots.to_csv('/home/shobi/Trajectory/Datasets/mESC/Slingshot_annots_int_10K_sep.csv')
Slingshot_data = pd.DataFrame(adata.X[0:n_umap], columns=marker_genes)
Slingshot_data.to_csv('/home/shobi/Trajectory/Datasets/mESC/Slingshot_input_data_10K_sep.csv')
# print('head sling shot data', Slingshot_data.head)
# print('head sling shot annots', Slingshot_annots.head)
print('slingshot data shape', Slingshot_data.shape)
# sling_adata =sc.AnnData(Slingshot_data)
'''
# end Slingshot data prep
adata = adata[idx]
true_label_int = list(np.asarray(true_label_int)[idx])
true_label_int = true_label_int[0:n_subsample]
true_label_str = list(np.asarray(true_label_str)[idx])
true_label_str = true_label_str[0:n_subsample]
true_type = list(np.asarray(true_type)[idx])
true_type = list(np.asarray(true_type)[idx])[0:n_subsample]
sc.tl.pca(adata, svd_solver='arpack', n_comps=20)
# plt.scatter(sling_adata.obsm['X_pca'][:,0],sling_adata.obsm['X_pca'][:,1], c = Slingshot_annots['label'])
plt.show()
print('time', time.ctime())
loc_start = np.where(np.asarray(true_label_int) == 0)[0][0]
adata.uns['iroot'] = loc_start
print('iroot', loc_start)
# Start PAGA
'''
sc.pp.neighbors(adata, n_neighbors=knn, n_pcs=28) # 4
sc.tl.draw_graph(adata)
# sc.pl.draw_graph(adata_counts, color='group_id', legend_loc='on data') # force-directed layout
start_dfmap = time.time()
sc.tl.diffmap(adata, n_comps=28)
print('time taken to get diffmap given knn', time.time() - start_dfmap)
#sc.pp.neighbors(adata, n_neighbors=knn, use_rep='X_diffmap') # 4
#sc.tl.draw_graph(adata)
sc.tl.leiden(adata, resolution=1.0, random_state=10)
sc.tl.paga(adata, groups='leiden')
adata.obs['group_id'] = true_label_int
# sc.pl.paga(adata_counts, color=['louvain','group_id'])
sc.tl.dpt(adata, n_dcs=28)
print('time paga end', time.ctime())
plt.show()
df_paga_dpt = pd.DataFrame()
df_paga_dpt['paga_dpt'] = adata.obs['dpt_pseudotime'].values
df_paga_dpt['days'] = true_label_int
df_paga_dpt.to_csv('/home/shobi/Trajectory/Datasets/mESC/paga_dpt_knn' + str(knn) + '.csv')
sc.pl.paga(adata, color=['leiden', 'group_id', 'dpt_pseudotime'],
title=['leiden', 'group_id', 'pseudotime'])
plt.show()
# sc.pl.matrixplot(adata, marker_genes_meso, groupby='day', dendrogram=True)
'''
# end PAGA
'''
#start palantir run
t_pal_start = time.time()
run_palantir_mESC(adata[0:n_subsample:], knn=knn, tsne=U, str_true_label = true_label_str)
print('palantir run time', round(time.time() - t_pal_start))
df_palantir = pd.read_csv('/home/shobi/Trajectory/Datasets/mESC/palantir_pt.csv')
df_palantir['days'] = true_label_int
df_palantir.to_csv('/home/shobi/Trajectory/Datasets/mESC/palantir_pt.csv')
'''
# df_ = pd.DataFrame(adata.X)
# df_.columns = [i for i in adata.var_names]
# df_.to_csv('/home/shobi/Trajectory/Datasets/mESC/transformed_normalized_input.csv')
df_ = pd.DataFrame(true_label_int, columns=['days'])
# df_.to_csv('/home/shobi/Trajectory/Datasets/mESC/annots_days.csv')
print('finished saving for monocle3')
v0 = VIA(adata.X, true_label_int, jac_std_global=0.3, dist_std_local=1, knn=knn,
cluster_graph_pruning_std=cluster_graph_pruning_std,
too_big_factor=v0_too_big, resolution_parameter=2,
root_user=root, dataset='mESC', random_seed=v0_random_seed,
visual_cluster_graph_pruning=1, max_visual_outgoing_edges=3,
do_impute_bool=True, is_coarse=True, preserve_disconnected=False, pseudotime_threshold_TS=40, x_lazy=0.99,
alpha_teleport=0.99) # *.4 root=1,
v0.run_VIA()
df_pt = v0.single_cell_pt_markov
f, (ax1, ax2,) = plt.subplots(1, 2, sharey=True)
s_genes = ''
for s in marker_genes:
s_genes = s_genes + ' ' + s
plt.title(str(len(true_label)) + 'cells ' + str(title_string) + '\n marker genes:' + s_genes, loc='left')
ax1.scatter(U[:, 0], U[:, 1], c=true_label_int, cmap='jet', s=4, alpha=0.7)
ax2.scatter(U[:, 0], U[:, 1], c=df_pt, cmap='jet', s=4, alpha=0.7)
print('SAVED TRUE')
df_pt = pd.DataFrame()
df_pt['via_knn'] = v0.single_cell_pt_markov
df_pt['days'] = true_label_int
df_pt.to_csv('/home/shobi/Trajectory/Datasets/mESC/noMCMC_nolazynotele_via_pt_knn_Feb2021' + str(
knn) + 'resolution2jacp15.csv')
adata.obs['via0'] = [str(i) for i in v0.labels]
# show geneplot
# sc.pl.matrixplot(adata, marker_genes, groupby='via0', dendrogram=True)
super_labels = v0.labels
super_clus_ds_PCA_loc = sc_loc_ofsuperCluster_PCAspace(v0, v0, np.arange(0, len(true_label)))
c_pt = v0.single_cell_pt_markov[0:n_umap]
print('draw trajectory for v0')
draw_trajectory_gams(U, super_clus_ds_PCA_loc, super_labels, super_labels, v0.edgelist_maxout,
v0.x_lazy, v0.alpha_teleport, c_pt, true_label_int, knn=v0.knn,
final_super_terminal=v0.terminal_clusters,
sub_terminal_clusters=v0.terminal_clusters,
title_str='Markov Hitting Times (Gams)', ncomp=28)
'''
#show geneplot
for gene_i in ['CD44', 'GATA4', 'PDGFRa', 'EpCAM']:
# subset = data[[gene_i]].values
subset = adata[:, gene_i].X.flatten()
print('gene expression for', gene_i)
v0.get_gene_expression(subset, gene_i)
plt.show()
'''
tsi_list = get_loc_terminal_states(v0, adata.X)
v1 = VIA(adata.X, true_label_int, jac_std_global=0.15, dist_std_local=1, knn=knn,
cluster_graph_pruning_std=cluster_graph_pruning_std,
too_big_factor=p1_too_big, super_cluster_labels=super_labels, super_node_degree_list=v0.node_degree_list,
super_terminal_cells=tsi_list, root_user=root, is_coarse=False,
x_lazy=0.99, alpha_teleport=0.99, preserve_disconnected=True, dataset='mESC',
visual_cluster_graph_pruning=1, max_visual_outgoing_edges=3,
super_terminal_clusters=v0.terminal_clusters, random_seed=v0_random_seed,
full_neighbor_array=v0.full_neighbor_array, full_distance_array=v0.full_distance_array,
ig_full_graph=v0.ig_full_graph, csr_array_locally_pruned=v0.csr_array_locally_pruned,
pseudotime_threshold_TS=40)
v1.run_VIA()
df_pt['via_v1'] = v1.single_cell_pt_markov
df_pt.to_csv('/home/shobi/Trajectory/Datasets/mESC/noMCMC_nolazynotele_via_pt_knn_Feb2021' + str(
knn) + 'resolution2jacp15.csv')
adata.obs['parc1'] = [str(i) for i in v1.labels]
sc.pl.matrixplot(adata, marker_genes, groupby='parc1', dendrogram=True)
labels = v1.labels
for gene_i in ['CD44', 'GATA4', 'PDGFRa', 'EpCAM']:
# subset = data[[gene_i]].values
subset = adata[:, gene_i].X.flatten()
print('gene expression for', gene_i)
v1.get_gene_expression(subset, gene_i)
# X = adata.obsm['X_pca'][:,0:2]
# print(X.shape)
c_pt = v1.single_cell_pt_markov[0:n_umap]
c_type = true_type[0:n_umap]
dict_type = {'EB': 0, 'Endo': 5, "Meso": 10, 'Ecto': 15}
c_type = [dict_type[i] for i in c_type]
u_truelabel = true_label_int[0:n_umap]
# U = umap.UMAP().fit_transform(adata.obsm['X_pca'][idx, 0:ncomps])
# U = Y_phate[idx,:]
print('umap done', rand_str, time.ctime())
f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True)
s_genes = ''
for s in marker_genes:
s_genes = s_genes + ' ' + s
plt.title(str(len(true_label)) + 'cells ' + str(title_string) + '\n marker genes:' + s_genes, loc='left')
ax1.scatter(U[:, 0], U[:, 1], c=true_label_int, cmap='jet', s=4, alpha=0.7)
ax2.scatter(U[:, 0], U[:, 1], c=c_pt, cmap='jet', s=4, alpha=0.7)
ax3.scatter(U[:, 0], U[:, 1], c=c_type, cmap='jet', s=4, alpha=0.7)
plt.show()
knn_hnsw = make_knn_embeddedspace(U)
super_clus_ds_PCA_loc = sc_loc_ofsuperCluster_PCAspace(v0, v1, np.arange(0, len(labels)))
true_label_formatted = [int(10 * i) for i in u_truelabel]
draw_trajectory_gams(U, super_clus_ds_PCA_loc, labels, super_labels, v0.edgelist_maxout,
v1.x_lazy, v1.alpha_teleport, c_pt, true_label_int, knn=v0.knn,
final_super_terminal=v1.revised_super_terminal_clusters,
sub_terminal_clusters=v1.terminal_clusters,
title_str='Markov Hitting Times (Gams)', ncomp=28)
# draw_sc_evolution_trajectory_dijkstra(v1, U, knn_hnsw, v0.full_graph_shortpath, np.arange(0, n_umap))
plt.show()
def main_scATAC_zscores(knn=20, ncomps=30, cluster_graph_pruning_std=.15):
# datasets can be downloaded from the link below
# https://nbviewer.jupyter.org/github/pinellolab/STREAM/blob/master/tutorial/archives/v0.4.1_and_earlier_versions/4.STREAM_scATAC-seq_k-mers.ipynb?flush_cache=true
# these are the kmers based feature matrix
# https://www.dropbox.com/sh/zv6z7f3kzrafwmq/AACAlU8akbO_a-JOeJkiWT1za?dl=0
# https://github.com/pinellolab/STREAM_atac
##KMER START
df = pd.read_csv("/home/shobi/Trajectory/Datasets/scATAC_Hemato/zscore_scaled_kmer.tsv",
sep='\t') # TF Zcores from STREAM NOT the original buenrostro corrected PCs
df = df.transpose()
print('df kmer size', df.shape)
new_header = df.iloc[0] # grab the first row for the header
df = df[1:] # take the data less the header row
df.columns = new_header # set the header row as the df header
df = df.apply(pd.to_numeric) # CONVERT ALL COLUMNS
true_label = pd.read_csv("/home/shobi/Trajectory/Datasets/scATAC_Hemato/cell_label.csv", sep='\t')
true_label = true_label['cell_type'].values
cell_types = ['GMP', 'HSC', 'MEP', 'CLP', 'CMP', 'LMPP', 'MPP', 'pDC', 'mono']
cell_dict = {'pDC': 'purple', 'mono': 'yellow', 'GMP': 'orange', 'MEP': 'red', 'CLP': 'aqua',
'HSC': 'black', 'CMP': 'moccasin', 'MPP': 'darkgreen', 'LMPP': 'limegreen'}
### KMER end
### for MOTIFS start
'''
df = pd.read_csv("/home/shobi/Trajectory/Datasets/scATAC_Hemato/pinellolab_chromVAR_buenrostro_motifs_noHSC0828.csv",sep=',') # TF Zcores from STREAM NOT the original buenrostro corrected PCs
cell_annot = df["Unnamed: 0"].values
df = df.drop('Unnamed: 0', 1)
print('nans', np.sum(df.isna().sum()))
df = df.interpolate()
print('nans', df.isna().sum())
#df = pd.read_csv("/home/shobi/Trajectory/Datasets/scATAC_Hemato/zscore_scaled_transpose.csv",sep=',')
print(df.head, df.shape)
cell_types = ['GMP', 'HSC', 'MEP', 'CLP', 'CMP', 'LMuPP', 'MPP', 'pDC', 'mono', 'UNK']
cell_dict = {'pDC': 'purple', 'mono': 'yellow', 'GMP': 'orange', 'MEP': 'red', 'CLP': 'aqua',
'HSC': 'black', 'CMP': 'moccasin', 'MPP': 'darkgreen', 'LMuPP': 'limegreen','UNK':'gray'}
true_label = []
found_annot=False
count = 0
for annot in cell_annot:
for cell_type_i in cell_types:
if cell_type_i in annot:
true_label.append(cell_type_i)
if found_annot== True: print('double count', annot)
found_annot = True
if found_annot ==False:
true_label.append('unknown')
print('annot is unknown', annot)
count = count+1
found_annot=False
'''
## FOR MOTIFS end
print('true label', true_label)
print('len true label', len(true_label))
df_Buen = pd.read_csv('/home/shobi/Trajectory/Datasets/scATAC_Hemato/scATAC_hemato_Buenrostro.csv', sep=',')
# df.to_csv("/home/shobi/Trajectory/Datasets/scATAC_Hemato/zscore_scaled_transpose.csv")
df = df.reset_index(drop=True)
df_num = df.values
df_num = pd.DataFrame(df_num, columns=new_header)
print('df_num', df_num.head)
df_num = df_num.apply(pd.to_numeric)
df_num['true'] = true_label
print(df_num.groupby('true', as_index=False).mean())
print('df', df.head(), df.shape)
print(df.columns.tolist()[0:10])
for i in ['AGATAAG', 'CCTTATC']:
if i in df.columns: print(i, ' is here')
ad = sc.AnnData(df)
ad.var_names = df.columns
ad.obs['cell_type'] = true_label
sc.tl.pca(ad, svd_solver='arpack', n_comps=300)
color = []
for i in true_label:
color.append(cell_dict[i])
# PCcol = ['PC1', 'PC2', 'PC3', 'PC4', 'PC5']
# embedding = umap.UMAP(n_neighbors=15, random_state=2, repulsion_strength=0.5).fit_transform(ad.obsm['X_pca'][:, 1:5])
# embedding = umap.UMAP(n_neighbors=20, random_state=2, repulsion_strength=0.5).fit_transform(df_Buen[PCcol])
# df_umap = pd.DataFrame(embedding)
# df_umap.to_csv('/home/shobi/Trajectory/Datasets/scATAC_Hemato/embedding_5PC.csv')
embedding = | pd.read_csv('/home/shobi/Trajectory/Datasets/scATAC_Hemato/embedding_5PC.csv') | pandas.read_csv |
from fctest.__EISData__ import EISData
import pandas as pd
import os
class G20EISData(EISData):
ENCODING = "ISO-8859-1"
def __init__(self, data_path):
path = os.path.normpath(data_path)
raw_data = pd.read_csv(path, sep='\t', encoding=self.ENCODING)
# split the relevant sections of the data
data_section = raw_data.iloc[48:, 2:].reset_index(drop=True)
col_names = raw_data.iloc[46, 2:].values
units_section = raw_data.iloc[47, 2:].values
data_section.columns = col_names
test_date = pd.to_datetime(raw_data.iloc[2, 2] + ' ' + raw_data.iloc[3, 2])
mea_area = float(raw_data.iloc[12, 2])
initial_freq = float(raw_data.iloc[8, 2])
final_freq = float(raw_data.iloc[9, 2])
pts_per_decade = float(raw_data.iloc[10, 2])
# relevant parameters
freqs = pd.to_numeric(data_section.Freq).values
z_mod = pd.to_numeric(data_section.Zmod).values
z_real = | pd.to_numeric(data_section.Zreal) | pandas.to_numeric |
import numpy as np
import yt
import trident
import pandas as pd
from salsa.absorber_extractor import AbsorberExtractor
from salsa.utils.collect_files import collect_files, check_rays
from salsa.utils.functions import ion_p_num
from salsa.generate_light_rays import generate_lrays
from mpi4py import MPI
from yt.data_objects.static_output import \
Dataset
def generate_catalog(ds_file, n_rays,
ray_directory,
ion_list,
method="spice",
center=None,
impact_param_lims=(0, 200),
ray_length=200,
field_parameters={},
fields=[],
ftype='gas',
cut_region_filters=[],
extractor_kwargs={},
units_dict={}):
"""
Generates a catalog of absorber properties from a given number of lightrays
through a dataset. Will look if lrays have already been made, otherwise will
create them by uniform randomly sampling impact parameter. Uses OpenMPI to
split up light ray creation and absorber extraction among processors.
Parameters
----------
ds_file: str or dataset
either path to a dataset or a loaded dataset
n_rays: int
number of rays to sample
ray_directory: str
path to directory where rays loaded from or will be saved if they haven't
been constructed
ion_list: list str
list of ions to find absorbers from.
method: "spice" or "spectacle", optional
Choose which method to use to extract absorbers.
center: list or array, optional
The center of the galaxy in units 'code_length'. If None, defaults to
domain_center.
Default: None
impact_param_lims: tuple or list, optional
The range on which to sample impact parameter when constructing lightrays
Default: (0, 200)
ray_length: float, optional
The length of each light ray in units kpc.
Default: 200
field_parameters: dict, optional
The parameters that will be passed to trident during ray construction.
This can be something like "bulk_velocity" so that a radial velocity can
be saved.
fields: list of str
YT fields to add to lightrays. Will be included in catalog if "spice" method
is selected
ftype : str
The field to be passed to trident that ion fields will be added to, i.e.
``('gas', 'H_p0_number_density')``. ``'gas'`` should work for most grid-based
simulations. For particle-based simulations this will not work and needs
to be changed. ``'PartType0'`` often works though it varies.
See ``trident.add_ion_fields()`` for more information
cut_region_filters: list of strings, optional
a list of filters defined by the way you use Cut Regions in YT
Default: None
extractor_kwargs: dict, optional
Additional key word arguments to pass to the absorber_extractor to
modify default extraction parameters. Either a single dict that will be
passed for each ion. Or a dict of ions pointing toward individual extractor
kwargs. Examples:
``extractor_kwargs={'H I':{'absorber_min':14}, 'C IV':{'absorber_min':13}, 'O VI':{}}``
``extractor_kwargs={'absorber_min':13.5}``
The first will set different absober mins for each ion, with O VI taking
default as specified by ``salsa.utils.defaults.default_cloud_dict``. The
second example will set the minimum absorber as 13.5 for every ion.
**NOTE** you cannot mix the two formats. If one ion is specified then
all ions must be specified (see 'O VI' included even though it's dictionary is empty)
Default: {}
units_dict: dict, optional
dictionary of units to use for the fields when extracting properties
(only relevant for 'spice' method)
Default: None
Returns
-------
full_catalog: pandas.DataFrame
pandas dataframe containing all of the absorbers extracted from all
the lightrays. If no absorbers are found, None is returned
"""
comm = MPI.COMM_WORLD
# check ds_file is need to load
if isinstance(ds_file, str):
ds = yt.load(ds_file)
elif isinstance(ds_file, Dataset):
ds = ds_file
# add ion number density to fields to check
check_fields = fields.copy()
for i in ion_list:
check_fields.append(ion_p_num(i))
#check if rays already made
check =check_rays(ray_directory, n_rays, check_fields)
my_ray_bool= np.array([check], dtype=int)
ray_bool = np.array([0], dtype=int)
# share if rays made already or not
comm.Barrier()
comm.Allreduce([my_ray_bool, MPI.INT],[ray_bool, MPI.INT], op=MPI.LAND)
#generate rays randomly
if not ray_bool[0]:
#set a center
if center is None:
center=ds.domain_center
#construct random rays in ray_directory
generate_lrays(ds, center,
n_rays, impact_param_lims[1],
min_impact_param=impact_param_lims[0],
length=ray_length,
fld_params=field_parameters,
ion_list=ion_list,
fields=fields,
ftype=ftype,
out_dir=ray_directory)
comm.Barrier()
#Extract Absorbers
#collect and split up ray files
ray_files = np.array(collect_files(ray_directory, key_words=['ray']), dtype=str)
ray_files_split = np.array_split(ray_files, comm.size)
my_rays = ray_files_split[ comm.rank ]
#add directory path to rays
my_ray_files=[ ray_directory+'/'+r for r in my_rays ]
#create catalog for each ion
df_list=[]
for ion in ion_list:
#check if extractor kwargs has ion specific information
if ion in extractor_kwargs.keys():
curr_kwargs = extractor_kwargs[ion]
else:
curr_kwargs=extractor_kwargs.copy()
# setup absorber extractor
abs_ext = AbsorberExtractor(ds, my_ray_files[0], ion_name=ion,
cut_region_filters=cut_region_filters,
**curr_kwargs)
# get catalogs
my_df = get_absorbers(abs_ext, my_ray_files, method, fields=fields, units_dict=units_dict)
if my_df is not None:
df_list.append(my_df)
# Return Nonetype if no absorbers found
if df_list == []:
my_catalog = None
else:
my_catalog= | pd.concat(df_list, ignore_index=True) | pandas.concat |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pathlib import Path
import numpy as np
import pandas as pd
from imctools.io.mcdparser import McdParser
from imctools.io.abstractparserbase import AcquisitionError
from imctools.io.imcfolderwriter import ImcFolderWriter
from xml.etree import cElementTree as ElementTree
from .logger import logger
class MCD:
def __init__(self, mcdpath: Path):
self.mcdpath = mcdpath
self.imc_name = mcdpath.stem
self.acquisitions = {}
@staticmethod
def _get_acquisition(mcd, ac_id):
try:
imc_ac = mcd.get_imc_acquisition(ac_id)
except AcquisitionError as e:
imc_ac = None
return imc_ac
def load_acquisitions(self):
logger.debug("Loading acquisitions. Make take some time...")
for ac_id in self.mcd.acquisition_ids:
imc_ac = self._get_acquisition(self.mcd, ac_id)
if imc_ac is None:
continue
#imc_ac.original_metadata = ElementTree.fromstring(imc_ac.original_metadata)
#imc_ac.original_metadata = imc_ac.original_metadata.decode("ascii")
self.acquisitions[ac_id] = imc_ac
logger.info(f"{len(self.acquisitions)} acquisitions loaded.")
def peek(self):
logger.info(f"Going to peek inside MCD file {self.mcdpath}")
logger.debug("Loading MCD.")
self.mcd = McdParser(str(self.mcdpath))
logger.debug("MCD loaded. Peeking started.")
acquisition_ids = self.mcd.acquisition_ids
self.acquisition_ids = []
self.offsets = {}
self.n_channels = {}
self.channel_metals = {}
self.channel_labels = {}
for ac_id in acquisition_ids:
ac = self.mcd.meta.get_acquisitions()[ac_id]
if ac.data_offset_end - ac.data_offset_start < 1e5:
logger.warn(f"Acquisition {ac_id} appears empty. Skipping.")
continue
metals, labels = list(zip(*self.mcd.get_acquisition_channels(ac_id).values()))
metals = [m.replace("(", "").replace(")", "") for m in metals]
offset = len(metals) - len(set(metals) - set("XYZ"))
self.offsets[ac_id] = offset
self.channel_labels[ac_id] = labels[offset:]
self.channel_metals[ac_id] = metals[offset:]
self.n_channels[ac_id] = len(metals[offset:])
self.acquisition_ids.append(ac_id)
logger.debug("Peeking finished.")
def load_mcd(self):
self.fileprefix = self.mcdpath.stem
self.peek()
self.load_acquisitions()
self.n_acquisitions = len(self.acquisitions)
def get_xyz_data(self, ac_id):
imc_ac = self.acquisitions.get(ac_id)
return imc_ac._data[:3]
def get_data(self, ac_id, ch_int=None):
imc_ac = self.acquisitions.get(ac_id)
offset = imc_ac._offset
if ch_int is not None:
return imc_ac._data[offset + ch_int]
return imc_ac._data[offset:]
def set_data(self, new_data, ac_id, ch_int=None):
imc_ac = self.acquisitions.get(ac_id)
offset = imc_ac._offset
if ch_int is not None:
imc_ac._data[offset + ch_int] = new_data
else:
assert len(new_data.shape) == 3
imc_ac._data[offset:] = new_data
def _write_imcfolder(self, acquisitions, prefix, suffix):
raise NotImplementedError("This is broken")
logger.debug(f"Saving IMCfolder with prefix:[{prefix}] and suffix:[{suffix}]")
# TODO:This doesn't utilize acquisitions yet
outpath = Path(prefix + suffix)
if not outpath.exists():
outpath.mkdir(exist_ok=True)
self.mcd.save_meta_xml(str(outpath))
ifw = ImcFolderWriter(str(outpath), mcddata=self.mcd)
ifw.write_imc_folder()
logger.info(f"IMC-Folder written to {str(outpath)}")
def _write_tiff(self, acquisitions, prefix, suffix):
logger.debug(f"Saving tiffs with prefix:[{prefix}] and suffix:[{suffix}]")
outpath = Path(prefix + suffix)
if not outpath.exists():
outpath.mkdir(exist_ok=True)
for ac_id in acquisitions.keys():
subdir = outpath / f"{prefix}{suffix}.a{ac_id}"
if not subdir.exists():
subdir.mkdir(exist_ok=True)
fmt = "{0}/{1}{2}.a{3}/{1}{2}.a{3}.{4}.{5}.ome.tiff"
for ac_id, channel_list in acquisitions.items():
imc_ac = self.acquisitions.get(ac_id)
for ch_id, metal, label in channel_list:
tiff = fmt.format(outpath, prefix, suffix, ac_id, metal, label)
iw = imc_ac.get_image_writer(filename=str(tiff), metals=[metal])
iw.save_image(mode="ome", compression=0, dtype=None, bigtiff=False)
logger.debug(f"{tiff} saved.")
logger.info(f"All tiffs saved.")
def _write_tiffstack(self, acquisitions, prefix, suffix):
logger.debug(f"Saving tiffstack with prefix:[{prefix}] and suffix:[{suffix}]")
fmt = "{}{}.a{}.ome.tiff"
for ac_id in acquisitions.keys():
tiff = fmt.format(prefix, suffix, ac_id)
imc_ac = self.acquisitions.get(ac_id)
iw = imc_ac.get_image_writer(filename=str(tiff))
iw.save_image(mode="ome", compression=0, dtype=None, bigtiff=False)
logger.debug(f"{tiff} saved.")
logger.info(f"All tiffstacks saved.")
def _write_text(self, acquisitions, prefix, suffix):
logger.debug(f"Saving text data with prefix:[{prefix}] and suffix:[{suffix}]")
fmt = "{}{}.a{}.txt"
for ac_id, channel_list in acquisitions.items():
logger.debug(f"Creating text data for acquisition {ac_id}...")
outfile = fmt.format(prefix, suffix, ac_id)
ch_ids, ch_metals, ch_labels = map(list, zip(*channel_list))
xyz = self.get_xyz_data(ac_id)
xyz = xyz[:].reshape(3, -1).T
logger.debug(f"XYZ data size for {ac_id}: {xyz.shape}")
data = self.get_data(ac_id)[ch_ids]
_n = data.shape[0]
data = data[:].reshape(_n, -1).T
logger.debug(f"Channel data size for {ac_id}: {data.shape}")
logger.debug(f"Merging XYZ data with channel data.")
data = np.column_stack((xyz, data))
size = data.nbytes
metals = [f"{metal}({label})" for _, metal, label in channel_list]
data = | pd.DataFrame(data, columns=["X", "Y", "Z"] + metals) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import io
import os
import pickle
import math
import numpy as np
import pandas as pd
import lightgbm as lgb
from tqdm.auto import tqdm
class ScoringService(object):
# 訓練期間終了日
TRAIN_END_HIGH = '2019-12-01'
TRAIN_END_LOW = '2018-12-31'
TRAIN_END = '2018-12-31'
# 評価期間開始日
VAL_START = '2019-02-01'
# 評価期間終了日
VAL_END = '2019-12-01'
# テスト期間開始日
TEST_START = '2020-01-01'
# 目的変数
TARGET_LABELS = ['label_high_20', 'label_low_20']
# データをこの変数に読み込む
dfs = None
# モデルをこの変数に読み込む
models = None
# 対象の銘柄コードをこの変数に読み込む
codes = None
@classmethod
def getCodes(cls):
return cls.codes
@classmethod
def get_inputs(cls, dataset_dir):
"""
Args:
dataset_dir (str) : path to dataset directory
Returns:
dict[str]: path to dataset files
"""
inputs = {
'stock_list': f'{dataset_dir}/stock_list.csv.gz',
'stock_price': f'{dataset_dir}/stock_price.csv.gz',
'stock_fin': f'{dataset_dir}/stock_fin.csv.gz',
# 'stock_fin_price': f'{dataset_dir}/stock_fin_price.csv.gz',
'stock_labels': f'{dataset_dir}/stock_labels.csv.gz',
}
return inputs
@classmethod
def get_dataset(cls, inputs):
"""
Args:
inputs (list[str]): path to dataset files
Returns:
dict[pd.DataFrame]: loaded data
"""
if cls.dfs is None:
cls.dfs = {}
for k, v in inputs.items():
cls.dfs[k] = pd.read_csv(v)
# DataFrameのindexを設定します。
if k == "stock_price":
cls.dfs[k].loc[:, "datetime"] = pd.to_datetime(cls.dfs[k].loc[:, "EndOfDayQuote Date"])
cls.dfs[k].set_index("datetime", inplace=True)
elif k in ["stock_fin", "stock_fin_price", "stock_labels"]:
cls.dfs[k].loc[:, "datetime"] = pd.to_datetime(cls.dfs[k].loc[:, "base_date"])
cls.dfs[k].set_index("datetime", inplace=True)
return cls.dfs
@classmethod
def get_codes(cls, dfs):
"""
Args:
dfs (dict[pd.DataFrame]): loaded data
Returns:
array: list of stock codes
"""
stock_list = dfs['stock_list'].copy()
# 予測対象の銘柄コードを取得
cls.codes = stock_list[stock_list['prediction_target'] == True]['Local Code'].values
@classmethod
def get_features_and_label(cls, dfs, codes, feature, label):
"""
Args:
dfs (dict[pd.DataFrame]): loaded data
codes (array) : target codes
feature (pd.DataFrame): features
label (str) : label column name
Returns:
train_X (pd.DataFrame): training data
train_y (pd.DataFrame): label for train_X
val_X (pd.DataFrame): validation data
val_y (pd.DataFrame): label for val_X
test_X (pd.DataFrame): test data
test_y (pd.DataFrame): label for test_X
"""
# 分割データ用の変数を定義
trains_X, vals_X, tests_X = [], [], []
trains_y, vals_y, tests_y = [], [], []
# 銘柄コード毎に特徴量を作成
print(label,' Create Feature value')
for code in tqdm(codes):
# 特徴量取得
feats = feature[feature['code'] == code]
# 特定の銘柄コードのデータに絞る
stock_labels = dfs['stock_labels'][dfs['stock_labels']['Local Code'] == code].copy()
# 特定の目的変数に絞る
labels = stock_labels[label]
# nanを削除
labels.dropna(inplace=True)
if feats.shape[0] > 0 and labels.shape[0] > 0:
# 特徴量と目的変数のインデックスを合わせる
labels = labels.loc[labels.index.isin(feats.index)]
feats = feats.loc[feats.index.isin(labels.index)]
labels.index = feats.index
# データを分割
_train_X = {}
_val_X = {}
_test_X = {}
_train_y = {}
_val_y = {}
_test_y = {}
if label == 'label_high_20':
_train_X = feats[: cls.TRAIN_END_HIGH].copy()
_val_X = feats[cls.VAL_START : cls.VAL_END].copy()
_test_X = feats[cls.TEST_START :].copy()
_train_y = labels[: cls.TRAIN_END_HIGH].copy()
_val_y = labels[cls.VAL_START : cls.VAL_END].copy()
_test_y = labels[cls.TEST_START :].copy()
elif label == 'label_low_20':
_train_X = feats[: cls.TRAIN_END_LOW].copy()
_val_X = feats[cls.VAL_START : cls.VAL_END].copy()
_test_X = feats[cls.TEST_START :].copy()
_train_y = labels[: cls.TRAIN_END_LOW].copy()
_val_y = labels[cls.VAL_START : cls.VAL_END].copy()
_test_y = labels[cls.TEST_START :].copy()
else:
_train_X = feats[: cls.TRAIN_END].copy()
_val_X = feats[cls.VAL_START : cls.VAL_END].copy()
_test_X = feats[cls.TEST_START :].copy()
_train_y = labels[: cls.TRAIN_END].copy()
_val_y = labels[cls.VAL_START : cls.VAL_END].copy()
_test_y = labels[cls.TEST_START :].copy()
# データを配列に格納 (後ほど結合するため)
trains_X.append(_train_X)
vals_X.append(_val_X)
tests_X.append(_test_X)
trains_y.append(_train_y)
vals_y.append(_val_y)
tests_y.append(_test_y)
# 銘柄毎に作成した説明変数データを結合します。
train_X = pd.concat(trains_X)
val_X = pd.concat(vals_X)
test_X = pd.concat(tests_X)
# 銘柄毎に作成した目的変数データを結合します。
train_y = pd.concat(trains_y)
val_y = pd.concat(vals_y)
test_y = pd.concat(tests_y)
return train_X, train_y, val_X, val_y, test_X, test_y
#増加率の計算
@classmethod
def get_Rate_of_increase(cls, df):
df_return_1 = df.shift(1)
return (df - df_return_1) / df_return_1
@classmethod
def get_features_for_predict(cls, dfs, code, label, start_dt='2016-01-01'):
"""
Args:
dfs (dict) : dict of pd.DataFrame include stock_fin, stock_price
code (int) : A local code for a listed company
start_dt (str): specify date range
Returns:
feature DataFrame (pd.DataFrame)
"""
# 特徴量の作成には過去60営業日のデータを使用しているため、
# 予測対象日からバッファ含めて土日を除く過去90日遡った時点から特徴量を生成します
n = 90
# 特定の銘柄コードのデータに絞る
fin_data = dfs['stock_fin'][dfs['stock_fin']['Local Code'] == code]
# 特徴量の生成対象期間を指定
fin_data = fin_data.loc[pd.Timestamp(start_dt) - pd.offsets.BDay(n) :]
#データを取得
fin_feats = fin_data[['Result_FinancialStatement FiscalYear']].copy()
fin_feats['Result_FinancialStatement NetSales'] = fin_data['Result_FinancialStatement NetSales']
fin_feats['Result_FinancialStatement OperatingIncome'] = fin_data['Result_FinancialStatement OperatingIncome']
fin_feats['Result_FinancialStatement OrdinaryIncome'] = fin_data['Result_FinancialStatement OrdinaryIncome']
fin_feats['Result_FinancialStatement NetIncome'] = fin_data['Result_FinancialStatement NetIncome']
fin_feats['Result_FinancialStatement TotalAssets'] = fin_data['Result_FinancialStatement TotalAssets']
fin_feats['Result_FinancialStatement NetAssets'] = fin_data['Result_FinancialStatement NetAssets']
fin_feats['Result_FinancialStatement CashFlowsFromOperatingActivities'] = fin_data['Result_FinancialStatement CashFlowsFromOperatingActivities']
fin_feats['Result_FinancialStatement CashFlowsFromFinancingActivities'] = fin_data['Result_FinancialStatement CashFlowsFromFinancingActivities']
fin_feats['Result_FinancialStatement CashFlowsFromInvestingActivities'] = fin_data['Result_FinancialStatement CashFlowsFromInvestingActivities']
fin_feats['Forecast_FinancialStatement FiscalYear'] = fin_data['Forecast_FinancialStatement FiscalYear']
fin_feats['Forecast_FinancialStatement NetSales'] = fin_data['Forecast_FinancialStatement NetSales']
fin_feats['Forecast_FinancialStatement OperatingIncome'] = fin_data['Forecast_FinancialStatement OperatingIncome']
fin_feats['Forecast_FinancialStatement OrdinaryIncome'] = fin_data['Forecast_FinancialStatement OrdinaryIncome']
fin_feats['Forecast_FinancialStatement NetIncome'] = fin_data['Forecast_FinancialStatement NetIncome']
fin_feats['Result_Dividend FiscalYear'] = fin_data['Result_Dividend FiscalYear']
fin_feats['Result_Dividend QuarterlyDividendPerShare'] = fin_data['Result_Dividend QuarterlyDividendPerShare']
fin_feats['Forecast_Dividend FiscalYear'] = fin_data['Forecast_Dividend FiscalYear']
fin_feats['Forecast_Dividend QuarterlyDividendPerShare'] = fin_data['Forecast_Dividend QuarterlyDividendPerShare']
fin_feats['Forecast_Dividend AnnualDividendPerShare'] = fin_data['Forecast_Dividend AnnualDividendPerShare']
fin_feats['Result_FinancialStatement ReportType'] = fin_data['Result_FinancialStatement ReportType']
fin_feats['Result_FinancialStatement ReportType'].replace(['Q1','Q2','Q3','Annual',],[0,1,2,3],inplace=True)
# 欠損値処理
fin_feats = fin_feats.fillna(0)
# 特定の銘柄コードのデータに絞る
price_data = dfs['stock_price'][dfs['stock_price']['Local Code'] == code]
# 特徴量の生成対象期間を指定
price_data = price_data.loc[pd.Timestamp(start_dt) - pd.offsets.BDay(n) :]
# 終値のみに絞る
feats = price_data[['EndOfDayQuote ExchangeOfficialClose']].copy()
#高値と安値の差額
price_data['Stock price difference'] = price_data['EndOfDayQuote High'] - price_data['EndOfDayQuote Low']
#騰落幅。前回終値と直近約定値の価格差
feats['EndOfDayQuote ChangeFromPreviousClose'] = price_data['EndOfDayQuote ChangeFromPreviousClose']
#騰落値
feats['EndOfDayQuote RisingAndFallingPrices'] = price_data['EndOfDayQuote PreviousClose'] + price_data['EndOfDayQuote ChangeFromPreviousClose']
#累積調整係数
feats['EndOfDayQuote CumulativeAdjustmentFactor'] = price_data['EndOfDayQuote CumulativeAdjustmentFactor']
#過去0,5,10,15,20日前の株価、出来高
for nn in range(0, 21, 5):
nn_str = str(nn)
#高値
feats['EndOfDayQuote High Return' + nn_str] = price_data['EndOfDayQuote High'].shift(nn)
#安値
feats['EndOfDayQuote Low Return' + nn_str] = price_data['EndOfDayQuote Low'].shift(nn)
#始値
feats['EndOfDayQuote Open Return' + nn_str] = price_data['EndOfDayQuote Open'].shift(nn)
#終値
feats['EndOfDayQuote Close Return' + nn_str] = price_data['EndOfDayQuote Close'].shift(nn)
#売買高
feats['EndOfDayQuote Volume Return' + nn_str] = price_data['EndOfDayQuote Volume'].shift(nn)
#銘柄情報
list_data = dfs['stock_list'][dfs['stock_list']['Local Code'] == code].copy()
#銘柄の33業種区分(コード)
feats['33 Sector(Code)'] = list_data['33 Sector(Code)'].values[0]
#銘柄の17業種区分(コード)
feats['17 Sector(Code)'] = list_data['17 Sector(Code)'].values[0]
#発行済株式数
feats['IssuedShareEquityQuote IssuedShare'] = list_data['IssuedShareEquityQuote IssuedShare'].values[0]
#Size Code (New Index Series)
list_data['Size Code (New Index Series)'] = list_data['Size Code (New Index Series)'].replace('-', 0).astype(int)
million = 1000000
#来期の予測EPS(1株あたりの利益)
forecast_EPS = (fin_feats['Forecast_FinancialStatement NetIncome'] * million) / feats['IssuedShareEquityQuote IssuedShare']
#feats['Forecast EPS'] = forecast_EPS
#来期の予測PER(株価収益率)
feats['Forecast PER ExchangeOfficialClose'] = price_data['EndOfDayQuote ExchangeOfficialClose'] / forecast_EPS
#売買高加重平均価格(VWAP)
feats['EndOfDayQuote VWAP'] = price_data['EndOfDayQuote VWAP']
# 財務データの特徴量とマーケットデータの特徴量のインデックスを合わせる
feats = feats.loc[feats.index.isin(fin_feats.index)]
fin_feats = fin_feats.loc[fin_feats.index.isin(feats.index)]
# データを結合
feats = pd.concat([feats, fin_feats], axis=1).dropna()
#決算種別gごとに分ける
#Q1
q1 = feats.loc[feats['Result_FinancialStatement ReportType'] == 0].copy()
#Q2
q2 = feats.loc[feats['Result_FinancialStatement ReportType'] == 1].copy()
#Q3
q3 = feats.loc[feats['Result_FinancialStatement ReportType'] == 2].copy()
#Annual
annual = feats.loc[feats['Result_FinancialStatement ReportType'] == 3].copy()
#決算
settlement = fin_data[['Forecast_FinancialStatement ReportType']].copy()
settlement['Forecast_FinancialStatement ReportType'].replace(['Q1','Q2','Q3','Annual',],[0,1,2,3],inplace=True)
settlement['Forecast_FinancialStatement FiscalYear'] = fin_data['Forecast_FinancialStatement FiscalYear']
settlement['Forecast_FinancialStatement NetSales'] = fin_data['Forecast_FinancialStatement NetSales']
settlement['Forecast_FinancialStatement OperatingIncome'] = fin_data['Forecast_FinancialStatement OperatingIncome']
settlement['Result_FinancialStatement OperatingIncome'] = fin_data['Result_FinancialStatement OperatingIncome']
#前の行と値が同じかどうか、同じならTrueを格納
settlement['Forecast_FinancialStatement ReportType Flag'] = settlement['Forecast_FinancialStatement ReportType'].eq(settlement['Forecast_FinancialStatement ReportType'].shift(1))
settlement['Forecast_FinancialStatement FiscalYear Flag'] = settlement['Forecast_FinancialStatement FiscalYear'].eq(settlement['Forecast_FinancialStatement FiscalYear'].shift(1))
#0,1に変換
settlement['Forecast_FinancialStatement ReportType Flag'] = settlement['Forecast_FinancialStatement ReportType Flag'] * 1
settlement['Forecast_FinancialStatement FiscalYear Flag'] = settlement['Forecast_FinancialStatement FiscalYear Flag'] * 1
#実行フラグを立てる
settlement['Execution flag'] = ((settlement['Forecast_FinancialStatement ReportType Flag'] == 1) & (settlement['Forecast_FinancialStatement FiscalYear Flag'] == 1))
#実行フラグがTrueなら値を格納
settlement['Forecast_FinancialStatement NetSales Shift'] = 0
settlement['Forecast_FinancialStatement NetSales Shift'].where(settlement['Execution flag'] != True, settlement['Forecast_FinancialStatement NetSales'].shift(1), inplace=True)
settlement['Forecast_FinancialStatement OperatingIncome Shift'] = 0
settlement['Forecast_FinancialStatement OperatingIncome Shift'].where(settlement['Execution flag'] != True, settlement['Forecast_FinancialStatement OperatingIncome'].shift(1), inplace=True)
settlement['Result_FinancialStatement OperatingIncome Shift'] = 0
settlement['Result_FinancialStatement OperatingIncome Shift'].where(settlement['Execution flag'] != True, settlement['Result_FinancialStatement OperatingIncome'].shift(1), inplace=True)
#負債
liabilities = feats['Result_FinancialStatement TotalAssets'] - feats['Result_FinancialStatement NetAssets']
#AnnualのEPS(1株当たり利益)
annual_EPS = (annual['Result_FinancialStatement NetIncome'] * million) / list_data['IssuedShareEquityQuote IssuedShare'].values[0]
if label == 'label_high_20':
#Size Code (New Index Series)
feats['Size Code (New Index Series)'] = list_data['Size Code (New Index Series)'].values[0]
#Annual純利益増加率
annual['Annual Net income increase rate'] = cls.get_Rate_of_increase(annual['Result_FinancialStatement NetIncome'])
#欠損値処理を行います。
annual = annual.replace([np.nan], 0)
feats['Annual Net income increase rate'] = annual['Annual Net income increase rate']
#Q1,Q2,Q3,Annualの営業利益増加率
q1['Q1 Operating income increase rate'] = cls.get_Rate_of_increase(q1['Result_FinancialStatement OperatingIncome'])
q2['Q2 Operating income increase rate'] = cls.get_Rate_of_increase(q2['Result_FinancialStatement OperatingIncome'])
q3['Q3 Operating income increase rate'] = cls.get_Rate_of_increase(q3['Result_FinancialStatement OperatingIncome'])
annual['Annual Operating income increase rate'] = cls.get_Rate_of_increase(annual['Result_FinancialStatement OperatingIncome'])
#欠損値処理を行います。
q1 = q1.replace([np.nan], 0)
q2 = q2.replace([np.nan], 0)
q3 = q3.replace([np.nan], 0)
annual = annual.replace([np.nan], 0)
feats['Q1 Operating income increase rate'] = q1['Q1 Operating income increase rate']
feats['Q2 Operating income increase rate'] = q2['Q2 Operating income increase rate']
feats['Q3 Operating income increase rate'] = q3['Q3 Operating income increase rate']
feats['Annual Operating income increase rate'] = annual['Annual Operating income increase rate']
#Q1,Q2,Q3,Annualの当期純利益増加率
q1['Q1 Net income increase rate'] = cls.get_Rate_of_increase(q1['Result_FinancialStatement NetIncome'])
q2['Q2 Net income increase rate'] = cls.get_Rate_of_increase(q2['Result_FinancialStatement NetIncome'])
q3['Q3 Net income increase rate'] = cls.get_Rate_of_increase(q3['Result_FinancialStatement NetIncome'])
annual['Annual Net income increase rate'] = cls.get_Rate_of_increase(annual['Result_FinancialStatement NetIncome'])
#欠損値処理を行います。
q1 = q1.replace([np.nan], 0)
q2 = q2.replace([np.nan], 0)
q3 = q3.replace([np.nan], 0)
annual = annual.replace([np.nan], 0)
feats['Q1 Net income increase rate'] = q1['Q1 Net income increase rate']
feats['Q2 Net income increase rate'] = q2['Q2 Net income increase rate']
feats['Q3 Net income increase rate'] = q3['Q3 Net income increase rate']
feats['Annual Net income increase rate'] = annual['Annual Net income increase rate']
#PER(株価収益率)
feats['Annual PER'] = price_data['EndOfDayQuote ExchangeOfficialClose'] / annual_EPS
#決算営業利益増加率
feats['Settlement operating income increase rate'] = (settlement['Result_FinancialStatement OperatingIncome'] - settlement['Result_FinancialStatement OperatingIncome Shift']) / settlement['Result_FinancialStatement OperatingIncome Shift']
#欠損値処理を行います。
feats = feats.replace([np.nan], -99999)
#来期決算種別
feats['Forecast_FinancialStatement ReportType'] = settlement['Forecast_FinancialStatement ReportType']
#来期の予想決算売上高増加率
feats['Expected settlement of accounts for the next fiscal year Sales increase rate'] = (settlement['Forecast_FinancialStatement NetSales'] - settlement['Forecast_FinancialStatement NetSales Shift']) / settlement['Forecast_FinancialStatement NetSales Shift']
#売上高増加率
feats['Sales growth rate'] = cls.get_Rate_of_increase(feats['Result_FinancialStatement NetSales'])
#営業利益増加率
feats['Operating income increase rate'] = cls.get_Rate_of_increase(feats['Result_FinancialStatement OperatingIncome'])
#経常利益増加率
feats['Ordinary income increase rate'] = cls.get_Rate_of_increase(feats['Result_FinancialStatement OrdinaryIncome'])
#BPS(1株あたりの純資産)
BPS = (feats['Result_FinancialStatement NetAssets'] * million) / feats['IssuedShareEquityQuote IssuedShare']
#PBR(株価純資産倍率)
feats['PBR'] = feats['EndOfDayQuote ExchangeOfficialClose'] / BPS
#CFPS(1株あたりのキャッシュフロー)
CFPS = (feats['Result_FinancialStatement CashFlowsFromOperatingActivities'] * million) / feats['IssuedShareEquityQuote IssuedShare']
#PCFR(株価キャッシュフロー倍率)
feats['PCFR'] = feats['EndOfDayQuote ExchangeOfficialClose'] / CFPS
#来期の予測配当利回り
feats['Forecast Dividend yield'] = feats['Forecast_Dividend AnnualDividendPerShare'] / feats['EndOfDayQuote ExchangeOfficialClose']
#時価総額
feats['Market capitalization'] = (feats['EndOfDayQuote ExchangeOfficialClose'] * million) * feats['IssuedShareEquityQuote IssuedShare']
#キャッシュフローマージン
feats['Forecast Cash flow margin'] = feats['Result_FinancialStatement CashFlowsFromOperatingActivities'] / feats['Forecast_FinancialStatement NetSales']
#高値と安値の5日間の差額の平均
feats['Stock price difference Mean 5'] = price_data['Stock price difference'].rolling(5).mean()
#5日間平均から当日株価を引く
EndOfDayQuote_ExchangeOfficialClose_Mean_5 = price_data['EndOfDayQuote ExchangeOfficialClose'].rolling(5).mean()
feats['Subtract the current days stock price from the 5-day average'] = EndOfDayQuote_ExchangeOfficialClose_Mean_5 - feats['EndOfDayQuote ExchangeOfficialClose']
#売上高に対しての負債割合
feats['Ratio of sales to liabilities'] = liabilities / feats['Result_FinancialStatement NetSales']
#負債増加率
feats['Debt growth rate'] = cls.get_Rate_of_increase(liabilities)
#終値の20営業日ボラティリティ
feats['20 business days volatility'] = (np.log(price_data['EndOfDayQuote ExchangeOfficialClose']).diff().rolling(20).std())
#終値の40営業日ボラティリティ
feats['40 business days volatility'] = (np.log(price_data['EndOfDayQuote ExchangeOfficialClose']).diff().rolling(40).std())
#終値の60営業日ボラティリティ
feats['60 business days volatility'] = (np.log(price_data['EndOfDayQuote ExchangeOfficialClose']).diff().rolling(60).std())
#終値の20営業日リターン
feats['20 business day return'] = price_data['EndOfDayQuote ExchangeOfficialClose'].pct_change(20)
#ドロップ
for nn in range(0, 21, 5):
nn_str = str(nn)
feats = feats.drop(['EndOfDayQuote High Return' + nn_str], axis=1)
feats = feats.drop(['EndOfDayQuote Low Return' + nn_str], axis=1)
feats = feats.drop(['EndOfDayQuote Open Return' + nn_str], axis=1)
feats = feats.drop(['EndOfDayQuote Close Return' + nn_str], axis=1)
elif label == 'label_low_20':
#Q1,Q2,Q3,Annualの売上高増加率
q1['Q1 Sales growth rate'] = cls.get_Rate_of_increase(q1['Result_FinancialStatement NetSales'])
q2['Q2 Sales growth rate'] = cls.get_Rate_of_increase(q2['Result_FinancialStatement NetSales'])
q3['Q3 Sales growth rate'] = cls.get_Rate_of_increase(q3['Result_FinancialStatement NetSales'])
annual['Annual Sales growth rate'] = cls.get_Rate_of_increase(annual['Result_FinancialStatement NetSales'])
#欠損値処理を行います。
q1 = q1.replace([np.nan], 0)
q2 = q2.replace([np.nan], 0)
q3 = q3.replace([np.nan], 0)
annual = annual.replace([np.nan], 0)
feats['Q1 Sales growth rate'] = q1['Q1 Sales growth rate']
feats['Q2 Sales growth rate'] = q2['Q2 Sales growth rate']
feats['Q3 Sales growth rate'] = q3['Q3 Sales growth rate']
feats['Annual Sales growth rate'] = annual['Annual Sales growth rate']
#Annual財務キャッシュフロー増加率
annual['Annual Rate of increase in financial cash flow'] = cls.get_Rate_of_increase(annual['Result_FinancialStatement CashFlowsFromFinancingActivities'])
#欠損値処理を行います。
annual = annual.replace([np.nan], 0)
feats['Annual Rate of increase in financial cash flow'] = annual['Annual Rate of increase in financial cash flow']
#Annual EPS(1株当たり利益)
feats['Annual EPS'] = annual_EPS
#欠損値処理を行います。
feats = feats.replace([np.nan], -99999)
#来期の予想決算営業利益増加率
feats['Expected settlement of accounts for the next fiscal year Operating income increase rate'] = (settlement['Forecast_FinancialStatement OperatingIncome'] - settlement['Forecast_FinancialStatement OperatingIncome Shift']) / settlement['Forecast_FinancialStatement OperatingIncome Shift']
#負債比率
feats['Debt ratio'] = liabilities / feats['Result_FinancialStatement NetAssets']
#利益率
Profit_rate = feats['Result_FinancialStatement NetIncome'] / feats['Result_FinancialStatement NetSales']
#利益率増加率
feats['Profit margin increase rate'] = cls.get_Rate_of_increase(Profit_rate)
#自己資本比率
feats['equity_ratio'] = feats['Result_FinancialStatement NetAssets'] / feats['Result_FinancialStatement TotalAssets']
#純利益増加率
feats['Net income increase rate'] = cls.get_Rate_of_increase(feats['Result_FinancialStatement NetIncome'])
#EPS(1株当たり利益)
EPS = feats['Result_FinancialStatement NetIncome'] / feats['IssuedShareEquityQuote IssuedShare']
#PER(株価収益率)
PER = price_data['EndOfDayQuote ExchangeOfficialClose'] / EPS
#目標株価
feats['Target stock price'] = EPS * PER
#ドロップ
feats = feats.drop(['EndOfDayQuote RisingAndFallingPrices','Result_FinancialStatement TotalAssets',
'Result_FinancialStatement CashFlowsFromOperatingActivities',
'Forecast_Dividend QuarterlyDividendPerShare','Result_FinancialStatement CashFlowsFromFinancingActivities',
'Forecast_FinancialStatement FiscalYear','Result_Dividend FiscalYear',
'Forecast_FinancialStatement NetIncome', 'Forecast_FinancialStatement OperatingIncome',
'Forecast_FinancialStatement NetSales','Result_FinancialStatement OrdinaryIncome',], axis=1)
feats = feats.drop(['EndOfDayQuote ExchangeOfficialClose',], axis=1)
# 欠損値処理を行います。
feats = feats.replace([np.inf, -np.inf, np.nan], 0)
# 銘柄コードを設定
feats['code'] = code
# 生成対象日以降の特徴量に絞る
feats = feats.loc[ | pd.Timestamp(start_dt) | pandas.Timestamp |
"""
author : <NAME> (email: <EMAIL>)
visit: (https://jfayaz.github.io)
------------------------------ Instructions -------------------------------------
This code downloads the Hazard Curves and Deaggregation information of a given the
Site Location provided with other inputs.
You may run this code in python IDE: 'Spyder' or any other similar IDE
Make sure you have the following python libraries installed:
pandas
numpy
urllib.request
string
openpyxl
xlsxwriter
requests
json
INPUT:
The input data must be provided in form of Excel file as per the given 'Input Data.xlsx' file
The name of the excel file must be kept as 'Input Data.xlsx'
Row 1 of the file must contain the titles as follows:
Edition Region Longitude Latitude Period vs30 Return Period
The input data must be provided starting from row 2 of the sheet with the required
values under each title. More than 1 rows can be provided as the data
E.g. the example file 'Input Data.xlsx' contains input for 2 sites
Following are the options that can be provided under each title:
Edition (USGS edition) : '2008' , '2014'
Region : 'COUS', 'WUS', 'CEUS' ; {WUS: Western US, CEUS: Central Eastern US}
Longitude : Longitude of the Site
Latitude : Latitude of the Site
Period : First Mode Period of the Structure (sec) (Downloads PGA if Period = 0)
Note: Closest results to the available USGS periods will be provided. USGS has results only for PGA, SA @ 0.2 sec, SA @ 1.0 sec, SA @ 2 sec
vs30 (Shear-Wave Velocity) : Shear wave velocity at 30 meters at the site (m/s)
Note: Closest results to the available USGS vs30s will be provided. USGS has results only for '180', '259', '360', '537', '760', '1150' ; {in m/s , restricted to only these values}
Return Period (Hazard) : Return Period Hazard (in years)
Note: Closest results to the available USGS Hazard Levels will be provided. USGS has results only for '475', '975', '2475'
OUTPUT:
The output will be provided in a saperate Excel file 'Output Data.xlsx' for each input
The file will contain 2 sheets:
1) 'Hazard Curves' sheet will contain information about the Hazard Curves at 0.2 sec, 1 sec and 2 secs
The output will have titles:
Acceleration (g) lambda PGA lambda Sa at 0.2 sec lambda Sa at 1 sec lambda Sa at 2 sec
2) 'Deaggregation' sheet will contain information about the deaggregation of the site at given imt level
The output will have two saparate tables showing the deaggregation of faults from 'Gutenberg-Richter (gr)' and 'Characteristic (ch)' branches of the USGS logic tree. They both must be added weightedly to attain total deaggregation
Each table will have titles:
source r m ε longitude latitude azimuth % contribution
Note: If a USGS branch other than 'afault' and 'bfault' is used in deaggregation, the results wont be provided for now!
You are welcome to make the additions to the code to make it more exhaustive
%%%%% ========================================================================================================================================================================= %%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
##### ================== INPUTS ================== #####
Plot_Hazard_Curves = 'Yes'
##### ============ END OF USER INPUTS ============ #####
#########################################################
###%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%#####
## Importing Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import warnings
import matplotlib.cbook
warnings.filterwarnings("ignore",category=matplotlib.cbook.mplDeprecation)
warnings.filterwarnings('error')
import warnings
warnings.simplefilter('once')#"error", "ignore", "always", "default", "module"
import process_hazard as haz
import process_deag as deag
# Reading given data
data = pd.read_excel('Input Data.xlsx', converters={'Edition':str,'Region':str,'imt':str},engine="openpyxl")
data = data.dropna()
print('\n\n'+str(len(data))+' sites identified in the input excel file.')
sfmt = '{Edition}/{Region}/{Longitude}/{Latitude}/{imt}/{vs30}/{Return Period}'.format
sfmt_2 = 'edition={Edition}®ion={Region}&longitude={Longitude}&latitude={Latitude}&imt={imt}&vs30={vs30}&returnperiod={Return Period}'.format
imt_list = ['PGA','SA0P1', 'SA0P2','SA0P5', 'SA0P75', 'SA1P0','SA2P0','SA3P0','SA4P0','SA5P0']
DF_Cols = ['λ_PGA', 'λ_Sa(T=0.1s)', 'λ_Sa(T=0.2s)','λ_Sa(T=0.5s)','λ_Sa(T=0.75s)','λ_Sa(T=1s)','λ_Sa(T=2s)','λ_Sa(T=3s)','λ_Sa(T=4s)','λ_Sa(T=5s)']
USGS_Sa_T = pd.DataFrame(columns=['T','imt'])
USGS_Sa_T['T'] = np.array([0, 0.10001, 0.20002, 0.50001, 0.750001, 1.0002, 2.0001, 3.0002, 4.0002, 5.0002])
USGS_Sa_T['imt'] = imt_list
USGS_RP = np.array([475,975,2475])
USGS_Vs30 = np.array([180.0001,259.0001,360.0001,537.0001,760.0001,1150.0001])
df = pd.DataFrame(columns=['Edition','Region','Longitude','Latitude','imt','vs30','Return Period'])
df['Edition'] = data['Edition'].apply(lambda x: 'E'+str(x))
df['Longitude'] = data['Longitude']
df['Latitude'] = data['Latitude']
diff_periods = data['Period'].apply(lambda x: abs(x-np.array(USGS_Sa_T['T'])))
diff_vs30 = data['vs30'].apply(lambda x: abs(x-USGS_Vs30))
diff_hazards = data['Return Period'].apply(lambda x: abs(x-USGS_RP))
df['Return Period'] = USGS_RP[diff_hazards.apply(lambda x: np.argmin(x))]
df['vs30'] = USGS_Vs30[diff_vs30.apply(lambda x: np.argmin(x))]
df['Region'] = data['Region']
Plot_Hazard_Curves = 'No'
Deag_data_avaliable = 'No'
for i in range(0,len(diff_periods)):
df.loc[i,'imt'] = USGS_Sa_T['imt'][np.argmin(diff_periods[i])]
#df['imt'].loc[i] = USGS_Sa_T['imt'][np.argmin(diff_periods[i])]
print('\n\n')
for ii in range(0,len(df)):
### ---------- HAZARD CURVES ---------- ###
print('\n\nChecking Hazard urls for Site Input {}...\n'.format(np.round(ii+1,0)))
lm=df[ii:ii+1].reset_index(drop=True)
Plot_Hazard_Curves,DF_HAZARD_CURVES = haz.url_haz_process(df,lm,imt_list,sfmt,sfmt_2,DF_Cols)
# Plotting Hazard Curves
if Plot_Hazard_Curves == 'Yes':
print('Downloading Hazard Curves for Site Input {}...\n'.format(np.round(ii+1,0)))
writer = pd.ExcelWriter('OutputData_Site'+str(ii+1)+'.xlsx',engine='xlsxwriter')
DF_HAZARD_CURVES.to_excel(writer,'Hazard Curves',startrow=4)
worksheet = writer.sheets['Hazard Curves']
worksheet.write('A1', 'Latitude')
worksheet.write('B1', lm['Latitude'][0])
worksheet.write('A2', 'Longitude')
worksheet.write('B2', lm['Longitude'][0])
worksheet.write('A3', 'Vs30 (m/s)')
worksheet.write('B3', lm['vs30'][0])
def plot_hazard(DF_HAZARD_CURVES,PlotTitle,lambdaType):
axes.plot(DF_HAZARD_CURVES['Acceleration (g)'], DF_HAZARD_CURVES[lambdaType] , '.-',lw=6,markersize=8)
axes.set_xlabel('Acceleration (g)',fontsize=30,fontweight='bold')
axes.set_ylabel('Rate of Exceedance',fontsize=30,fontweight='bold')
axes.set_yscale('log')
axes.set_title(PlotTitle,fontsize=40,fontweight='bold')
axes.tick_params(labelsize= 25)
axes.grid(True)
axes.set_xlim(0, np.ceil(max(DF_HAZARD_CURVES['Acceleration (g)'])))
axes.set_ylim(1/10**10,1)
axes.axhline(linewidth=10,color='black')
axes.axvline(linewidth=10,color='black')
#axes.hold(True)
#axes.legend(fontsize =30)
fig = plt.figure(ii+1,figsize=(18,12))
axes = fig.add_subplot(1, 1, 1)
for k in range(1,len(DF_HAZARD_CURVES.columns)):
plot_hazard(DF_HAZARD_CURVES,'Hazard Curve for Site ' + str(ii+1), DF_HAZARD_CURVES.columns[k])
savefigtext="./HazardCurves_Site"+str(ii+1)+".jpeg"
fig.savefig(savefigtext)
### ---------- DEAGGREGATION ---------- ###
print('\n\nChecking Deaggregation urls for Site Input {}...\n'.format(np.round(ii+1,0)))
lm=df[ii:ii+1].reset_index(drop=True)
# # Extracting sources from response
Deag_data_avaliable,data = deag.url_deag_process(lm,sfmt,sfmt_2)
if Plot_Hazard_Curves == 'Yes' and Deag_data_avaliable == 'Yes':
print('Downloading Deaggregation Results for Site Input {}...\n'.format(np.round(ii+1,0)))
# json data starts with response->data->sources
lx = pd.DataFrame.from_dict(data['response'][0]['data'][0]['sources'])
# Removing if contains pointsourcefinite
lx = lx[~lx['name'].str.contains("PointSourceFinite")]
epsilon = lx.columns[10]
# Rearrange columns
lx = lx[['name','source','r','m',epsilon,'longitude','latitude','azimuth','contribution']]
# Deleting source column
del lx['source']
lx = lx.reset_index(drop=True)
# Renaming column to source
lx = lx.rename(columns={'name':'source'})
#Getting indexes of faults
Fault_Name_idx = np.asarray(lx[lx.isnull().any(axis=1)].index)
Fault_Name_idx = np.append(Fault_Name_idx,[len(lx)],axis=0)
position1 = 0
position2 = 2
#Fault_Types = Fault_Types.dropna()
Fault_Types = pd.Series(dtype= | pd.StringDtype() | pandas.StringDtype |
import numpy as np
import pytest
from pandas import DataFrame, Index, MultiIndex, Series, concat, date_range
import pandas._testing as tm
import pandas.core.common as com
@pytest.fixture
def four_level_index_dataframe():
arr = np.array(
[
[-0.5109, -2.3358, -0.4645, 0.05076, 0.364],
[0.4473, 1.4152, 0.2834, 1.00661, 0.1744],
[-0.6662, -0.5243, -0.358, 0.89145, 2.5838],
]
)
index = MultiIndex(
levels=[["a", "x"], ["b", "q"], [10.0032, 20.0, 30.0], [3, 4, 5]],
codes=[[0, 0, 1], [0, 1, 1], [0, 1, 2], [2, 1, 0]],
names=["one", "two", "three", "four"],
)
return DataFrame(arr, index=index, columns=list("ABCDE"))
@pytest.mark.parametrize(
"key, level, exp_arr, exp_index",
[
("a", "lvl0", lambda x: x[:, 0:2], Index(["bar", "foo"], name="lvl1")),
("foo", "lvl1", lambda x: x[:, 1:2], Index(["a"], name="lvl0")),
],
)
def test_xs_named_levels_axis_eq_1(key, level, exp_arr, exp_index):
# see gh-2903
arr = np.random.randn(4, 4)
index = MultiIndex(
levels=[["a", "b"], ["bar", "foo", "hello", "world"]],
codes=[[0, 0, 1, 1], [0, 1, 2, 3]],
names=["lvl0", "lvl1"],
)
df = DataFrame(arr, columns=index)
result = df.xs(key, level=level, axis=1)
expected = DataFrame(exp_arr(arr), columns=exp_index)
tm.assert_frame_equal(result, expected)
def test_xs_values(multiindex_dataframe_random_data):
df = multiindex_dataframe_random_data
result = df.xs(("bar", "two")).values
expected = df.values[4]
tm.assert_almost_equal(result, expected)
def test_xs_loc_equality(multiindex_dataframe_random_data):
df = multiindex_dataframe_random_data
result = df.xs(("bar", "two"))
expected = df.loc[("bar", "two")]
tm.assert_series_equal(result, expected)
def test_xs_missing_values_in_index():
# see gh-6574
# missing values in returned index should be preserved
acc = [
("a", "abcde", 1),
("b", "bbcde", 2),
("y", "yzcde", 25),
("z", "xbcde", 24),
("z", None, 26),
("z", "zbcde", 25),
("z", "ybcde", 26),
]
df = DataFrame(acc, columns=["a1", "a2", "cnt"]).set_index(["a1", "a2"])
expected = DataFrame(
{"cnt": [24, 26, 25, 26]},
index=Index(["xbcde", np.nan, "zbcde", "ybcde"], name="a2"),
)
result = df.xs("z", level="a1")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("key, level", [("one", "second"), (["one"], ["second"])])
def test_xs_with_duplicates(key, level, multiindex_dataframe_random_data):
# see gh-13719
frame = multiindex_dataframe_random_data
df = concat([frame] * 2)
assert df.index.is_unique is False
expected = concat([frame.xs("one", level="second")] * 2)
result = df.xs(key, level=level)
tm.assert_frame_equal(result, expected)
def test_xs_level(multiindex_dataframe_random_data):
df = multiindex_dataframe_random_data
result = df.xs("two", level="second")
expected = df[df.index.get_level_values(1) == "two"]
expected.index = Index(["foo", "bar", "baz", "qux"], name="first")
tm.assert_frame_equal(result, expected)
def test_xs_level_eq_2():
arr = np.random.randn(3, 5)
index = MultiIndex(
levels=[["a", "p", "x"], ["b", "q", "y"], ["c", "r", "z"]],
codes=[[2, 0, 1], [2, 0, 1], [2, 0, 1]],
)
df = DataFrame(arr, index=index)
expected = DataFrame(arr[1:2], index=[["a"], ["b"]])
result = df.xs("c", level=2)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"indexer",
[
lambda df: df.xs(("a", 4), level=["one", "four"]),
lambda df: df.xs("a").xs(4, level="four"),
],
)
def test_xs_level_multiple(indexer, four_level_index_dataframe):
df = four_level_index_dataframe
expected_values = [[0.4473, 1.4152, 0.2834, 1.00661, 0.1744]]
expected_index = MultiIndex(
levels=[["q"], [20.0]], codes=[[0], [0]], names=["two", "three"]
)
expected = DataFrame(expected_values, index=expected_index, columns=list("ABCDE"))
result = indexer(df)
tm.assert_frame_equal(result, expected)
def test_xs_setting_with_copy_error(multiindex_dataframe_random_data):
# this is a copy in 0.14
df = multiindex_dataframe_random_data
result = df.xs("two", level="second")
# setting this will give a SettingWithCopyError
# as we are trying to write a view
msg = "A value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
result[:] = 10
def test_xs_setting_with_copy_error_multiple(four_level_index_dataframe):
# this is a copy in 0.14
df = four_level_index_dataframe
result = df.xs(("a", 4), level=["one", "four"])
# setting this will give a SettingWithCopyError
# as we are trying to write a view
msg = "A value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
result[:] = 10
def test_xs_integer_key():
# see gh-2107
dates = range(20111201, 20111205)
ids = list("abcde")
index = MultiIndex.from_product([dates, ids], names=["date", "secid"])
df = DataFrame(np.random.randn(len(index), 3), index, ["X", "Y", "Z"])
result = df.xs(20111201, level="date")
expected = df.loc[20111201, :]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"indexer", [lambda df: df.xs("a", level=0), lambda df: df.xs("a")]
)
def test_xs_level0(indexer, four_level_index_dataframe):
df = four_level_index_dataframe
expected_values = [
[-0.5109, -2.3358, -0.4645, 0.05076, 0.364],
[0.4473, 1.4152, 0.2834, 1.00661, 0.1744],
]
expected_index = MultiIndex(
levels=[["b", "q"], [10.0032, 20.0], [4, 5]],
codes=[[0, 1], [0, 1], [1, 0]],
names=["two", "three", "four"],
)
expected = DataFrame(expected_values, index=expected_index, columns=list("ABCDE"))
result = indexer(df)
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
import numpy as np
import torch
import torch.nn as nn
from src.models.loss import RMSELoss, RMSLELoss
from sklearn.metrics import r2_score
import pandas as pd
#########################
# EARLY STOPPING
#########################
class EarlyStopping:
"""Early stops the training if validation loss doesn't improve after a given patience."""
def __init__(
self,
patience=7,
verbose=False,
delta=0.005,
path="checkpoint.pt",
trace_func=print,
early_stop_delay=20,
):
"""
Args:
patience (int): How long to wait after last time validation loss improved.
Default: 7
verbose (bool): If True, prints a message for each validation loss improvement.
Default: False
delta (float): Minimum change in the monitored quantity to qualify as an improvement.
Default: 0
path (str): Path for the checkpoint to be saved to.
Default: 'checkpoint.pt'
trace_func (function): trace print function.
Default: print
From https://github.com/Bjarten/early-stopping-pytorch
License: MIT
"""
self.patience = patience
self.verbose = verbose
self.early_stop_delay = early_stop_delay
self.counter = 0
self.epoch = 0
self.best_score = None
self.early_stop = False
self.val_loss_min = np.Inf
self.delta = delta
self.path = path
self.trace_func = trace_func
def __call__(self, val_loss, model):
score = -val_loss
# print(type(score), 'SCORE ####,', score)
if self.epoch < self.early_stop_delay:
self.epoch += 1
pass
else:
if self.best_score is None:
self.best_score = score
self.save_checkpoint(val_loss, model)
elif score < self.best_score + self.delta:
self.counter += 1
if self.verbose:
self.trace_func(
f"EarlyStopping counter: {self.counter} out of {self.patience}"
)
if self.counter >= self.patience:
self.early_stop = True
elif torch.isnan(score).item():
self.counter += 1
if self.counter >= self.patience:
self.early_stop = True
# print('########## IS NAN #######')
else:
self.best_score = score
self.save_checkpoint(val_loss, model)
self.counter = 0
self.epoch += 1
def save_checkpoint(self, val_loss, model):
"""Saves model when validation loss decrease."""
if self.verbose:
self.trace_func(
f"Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ..."
)
torch.save(model, self.path)
# torch.save(model.state_dict(), self.path)
self.val_loss_min = val_loss
#########################
# TESTING
#########################
def test(net, x_test, device, batch_size=100, ):
with torch.no_grad():
y_hats = []
for i in range(0, len(x_test), batch_size):
batch_x = x_test[i:i+batch_size].to(device)
outputs = net(batch_x)
y_hats.append(np.array(outputs.cpu()).reshape(-1,1))
return torch.tensor(np.concatenate(y_hats))
def calc_r2_avg(y_hats, y_val, index_sorted, window_size):
y_hats_rolling_avg = np.convolve(np.array(y_hats[index_sorted]).reshape(-1), np.ones(window_size), 'valid') / window_size
r2_val_avg = r2_score(np.array(y_val)[index_sorted][window_size-1:], y_hats_rolling_avg)
return r2_val_avg, y_hats_rolling_avg
# function to create metrics from the test set on an already trained model
def model_metrics_test(net, model_path, x_test, y_test, device, window_size=12):
net.eval()
criterion_mae = nn.L1Loss()
criterion_rmse = RMSELoss()
criterion_rmsle = RMSLELoss()
results_dict = {}
try:
y_hats = test(net, x_test, device, 100)
index_sorted = np.array(np.argsort(y_test, 0).reshape(-1))
r2_test = r2_score(y_test, y_hats)
results_dict['r2_test'] = r2_test
r2_test_avg, y_hats_rolling_avg = calc_r2_avg(y_hats, y_test, index_sorted, window_size)
results_dict['r2_test_avg'] = r2_test_avg
loss_mae_test = criterion_mae(y_hats, y_test)
results_dict['loss_mae_test'] = loss_mae_test.item()
loss_rmse_test = criterion_rmse(y_hats, y_test)
results_dict['loss_rmse_test'] = loss_rmse_test.item()
loss_rmsle_test = criterion_rmsle(y_hats, y_test)
results_dict['loss_rmsle_test'] = loss_rmsle_test.item()
except:
results_dict['r2_test'] = 99999
results_dict['r2_test_avg'] = 99999
results_dict['loss_mae_test'] = 99999
results_dict['loss_rmse_test'] = 99999
results_dict['loss_rmsle_test'] = 99999
return results_dict
def test_metrics_to_results_df(model_folder, df_results, x_test, y_test, ):
'''Function that takes the results datafram and appends
the results from the test data to it.
Parameters
===========
model_folder : pathlib position
Folder holding all the saved checkpoint files of saved models
'''
# select device to run neural net on
if torch.cuda.is_available():
device = torch.device("cuda:0")
print("Running on GPU")
else:
device = torch.device("cpu")
print("Running on CPU")
df_temp = | pd.DataFrame() | pandas.DataFrame |
from collections import OrderedDict
from datetime import datetime, timedelta
import numpy as np
import numpy.ma as ma
import pytest
from pandas._libs import iNaT, lib
from pandas.core.dtypes.common import is_categorical_dtype, is_datetime64tz_dtype
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
Series,
Timestamp,
date_range,
isna,
period_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray, period_array
class TestSeriesConstructors:
@pytest.mark.parametrize(
"constructor,check_index_type",
[
# NOTE: some overlap with test_constructor_empty but that test does not
# test for None or an empty generator.
# test_constructor_pass_none tests None but only with the index also
# passed.
(lambda: Series(), True),
(lambda: Series(None), True),
(lambda: Series({}), True),
(lambda: Series(()), False), # creates a RangeIndex
(lambda: Series([]), False), # creates a RangeIndex
(lambda: Series((_ for _ in [])), False), # creates a RangeIndex
(lambda: Series(data=None), True),
(lambda: Series(data={}), True),
(lambda: Series(data=()), False), # creates a RangeIndex
(lambda: Series(data=[]), False), # creates a RangeIndex
(lambda: Series(data=(_ for _ in [])), False), # creates a RangeIndex
],
)
def test_empty_constructor(self, constructor, check_index_type):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
expected = Series()
result = constructor()
assert len(result.index) == 0
tm.assert_series_equal(result, expected, check_index_type=check_index_type)
def test_invalid_dtype(self):
# GH15520
msg = "not understood"
invalid_list = [pd.Timestamp, "pd.Timestamp", list]
for dtype in invalid_list:
with pytest.raises(TypeError, match=msg):
Series([], name="time", dtype=dtype)
def test_invalid_compound_dtype(self):
# GH#13296
c_dtype = np.dtype([("a", "i8"), ("b", "f4")])
cdt_arr = np.array([(1, 0.4), (256, -13)], dtype=c_dtype)
with pytest.raises(ValueError, match="Use DataFrame instead"):
Series(cdt_arr, index=["A", "B"])
def test_scalar_conversion(self):
# Pass in scalar is disabled
scalar = Series(0.5)
assert not isinstance(scalar, float)
# Coercion
assert float(Series([1.0])) == 1.0
assert int(Series([1.0])) == 1
def test_constructor(self, datetime_series):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty_series = Series()
assert datetime_series.index.is_all_dates
# Pass in Series
derived = Series(datetime_series)
assert derived.index.is_all_dates
assert tm.equalContents(derived.index, datetime_series.index)
# Ensure new index is not created
assert id(datetime_series.index) == id(derived.index)
# Mixed type Series
mixed = Series(["hello", np.NaN], index=[0, 1])
assert mixed.dtype == np.object_
assert mixed[1] is np.NaN
assert not empty_series.index.is_all_dates
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
assert not Series().index.is_all_dates
# exception raised is of type Exception
with pytest.raises(Exception, match="Data must be 1-dimensional"):
Series(np.random.randn(3, 3), index=np.arange(3))
mixed.name = "Series"
rs = Series(mixed).name
xp = "Series"
assert rs == xp
# raise on MultiIndex GH4187
m = MultiIndex.from_arrays([[1, 2], [3, 4]])
msg = "initializing a Series from a MultiIndex is not supported"
with pytest.raises(NotImplementedError, match=msg):
Series(m)
@pytest.mark.parametrize("input_class", [list, dict, OrderedDict])
def test_constructor_empty(self, input_class):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series()
empty2 = Series(input_class())
# these are Index() and RangeIndex() which don't compare type equal
# but are just .equals
tm.assert_series_equal(empty, empty2, check_index_type=False)
# With explicit dtype:
empty = Series(dtype="float64")
empty2 = Series(input_class(), dtype="float64")
tm.assert_series_equal(empty, empty2, check_index_type=False)
# GH 18515 : with dtype=category:
empty = Series(dtype="category")
empty2 = Series(input_class(), dtype="category")
tm.assert_series_equal(empty, empty2, check_index_type=False)
if input_class is not list:
# With index:
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series(index=range(10))
empty2 = Series(input_class(), index=range(10))
tm.assert_series_equal(empty, empty2)
# With index and dtype float64:
empty = Series(np.nan, index=range(10))
empty2 = Series(input_class(), index=range(10), dtype="float64")
tm.assert_series_equal(empty, empty2)
# GH 19853 : with empty string, index and dtype str
empty = Series("", dtype=str, index=range(3))
empty2 = Series("", index=range(3))
tm.assert_series_equal(empty, empty2)
@pytest.mark.parametrize("input_arg", [np.nan, float("nan")])
def test_constructor_nan(self, input_arg):
empty = Series(dtype="float64", index=range(10))
empty2 = Series(input_arg, index=range(10))
tm.assert_series_equal(empty, empty2, check_index_type=False)
@pytest.mark.parametrize(
"dtype",
["f8", "i8", "M8[ns]", "m8[ns]", "category", "object", "datetime64[ns, UTC]"],
)
@pytest.mark.parametrize("index", [None, pd.Index([])])
def test_constructor_dtype_only(self, dtype, index):
# GH-20865
result = pd.Series(dtype=dtype, index=index)
assert result.dtype == dtype
assert len(result) == 0
def test_constructor_no_data_index_order(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
result = pd.Series(index=["b", "a", "c"])
assert result.index.tolist() == ["b", "a", "c"]
def test_constructor_no_data_string_type(self):
# GH 22477
result = pd.Series(index=[1], dtype=str)
assert np.isnan(result.iloc[0])
@pytest.mark.parametrize("item", ["entry", "ѐ", 13])
def test_constructor_string_element_string_type(self, item):
# GH 22477
result = pd.Series(item, index=[1], dtype=str)
assert result.iloc[0] == str(item)
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
ser = Series(["x", None], dtype=string_dtype)
result = ser.isna()
expected = Series([False, True])
tm.assert_series_equal(result, expected)
assert ser.iloc[1] is None
ser = Series(["x", np.nan], dtype=string_dtype)
assert np.isnan(ser.iloc[1])
def test_constructor_series(self):
index1 = ["d", "b", "a", "c"]
index2 = sorted(index1)
s1 = Series([4, 7, -5, 3], index=index1)
s2 = Series(s1, index=index2)
tm.assert_series_equal(s2, s1.sort_index())
def test_constructor_iterable(self):
# GH 21987
class Iter:
def __iter__(self):
for i in range(10):
yield i
expected = Series(list(range(10)), dtype="int64")
result = Series(Iter(), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_sequence(self):
# GH 21987
expected = Series(list(range(10)), dtype="int64")
result = Series(range(10), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_single_str(self):
# GH 21987
expected = Series(["abc"])
result = Series("abc")
tm.assert_series_equal(result, expected)
def test_constructor_list_like(self):
# make sure that we are coercing different
# list-likes to standard dtypes and not
# platform specific
expected = Series([1, 2, 3], dtype="int64")
for obj in [[1, 2, 3], (1, 2, 3), np.array([1, 2, 3], dtype="int64")]:
result = Series(obj, index=[0, 1, 2])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", ["bool", "int32", "int64", "float64"])
def test_constructor_index_dtype(self, dtype):
# GH 17088
s = Series(Index([0, 2, 4]), dtype=dtype)
assert s.dtype == dtype
@pytest.mark.parametrize(
"input_vals",
[
([1, 2]),
(["1", "2"]),
(list(pd.date_range("1/1/2011", periods=2, freq="H"))),
(list( | pd.date_range("1/1/2011", periods=2, freq="H", tz="US/Eastern") | pandas.date_range |
import glob
import pandas as pd
from pathlib import Path
import os
import numpy as np
#####################################################
# Original method #
#####################################################
def create_dataset_original(data_folder):
path = Path(data_folder)
df_cancer = pd.concat(
[pd.read_csv(f, sep='\t', header=None, index_col=0) for f in glob.glob(rf'{path}/cancer/*.breakage')], axis=1
).dropna()
df_control = pd.concat(
[pd.read_csv(f, sep='\t', header=None, index_col=0) for f in glob.glob(rf'{path}/control/*.breakage')], axis=1
).dropna()
df = pd.concat([df_cancer, df_control], axis=1).dropna()
# df.columns = [i for i in range(df.shape[1])]
dataframes = [df, df_cancer, df_control]
return dataframes
#####################################################
# Custom methods #
#####################################################
def create_dataset_all_cpgs(data_folder):
path = Path(data_folder)
df_cancer = pd.concat(
[pd.read_csv(f, sep='\t', header=None, index_col=0) for f in glob.glob(rf'{path}/cancer/*.breakage')], axis=1
)
df_control = pd.concat(
[pd.read_csv(f, sep='\t', header=None, index_col=0) for f in glob.glob(rf'{path}/control/*.breakage')], axis=1
)
df = pd.concat([df_cancer, df_control], axis=1)
dataframes = [df, df_cancer, df_control]
return dataframes
def filter_nans(dataframe):
df = dataframe
nans = df.isnull().sum(axis=1).tolist()
index = [index for index, value in enumerate(nans) if value <= int(dataframe.shape[1]*0.3)]
filtered_df = df.iloc[index]
return filtered_df
def create_dataset_filtered(data_folder):
path = Path(data_folder)
df_cancer = pd.concat(
[pd.read_csv(f, sep='\t', header=None, index_col=0) for f in glob.glob(rf'{path}/cancer/*.breakage')], axis=1
)
df_control = pd.concat(
[pd.read_csv(f, sep='\t', header=None, index_col=0) for f in glob.glob(rf'{path}/control/*.breakage')], axis=1
)
df_cancer_filtered = filter_nans(df_cancer)
df_control_filtered = filter_nans(df_control)
df_filtered = | pd.concat([df_cancer_filtered, df_control_filtered], axis=1) | pandas.concat |
from lifelines.fitters.coxph_fitter import CoxPHFitter
from lifelines.utils import _get_index, k_fold_cross_validation
__author__ = "KOLANICH"
__license__ = "MIT"
__copyright__ = """MIT License
Copyright (c) 2017 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import typing
import numpy as np
import pandas
from AutoXGBoost import AutoXGBoost
from Chassis import Chassis
class XGBoostCoxPHFitter(CoxPHFitter):
"""
This class implements fitting Cox's proportional hazard model using XGBoost `cox:survival` objective contributed by @slundberg.
This module uses some libraries like Chassis and AutoXGBoost"""
def __init__(self, spec, hyperparams=None, alpha=0.95, durationColPostfix="_prep", prefix=None):
if not (0.0 < alpha <= 1.0):
raise ValueError("alpha parameter must be between 0 and 1.")
self.alpha = alpha
self.initialSpec = spec
self.hyperparams = hyperparams
self._defaultDurationCol = None
self._SHAPExplaination = None # a workaround of missing argument
self.explainations = None
self.strata = None # to make CoxPHFitter happy
self.prefix = prefix
for k, v in spec.items():
if v == "survival":
self._defaultDurationCol = k
break
def prepareFitting(self, df, duration_col=None, event_col=None, weights_col=None):
self.spec = type(self.initialSpec)(self.initialSpec)
df = df.copy()
if duration_col:
#df = df.sort_values(by=duration_col)
pass
duration_col_transformed = None
#print("\x1b[31m", "event_col", event_col, "\x1b[0m")
if event_col is not None:
#print("\x1b[31m", "duration_col", duration_col, "\x1b[0m")
if duration_col is None:
if self._defaultDurationCol:
duration_col_transformed = self._defaultDurationCol
else:
if self.spec[duration_col] == "survival": # the shit is already supplied transformed
duration_col_transformed = duration_col
elif self.spec[duration_col] in {"numerical", "stop"}:
duration_col_transformed = duration_col + "_prep"
#print("\x1b[31m", "duration_col_transformed not in df.columns", duration_col_transformed not in df.columns, "\x1b[0m")
if duration_col_transformed not in df.columns:
df.loc[:, duration_col_transformed] = df.loc[:, duration_col] * (df.loc[:, event_col] * 2 - 1)
#print("\x1b[31m", "df.loc[:, duration_col_transformed]", df.loc[:, duration_col_transformed], "\x1b[0m")
self.spec[duration_col] = "stop"
self.spec[event_col] = "stop"
else:
assert duration_col is not None
duration_col_transformed = duration_col
self.duration_col_transformed = duration_col_transformed
self.spec[self.duration_col_transformed] = "survival"
if weights_col:
self.spec[weights_col] = "weight"
#print(df)
return AutoXGBoost(self.spec, df, prefix=self.prefix)
def optimizeHyperparams(self, df, duration_col=None, event_col=None, weights_col=None, show_progress=False, autoSave: bool = True, folds: int = 10, iters: int = 1000, jobs: int = None, optimizer: "UniOpt.core.Optimizer" = None, force: typing.Optional[bool] = None, *args, **kwargs):
print(df)
self.axg = self.prepareFitting(df, duration_col=duration_col, event_col=event_col, weights_col=weights_col)
self.axg.optimizeHyperparams(columns={self.duration_col_transformed}, autoSave=autoSave, folds=folds, iters=iters, jobs=jobs, optimizer=optimizer, force=force, *args, **kwargs)
def _preprocess_dataframe(self, duration_col, event_col, weights_col):
E = self.axg.select(columns={event_col})[event_col]
T = self.axg.select(columns={duration_col})[duration_col]
X = self.axg.prepareCovariates(self.duration_col_transformed)
W = self.axg.weights if weights_col is not None else pandas.Series(np.ones((len(X),)), index=X.index, name="$aggregateWeight")
return X, T, E, W, None, None
def fit(self, df, duration_col=None, event_col=None, show_progress=True, initial_point=None, weights_col=None, saveLoadModel=None, format="binary"):
"""
Fit the XGBoost Cox Propertional Hazard model to a dataset.
Parameters
----------
df: DataFrame
a Pandas DataFrame with necessary columns `duration_col` and `event_col` (see below), covariates columns, and special columns (weights, strata).
`duration_col` refers to the lifetimes of the subjects. `event_col` refers to whether the 'death' events was observed: 1 if observed, 0 else (censored).
duration_col: string
the name of the column in DataFrame that contains the subjects' lifetimes.
event_col: string, optional
the name of thecolumn in DataFrame that contains the subjects' death observation. If left as None, assume all individuals are uncensored.
weights_col: string, optional
an optional column in the DataFrame, df, that denotes the weight per subject.
This column is expelled and not used as a covariate, but as a weight in the final regression. Default weight is 1.
This can be used for case-weights. For example, a weight of 2 means there were two subjects with identical observations.
This can be used for sampling weights. In that case, use `robust=True` to get more accurate standard errors.
show_progress: boolean, optional (default=False)
since the fitter is iterative, show convergence diagnostics. Useful if convergence is failing.
initial_point: (d,) numpy array, optional
initialize the starting point of the iterative algorithm. Default is the zero vector.
Returns
-------
self: CoxPHFitter
self with additional new properties: ``print_summary``, ``hazards_``, ``confidence_intervals_``, ``baseline_survival_``, etc."""
self.axg = self.prepareFitting(df, duration_col=duration_col, event_col=event_col, weights_col=weights_col)
assert self.duration_col_transformed
if self.hyperparams is not None:
self.axg.bestHyperparams = self.hyperparams
else:
self.axg.loadHyperparams()
if saveLoadModel is True:
self.axg.loadModel(cn=self.duration_col_transformed, format=format)
else:
#print(df[self.duration_col_transformed])
self.axg.trainModels((self.duration_col_transformed,))
if saveLoadModel is False:
self.axg.models[self.duration_col_transformed].save(format=format)
#self.confidence_intervals_ = self._compute_confidence_intervals()
X, T, E, W, original_index, _clusters = self._preprocess_dataframe(duration_col, event_col, weights_col)
self._predicted_partial_hazards_ = (
self.predict_partial_hazard(X)
.rename(columns={0: "P"})
.assign(T=T, E=E, W=W)
.set_index(X.index)
)
self.baseline_hazard_ = self._compute_baseline_hazards()
self.baseline_cumulative_hazard_ = self._compute_baseline_cumulative_hazard()
#self.baseline_survival_ = self._compute_baseline_survival()
#self.score_ = concordance_index(self.durations, -self.baseline_survival_, self.event_observed)
return self
def _SHAPExplainationMissingArgumentWorkaround(self, SHAPInteractions):
if SHAPInteractions is not None:
assert self._SHAPExplaination is None
self._SHAPExplaination = SHAPInteractions
def predict_partial_hazard(self, X, SHAPInteractions=None):
self._SHAPExplainationMissingArgumentWorkaround(SHAPInteractions)
return super().predict_partial_hazard(X)
def predict_log_hazard_relative_to_mean(self, X, SHAPInteractions=None):
self._SHAPExplainationMissingArgumentWorkaround(SHAPInteractions)
return super().predict_log_hazard_relative_to_mean(X)
def predict_expectation(self, X, SHAPInteractions=None):
"""lifelines-expected function to predict expectation"""
self._SHAPExplainationMissingArgumentWorkaround(SHAPInteractions)
return super().predict_expectation(X)
def predictExpectation(self, X, SHAPInteractions=None):
"""our function to predict expectation"""
res = self.predict_expectation(X, SHAPInteractions)[0]
res.name = "predicted_survival"
return pandas.DataFrame(res)
def crossvalidate(self, pds, folds: int):
return k_fold_cross_validation(self, pds, duration_col="duration_worked", event_col="failed", k=folds)
def predict_log_partial_hazard(self, X, SHAPInteractions=None):
if not isinstance(X, Chassis):
dmat = AutoXGBoost(self.spec, X)
else:
dmat = X
shouldDelete = self.duration_col_transformed not in X
#print("\x1b[31m", "shouldDelete", shouldDelete, "\x1b[0m")
#print("\x1b[31m", "dmat.pds.loc[:, self.duration_col_transformed]", dmat.pds.loc[:, self.duration_col_transformed], "\x1b[0m")
#from traceback import print_stack
#print_stack()
if SHAPInteractions is not None:
assert self._SHAPExplaination is None
else:
SHAPInteractions = self._SHAPExplaination
self._SHAPExplaination = None
res = self.axg.predict(self.duration_col_transformed, dmat, returnPandas=True, SHAPInteractions=SHAPInteractions)
if SHAPInteractions is None:
self.explainations = None
else:
res, self.explainations = res
if shouldDelete and self.duration_col_transformed in X:
del X[self.duration_col_transformed]
res.name = 0
fRes = | pandas.DataFrame(res) | pandas.DataFrame |
from utilities import services
from utilities import graph_helpers
import pandas as pd
from matplotlib import pyplot as plt
import datetime
from datetime import timezone
import json
#TODO:
#Add button to switch between graphs or split into separate figs
#Add total numbers on graph as key
#Extend to add year totals
try:
services.get_environmental_var('StarlingPersonalAccessToken')
except:
raise('Get personal access token failed')
items_to_ignore = json.loads(services.get_config_var('feed_items_to_ignore'))
items_to_divide = json.loads(services.get_config_var('feed_items_to_divide'))
def get_transactions() -> list:
today = datetime.datetime.now().replace(microsecond=0).isoformat() + 'Z'
transactions = services.get_transactions(services.get_config_var('transactions_start_date'), today)
return transactions
def get_outbound_transactions(transactions: list) -> pd.DataFrame:
df = pd.DataFrame(transactions)
df['transactionTime'] = | pd.to_datetime(df['transactionTime']) | pandas.to_datetime |
"""
Writer for amber
"""
import time
import pandas as pd
import math
import re
import numpy as np
from collections import Counter
# Python 2/3 compat
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import eex
import logging
# AMBER local imports
from . import amber_metadata as amd
logger = logging.getLogger(__name__)
def _write_1d(file_handle, data, ncols, fmt):
data = data.ravel()
remainder_size = data.size % ncols
if data.size == 0:
file_handle.write("\n".encode())
elif remainder_size == 0:
np.savetxt(file_handle, data.reshape(-1, ncols), fmt=fmt, delimiter="")
else:
rem_data = data[-remainder_size:].reshape(1, -1)
data = data[:-remainder_size].reshape(-1, ncols)
np.savetxt(file_handle, data, fmt=fmt, delimiter="")
np.savetxt(file_handle, rem_data, fmt=fmt, delimiter="")
# print(data.shape, rem_data.shape)
# Write data to file
file_handle.flush()
def _write_amber_data(file_handle, data, category):
fmt_string = amd.data_labels[category][1]
fmt_data = amd.parse_format(fmt_string)
file_handle.write(("%%FLAG %s\n" % category).encode())
file_handle.write((fmt_string + "\n").encode())
ncols = fmt_data[0]
fmt = amd.build_format(fmt_data)
_write_1d(file_handle, np.array(data), ncols, fmt)
def _check_dl_compatibility(dl):
"""
This function examines a datalayer to determine if it is compatible with Amber.
Conversions between functional forms and pairwise interaction mixing are performed (if possible).
"""
# Loop over force field information - check functional form compatibility
for k, v in amd.forcefield_parameters.items():
if k != "nonbond":
terms = dl.list_term_parameters(v["order"])
for j in terms.values():
if j[0] != v["form"]:
# Will need to insert check to see if these can be easily converted (ex OPLS dihedral <-> charmmfsw)
raise TypeError("Functional form %s stored in datalayer is not compatible with Amber.\n" %(j[0]) )
else:
# handle non bonds here
pass
stored_properties = dl.list_atom_properties()
required_properties = list(amd.atom_property_names.values())
diff = np.setdiff1d(required_properties, stored_properties)
natoms = dl.get_atom_count()
index = np.arange(1, natoms + 1)
# Build and curate the data
df = | pd.DataFrame({'atom_index': index}) | pandas.DataFrame |
import numpy as np
import pandas as pd
import pytest
from etna.datasets.tsdataset import TSDataset
from etna.models.linear import ElasticMultiSegmentModel
from etna.models.linear import ElasticPerSegmentModel
from etna.models.linear import LinearMultiSegmentModel
from etna.models.linear import LinearPerSegmentModel
from etna.transforms.datetime_flags import DateFlagsTransform
from etna.transforms.lags import LagTransform
@pytest.fixture
def ts_with_categoricals(random_seed) -> TSDataset:
periods = 100
df1 = pd.DataFrame({"timestamp": pd.date_range("2020-01-01", periods=periods)})
df1["segment"] = "segment_1"
df1["target"] = np.random.uniform(10, 20, size=periods)
df1["cat_feature"] = "x"
df2 = pd.DataFrame({"timestamp": pd.date_range("2020-01-01", periods=periods)})
df2["segment"] = "segment_2"
df2["target"] = np.random.uniform(-15, 5, size=periods)
df1["cat_feature"] = "y"
df = pd.concat([df1, df2]).reset_index(drop=True)
df = TSDataset.to_dataset(df)
ts = TSDataset(df, freq="D")
return ts
def linear_segments_by_parameters(alpha_values, intercept_values):
dates = | pd.date_range(start="2020-02-01", freq="D", periods=210) | pandas.date_range |
"""
This module plots test results.
"""
import pandas as pd
from matplotlib.backends.backend_pdf import PdfPages
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import log_loss
import RecessionPredictor_paths as path
class TestResultPlots:
"""
The manager class for this module.
"""
def __init__(self):
"""
prediction_names: used to re-label chart data
"""
self.pdf_object = ''
self.prediction_names = {'Pred_Recession_within_6mo': 'Within 6 Months',
'Pred_Recession_within_12mo': 'Within 12 Months',
'Pred_Recession_within_24mo': 'Within 24 Months',
'True_Recession': 'Recession'}
self.average_model = pd.DataFrame()
def calculate_log_loss_weights(self, y_true):
"""
Calculates weight adjustments for class outputs, such that each class
receives the same weight in log loss calculations.
y_true: an iterable of class outputs to be weighted.
"""
log_loss_weights = []
true_output_labels = y_true.unique()
desired_weight = 1 / len(true_output_labels)
class_weights = {}
for label in true_output_labels:
training_frequency = (len(y_true[y_true == label]) / len(y_true))
multiplier = desired_weight / training_frequency
class_weights[str(label)] = multiplier
for sample in y_true:
log_loss_weights.append(class_weights[str(sample)])
return(log_loss_weights)
def exponential_smoother(self, raw_data, half_life):
"""
Purpose: performs exponential smoothing on "raw_data". Begins recursion
with the first data item (i.e. assumes that data in "raw_data" is listed
in chronological order).
raw_data: iterable, the data to be smoothed
half_life: float, the half-life for the smoother. The smoothing factor
(alpha) is calculated as alpha = 1 - exp(ln(0.5) / half_life)
"""
import math
raw_data = list(raw_data)
half_life = float(half_life)
smoothing_factor = 1 - math.exp(math.log(0.5) / half_life)
smoothed_values = [raw_data[0]]
for index in range(1, len(raw_data)):
previous_smooth_value = smoothed_values[-1]
new_unsmooth_value = raw_data[index]
new_smooth_value = ((smoothing_factor * new_unsmooth_value)
+ ((1 - smoothing_factor) * previous_smooth_value))
smoothed_values.append(new_smooth_value)
return(smoothed_values)
def exponential_conversion(self, dataframe):
"""
Performs exponential smoothing on specific columns of the dataframe.
"""
dataframe['Within 6 Months'] = self.exponential_smoother(raw_data=dataframe['Within 6 Months'],
half_life=3)
dataframe['Within 12 Months'] = self.exponential_smoother(raw_data=dataframe['Within 12 Months'],
half_life=3)
dataframe['Within 24 Months'] = self.exponential_smoother(raw_data=dataframe['Within 24 Months'],
half_life=3)
return(dataframe)
def plot_probabilities(self, dataframe, name, exponential):
"""
Sets chart parameters, generates the chart, and saves it.
dataframe: dataframe, the dataframe to be plotted
name: sting, chart title
exponential: boolean, whether to plot exponentially weighted output data or not
"""
dataframe = pd.DataFrame(dataframe)
name = str(name)
exponential = bool(exponential)
dataframe.rename(columns=self.prediction_names, inplace=True)
if exponential == True:
dataframe = self.exponential_conversion(dataframe=dataframe)
is_recession = dataframe['Recession'] == 1
is_not_recession = dataframe['Recession'] == 0
dataframe.loc[is_recession, 'Recession'] = 100
dataframe.loc[is_not_recession, 'Recession'] = -1
log_loss_weights_6mo = self.calculate_log_loss_weights(y_true=dataframe['True_Recession_within_6mo'])
log_loss_weights_12mo = self.calculate_log_loss_weights(y_true=dataframe['True_Recession_within_12mo'])
log_loss_weights_24mo = self.calculate_log_loss_weights(y_true=dataframe['True_Recession_within_24mo'])
loss_6mo = log_loss(y_true=dataframe['True_Recession_within_6mo'],
y_pred=dataframe['Within 6 Months'],
sample_weight=log_loss_weights_6mo)
loss_12mo = log_loss(y_true=dataframe['True_Recession_within_12mo'],
y_pred=dataframe['Within 12 Months'],
sample_weight=log_loss_weights_12mo)
loss_24mo = log_loss(y_true=dataframe['True_Recession_within_24mo'],
y_pred=dataframe['Within 24 Months'],
sample_weight=log_loss_weights_24mo)
dataframe = dataframe[['Dates'] + list(self.prediction_names.values())]
chart_title = '{} | 6mo: {} | 12mo: {} | 24mo: {}'.format(name,
round(loss_6mo, 3), round(loss_12mo, 3),
round(loss_24mo, 3))
plt.figure(figsize=(15, 5))
plot = sns.lineplot(x='Dates', y='value', hue='variable',
data=pd.melt(dataframe, ['Dates']))
plot.set_ylabel('Probability')
plot.set_title(chart_title, fontsize = 20)
plot.set_ylim((0, 1))
self.pdf_object.savefig()
def average_model_outputs(self):
"""
Creates outputs for a Grand Average model by averaging across all
model outputs.
"""
from statistics import mean
self.average_model['Dates'] = self.knn_test_results['Dates']
self.average_model['Recession'] = self.knn_test_results['True_Recession']
self.average_model['True_Recession_within_6mo'] = self.knn_test_results['True_Recession_within_6mo']
self.average_model['True_Recession_within_12mo'] = self.knn_test_results['True_Recession_within_12mo']
self.average_model['True_Recession_within_24mo'] = self.knn_test_results['True_Recession_within_24mo']
model_outputs = [self.knn_test_results, self.elastic_net_test_results,
self.naive_bayes_test_results, self.svm_test_results,
self.gauss_test_results, self.xgboost_test_results]
average_6mo = []
average_12mo = []
average_24mo = []
for index in range(0, len(self.knn_test_results)):
outputs_6mo = []
outputs_12mo = []
outputs_24mo = []
for model in model_outputs:
outputs_6mo.append(model['Pred_Recession_within_6mo'][index])
outputs_12mo.append(model['Pred_Recession_within_12mo'][index])
outputs_24mo.append(model['Pred_Recession_within_24mo'][index])
average_6mo.append(mean(outputs_6mo))
average_12mo.append(mean(outputs_12mo))
average_24mo.append(mean(outputs_24mo))
self.average_model['Within 6 Months'] = average_6mo
self.average_model['Within 12 Months'] = average_12mo
self.average_model['Within 24 Months'] = average_24mo
def plot_test_results(self):
"""
Loads test results for each model, and plots them all into a single PDF.
"""
self.knn_test_results = pd.read_json(path.knn_test_results)
self.knn_test_results.sort_index(inplace=True)
self.elastic_net_test_results = pd.read_json(path.elastic_net_test_results)
self.elastic_net_test_results.sort_index(inplace=True)
self.naive_bayes_test_results = pd.read_json(path.naive_bayes_test_results)
self.naive_bayes_test_results.sort_index(inplace=True)
self.svm_test_results = pd.read_json(path.svm_test_results)
self.svm_test_results.sort_index(inplace=True)
self.gauss_test_results = pd.read_json(path.gauss_test_results)
self.gauss_test_results.sort_index(inplace=True)
self.xgboost_test_results = pd.read_json(path.xgboost_test_results)
self.xgboost_test_results.sort_index(inplace=True)
self.weighted_average_test_results = | pd.read_json(path.weighted_average_test_results) | pandas.read_json |
# %%
import pandas
import altair
from plot_shared import plot_points_average_and_trend
from data_shared import get_ni_pop_pyramid
import datetime
# %%
def load_grouped_time_series(df, date_col, group_col, series_col, new_name, model=True):
df = df.pivot(index=date_col,columns=group_col,values=series_col)
newind = pandas.date_range(start=df.index.min(), end=df.index.max())
df = df.reindex(newind)
df = df.fillna(0)
df = df.reset_index().melt(id_vars='index', var_name=group_col, value_name=series_col)
df = df.rename(columns={'index': 'Date'}).sort_values('Date')
df['%s 7-day rolling mean' %new_name] = df.groupby(group_col).rolling(7).mean().droplevel(0)
if model is True:
df = create_models(df, group_col, '%s 7-day rolling mean' %new_name)
return df
# %%
adm_band_mapping = | pandas.DataFrame({'Age Band': ['Aged 0 - 19', 'Aged 40 - 49', 'Aged 50 - 59', 'Aged 60 - 69',
'Aged 70 - 79', 'Aged 80 & Over', 'Unknown', 'Aged 20 - 39'], 'Group': ['0 - 19', '20+', '20+', '20+', '20+', '20+', 'Unknown', '20+']}) | pandas.DataFrame |
"""
MergeConcat
-----------
A class to merge or concat dataframes
"""
from typing import List, Union
import pandas as pd
from pandas.core.common import maybe_make_list
from soam.core import Step
class MergeConcat(Step):
def __init__(
self, keys: Union[str, List[str], None] = None, **kwargs,
):
"""
Merge on concat dataframes dependending on the keys
Parameters
----------
keys:
str or list of str labels of columns to merge on
"""
super().__init__(**kwargs)
if keys is None:
keys = []
self.keys = maybe_make_list(keys)
self.complete_df = | pd.DataFrame(columns=self.keys) | pandas.DataFrame |
import torch
import pandas as pd
from pathlib import Path
import logging
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
from imutils.paths import list_images
import cv2
from enum import IntEnum
from collections import namedtuple
import sys
_cur=Path(__file__).absolute().parent
sys.path.insert(0,str(_cur))
#print(sys.path)
from to_onnx import export2onnx
from general import set_dir
def get_labels(label_path):
df=pd.read_csv(label_path,header=None)
if len(df.columns)==1:
df.columns='label'
elif len(df.columns)==2:
df.columns=['label','index']
else:
raise RuntimeError('Unknow csv file')
label=df.label.values
classes=IntEnum('classLabels',tuple(label),start=0)
return classes
class Meter(object):
def __init__(self,device='cpu'):
self.preds=torch.tensor([]).to(device)
self.gts=torch.tensor([]).to(device)
#self.criterion=criterion
self.value=0
self.length=0
def collect(self,value,batch_size):
self.value+=value
self.length+=batch_size
@property
def average(self):
return self.value/self.length
@torch.no_grad()
def update(self, preds,gts,ids=None):
preds=self.avoid_zero_dim(preds)
gts=self.avoid_zero_dim(gts)
self.preds=torch.cat([self.preds,preds])
self.gts=torch.cat([self.gts,gts.long()])
if ids is not None:
ids=self.avoid_zero_dim(ids)
self.ids=torch.cat([self.ids,ids])
@staticmethod
def avoid_zero_dim(tensor):
tensor=torch.as_tensor(tensor)
if not tensor.size():
tensor=tensor[None]
return tensor
@staticmethod
def message(msg,epoch,print_freq):
logging.info(msg)
if epoch%print_freq==0:
print(msg)
@property
def acc(self):
return torch.mean((self.preds==self.gts)*1.0).item()
class Model_Saver():
def __init__(self,mname,timestamp,
val_thres=0.8,saved_model_number=2,direction='max',save_info=None):
if direction=='min':
self.val_thres=val_thres*-1
elif direction=='max':
self.val_thres=val_thres
else:
raise ValueError(dirction)
self.saved_model_number=saved_model_number
self.df= | pd.DataFrame(columns=['fpath','epoch','val']) | pandas.DataFrame |
#%%
import jieba
from gensim.models import word2vec
import pandas as pd
import numpy as np
df = pd.read_csv('train_data.csv')
df = df.fillna('')
df1 = pd.read_csv('test_data.csv')
df1 = df1.fillna('')
gg = pd.read_excel('e04.xlsx')
gg = list(gg.e04.values)
y_train = df['label']
df = df.drop(['ID','label_name','label'],axis = 1)
df1 = df1.drop(['id'],axis = 1)
data = | pd.concat([df,df1],axis = 0) | pandas.concat |
#
# Author: <NAME>, April 26.2018
#
import pandas as pd
import os
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
os.getcwd() # Get and place .py in same directory as .xls initially
os.chdir('./') # Path to .xls file
from pandas import read_excel
df = | read_excel('rssi_data_challenge2.xls') | pandas.read_excel |
import sys
import re
import pandas as pd
import lxml.html
year_week_re = re.compile('year/(\d{4})/week/(\d{1,2})')
week_re = re.compile('week/(\d{1,2})')
team_score_re = re.compile('([.\w\s]+)\s(\d{1,2}),\s([.\w\s]+)\s(\d{1,2})')
def main(html):
root = lxml.html.fromstring(html)
url = root.xpath("//link[@rel='canonical']")[0].get('href')
year_week = year_week_re.search(url)
if year_week:
year, week = map(int, year_week.groups())
else:
year, week = 2014, int(week_re.search(url).group(1))
rows = []
for a in root.xpath("//a[starts-with(@href, '/nfl/boxscore?gameId')]"):
team1, score1, team2, score2 = team_score_re.search(a.text).groups()
game_id = a.get('href').split('=')[-1]
rows.append({
'year': year,
'week': week,
'game_id': game_id,
'team1': team1,
'team2': team2,
'score1': score1,
'score2': score2,
})
return | pd.DataFrame(rows) | pandas.DataFrame |
"""
Tests for Series cumulative operations.
See also
--------
tests.frame.test_cumulative
"""
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
methods = {
"cumsum": np.cumsum,
"cumprod": np.cumprod,
"cummin": np.minimum.accumulate,
"cummax": np.maximum.accumulate,
}
class TestSeriesCumulativeOps:
@pytest.mark.parametrize("func", [np.cumsum, np.cumprod])
def test_datetime_series(self, datetime_series, func):
tm.assert_numpy_array_equal(
func(datetime_series).values,
func(np.array(datetime_series)),
check_dtype=True,
)
# with missing values
ts = datetime_series.copy()
ts[::2] = np.NaN
result = func(ts)[1::2]
expected = func(np.array(ts.dropna()))
tm.assert_numpy_array_equal(result.values, expected, check_dtype=False)
@pytest.mark.parametrize("method", ["cummin", "cummax"])
def test_cummin_cummax(self, datetime_series, method):
ufunc = methods[method]
result = getattr(datetime_series, method)().values
expected = ufunc(np.array(datetime_series))
tm.assert_numpy_array_equal(result, expected)
ts = datetime_series.copy()
ts[::2] = np.NaN
result = getattr(ts, method)()[1::2]
expected = ufunc(ts.dropna())
result.index = result.index._with_freq(None)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"ts",
[
pd.Timedelta(0),
pd.Timestamp("1999-12-31"),
pd.Timestamp("1999-12-31").tz_localize("US/Pacific"),
],
)
@pytest.mark.parametrize(
"method, skipna, exp_tdi",
[
["cummax", True, ["NaT", "2 days", "NaT", "2 days", "NaT", "3 days"]],
["cummin", True, ["NaT", "2 days", "NaT", "1 days", "NaT", "1 days"]],
[
"cummax",
False,
["NaT", "2 days", "2 days", "2 days", "2 days", "3 days"],
],
[
"cummin",
False,
["NaT", "2 days", "2 days", "1 days", "1 days", "1 days"],
],
],
)
def test_cummin_cummax_datetimelike(self, ts, method, skipna, exp_tdi):
# with ts==pd.Timedelta(0), we are testing td64; with naive Timestamp
# we are testing datetime64[ns]; with Timestamp[US/Pacific]
# we are testing dt64tz
tdi = pd.to_timedelta(["NaT", "2 days", "NaT", "1 days", "NaT", "3 days"])
ser = pd.Series(tdi + ts)
exp_tdi = pd.to_timedelta(exp_tdi)
expected = pd.Series(exp_tdi + ts)
result = getattr(ser, method)(skipna=skipna)
tm.assert_series_equal(expected, result)
@pytest.mark.parametrize(
"arg",
[
[False, False, False, True, True, False, False],
[False, False, False, False, False, False, False],
],
)
@pytest.mark.parametrize(
"func", [lambda x: x, lambda x: ~x], ids=["identity", "inverse"]
)
@pytest.mark.parametrize("method", methods.keys())
def test_cummethods_bool(self, arg, func, method):
# GH#6270
# checking Series method vs the ufunc applied to the values
ser = func(pd.Series(arg))
ufunc = methods[method]
exp_vals = ufunc(ser.values)
expected = pd.Series(exp_vals)
result = getattr(ser, method)()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"method, expected",
[
["cumsum", | pd.Series([0, 1, np.nan, 1], dtype=object) | pandas.Series |
import unittest
import platform
import random
import string
import platform
import pandas as pd
import numpy as np
import numba
import hpat
from hpat.tests.test_utils import (count_array_REPs, count_parfor_REPs, count_parfor_OneDs,
count_array_OneDs, dist_IR_contains, get_start_end)
from hpat.tests.gen_test_data import ParquetGenerator
from numba.config import IS_32BITS
@hpat.jit
def inner_get_column(df):
# df2 = df[['A', 'C']]
# df2['D'] = np.ones(3)
return df.A
COL_IND = 0
class TestDataFrame(unittest.TestCase):
def test_create1(self):
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n), 'B': np.random.ranf(n)})
return df.A
hpat_func = hpat.jit(test_impl)
n = 11
pd.testing.assert_series_equal(hpat_func(n), test_impl(n))
def test_create_cond1(self):
def test_impl(A, B, c):
if c:
df = pd.DataFrame({'A': A})
else:
df = pd.DataFrame({'A': B})
return df.A
hpat_func = hpat.jit(test_impl)
n = 11
A = np.ones(n)
B = np.arange(n) + 1.0
c = 0
pd.testing.assert_series_equal(hpat_func(A, B, c), test_impl(A, B, c))
c = 2
pd.testing.assert_series_equal(hpat_func(A, B, c), test_impl(A, B, c))
@unittest.skip('Implement feature to create DataFrame without column names')
def test_create_without_column_names(self):
def test_impl():
df = pd.DataFrame([100, 200, 300, 400, 200, 100])
return df
hpat_func = hpat.jit(test_impl)
pd.testing.assert_frame_equal(hpat_func(), test_impl())
def test_unbox1(self):
def test_impl(df):
return df.A
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.random.ranf(n)})
pd.testing.assert_series_equal(hpat_func(df), test_impl(df))
@unittest.skip("needs properly refcounted dataframes")
def test_unbox2(self):
def test_impl(df, cond):
n = len(df)
if cond:
df['A'] = np.arange(n) + 2.0
return df.A
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.ones(n), 'B': np.random.ranf(n)})
pd.testing.assert_series_equal(hpat_func(df.copy(), True), test_impl(df.copy(), True))
pd.testing.assert_series_equal(hpat_func(df.copy(), False), test_impl(df.copy(), False))
@unittest.skip('Implement feature to create DataFrame without column names')
def test_unbox_without_column_names(self):
def test_impl(df):
return df
df = pd.DataFrame([100, 200, 300, 400, 200, 100])
hpat_func = hpat.jit(test_impl)
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
def test_box1(self):
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n), 'B': np.arange(n)})
return df
hpat_func = hpat.jit(test_impl)
n = 11
do_check = False if platform.system() == 'Windows' and not IS_32BITS else True
pd.testing.assert_frame_equal(hpat_func(n), test_impl(n), check_dtype=do_check)
def test_box2(self):
def test_impl():
df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'bb', 'ccc']})
return df
hpat_func = hpat.jit(test_impl)
pd.testing.assert_frame_equal(hpat_func(), test_impl())
@unittest.skip("pending df filter support")
def test_box3(self):
def test_impl(df):
df = df[df.A != 'dd']
return df
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['aa', 'bb', 'cc']})
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
def test_box_categorical(self):
def test_impl(df):
df['A'] = df['A'] + 1
return df
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1, 2, 3],
'B': pd.Series(['N', 'Y', 'Y'],
dtype=pd.api.types.CategoricalDtype(['N', 'Y']))})
pd.testing.assert_frame_equal(hpat_func(df.copy(deep=True)), test_impl(df))
def test_box_dist_return(self):
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n), 'B': np.arange(n)})
return df
hpat_func = hpat.jit(distributed={'df'})(test_impl)
n = 11
hres, res = hpat_func(n), test_impl(n)
self.assertEqual(count_array_OneDs(), 3)
self.assertEqual(count_parfor_OneDs(), 2)
dist_sum = hpat.jit(
lambda a: hpat.distributed_api.dist_reduce(
a, np.int32(hpat.distributed_api.Reduce_Type.Sum.value)))
dist_sum(1) # run to compile
np.testing.assert_allclose(dist_sum(hres.A.sum()), res.A.sum())
np.testing.assert_allclose(dist_sum(hres.B.sum()), res.B.sum())
def test_len1(self):
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n, np.int64), 'B': np.random.ranf(n)})
return len(df)
hpat_func = hpat.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_shape1(self):
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n, np.int64), 'B': np.random.ranf(n)})
return df.shape
hpat_func = hpat.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_column_getitem1(self):
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n), 'B': np.random.ranf(n)})
Ac = df['A'].values
return Ac.sum()
hpat_func = hpat.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
self.assertEqual(count_parfor_OneDs(), 1)
def test_column_list_getitem1(self):
def test_impl(df):
return df[['A', 'C']]
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame(
{'A': np.arange(n), 'B': np.ones(n), 'C': np.random.ranf(n)})
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
def test_filter1(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n) + n, 'B': np.arange(n)**2})
df1 = df[df.A > .5]
return df1.B.sum()
hpat_func = hpat.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_filter2(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n) + n, 'B': np.arange(n)**2})
df1 = df.loc[df.A > .5]
return np.sum(df1.B)
hpat_func = hpat.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_filter3(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n) + n, 'B': np.arange(n)**2})
df1 = df.iloc[(df.A > .5).values]
return np.sum(df1.B)
hpat_func = hpat.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_iloc1(self):
def test_impl(df, n):
return df.iloc[1:n].B.values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
np.testing.assert_array_equal(hpat_func(df, n), test_impl(df, n))
def test_iloc2(self):
def test_impl(df, n):
return df.iloc[np.array([1, 4, 9])].B.values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
np.testing.assert_array_equal(hpat_func(df, n), test_impl(df, n))
def test_iloc3(self):
def test_impl(df):
return df.iloc[:, 1].values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
@unittest.skip("TODO: support A[[1,2,3]] in Numba")
def test_iloc4(self):
def test_impl(df, n):
return df.iloc[[1, 4, 9]].B.values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
np.testing.assert_array_equal(hpat_func(df, n), test_impl(df, n))
def test_iloc5(self):
# test iloc with global value
def test_impl(df):
return df.iloc[:, COL_IND].values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_loc1(self):
def test_impl(df):
return df.loc[:, 'B'].values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_iat1(self):
def test_impl(n):
df = pd.DataFrame({'B': np.ones(n), 'A': np.arange(n) + n})
return df.iat[3, 1]
hpat_func = hpat.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
def test_iat2(self):
def test_impl(df):
return df.iat[3, 1]
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'B': np.ones(n), 'A': np.arange(n) + n})
self.assertEqual(hpat_func(df), test_impl(df))
def test_iat3(self):
def test_impl(df, n):
return df.iat[n - 1, 1]
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'B': np.ones(n), 'A': np.arange(n) + n})
self.assertEqual(hpat_func(df, n), test_impl(df, n))
def test_iat_set1(self):
def test_impl(df, n):
df.iat[n - 1, 1] = n**2
return df.A # return the column to check column aliasing
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'B': np.ones(n), 'A': np.arange(n) + n})
df2 = df.copy()
pd.testing.assert_series_equal(hpat_func(df, n), test_impl(df2, n))
def test_iat_set2(self):
def test_impl(df, n):
df.iat[n - 1, 1] = n**2
return df # check df aliasing/boxing
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'B': np.ones(n), 'A': np.arange(n) + n})
df2 = df.copy()
pd.testing.assert_frame_equal(hpat_func(df, n), test_impl(df2, n))
def test_set_column1(self):
# set existing column
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n, np.int64), 'B': np.arange(n) + 3.0})
df['A'] = np.arange(n)
return df
hpat_func = hpat.jit(test_impl)
n = 11
do_check = False if platform.system() == 'Windows' and not IS_32BITS else True
pd.testing.assert_frame_equal(hpat_func(n), test_impl(n), check_dtype=do_check)
def test_set_column_reflect4(self):
# set existing column
def test_impl(df, n):
df['A'] = np.arange(n)
hpat_func = hpat.jit(test_impl)
n = 11
df1 = pd.DataFrame({'A': np.ones(n, np.int64), 'B': np.arange(n) + 3.0})
df2 = df1.copy()
hpat_func(df1, n)
test_impl(df2, n)
do_check = False if platform.system() == 'Windows' and not IS_32BITS else True
pd.testing.assert_frame_equal(df1, df2, check_dtype=do_check)
def test_set_column_new_type1(self):
# set existing column with a new type
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n), 'B': np.arange(n) + 3.0})
df['A'] = np.arange(n)
return df
hpat_func = hpat.jit(test_impl)
n = 11
do_check = False if platform.system() == 'Windows' and not IS_32BITS else True
pd.testing.assert_frame_equal(hpat_func(n), test_impl(n), check_dtype=do_check)
def test_set_column2(self):
# create new column
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n), 'B': np.arange(n) + 1.0})
df['C'] = np.arange(n)
return df
hpat_func = hpat.jit(test_impl)
n = 11
do_check = False if platform.system() == 'Windows' and not IS_32BITS else True
pd.testing.assert_frame_equal(hpat_func(n), test_impl(n), check_dtype=do_check)
def test_set_column_reflect3(self):
# create new column
def test_impl(df, n):
df['C'] = np.arange(n)
hpat_func = hpat.jit(test_impl)
n = 11
df1 = pd.DataFrame({'A': np.ones(n, np.int64), 'B': np.arange(n) + 3.0})
df2 = df1.copy()
hpat_func(df1, n)
test_impl(df2, n)
do_check = False if platform.system() == 'Windows' and not IS_32BITS else True
pd.testing.assert_frame_equal(df1, df2, check_dtype=do_check)
def test_set_column_bool1(self):
def test_impl(df):
df['C'] = df['A'][df['B']]
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1, 2, 3], 'B': [True, False, True]})
df2 = df.copy()
test_impl(df2)
hpat_func(df)
pd.testing.assert_series_equal(df.C, df2.C)
def test_set_column_reflect1(self):
def test_impl(df, arr):
df['C'] = arr
return df.C.sum()
hpat_func = hpat.jit(test_impl)
n = 11
arr = np.random.ranf(n)
df = pd.DataFrame({'A': np.ones(n), 'B': np.random.ranf(n)})
hpat_func(df, arr)
self.assertIn('C', df)
np.testing.assert_almost_equal(df.C.values, arr)
def test_set_column_reflect2(self):
def test_impl(df, arr):
df['C'] = arr
return df.C.sum()
hpat_func = hpat.jit(test_impl)
n = 11
arr = np.random.ranf(n)
df = pd.DataFrame({'A': np.ones(n), 'B': np.random.ranf(n)})
df2 = df.copy()
np.testing.assert_almost_equal(hpat_func(df, arr), test_impl(df2, arr))
def test_df_values1(self):
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n), 'B': np.arange(n)})
return df.values
hpat_func = hpat.jit(test_impl)
n = 11
np.testing.assert_array_equal(hpat_func(n), test_impl(n))
def test_df_values2(self):
def test_impl(df):
return df.values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.ones(n), 'B': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_df_values_parallel1(self):
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n), 'B': np.arange(n)})
return df.values.sum()
hpat_func = hpat.jit(test_impl)
n = 11
np.testing.assert_array_equal(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_df_apply(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)})
B = df.apply(lambda r: r.A + r.B, axis=1)
return df.B.sum()
n = 121
hpat_func = hpat.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
def test_df_apply_branch(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)})
B = df.apply(lambda r: r.A < 10 and r.B > 20, axis=1)
return df.B.sum()
n = 121
hpat_func = hpat.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
def test_df_describe(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(0, n, 1, np.float32),
'B': np.arange(n)})
#df.A[0:1] = np.nan
return df.describe()
hpat_func = hpat.jit(test_impl)
n = 1001
hpat_func(n)
# XXX: test actual output
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_sort_values(self):
def test_impl(df):
df.sort_values('A', inplace=True)
return df.B.values
n = 1211
np.random.seed(2)
df = pd.DataFrame({'A': np.random.ranf(n), 'B': np.arange(n), 'C': np.random.ranf(n)})
hpat_func = hpat.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(df.copy()), test_impl(df))
def test_sort_values_copy(self):
def test_impl(df):
df2 = df.sort_values('A')
return df2.B.values
n = 1211
np.random.seed(2)
df = pd.DataFrame({'A': np.random.ranf(n), 'B': np.arange(n), 'C': np.random.ranf(n)})
hpat_func = hpat.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(df.copy()), test_impl(df))
def test_sort_values_single_col(self):
def test_impl(df):
df.sort_values('A', inplace=True)
return df.A.values
n = 1211
np.random.seed(2)
df = pd.DataFrame({'A': np.random.ranf(n)})
hpat_func = hpat.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(df.copy()), test_impl(df))
def test_sort_values_single_col_str(self):
def test_impl(df):
df.sort_values('A', inplace=True)
return df.A.values
n = 1211
random.seed(2)
str_vals = []
for _ in range(n):
k = random.randint(1, 30)
val = ''.join(random.choices(string.ascii_uppercase + string.digits, k=k))
str_vals.append(val)
df = pd.DataFrame({'A': str_vals})
hpat_func = hpat.jit(test_impl)
self.assertTrue((hpat_func(df.copy()) == test_impl(df)).all())
def test_sort_values_str(self):
def test_impl(df):
df.sort_values('A', inplace=True)
return df.B.values
n = 1211
random.seed(2)
str_vals = []
str_vals2 = []
for i in range(n):
k = random.randint(1, 30)
val = ''.join(random.choices(string.ascii_uppercase + string.digits, k=k))
str_vals.append(val)
val = ''.join(random.choices(string.ascii_uppercase + string.digits, k=k))
str_vals2.append(val)
df = pd.DataFrame({'A': str_vals, 'B': str_vals2})
# use mergesort for stability, in str generation equal keys are more probable
sorted_df = df.sort_values('A', inplace=False, kind='mergesort')
hpat_func = hpat.jit(test_impl)
self.assertTrue((hpat_func(df) == sorted_df.B.values).all())
def test_sort_parallel_single_col(self):
# create `kde.parquet` file
ParquetGenerator.gen_kde_pq()
# TODO: better parallel sort test
def test_impl():
df = pd.read_parquet('kde.parquet')
df.sort_values('points', inplace=True)
res = df.points.values
return res
hpat_func = hpat.jit(locals={'res:return': 'distributed'})(test_impl)
save_min_samples = hpat.hiframes.sort.MIN_SAMPLES
try:
hpat.hiframes.sort.MIN_SAMPLES = 10
res = hpat_func()
self.assertTrue((np.diff(res) >= 0).all())
finally:
# restore global val
hpat.hiframes.sort.MIN_SAMPLES = save_min_samples
def test_df_isna1(self):
'''Verify DataFrame.isna implementation for various types of data'''
def test_impl(df):
return df.isna()
hpat_func = hpat.jit(test_impl)
# TODO: add column with datetime values when test_series_datetime_isna1 is fixed
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0],
'B': [np.inf, 5, np.nan, 6],
'C': ['aa', 'b', None, 'ccc'],
'D': [None, 'dd', '', None]})
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
def test_df_astype_str1(self):
'''Verifies DataFrame.astype implementation converting various types to string'''
def test_impl(df):
return df.astype(str)
hpat_func = hpat.jit(test_impl)
# TODO: add column with float values when test_series_astype_float_to_str1 is fixed
df = pd.DataFrame({'A': [-1, 2, 11, 5, 0, -7],
'B': ['aa', 'bb', 'cc', 'dd', '', 'fff']
})
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
def test_df_astype_float1(self):
'''Verifies DataFrame.astype implementation converting various types to float'''
def test_impl(df):
return df.astype(np.float64)
hpat_func = hpat.jit(test_impl)
# TODO: uncomment column with string values when test_series_astype_str_to_float64 is fixed
df = pd.DataFrame({'A': [-1, 2, 11, 5, 0, -7],
# 'B': ['3.24', '1E+05', '-1', '-1.3E-01', 'nan', 'inf'],
'C': [3.24, 1E+05, -1, -1.3E-01, np.nan, np.inf]
})
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
def test_df_astype_int1(self):
'''Verifies DataFrame.astype implementation converting various types to int'''
def test_impl(df):
return df.astype(np.int32)
hpat_func = hpat.jit(test_impl)
n = 6
# TODO: uncomment column with string values when test_series_astype_str_to_int32 is fixed
df = pd.DataFrame({'A': np.ones(n, dtype=np.int64),
'B': np.arange(n, dtype=np.int32),
# 'C': ['-1', '2', '3', '0', '-7', '99'],
'D': np.arange(float(n), dtype=np.float32)
})
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
def test_sort_parallel(self):
# create `kde.parquet` file
ParquetGenerator.gen_kde_pq()
# TODO: better parallel sort test
def test_impl():
df = pd.read_parquet('kde.parquet')
df['A'] = df.points.astype(np.float64)
df.sort_values('points', inplace=True)
res = df.A.values
return res
hpat_func = hpat.jit(locals={'res:return': 'distributed'})(test_impl)
save_min_samples = hpat.hiframes.sort.MIN_SAMPLES
try:
hpat.hiframes.sort.MIN_SAMPLES = 10
res = hpat_func()
self.assertTrue((np.diff(res) >= 0).all())
finally:
# restore global val
hpat.hiframes.sort.MIN_SAMPLES = save_min_samples
def test_itertuples(self):
def test_impl(df):
res = 0.0
for r in df.itertuples():
res += r[1]
return res
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.ones(n, np.int64)})
self.assertEqual(hpat_func(df), test_impl(df))
def test_itertuples_str(self):
def test_impl(df):
res = ""
for r in df.itertuples():
res += r[1]
return res
hpat_func = hpat.jit(test_impl)
n = 3
df = pd.DataFrame({'A': ['aa', 'bb', 'cc'], 'B': np.ones(n, np.int64)})
self.assertEqual(hpat_func(df), test_impl(df))
def test_itertuples_order(self):
def test_impl(n):
res = 0.0
df = pd.DataFrame({'B': np.arange(n), 'A': np.ones(n, np.int64)})
for r in df.itertuples():
res += r[1]
return res
hpat_func = hpat.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
def test_itertuples_analysis(self):
"""tests array analysis handling of generated tuples, shapes going
through blocks and getting used in an array dimension
"""
def test_impl(n):
res = 0
df = pd.DataFrame({'B': np.arange(n), 'A': np.ones(n, np.int64)})
for r in df.itertuples():
if r[1] == 2:
A = np.ones(r[1])
res += len(A)
return res
hpat_func = hpat.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
@unittest.skipIf(platform.system() == 'Windows', "Attribute 'dtype' are different int64 and int32")
def test_df_head1(self):
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n), 'B': np.arange(n)})
return df.head(3)
hpat_func = hpat.jit(test_impl)
n = 11
pd.testing.assert_frame_equal(hpat_func(n), test_impl(n))
def test_pct_change1(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n) + 1.0, 'B': np.arange(n) + 1})
return df.pct_change(3)
hpat_func = hpat.jit(test_impl)
n = 11
pd.testing.assert_frame_equal(hpat_func(n), test_impl(n))
def test_mean1(self):
# TODO: non-numeric columns should be ignored automatically
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n) + 1.0, 'B': np.arange(n) + 1})
return df.mean()
hpat_func = hpat.jit(test_impl)
n = 11
pd.testing.assert_series_equal(hpat_func(n), test_impl(n))
def test_median1(self):
# TODO: non-numeric columns should be ignored automatically
def test_impl(n):
df = pd.DataFrame({'A': 2 ** np.arange(n), 'B': np.arange(n) + 1.0})
return df.median()
hpat_func = hpat.jit(test_impl)
n = 11
pd.testing.assert_series_equal(hpat_func(n), test_impl(n))
def test_std1(self):
# TODO: non-numeric columns should be ignored automatically
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n) + 1.0, 'B': np.arange(n) + 1})
return df.std()
hpat_func = hpat.jit(test_impl)
n = 11
pd.testing.assert_series_equal(hpat_func(n), test_impl(n))
def test_var1(self):
# TODO: non-numeric columns should be ignored automatically
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n) + 1.0, 'B': np.arange(n) + 1})
return df.var()
hpat_func = hpat.jit(test_impl)
n = 11
pd.testing.assert_series_equal(hpat_func(n), test_impl(n))
def test_max1(self):
# TODO: non-numeric columns should be ignored automatically
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n) + 1.0, 'B': np.arange(n) + 1})
return df.max()
hpat_func = hpat.jit(test_impl)
n = 11
pd.testing.assert_series_equal(hpat_func(n), test_impl(n))
def test_min1(self):
# TODO: non-numeric columns should be ignored automatically
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n) + 1.0, 'B': np.arange(n) + 1})
return df.min()
hpat_func = hpat.jit(test_impl)
n = 11
pd.testing.assert_series_equal(hpat_func(n), test_impl(n))
def test_sum1(self):
# TODO: non-numeric columns should be ignored automatically
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n) + 1.0, 'B': np.arange(n) + 1})
return df.sum()
hpat_func = hpat.jit(test_impl)
n = 11
pd.testing.assert_series_equal(hpat_func(n), test_impl(n))
def test_prod1(self):
# TODO: non-numeric columns should be ignored automatically
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n) + 1.0, 'B': np.arange(n) + 1})
return df.prod()
hpat_func = hpat.jit(test_impl)
n = 11
pd.testing.assert_series_equal(hpat_func(n), test_impl(n))
def test_count1(self):
# TODO: non-numeric columns should be ignored automatically
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n) + 1.0, 'B': np.arange(n) + 1})
return df.count()
hpat_func = hpat.jit(test_impl)
n = 11
pd.testing.assert_series_equal(hpat_func(n), test_impl(n))
def test_df_fillna1(self):
def test_impl(df):
return df.fillna(5.0)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
hpat_func = hpat.jit(test_impl)
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
def test_df_fillna_str1(self):
def test_impl(df):
return df.fillna("dd")
df = pd.DataFrame({'A': ['aa', 'b', None, 'ccc']})
hpat_func = hpat.jit(test_impl)
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
def test_df_fillna_inplace1(self):
def test_impl(A):
A.fillna(11.0, inplace=True)
return A
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
df2 = df.copy()
hpat_func = hpat.jit(test_impl)
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df2))
def test_df_reset_index1(self):
def test_impl(df):
return df.reset_index(drop=True)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
hpat_func = hpat.jit(test_impl)
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
def test_df_reset_index_inplace1(self):
def test_impl():
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
df.reset_index(drop=True, inplace=True)
return df
hpat_func = hpat.jit(test_impl)
pd.testing.assert_frame_equal(hpat_func(), test_impl())
def test_df_dropna1(self):
def test_impl(df):
return df.dropna()
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0], 'B': [4, 5, 6, 7]})
hpat_func = hpat.jit(test_impl)
out = test_impl(df).reset_index(drop=True)
h_out = hpat_func(df)
pd.testing.assert_frame_equal(out, h_out)
def test_df_dropna2(self):
def test_impl(df):
return df.dropna()
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
hpat_func = hpat.jit(test_impl)
out = test_impl(df).reset_index(drop=True)
h_out = hpat_func(df)
pd.testing.assert_frame_equal(out, h_out)
def test_df_dropna_inplace1(self):
# TODO: fix error when no df is returned
def test_impl(df):
df.dropna(inplace=True)
return df
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0], 'B': [4, 5, 6, 7]})
df2 = df.copy()
hpat_func = hpat.jit(test_impl)
out = test_impl(df).reset_index(drop=True)
h_out = hpat_func(df2)
pd.testing.assert_frame_equal(out, h_out)
def test_df_dropna_str1(self):
def test_impl(df):
return df.dropna()
df = pd.DataFrame({'A': [1.0, 2.0, 4.0, 1.0], 'B': ['aa', 'b', None, 'ccc']})
hpat_func = hpat.jit(test_impl)
out = test_impl(df).reset_index(drop=True)
h_out = hpat_func(df)
pd.testing.assert_frame_equal(out, h_out)
def test_df_drop1(self):
def test_impl(df):
return df.drop(columns=['A'])
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0], 'B': [4, 5, 6, 7]})
hpat_func = hpat.jit(test_impl)
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
def test_df_drop_inplace2(self):
# test droping after setting the column
def test_impl(df):
df2 = df[['A', 'B']]
df2['D'] = np.ones(3)
df2.drop(columns=['D'], inplace=True)
return df2
df = pd.DataFrame({'A': [1, 2, 3], 'B': [2, 3, 4]})
hpat_func = hpat.jit(test_impl)
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
def test_df_drop_inplace1(self):
def test_impl(df):
df.drop('A', axis=1, inplace=True)
return df
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0], 'B': [4, 5, 6, 7]})
df2 = df.copy()
hpat_func = hpat.jit(test_impl)
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df2))
def test_isin_df1(self):
def test_impl(df, df2):
return df.isin(df2)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
df2 = pd.DataFrame({'A': np.arange(n), 'C': np.arange(n)**2})
df2.A[n // 2:] = n
pd.testing.assert_frame_equal(hpat_func(df, df2), test_impl(df, df2))
@unittest.skip("needs dict typing in Numba")
def test_isin_dict1(self):
def test_impl(df):
vals = {'A': [2, 3, 4], 'C': [4, 5, 6]}
return df.isin(vals)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
def test_isin_list1(self):
def test_impl(df):
vals = [2, 3, 4]
return df.isin(vals)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
def test_append1(self):
def test_impl(df, df2):
return df.append(df2, ignore_index=True)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
df2 = pd.DataFrame({'A': np.arange(n), 'C': np.arange(n)**2})
df2.A[n // 2:] = n
pd.testing.assert_frame_equal(hpat_func(df, df2), test_impl(df, df2))
def test_append2(self):
def test_impl(df, df2, df3):
return df.append([df2, df3], ignore_index=True)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
df2 = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
df2.A[n // 2:] = n
df3 = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
pd.testing.assert_frame_equal(
hpat_func(df, df2, df3), test_impl(df, df2, df3))
def test_concat_columns1(self):
def test_impl(S1, S2):
return pd.concat([S1, S2], axis=1)
hpat_func = hpat.jit(test_impl)
S1 = | pd.Series([4, 5]) | pandas.Series |
from __future__ import absolute_import
from __future__ import print_function
import sys
import glob
import time
import numpy as np
import pandas as pd
import os.path
import time
import datetime
import re
from keras.preprocessing import sequence
from keras.optimizers import SGD, RMSprop, Adagrad
from keras.utils import np_utils
from keras.models import Sequential, Graph, Model
from keras.models import model_from_json
from keras.layers import Input, merge, Flatten, Dense, Activation, Convolution1D, ZeroPadding1D
#from keras.layers.core import Dense, Dropout, Activation, TimeDistributedDense, Flatten, Reshape, Permute, Merge, Lambda
#from keras.layers.convolutional import Convolution1D, MaxPooling1D, Convolution2D, MaxPooling2D, UpSampling1D, UpSampling2D, ZeroPadding1D
from keras.layers.advanced_activations import ParametricSoftplus, SReLU
from keras.callbacks import ModelCheckpoint, Callback
import matplotlib.pyplot as plt
path = "./training_data_large/" # to make sure signal files are written in same directory as data files
def draw_model(model):
from IPython.display import SVG
from keras.utils.visualize_util import model_to_dot
from keras.utils.visualize_util import plot
#graph = to_graph(model, show_shape=True)
#graph.write_png("UFCNN_1.png")
SVG(model_to_dot(model).create(prog='dot', format='svg'))
plot(model, to_file='UFCNN_1.png')
def print_nodes_shapes(model):
for k, v in model.inputs.items():
print("{} : {} : {} : {}".format(k, type(v), v.input_shape, v.output_shape))
for k, v in model.nodes.items():
print("{} : {} : {} : {}".format(k, type(v), v.input_shape, v.output_shape))
for k, v in model.outputs.items():
print("{} : {} : {} : {}".format(k, type(v), v.input_shape, v.output_shape))
def print_layers_shapes(model):
for l in model.layers:
print("{} : {} : {}".format(type(l), l.input_shape, l.output_shape))
def save_neuralnet (model, model_name):
json_string = model.to_json()
open(path + model_name + '_architecture.json', 'w').write(json_string)
model.save_weights(path + model_name + '_weights.h5', overwrite=True)
yaml_string = model.to_yaml()
with open(path + model_name + '_data.yml', 'w') as outfile:
outfile.write( yaml_string)
def load_neuralnet(model_name):
"""
reading the model from disk - including all the trained weights and the complete model design (hyperparams, planes,..)
"""
arch_name = path + model_name + '_architecture.json'
weight_name = path + model_name + '_weights.h5'
if not os.path.isfile(arch_name) or not os.path.isfile(weight_name):
print("model_name given and file %s and/or %s not existing. Aborting." % (arch_name, weight_name))
sys.exit()
print("Loaded model: ",model_name)
model = model_from_json(open(arch_name).read())
model.load_weights(weight_name)
return model
def ufcnn_model_concat(sequence_length=5000,
features=1,
nb_filter=150,
filter_length=5,
output_dim=1,
optimizer='adagrad',
loss='mse',
regression = True,
class_mode=None,
activation="softplus",
init="lecun_uniform"):
#model = Graph()
#model.add_input(name='input', input_shape=(None, features))
main_input = Input(name='input', shape=(None, features))
#########################################################
#model.add_node(ZeroPadding1D(2), name='input_padding', input='input') # to avoid lookahead bias
input_padding = (ZeroPadding1D(2))(main_input) # to avoid lookahead bias
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='valid', init=init, input_shape=(sequence_length, features)), name='conv1', input='input_padding')
#model.add_node(Activation(activation), name='relu1', input='conv1')
conv1 = Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='valid', init=init, input_shape=(sequence_length, features))(input_padding)
relu1 = (Activation(activation))(conv1)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init), name='conv2', input='relu1')
#model.add_node(Activation(activation), name='relu2', input='conv2')
conv2 = (Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init))(relu1)
relu2 = (Activation(activation))(conv2)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init), name='conv3', input='relu2')
#model.add_node(Activation(activation), name='relu3', input='conv3')
conv3 = (Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init))(relu2)
relu3 = (Activation(activation))(conv3)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init), name='conv4', input='relu3')
#model.add_node(Activation(activation), name='relu4', input='conv4')
conv4 = (Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init))(relu3)
relu4 = (Activation(activation))(conv4)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init), name='conv5', input='relu4')
#model.add_node(Activation(activation), name='relu5', input='conv5')
conv5 = (Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init))(relu4)
relu5 = (Activation(activation))(conv5)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter,filter_length=filter_length, border_mode='same', init=init),
# name='conv6',
# inputs=['relu3', 'relu5'],
# merge_mode='concat', concat_axis=-1)
#model.add_node(Activation(activation), name='relu6', input='conv6')
conv6 = merge([relu3, relu5], mode='concat', concat_axis=1)
relu6 = (Activation(activation))(conv6)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter,filter_length=filter_length, border_mode='same', init=init),
# name='conv7',
# inputs=['relu2', 'relu6'],
# merge_mode='concat', concat_axis=-1)
#model.add_node(Activation(activation), name='relu7', input='conv7')
conv7 = merge([relu2, relu6], mode='concat', concat_axis=1)
relu7 = (Activation(activation))(conv7)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter,filter_length=filter_length, border_mode='same', init=init),
# name='conv8',
# inputs=['relu1', 'relu7'],
# merge_mode='concat', concat_axis=-1)
#model.add_node(Activation(activation), name='relu8', input='conv8')
conv8 = merge([relu1, relu7], mode='concat', concat_axis=1)
relu8 = (Activation(activation))(conv8)
#########################################################
if regression:
#########################################################
#model.add_node(Convolution1D(nb_filter=output_dim, filter_length=sequence_length, border_mode='same', init=init), name='conv9', input='relu8')
#model.add_output(name='output', input='conv9')
conv9 = Convolution1D(nb_filter=output_dim, filter_length=sequence_length, border_mode='same', init=init)(relu8)
output = conv9
#main_output = conv9.output
else:
#model.add_node(Convolution1D(nb_filter=output_dim, filter_length=sequence_length, border_mode='same', init=init), name='conv9', input='relu8')
#model.add_node(Activation('softmax'), name='activation', input='conv9')
#model.add_output(name='output', input='activation')
conv9 = Convolution1D(nb_filter=output_dim, filter_length=sequence_length, border_mode='same', init=init)(relu8)
activation = (Activation('softmax'))(conv9)
#main_output = activation.output
output = activation
#model.compile(optimizer=optimizer, loss={'output': loss})
model = Model(input=main_input, output=output)
model.compile(optimizer=optimizer, loss=loss)
return model
def ufcnn_model_deconv(sequence_length=5000,
features=4,
nb_filter=150,
filter_length=5,
output_dim=1,
optimizer='adagrad',
loss='mse',
regression = False,
class_mode=None,
activation="softplus",
init="lecun_uniform"):
#model = Graph()
#model.add_input(name='input', input_shape=(None, features))
main_input = Input(name='input', shape=(None, features))
#########################################################
#model.add_node(ZeroPadding1D(2), name='input_padding', input='input') # to avoid lookahead bias
input_padding = (ZeroPadding1D(2))(main_input) # to avoid lookahead bias
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='valid', init=init, input_shape=(sequence_length, features)), name='conv1', input='input_padding')
#model.add_node(Activation(activation), name='relu1', input='conv1')
conv1 = Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='valid', init=init, input_shape=(sequence_length, features))(input_padding)
relu1 = (Activation(activation))(conv1)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init), name='conv2', input='relu1')
#model.add_node(Activation(activation), name='relu2', input='conv2')
conv2 = (Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init))(relu1)
relu2 = (Activation(activation))(conv2)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init), name='conv3', input='relu2')
#model.add_node(Activation(activation), name='relu3', input='conv3')
conv3 = (Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init))(relu2)
relu3 = (Activation(activation))(conv3)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init), name='conv4', input='relu3')
#model.add_node(Activation(activation), name='relu4', input='conv4')
conv4 = (Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init))(relu3)
relu4 = (Activation(activation))(conv4)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init), name='conv5', input='relu4')
#model.add_node(Activation(activation), name='relu5', input='conv5')
conv5 = (Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init))(relu4)
relu5 = (Activation(activation))(conv5)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter,filter_length=filter_length, border_mode='same', init=init),
# name='conv6',
# inputs=['relu3', 'relu5'],
# merge_mode='concat', concat_axis=-1)
#model.add_node(Activation(activation), name='relu6', input='conv6')
conv6 = merge([relu3, relu5], mode='concat', concat_axis=1)
relu6 = (Activation(activation))(conv6)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter,filter_length=filter_length, border_mode='same', init=init),
# name='conv7',
# inputs=['relu2', 'relu6'],
# merge_mode='concat', concat_axis=-1)
#model.add_node(Activation(activation), name='relu7', input='conv7')
conv7 = merge([relu2, relu6], mode='concat', concat_axis=1)
relu7 = (Activation(activation))(conv7)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter,filter_length=filter_length, border_mode='same', init=init),
# name='conv8',
# inputs=['relu1', 'relu7'],
# merge_mode='concat', concat_axis=-1)
#model.add_node(Activation(activation), name='relu8', input='conv8')
conv8 = merge([relu1, relu7], mode='concat', concat_axis=1)
relu8 = (Activation(activation))(conv8)
#########################################################
if regression:
#########################################################
#model.add_node(Convolution1D(nb_filter=output_dim, filter_length=sequence_length, border_mode='same', init=init), name='conv9', input='relu8')
#model.add_output(name='output', input='conv9')
conv9 = Convolution1D(nb_filter=output_dim, filter_length=sequence_length, border_mode='same', init=init)(relu8)
output = conv9
#main_output = conv9.output
else:
#model.add_node(Convolution1D(nb_filter=output_dim, filter_length=sequence_length, border_mode='same', init=init), name='conv9', input='relu8')
#model.add_node(Activation('softmax'), name='activation', input='conv9')
#model.add_output(name='output', input='activation')
conv9 = Convolution1D(nb_filter=output_dim, filter_length=sequence_length, border_mode='same', init=init)(relu8)
activation = (Activation('softmax'))(conv9)
#main_output = activation.output
output = activation
#model.compile(optimizer=optimizer, loss={'output': loss})
model = Model(input=main_input, output=output)
model.compile(optimizer=optimizer, loss=loss)
return model
def ufcnn_model_seq(sequence_length=5000,
features=1,
nb_filter=150,
filter_length=5,
output_dim=1,
optimizer='adagrad',
loss='mse',
regression = True,
class_mode=None,
init="lecun_uniform"):
model = Sequential()
model.add(ZeroPadding1D(2, input_shape=(None, features)))
#########################################################
model.add(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='valid', init=init))
model.add(Activation('relu'))
model.add(Convolution1D(nb_filter=output_dim, filter_length=sequence_length, border_mode='same', init=init))
model.add(Activation('sigmoid'))
model.compile(optimizer=optimizer, loss=loss)
return model
def ufcnn_model(sequence_length=5000,
features=1,
nb_filter=150,
filter_length=5,
output_dim=1,
optimizer='adagrad',
loss='mse',
regression = True,
class_mode=None,
init="lecun_uniform",
mode='concat'):
if mode == 'concat':
return ufcnn_model_concat(sequence_length,
features,
nb_filter,
filter_length,
output_dim,
optimizer,
loss,
regression,
class_mode,
init)
else:
raise NotImplemented
def gen_cosine_amp(amp=100, period=25, x0=0, xn=50000, step=1, k=0.0001):
"""Generates an absolute cosine time series with the amplitude
exponentially decreasing
Arguments:
amp: amplitude of the cosine function
period: period of the cosine function
x0: initial x of the time series
xn: final x of the time series
step: step of the time series discretization
k: exponential rate
Ernst 20160301 from https://github.com/fchollet/keras/blob/master/examples/stateful_lstm.py
as a first test for the ufcnn
"""
cos = np.zeros(((xn - x0) * step, 1, 1))
print("Cos. Shape",cos.shape)
for i in range(len(cos)):
idx = x0 + i * step
cos[i, 0, 0] = amp * np.cos(idx / (2 * np.pi * period))
cos[i, 0, 0] = cos[i, 0, 0] * np.exp(-k * idx)
return cos
def train_and_predict_regression(model, sequence_length=5000, batch_size=128, epochs=5):
lahead = 1
cos = gen_cosine_amp(xn = sequence_length * 100)
expected_output = np.zeros((len(cos), 1, 1))
for i in range(len(cos) - lahead):
expected_output[i, 0] = np.mean(cos[i + 1:i + lahead + 1])
print('Training')
for i in range(epochs):
print('Epoch', i, '/', epochs)
model.fit({'input': cos, 'output': expected_output},
verbose=1,
nb_epoch=1,
shuffle=False,
batch_size=batch_size)
print('Predicting')
predicted_output = model.predict({'input': cos,}, batch_size=batch_size)
return {'model': model, 'predicted_output': predicted_output, 'expected_output': expected_output}
def treat_X_tradcom(mean):
""" treat some columns of the dataframe together when normalizing the dataframe:
col. 1, 2, 4 ... Mkt Price, Bid price, Ask Price
col 3 and 5 ... Ask & Bid price
"""
result = mean.copy()
#print("Result before max",result)
mkt = mean[1]
bid_px = mean[2]
ask_px = mean[4]
px_max=max(mkt,bid_px,ask_px)
result[1] = px_max
result[2] = px_max
result[4] = px_max
bid = mean[3]
ask = mean[5]
ba_max=max(bid,ask)
result[3] = ba_max
result[5] = ba_max
print("Result after max",result)
return result
def standardize_inputs(source, colgroups=None, mean=None, std=None):
"""
Standardize input features.
Groups of features could be listed in order to be standardized together.
source: Pandas.DataFrame or filename of csv file with features
colgroups: list of lists of groups of features to be standardized together (e.g. bid/ask price, bid/ask size)
returns Xdf ...Pandas.DataFrame, mean ...Pandas.DataFrame, std ...Pandas.DataFrame
"""
import itertools
import types
#if isinstance(source, types.StringTypes):
if isinstance(source, str):
Xdf = pd.read_csv(source, sep=" ", index_col = 0, header = None)
elif isinstance(source, pd.DataFrame):
Xdf = source
else:
raise TypeError
df = pd.DataFrame()
me = pd.DataFrame()
st = pd.DataFrame()
for colgroup in colgroups:
_df,_me,_st = standardize_columns(Xdf[colgroup])
# if mean & std are given, do not multiply with colgroup mean
if mean is not None and std is not None:
_df = Xdf[colgroup]
df = pd.concat([df, _df], axis=1)
me = pd.concat([me, _me])
st = pd.concat([st, _st])
print("In Group me")
print(me)
# _temp_list = list(itertools.chain.from_iterable(colgroups))
separate_features = [col for col in Xdf.columns if col not in list(itertools.chain.from_iterable(colgroups))]
if mean is None and std is None:
_me = Xdf[separate_features].mean()
_df = Xdf[separate_features].sub(_me)
_st = Xdf[separate_features].std()
_df = _df[separate_features].div(_st)
else:
_df = Xdf[separate_features]
df = pd.concat([df, _df], axis=1)
me = pd.concat([me, _me])
st = pd.concat([st, _st])
me = pd.Series(me[0])
st = pd.Series(st[0])
if mean is not None and std is not None:
df = df.sub(mean)
df = df.div(std)
return df, me, st
def standardize_columns(colgroup):
"""
Standardize group of columns together
colgroup: Pandas.DataFrame
returns: Pandas.DataFrames: Colum Group standardized, Mean of the colgroup, stddeviation of the colgroup
"""
_me = np.mean(colgroup.values.flatten())
centered = colgroup.sub(_me)
me = pd.DataFrame(np.full(len(colgroup.columns),_me), index=colgroup.columns)
_st = np.std(colgroup.values.flatten())
standardized = centered.div(_st)
st = pd.DataFrame(np.full(len(colgroup.columns),_st), index=colgroup.columns)
return standardized, me, st
def get_tradcom_normalization(filename, mean=None, std=None):
""" read in all X Data Frames and find mean and std of all columns...
"""
Xdf = pd.read_csv(filename, sep=" ", index_col = 0, header = None)
meanLoc = treat_X_tradcom(Xdf.mean())
print("Mean Loc")
print (meanLoc)
sys.stdout.flush()
if mean is None:
mean = meanLoc
mean = mean.to_frame().transpose()
meanDf=pd.concat([mean, meanLoc.to_frame().transpose()])
mean = meanDf.max()
print("Mean")
print (mean)
sys.stdout.flush()
stdLoc = treat_X_tradcom(Xdf.std())
print("Std Loc")
print (stdLoc)
sys.stdout.flush()
if std is None:
std = stdLoc
std = std.to_frame().transpose()
stdDf=pd.concat([std, stdLoc.to_frame().transpose()])
std = stdDf.max()
print("Std")
print (std)
sys.stdout.flush()
return(mean, std)
def prepare_tradcom_classification(training=True,
ret_type='df',
sequence_length=5000,
features_list=[1,2,3,4],
output_dim=3,
file_list=None,
mean=None,
std=None,
training_count=None):
"""
prepare the datasets for the trading competition. training determines which datasets will be read
returns: X and y: Pandas.DataFrames or np-Arrays storing the X - and y values for the fitting.
TODO: refactor - move file operations to separate functions, move stacking to function,
remove commented blocks and undesired print statements
"""
load_file = {'df': pd.read_pickle,
'stack': np.load,
'flat': np.load}
save_file = {'df': lambda filename, obj: obj.to_pickle(filename),
'stack': lambda filename, obj: np.save(filename, obj),
'flat': lambda filename, obj: np.save(filename, obj)}
print("Features_list",features_list)
Xdf = pd.DataFrame()
ydf = pd.DataFrame()
outfile = "training_data_large/save_"+str(len(file_list))
if training:
outfile += "_train"
else:
if training_count is None:
print("Training count needs to be given for testing")
raise ValueError
if mean is None or std is None:
print("Mean & std to be given for testing")
raise ValueError
outfile += "_"+str(training_count)+"_test"
filetype = '.pickle' if ret_type == 'df' else '.npy'
outfile_X = outfile+"_X" + filetype
outfile_y = outfile+"_y" + filetype
outfile_m = outfile+"_m" + filetype
outfile_s = outfile+"_s" + filetype
if os.path.isfile(outfile_X) and os.path.isfile(outfile_y):
X = load_file[ret_type](outfile_X)
y = load_file[ret_type](outfile_y)
#X = np.load(outfile_X)
#y = np.load(outfile_y)
if training:
mean = pd.Series(np.load(outfile_m))
std = pd.Series(np.load(outfile_s))
print("Found files ", outfile_X , " and ", outfile_y)
return (X,y,mean,std)
for filename in file_list:
signalfile = filename.replace('prod_data','signal')
signalfile = signalfile.replace('txt','csv')
print("Working on Input files: ",filename, ", ",signalfile)
if not os.path.isfile(signalfile):
print("File ",signalfile," is not existing. Aborting.")
sys.exit()
# get the date...
r = re.compile('^\D*(\d*)\D*', re.UNICODE)
date = re.search(r, filename).group(1)
print("Date is ",date)
date_ux = time.mktime(datetime.datetime.strptime(date,"%Y%m%d").timetuple())
# load dataframes and reindex
Xdf_loc = pd.read_csv(filename, sep=" ", header = None,)
# print(Xdf_loc.iloc[:3])
Xdf_loc['Milliseconds'] = Xdf_loc[0]
Xdf_loc['Date'] = | pd.to_datetime(date_ux*1000*1000*1000) | pandas.to_datetime |
# <NAME> Python 3.8 2020-08-18 - 2020-09-01 #
# zipgeo.py takes a .csv file with 6 fields and geocodes it
# *by zipcode* first; then via api and address.
# ("AnyID","address","city","state","zip","country")
import pandas as pd
from uszipcode import SearchEngine
import os
import sys
from keys import n_user, bing_key, oc_key
# import geocoding services / # NOMINATIM requires no key
from geopy.geocoders import ArcGIS, Bing, Nominatim, OpenCage
# initialize everything
arcgis = ArcGIS(timeout=100)
bing = Bing(bing_key,timeout=100)
nominatim = Nominatim(user_agent=n_user, timeout=100)
opencage = OpenCage(oc_key, timeout=100)
# choose and order your geocoders in preference order
geocoders = [bing, nominatim, arcgis]
search = SearchEngine(simple_zipcode=True)
# set input directory and filename
currentdir = os.getcwd()
filename = 'fibrtest.csv'
if len(sys.argv) - 1 >= 1:
filename = str(sys.argv[1])
in_file = currentdir + '/data/' + filename
timeout = 100
def zipgeo():
anydata = | pd.io.parsers.read_csv(in_file, dtype={'zip': 'str'}) | pandas.io.parsers.read_csv |
import pandas as pd
from joblib import Parallel, delayed
from epic.bigwig.create_bigwigs import _create_bigwig
def _trunks_flanks_valleys(cdf, trunk_diff, bin_size, distance_allowed):
dfs = []
for cid, cdf in cdf.groupby(((cdf.Bin - cdf.Bin.shift()).abs() > distance_allowed).cumsum()):
enriched_diff = cdf.TotalEnriched >= (cdf.TotalEnriched.max() - trunk_diff)
enriched_sum = (enriched_diff.shift(1) != enriched_diff).cumsum()
grpby = cdf.groupby(enriched_sum)
nb_groups = len(grpby)
max_value = cdf.TotalEnriched.max()
chromosome = cdf.Chromosome.head(1).iloc[0]
cluster_start = str(cdf.head(1).Bin.iloc[0])
cluster_end = str(cdf.tail(1).Bin.iloc[0] + bin_size - 1)
dfs2 = []
# group islands into trunks/flanks/valleys
for (i, rdf) in grpby:
is_trunk = rdf.head(1).TotalEnriched.iloc[0] >= (max_value - trunk_diff)
if not is_trunk and (i == 1 or i == (nb_groups)): # first or last, i.e. flank
status = "flank"
elif not is_trunk:
status = "valley"
else:
status = "trunk"
start = str(rdf.head(1).Bin.iloc[0])
end = str(rdf.tail(1).Bin.iloc[0] + bin_size - 1)
region_id = start + ":" + end
total_enriched = ",".join(rdf.TotalEnriched.astype(str))
bins = ",".join(rdf.Bin.astype(str))
# min_enriched = rdf.TotalEnriched.min()
# median_enriched = rdf.TotalEnriched.median()
gdf3 = pd.DataFrame(rdf.drop("TotalEnriched Chromosome Bin".split(), 1).sum()).T
gdf3.insert(0, "TotalEnriched", total_enriched)
gdf3.insert(0, "Bins", bins)
gdf3.insert(0, "MaxEnrichedCluster", max_value)
# gdf3.insert(0, "MinEnrichedRegion", min_enriched)
# gdf3.insert(0, "MedianEnrichedRegion", median_enriched)
gdf3.insert(0, "Start", int(start))
gdf3.insert(0, "End", int(end))
gdf3.insert(0, "RegionKind", status)
gdf3.insert(0, "RegionID", region_id)
gdf3.insert(0, "ClusterID", "_".join([chromosome, str(cid)]))
gdf3.insert(0, "Chromosome", chromosome)
dfs2.append(gdf3)
df2 = | pd.concat(dfs2, axis=0) | pandas.concat |
import pandas
import numpy as np
from cornellGrading import cornellQualtrics
import os
def genReadingAssignments(infile, outfile):
# generate reading assignments
# infile must be xlsx with two sheets (Readers & Canddiates)
# grab all input data
if isinstance(infile, str):
tmp = pandas.ExcelFile(infile, engine="openpyxl")
readers = tmp.parse("Readers")
candidates = tmp.parse("Candidates")
tmp.close()
readers = readers["Reader Names"].values
candidates = candidates["Candidate Names"].values
else:
readers = infile[0]
candidates = infile[1]
# Each person needs to be read by 2 readers
nperreader = int(np.round(len(candidates) * 2 / len(readers)))
# shuffle candidates and split by readers
clist = np.hstack((candidates.copy(), candidates.copy()))
np.random.shuffle(clist)
out = {}
for reader in readers:
tmp = clist[:nperreader]
while np.unique(tmp).size != tmp.size:
np.random.shuffle(clist)
tmp = clist[:nperreader]
out[reader] = tmp
clist = clist[nperreader:]
# check for unassigned
if len(clist) > 0:
for c in clist:
r = np.random.choice(readers, size=1)[0]
while c in out[r]:
r = np.random.choice(readers, size=1)[0]
out[r] = np.hstack((out[r], c))
# final consistency check
asslist = []
for key, val in out.items():
assert np.unique(val).size == val.size, "{} has non-unique list.".format(key)
asslist = np.hstack((asslist, val))
assert np.all(
np.unique(asslist) == np.sort(candidates)
), "Not all candidates assigned."
for c in candidates:
assert np.where(asslist == c)[0].size == 2, "{} not assigned twice.".format(c)
# write assignemnts out to disk
outdf = pandas.DataFrame()
for key, val in out.items():
outdf = pandas.concat([outdf, pandas.DataFrame({key: val})], axis=1)
ew = | pandas.ExcelWriter(outfile, options={"encoding": "utf-8"}) | pandas.ExcelWriter |
#!/usr/bin/env python
import json
import numpy as np
import pandas as pd
import os
import sys
import tarfile
import tempfile
import rdkit.Chem as rdC
import rdkit.Chem.Descriptors as rdCD
import atomsci.ddm.pipeline.model_pipeline as mp
import atomsci.ddm.pipeline.predict_from_model as pfm
import atomsci.ddm.pipeline.parameter_parser as parse
import atomsci.ddm.utils.model_retrain as mr
from atomsci.ddm.utils import llnl_utils
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import integrative_utilities
def clean(prefix='delaney-processed'):
"""
Clean test files
"""
for f in ['%s_curated.csv'%prefix,
'%s_curated_fit.csv'%prefix,
'%s_curated_external.csv'%prefix,
'%s_curated_predict.csv'%prefix]:
if os.path.isfile(f):
os.remove(f)
def exact_mol_weight(x):
'''
Given SMILES, return exact mol weight
'''
return rdCD.ExactMolWt(rdC.MolFromSmiles(x))
def num_atoms(x):
'''
Given SMILES, return the number of atoms
'''
return len(rdC.MolFromSmiles(x).GetAtoms())
def H1_curate():
"""
Curate dataset for model fitting
"""
if (not os.path.isfile('H1_curated.csv') and
not os.path.isfile('H1_curated_fit.csv') and
not os.path.isfile('H1_curated_external.csv')):
curated_df = | pd.read_csv('../../test_datasets/H1_std.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
# mixed floating and integer coexinst in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values('a', axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float('nan')])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([('a', value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = (DataFrame(data)
.sort_values((11, 21))
.sort_values(('a', value), axis=1))
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(3, 1\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(2, 2\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
msg = ("If using all scalar "
"values, you must pass "
"an index")
with pytest.raises(ValueError, match=msg):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame({col: dict(compat.iteritems(val))
for col, val in compat.iteritems(data)})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame['B'].dtype == np.float64
assert frame['A'].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame['B'].dtype == np.object_
assert frame['A'].dtype == np.float64
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame['A'].dtype == np.object_
assert frame['B'].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
assert isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(result, expected, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
# list of periods
df = pd.DataFrame({'a': a.astype(object).tolist(),
'b': b.astype(object).tolist()})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
def test_nested_dict_frame_constructor(self):
rng = pd.period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
# mat: 2d matrix with shape (3, 2) to input. empty - makes sized
# objects
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
assert len(frame.index) == 3
assert len(frame.columns) == 1
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, index=[1, 2])
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
assert len(frame.index) == 0
frame = DataFrame(empty((3, 0)))
assert len(frame.columns) == 0
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert 1.0 == frame['A'][1]
assert 2.0 == frame['C'][2]
# what is this even checking??
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
assert frame.values.dtype == np.float64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'][1]
assert 2 == frame['C'][2]
# masked np.datetime64 stays (use NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert isna(frame).values.all()
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'].view('i8')[1]
assert 2 == frame['C'].view('i8')[2]
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
assert frame.values.dtype == object
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert frame['A'][1] is True
assert frame['C'][2] is False
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill the comb
comb = {k: (v.filled() if hasattr(v, 'filled') else v)
for k, v in comb}
expected = DataFrame(comb, columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result, expected)
# specify columns
expected = DataFrame(comb, columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
expected = DataFrame(comb, columns=names, index=[1, 2])
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
@pytest.mark.parametrize("data, index, columns, dtype, expected", [
(None, lrange(10), ['a', 'b'], object, np.object_),
(None, None, ['a', 'b'], 'int64', np.dtype('int64')),
(None, lrange(10), ['a', 'b'], int, np.dtype('float64')),
({}, None, ['foo', 'bar'], None, np.object_),
({'b': 1}, lrange(10), list('abc'), int, np.dtype('float64'))
])
def test_constructor_dtype(self, data, index, columns, dtype, expected):
df = DataFrame(data, index, columns, dtype)
assert df.values.dtype == expected
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
assert df['int'].dtype == np.int64
assert df['bool'].dtype == np.bool_
assert df['float'].dtype == np.float64
assert df['complex'].dtype == np.complex128
assert df['object'].dtype == np.object_
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
tm.assert_frame_equal(df, exp)
with pytest.raises(ValueError, match='must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
tm.assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
assert dm.values.ndim == 2
arr = randn(0)
dm = DataFrame(arr)
assert dm.values.ndim == 2
assert dm.values.ndim == 2
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
assert dm.values.shape == (10, 2)
dm = DataFrame(columns=['A', 'B'])
assert dm.values.shape == (0, 2)
dm = DataFrame(index=np.arange(10))
assert dm.values.shape == (10, 0)
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with pytest.raises(ValueError, match='cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
'B': np.ones(10, dtype=np.float64)},
index=np.arange(10))
assert len(dm.columns) == 2
assert dm.values.dtype == np.float64
def test_constructor_empty_list(self):
df = DataFrame([], index=[])
expected = DataFrame(index=[])
tm.assert_frame_equal(df, expected)
# GH 9939
df = DataFrame([], columns=['A', 'B'])
expected = DataFrame({}, columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
# Empty generator: list(empty_gen()) == []
def empty_gen():
return
yield
df = DataFrame(empty_gen(), columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
df = DataFrame(data=[[1, 'a'], [2, 'b']], columns=["num", "str"])
assert is_integer_dtype(df['num'])
assert df['str'].dtype == np.object_
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({0: np.arange(10)})
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
# collections.Squence like
class DummyContainer(compat.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
def __len__(self, n):
return self._lst.__len__()
lst_containers = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]
columns = ["num", "str"]
result = DataFrame(lst_containers, columns=columns)
expected = DataFrame([[1, 'a'], [2, 'b']], columns=columns)
tm.assert_frame_equal(result, expected, check_dtype=False)
# GH 4297
# support Array
import array
result = DataFrame({'A': array.array('i', range(10))})
expected = DataFrame({'A': list(range(10))})
tm.assert_frame_equal(result, expected, check_dtype=False)
expected = DataFrame([list( | range(10) | pandas.compat.range |
import streamlit as st
import pandas as pd
import numpy as np
import plotly.express as px
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
st.title("Sentiment Analysis of Tweets about US Airlines")
st.sidebar.title("Sentiment Analysis of Tweets about US Airlines")
st.markdown("This application is a Streamlit dashboard to analyze the sentiment of Tweets 🐦🐦")
st.sidebar.markdown("This application is a Streamlit dashboard to analyze the sentiment of Tweets 🐦🐦")
#Hide Footer
hide_footer_style = """
<style>
.reportview-container .main footer {visibility: hidden;}
"""
st.markdown(hide_footer_style, unsafe_allow_html=True)
#Hide Developer Options
hide_menu_style = """
<style>
#MainMenu {visibility: hidden;}
</style>
"""
st.markdown(hide_menu_style, unsafe_allow_html=True)
DATA_URL = ('Tweets.csv')
@st.cache(persist=True)
def load_data():
data = | pd.read_csv(DATA_URL) | pandas.read_csv |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.