| prompt (string, length 19 – 1.03M) | completion (string, length 4 – 2.12k) | api (string, length 8 – 90) |
|---|---|---|
#Calculate the Linear Regression between Market Caps
import pandas as pd
import numpy as np
import datetime as date
today = date.datetime.now().strftime('%Y-%m-%d')
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import plotly.io as pio
pio.renderers.default = "browser"
from checkonchain.general.coinmetrics_api import *
from checkonchain.btconchain.btc_add_metrics import *
from checkonchain.dcronchain.dcr_add_metrics import *
from checkonchain.general.regression_analysis import *
#Pull Coinmetrics Data for Coins
BTC = btc_add_metrics().btc_coin()
LTC = Coinmetrics_api('ltc',"2011-10-07",today).convert_to_pd()
BCH = Coinmetrics_api('bch',"2017-08-01",today).convert_to_pd()
DAS = Coinmetrics_api('dash',"2014-01-19",today).convert_to_pd()
DCR = dcr_add_metrics().dcr_coin()
XMR = Coinmetrics_api('xmr',"2014-04-18",today).convert_to_pd()
ZEC = Coinmetrics_api('zec',"2016-10-28",today).convert_to_pd()
ETH = Coinmetrics_api('eth',"2015-07-30",today).convert_to_pd()
XRP = Coinmetrics_api('xrp',"2013-01-01",today).convert_to_pd()
#Reduce dataset down to date and a single metric
metric="CapMrktCurUSD"
BTC2 =BTC[['date',metric]]
LTC2 =LTC[['date',metric]]
BCH2 =BCH[['date',metric]]
DAS2 =DAS[['date',metric]]
DCR2 =DCR[['date',metric]]
XMR2 =XMR[['date',metric]]
ZEC2 =ZEC[['date',metric]]
ETH2 =ETH[['date',metric]]
#XRP2 =XRP[['date',metric]]
#Rename all columns
prefix = 'Cap_'
BTC2.columns =['date',prefix+'BTC']
LTC2.columns =['date',prefix+'LTC']
BCH2.columns =['date',prefix+'BCH']
DAS2.columns=['date',prefix+'DAS']
DCR2.columns =['date',prefix+'DCR']
XMR2.columns =['date',prefix+'XMR']
ZEC2.columns =['date',prefix+'ZEC']
ETH2.columns =['date',prefix+'ETH']
#XRP2.columns =['date',prefix+'XRP'] #XRP2 is commented out above, so its rename is skipped as well
#Compile into a single dataframe with all coins
BTC_data = BTC2.dropna(axis=0)
BTC_data = pd.merge_asof(BTC_data,LTC2,on='date')
BTC_data = pd.merge_asof(BTC_data,BCH2,on='date')
BTC_data = pd.merge_asof(BTC_data,DAS2,on='date')
BTC_data = pd.merge_asof(BTC_data,DCR2,on='date')
BTC_data = pd.merge_asof(BTC_data,XMR2,on='date')
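#The remaining coins can be folded in the same way. A minimal sketch (assuming the same
#column-naming convention as above, and using numpy directly instead of the project's
#regression_analysis helpers, whose API isn't shown here) to finish the merge chain and
#fit a simple log-log regression between two market caps:
BTC_data = pd.merge_asof(BTC_data,ZEC2,on='date')
BTC_data = pd.merge_asof(BTC_data,ETH2,on='date')
caps = BTC_data.dropna(subset=['Cap_BTC','Cap_DCR'])
slope, intercept = np.polyfit(np.log(caps['Cap_BTC']), np.log(caps['Cap_DCR']), deg=1)
print('log-log fit: slope=%.3f, intercept=%.3f' % (slope, intercept))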
import os
import pytest
from shapely.geometry import LineString
from network_wrangler import haversine_distance
from network_wrangler import create_unique_shape_id
from network_wrangler import offset_location_reference
slug_test_list = [
{"text": "I am a roadway", "delim": "_", "answer": "i_am_a_roadway"},
{"text": "I'm a roadway", "delim": "_", "answer": "im_a_roadway"},
{"text": "I am a roadway", "delim": "-", "answer": "i-am-a-roadway"},
{"text": "I am a roadway", "delim": "", "answer": "iamaroadway"},
]
@pytest.mark.travis
@pytest.mark.parametrize("slug_test", slug_test_list)
def test_get_slug(request, slug_test):
print("\n--Starting:", request.node.name)
from network_wrangler.utils import make_slug
slug = make_slug(slug_test["text"], delimiter=slug_test["delim"])
print("From: {} \nTo: {}".format(slug_test["text"], slug))
print("Expected: {}".format(slug_test["answer"]))
assert slug == slug_test["answer"]
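# For context, a minimal sketch of the behaviour these cases pin down (a hypothetical
# re-implementation, not the actual network_wrangler.utils.make_slug): lower-case the text,
# drop apostrophes and join the words with the requested delimiter.
import re

def make_slug_sketch(text: str, delimiter: str = "_") -> str:
    return re.sub(r"\s+", delimiter, text.lower().replace("'", ""))

# e.g. make_slug_sketch("I'm a roadway") -> "im_a_roadway"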
@pytest.mark.travis
def test_time_convert(request):
print("\n--Starting:", request.node.name)
time_tests = [
(("00:00:00", "00:00:10"), (0, 10)),
(("0:00", "0:10:00"), (0, 600)),
(("01:02:03", "01:02:23"), (3723, 3743)),
(("1:02", "1:02:13"), (3720, 3733)),
(("25:24:23", "25:24:33"), (91463, 91473)),
(("250:24:23", "250:24:25"), (901463, 901465)),
]
from pandas import DataFrame
df = DataFrame(time_tests, columns=["time", "time_results"])
print("Original Time Series", df)
from network_wrangler.utils import parse_time_spans
df["time"] = df["time"].apply(parse_time_spans)
print("Result Time Series", df)
from pandas.testing import assert_series_equal
assert_series_equal(df["time"], df["time_results"], check_names=False)
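# For reference, a minimal sketch of the conversion these tests exercise (a hypothetical
# stand-in, not the actual network_wrangler.utils.parse_time_spans): each "[H]H:MM[:SS]"
# string - including hour values above 24 - becomes a total number of seconds.
def parse_time_span_sketch(span):
    def to_seconds(time_str):
        parts = [int(p) for p in time_str.split(":")]
        seconds = parts[2] if len(parts) == 3 else 0
        return parts[0] * 3600 + parts[1] * 60 + seconds
    start, end = span
    return (to_seconds(start), to_seconds(end))

# e.g. parse_time_span_sketch(("25:24:23", "25:24:33")) -> (91463, 91473)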
import json
import os
import numpy as np
import pandas as pd
import sqlalchemy
import logging
# Constants / definitions
# Database constants
SENSOR_LOG_TABLE = 'firefighter_sensor_log'
ANALYTICS_TABLE = 'firefighter_status_analytics'
FIREFIGHTER_ID_COL = 'firefighter_id'
# mySQL needs to be told the firefighter_id column type explicitly in order to generate correct SQL.
FIREFIGHTER_ID_COL_TYPE = sqlalchemy.types.VARCHAR(length=20)
TIMESTAMP_COL = 'timestamp_mins'
# Normally the 'analytics' LED color will be the same as the 'device' LED color, but in a disconnected scenario, they
# may be different. We want to capture both.
STATUS_LED_COL = 'analytics_status_LED'
TWA_SUFFIX = '_twa'
GAUGE_SUFFIX = '_gauge'
MIN_SUFFIX = '_%smin'
GREEN = 1
YELLOW = 2
RED = 3
RANGE_EXCEEDED = -1
# Cache Constants
DATA_START = 'data_start'
DATA_END = 'data_end'
WINDOW_START = 'window_start'
WINDOW_END = 'window_end'
OVERLAP_MINS = 'overlap_mins'
PROPORTION_OF_WINDOW = 'proportion_of_window'
# Status constants - percentages that define green/red status (yellow is the name of a configuration parameter)
GREEN_RANGE_START = 0
RED_RANGE_START = 99
RED_RANGE_END = RED_RANGE_START * 1000 # Can be arbitrarily large, as the next range bound is np.inf.
# Configuration constants - for reading values from config files.
DEFAULT_CONFIG_FILENAME = 'prometeo_config.json'
WINDOWS_AND_LIMITS_PROPERTY = 'windows_and_limits'
WINDOW_MINS_PROPERTY = 'mins'
SUPPORTED_GASES_PROPERTY = 'supported_gases'
YELLOW_WARNING_PERCENT_PROPERTY = 'yellow_warning_percent'
SAFE_ROUNDING_FACTORS_PROPERTY = 'safe_rounding_factors'
GAS_LIMITS_PROPERTY = 'gas_limits'
AUTOFILL_MINS_PROPERTY = 'autofill_missing_sensor_logs_up_to_N_mins'
# Sensor range limitations. These are intentionally hard-coded and not configured. They're used
# to 1. Cross-check that the PPM limits configured for each time-window respects the sensitivity
# range of the sensors and 2. Check when sensor values have gone out of range.
SENSOR_RANGE_PPM = {
'carbon_monoxide' : {'min' : 1 , 'max' : 1000}, # CJMCU-4541 / MICS-4514 Sensor
'nitrogen_dioxide' : {'min' : 0.05, 'max' : 10 } # CJMCU-4541 / MICS-4514 Sensor
}
class GasExposureAnalytics(object):
# Validate the configuration - log helpful error messages if invalid.
def _validate_config(self, config_filename) :
valid_config = True # "Trust, but verify" ;-)
critical_config_issues = []
# Check that all configured windows cover the same set of gases (i.e. that the first window covers the same set of gases as all other windows)
# Note: Set operations are valid for .keys() views [https://docs.python.org/3.8/library/stdtypes.html#dictionary-view-objects]
mismatched_configs_idx = [idx for idx, window in enumerate(self.WINDOWS_AND_LIMITS) if (window[GAS_LIMITS_PROPERTY].keys() != self.WINDOWS_AND_LIMITS[0][GAS_LIMITS_PROPERTY].keys())]
mismatched_configs = []
if mismatched_configs_idx :
mismatched_configs = [self.WINDOWS_AND_LIMITS[0]]
mismatched_configs += [self.WINDOWS_AND_LIMITS[idx] for idx in mismatched_configs_idx]
valid_config = False
message = "%s : The '%s' for every time-window must cover the same set of gases - but these have mis-matches %s" \
% (config_filename, GAS_LIMITS_PROPERTY, mismatched_configs)
self.logger.critical(message)
critical_config_issues += [message]
# Check that the supported gases are covered by the configuration
if not set(self.SUPPORTED_GASES).issubset(self.WINDOWS_AND_LIMITS[0][GAS_LIMITS_PROPERTY].keys()) :
valid_config = False
message = "%s : One or more of the '%s' %s has no limits defined in '%s' %s." \
% (config_filename, SUPPORTED_GASES_PROPERTY, str(self.SUPPORTED_GASES), WINDOWS_AND_LIMITS_PROPERTY, str(list(self.WINDOWS_AND_LIMITS[0][GAS_LIMITS_PROPERTY].keys())))
self.logger.critical(message)
critical_config_issues += [message]
# For each supported gas, check that limits PPM configuration is within that sensor's range.
# The limits must be less than the range of the sensor. For best reporting, the range should be at least
# two or more times the upper limit and a warning will be produced if this is not the case. To illustrate
# why: Say a firefighter experiences [30mins at 1ppm. Then 30mins at 25ppm] and the 1hr limit is 10ppm. Then
# one hour into the fire, this firefighter has experienced an average of 13ppm per hour, well over the 10ppm
# limit - their status should be ‘Red’. However, if the range of the sensor is 0-10ppm, then the command center
# would actually see their status as *Range Exceeded* (not Red, Yellow or Green), which is not very helpful.
# It's essentially saying "this firefighter's average exposure is unknown - it may be OK or it may not.
# Prometeo can't tell, because the sensors aren't sensitive enough for these conditions". For the firefighter
# to get an accurate report, this sensor would need to have a range of at least 25ppm (and likely more),
# so that exposure could be accurately measured and averaged to 13ppm.
# (Note: the sensor returns *Range Exceeded* to prevent incorrect PPM averages from being calculated.
# e.g. in the above scenario, we do not want to incorrectly calculate an average of 5.5ppm (Green) from a
# sensor showing 30mins at 1ppm and 30mins at 10ppm, the max the sensor can 'see').
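# Worked example of the scenario above (illustrative numbers only): the 1-hour time-weighted
# average would be (30min x 1ppm + 30min x 25ppm) / 60min = 13ppm, which a 0-10ppm sensor
# could never report even though it exceeds the 10ppm limit.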
for gas in self.SUPPORTED_GASES :
limits = [window[GAS_LIMITS_PROPERTY][gas] for window in self.WINDOWS_AND_LIMITS]
if ( (min(limits) < SENSOR_RANGE_PPM[gas]['min']) or (max(limits) > SENSOR_RANGE_PPM[gas]['max']) ) :
valid_config = False
message = ("%s : One or more of the '%s' configurations %s is incompatible with the range of the '%s' sensor (min: %s, max: %s).") \
% (config_filename, GAS_LIMITS_PROPERTY, limits, gas, SENSOR_RANGE_PPM[gas]['min'], SENSOR_RANGE_PPM[gas]['max'])
self.logger.critical(message)
critical_config_issues += [message]
if ((max(limits)*2) > SENSOR_RANGE_PPM[gas]['max']) :
# This is valid, but not optimal. Produce a warning.
message = ("%s : One or more of the '%s' configurations %s is very close to the range of the '%s' sensor (min: %s, max: %s)." +
"\nSensors shoud have a much larger range than the limits - e.g. 2x at a minimum .") \
% (config_filename, GAS_LIMITS_PROPERTY, limits, gas, SENSOR_RANGE_PPM[gas]['min'], SENSOR_RANGE_PPM[gas]['max'])
self.logger.warning(message)
# Check there's a valid definition of yellow - should be a percentage between 1 and 99
if not ( (self.YELLOW_WARNING_PERCENT > 0) and (self.YELLOW_WARNING_PERCENT < 100) ) :
valid_config = False
message = "%s : '%s' should be greater than 0 and less than 100 (percent), but is %s" \
% (config_filename, YELLOW_WARNING_PERCENT_PROPERTY, self.YELLOW_WARNING_PERCENT)
self.logger.critical(message)
critical_config_issues += [message]
# For each supported gas, check there's a valid factor defined for safe rounding - should be a positive integer.
for gas in self.SUPPORTED_GASES :
if ( (not isinstance(self.SAFE_ROUNDING_FACTORS[gas], int)) or (not (self.SAFE_ROUNDING_FACTORS[gas] >= 0) ) ) :
valid_config = False
message = "%s : '%s' for '%s' should be a positive integer, but is %s" \
% (config_filename, SAFE_ROUNDING_FACTORS_PROPERTY, gas, self.SAFE_ROUNDING_FACTORS[gas])
self.logger.critical(message)
critical_config_issues += [message]
# Check the max number of auto-filled minutes is a positive integer.
if ( (not isinstance(self.AUTOFILL_MINS, int)) or (not (self.AUTOFILL_MINS >= 0) ) ) :
valid_config = False
message = "%s : '%s' should be a positive integer, but is %s" \
% (config_filename, AUTOFILL_MINS_PROPERTY, self.AUTOFILL_MINS)
self.logger.critical(message)
critical_config_issues += [message]
elif (self.AUTOFILL_MINS > 20) :
# Recommended (but not enforced) to be less than 20 mins.
warning = "%s : '%s' is not recommended to be more than 20 minutes, but is %s" \
% (config_filename, AUTOFILL_MINS_PROPERTY, self.AUTOFILL_MINS)
self.logger.warning(warning)
assert valid_config, ''.join([('\nCONFIG ISSUE (%s) : %s' % (idx+1, issue)) for idx, issue in enumerate(critical_config_issues)])
return
# Create an instance of the Prometeo Gas Exposure Analytics, initialising it with a data source and an appropriate
# configuration file.
# list_of_csv_files : Use the supplied CSV files as sensor data instead of the Prometeo DB, so that tests can test
# against known data. This option should not be used at runtime.
# config_filename : Allow overriding TWA time-window configurations, so that tests can test against a known
# configuration. This option should not be used at runtime, as prometeo uses a relational
# database and the analytics table schema is static, not dynamic.
def __init__(self, list_of_csv_files=None, config_filename=DEFAULT_CONFIG_FILENAME):
# Get a logger and keep its name in sync with this filename
self.logger = logging.getLogger(os.path.basename(__file__))
# Get configuration
with open(os.path.join(os.path.dirname(__file__), config_filename)) as file:
self.CONFIGURATION = json.load(file)
file.close()
# WINDOWS_AND_LIMITS : A list detailing every supported time-window over which to calculate the time-weighted
# average (label, number of minutes and gas limit gauges for each window) - e.g. from NIOSH, ACGIH, EU-OSHA.
self.WINDOWS_AND_LIMITS = self.CONFIGURATION[WINDOWS_AND_LIMITS_PROPERTY]
# SUPPORTED_GASES : The list of gases that Prometeo devices currently have sensors for.
# To automatically enable analytics for new gases, simply add them to this list.
self.SUPPORTED_GASES = self.CONFIGURATION[SUPPORTED_GASES_PROPERTY]
# YELLOW_WARNING_PERCENT : yellow is a configurable percentage - the status LED will go yellow when any gas
# reaches that percentage (e.g. 80%) of the exposure limit for any time-window.
self.YELLOW_WARNING_PERCENT = self.CONFIGURATION[YELLOW_WARNING_PERCENT_PROPERTY]
# SAFE_ROUNDING_FACTORS : Why round? Because each gas has a number of decimal places that are meaningful and
# beyond which extra digits are trivial. Rounding protects unit tests from brittleness due to these trivial
# differences in computations. If a value changes by more than 1/10th of the smallest unit of the
# most-sensitive gas, then we want to know (e.g. fail a test), any less than that and the change is negligible.
# e.g.: At time of writing, Carbon Monoxide had a range of 0 to 420ppm and Nitrogen Dioxide, had a range
# of 0.1 to 10ppm. So the safe rounding factors for these gases would be 1 decimal place for CO and 2 for NO2.
self.SAFE_ROUNDING_FACTORS = self.CONFIGURATION[SAFE_ROUNDING_FACTORS_PROPERTY]
# AUTOFILL_MINS: A buffer of N mins (e.g. 10 mins) during which the system will assume any missing data just
# means a device is disconnected and the data is temporarily delayed. It will 'treat' the
# missing data (e.g. by substituting an average). After this number of minutes of missing
# sensor data, the system will stop estimating and assume the firefighter has powered off their
# device and left the event.
self.AUTOFILL_MINS = self.CONFIGURATION[AUTOFILL_MINS_PROPERTY]
# Cache of 'earliest and latest observed data points for each firefighter'. Necessary for the AUTOFILL_MINS
# functionality.
self._FF_TIME_SPANS_CACHE = None
# Validate the configuration - log helpful error messages if invalid.
self._validate_config(config_filename)
# db identifiers
SQLALCHEMY_DATABASE_URI = ("mysql+pymysql://"+os.getenv('MARIADB_USERNAME')
+":"+os.getenv("MARIADB_PASSWORD")
+"@"+os.getenv("MARIADB_HOST")
+":"+str(os.getenv("MARIADB_PORT"))
+"/prometeo")
metadata=sqlalchemy.MetaData(SQLALCHEMY_DATABASE_URI)
self._db_engine = metadata.bind
# By default, the analytics will run from a database.
self._from_db = True
# For testing, the analytics can also be run from a set of CSV files.
if list_of_csv_files is not None :
self._from_db = False
self.logger.info("Taking sensor readings *** from CSV ***")
# Allow clients to pass either single (non-list) CSV file path, or a list of CSV file paths
if not isinstance(list_of_csv_files, list) : list_of_csv_files = [list_of_csv_files]
dataframes = []
for csv_file in list_of_csv_files :
df = pd.read_csv(csv_file, engine='python', parse_dates=[TIMESTAMP_COL], index_col = TIMESTAMP_COL)
assert FIREFIGHTER_ID_COL in df.columns, "CSV file is missing key column '%s'" % (FIREFIGHTER_ID_COL)
dataframes.append(df)
# Merge the dataframes (also pre-sort, to speed up test runs and enable debug slicing on the index)
self._sensor_log_from_csv_df = pd.concat(dataframes).sort_index()
# Query the last N hours of sensor logs, where N is the longest configured time-window length. As with all methods
# in this class, sensor data is assumed to be keyed on the floor(minute) timestamp when it was captured - i.e.
# a sensor value captured at 12:00:05 is stored against a timestamp of 12:00:00.
# block_end : The datetime from which to look back when reading the sensor logs (e.g. 'now').
def _get_block_of_sensor_readings(self, block_end) :
# Get the start of the time block to read - i.e. the end time, minus the longest window we're interested in.
# Add 1 min 'correction' to the start times because both SQL 'between' and Pandas slices are *in*clusive and we
# don't want (e.g.) 61 samples in a 60 min block.
one_minute = pd.Timedelta(minutes = 1)
longest_block = max([window['mins'] for window in self.WINDOWS_AND_LIMITS])
block_start = block_end - pd.Timedelta(minutes = longest_block) + one_minute # e.g. 8hrs ago
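# e.g. with block_end = 12:00 and an 8hr (480 min) longest window, block_start becomes 04:01,
# so the inclusive range [04:01 .. 12:00] holds exactly 480 one-minute samples.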
message = ("Reading sensor log in range [%s to %s]" % (block_start.isoformat(), block_end.isoformat()))
if not self._from_db : message += " (local CSV file mode)"
self.logger.info(message)
sensor_log_df = pd.DataFrame()
ff_time_spans_df = None
if self._from_db :
# Get from database with a non-blocking read (this type of SELECT is non-blocking on
# MariaDB/InnoDB - ref: https://dev.mysql.com/doc/refman/8.0/en/innodb-consistent-read.html)
sql = ("SELECT * FROM " + SENSOR_LOG_TABLE + " where " + TIMESTAMP_COL
+ " between '" + block_start.isoformat() + "' and '" + block_end.isoformat() + "'")
sensor_log_df = (pd.read_sql_query(sql, self._db_engine,
parse_dates=[TIMESTAMP_COL], index_col=TIMESTAMP_COL))
else :
# Get from local CSV files - useful when testing (e.g. using known sensor test data)
sensor_log_df = self._sensor_log_from_csv_df.loc[block_start:block_end,:].copy()
if (sensor_log_df.empty) :
self.logger.info("No 'live' sensor records found in range [%s to %s]"
% (block_start.isoformat(), block_end.isoformat()))
# Reset the cache of 'earliest and latest observed data points for each firefighter'.
# If we didn't do this, the firefighters' data time span would stretch over multiple days. We want
# it to reset once there's been no data within the longest configured time-window.
self._FF_TIME_SPANS_CACHE = None
else :
# sort is required for several operations, e.g. slicing, re-sampling, etc. Do it once, up-front.
sensor_log_df = sensor_log_df.sort_index()
# Update the cache of 'earliest and latest observed data points for each firefighter'. As firefighters come
# online (and as data comes in after an outage), each new chunk may contain records for firefighters that
# are not yet captured in the cache.
# [DATA_START]: the earliest observed data point for each firefighter - grows as firefighters
# join an event (and different for each Firefighter)
# [DATA_END] : the latest observed data point for each firefighter so far - a moving target, but
# fixed for *this* chunk of data (and potentially different for each Firefighter)
ff_time_spans_in_this_block_df = (pd.DataFrame(sensor_log_df.reset_index()
.groupby(FIREFIGHTER_ID_COL)
[TIMESTAMP_COL].agg(['min', 'max']))
.rename(columns = {'min':DATA_START, 'max':DATA_END}))
if self._FF_TIME_SPANS_CACHE is None :
# First-time cache creation
self._FF_TIME_SPANS_CACHE = pd.DataFrame(ff_time_spans_in_this_block_df)
else :
# Update the earliest and latest observed timestamp for each firefighter.
# note: use pd.merge() not pd.concat() - concat drops the index names causing later steps to crash
self._FF_TIME_SPANS_CACHE = pd.merge(np.fmin(ff_time_spans_in_this_block_df.loc[:, DATA_START],
self._FF_TIME_SPANS_CACHE.loc[:, DATA_START]),
np.fmax(ff_time_spans_in_this_block_df.loc[:, DATA_END],
self._FF_TIME_SPANS_CACHE.loc[:, DATA_END]),
how='outer', on=FIREFIGHTER_ID_COL)
# Take a working copy of the cache, so we can manipulate it during analytic processing.
ff_time_spans_df = self._FF_TIME_SPANS_CACHE.copy()
# Add a buffer of N mins (e.g. 10 mins) to the 'data end'. The system will assume up to this
# many minutes of missing data just means a device is disconnected and the data is temporarily delayed.
# It will 'treat' the missing data (e.g. by substituting an average). After this number of minutes of
# missing sensor data, the system will stop estimating and assume the firefighter has powered
# off their device and left the event.
ff_time_spans_df.loc[:, DATA_END] += pd.Timedelta(minutes = self.AUTOFILL_MINS)
return sensor_log_df, ff_time_spans_df
# Given up to 8 hours of data, calculates the time-weighted average and limit gauge (%) for all firefighters, for
# all supported gases, for all configured time periods.
# sensor_log_chunk_df: A time-indexed dataframe covering up to 8 hours of sensor data for all firefighters,
# for all supported gases. Requires firefighterID and supported gases as columns.
# ff_time_spans_df : A dataset containing the 'earliest and latest observed data points for each
# firefighter'. Necessary for the AUTOFILL_MINS functionality.
# timestamp_key : The minute-quantized timestamp key for which to calculate time-weighted averages.
def _calculate_TWA_and_gauge_for_all_firefighters(self, sensor_log_chunk_df, ff_time_spans_df, timestamp_key) :
# We'll be processing the windows in descending order of length (mins)
windows_in_desc_mins_order = sorted([w for w in self.WINDOWS_AND_LIMITS], key=lambda w: w['mins'], reverse=True)
longest_window_mins = windows_in_desc_mins_order[0]['mins'] # topmost element in the ordered windows
# Get sensor records for the longest time-window. Note: we add 1 min to the start-time, because slicing
# is *in*clusive and we don't want N+1 samples in an N min block of sensor records.
one_minute = pd.Timedelta(minutes = 1)
longest_window_start = timestamp_key - pd.Timedelta(minutes=longest_window_mins) + one_minute
longest_window_df = sensor_log_chunk_df.loc[longest_window_start:timestamp_key, :]
# It's essential to know when a sensor value can't be trusted - i.e. when it has exceeded its range (signalled
# by the value '-1'). When this happens, we need to replace that sensor's value with something that
# both (A) identifies it as untrustworthy and (B) also causes calculated values like TWAs and Gauges to be
# similarly identified. That value is infinity (np.inf). To illustrate why: Say a firefighter experiences
# [30mins at 1ppm. Then 30mins at 25ppm] and the 1 hour limit is 10ppm. Then 1 hour into the fire, this
# firefighter has experienced an average of 13ppm per hour, well over the 10ppm limit - their status should be
# ‘Red’. However, if the range of the sensor were 0-10ppm, then at best, the sensor could only provide [30mins
# at 1ppm. Then 30mins at 10ppm], averaging to 5.5ppm per hour which is *Green* (not Red or even Yellow). To
# prevent this kind of under-reporting, the device sends '-1' to indicate that the sensor has exceeded its
# range and we substitute that with infinity (np.inf), which then flows correctly through the time-weighted
# average calculations.
longest_window_df.loc[:, self.SUPPORTED_GASES] = (longest_window_df.loc[:, self.SUPPORTED_GASES].mask(
cond=(longest_window_df.loc[:, self.SUPPORTED_GASES] < 0),
other=np.inf))
# To calculate time-weighted averages, every time-slice in the window is quantized ('resampled') to equal
# 1-minute lengths. (it can be done with 'ragged' / uneven time-slices, but the code is more complex and
# hence error-prone, so we use 1-min quantization as standard here). The system is expected to provide data
# that meets this requirement, so this step is defensive. We don't backfill missing entries here.
#
# (note: the double sort_index() here looks odd, but it seems both necessary and fairly low cost:
# 1. Resampling requires the original index to be sorted, reasonably enough. 2. The resampled dataframe
# can't be sliced by date index unless it's sorted too. However these 'extra' sorts don't seem to carry
# a noticeable performance penalty, possibly since the original dataframe is sorted to begin with)
resample_timedelta = pd.Timedelta(minutes = 1)
longest_window_cleaned_df = (longest_window_df
.sort_index()
.groupby(FIREFIGHTER_ID_COL, group_keys=False)
.resample(resample_timedelta).nearest(limit=1)
.sort_index())
# Before doing the main work, save a copy of the data for each device at 'timestamp_key' *if* available
# (may not be, depending on dropouts). Note: this is the first of several chunks of data that we will
# later merge on the timestamp_key.
latest_device_data = []
if (timestamp_key in longest_window_cleaned_df.index) :
# If there's data for a device at 'timestamp_key', get a copy of it. While some of it is used for
# calculating average exposures (e.g. gases, times, firefighter_id), much of it is not (e.g. temperature,
# humidity, battery level) and this data needs to be merged back into the final dataframe.
latest_sensor_readings_df = (longest_window_cleaned_df
.loc[[timestamp_key],:] # the current minute
.reset_index()
.set_index([FIREFIGHTER_ID_COL, TIMESTAMP_COL]) # key to merge on at the end
)
# Store in a list for merging later on
latest_device_data = [latest_sensor_readings_df]
else :
message = "No 'live' sensor records found at timestamp %s. Calculating Time-Weighted Averages anyway..."
self.logger.info(message % (timestamp_key.isoformat()))
# Now the main body of work - iterate over the time windows, calculate their time-weighted averages & limit
# gauge percentages. Then merge all of these bits of info back together (with the original device data) to
# form the overall analytic results dataframe.
calculations_for_all_windows = [] # list of results from each window, for merging at the end
for time_window in windows_in_desc_mins_order :
# Get the relevant slice of the data for this specific time-window, for all supported gas sensor readings
# (and excluding all other columns)
window_mins = time_window['mins']
window_length = pd.Timedelta(minutes = window_mins)
window_start = timestamp_key - window_length + one_minute
analytic_cols = self.SUPPORTED_GASES + [FIREFIGHTER_ID_COL]
window_df = longest_window_cleaned_df.loc[window_start:timestamp_key, analytic_cols]
# If the window is empty, then there's nothing to do, just move on to the next window
if (window_df.empty) :
continue
# Check that there's never more data in the window than there should be (max 1 record per min, per FF)
assert(window_df.groupby(FIREFIGHTER_ID_COL).size().max() <= window_mins)
# Calculate time-weighted average exposure for this time-window.
# A *time-weighted* average, means each sensor reading is multiplied by the length of time the reading
# covers, before dividing by the total time covered. This can get very complicated if readings are unevenly
# spaced or if they get lost, or sent late due to connectivity dropouts. So Prometeo makes two design
# choices that account for these issues, and simplify calculations (reducing opportunities for error).
# (1) The system takes exactly one reading per minute, no more & no less, so the multiplication factor for
# every reading is always 1.
# (2) Any missing/lost sensor readings are approximated by using the average value for that sensor over the
# time-window in question. (Care needs to be taken to ensure that calculations don't inadvertently
# approximate them as '0ppm').
# Since the goal we're after here is to get the average over a time-window, we don't need to actually
# fill-in the missing entries, we can just get the average of the available sensor readings.
window_twa_df = window_df.groupby(FIREFIGHTER_ID_COL).mean()
# The average alone is not enough, we also have to adjust it to reflect how much of the time-window the
# data represents. e.g. Say the 8hr time-weighted average (TWA) exposure limit for CO exposure is 27ppm.
# Now imagine we observe 30ppm in the first 15 mins of an event and then have a connectivity dropout for
# the next 15 mins. What do we show the command center user? It's over the limit for an 8hr average, but
# we're only 30mins into that 8-hour period. So we adjust the TWA to the proportion of the time window
# that has actually elapsed. Note: this implicitly assumes that firefighter exposure is zero before the
# first recorded sensor value and after the last recorded value.
# To work out the window proportion, we (A) calculate the time overlap between the moving window and the
# available data timespans for each Firefighter, then (B) Divide the overlap by the total length of the
# time-window to get the proportion. Finally (C) Multiply the TWAs for each firefighter by the proportion
# for that firefighter.
# (A.1) Get the available data timespans for each Firefighter, only selecting firefighters that are in
# this window. (take a copy so that data from one window does not contaminate the next)
ffs_in_this_window = window_df.loc[:, FIREFIGHTER_ID_COL].unique()
overlap_df = ff_time_spans_df.loc[ff_time_spans_df.index.isin(ffs_in_this_window), :].copy()
# (A.2) Add on the moving window timespans (note: no start correction here because it's not a slice).
overlap_df = overlap_df.assign(**{WINDOW_START : timestamp_key - window_length, WINDOW_END : timestamp_key})
# (A.3) Calculate the overlap between the moving window and the available data timespans for each Firefighter.
# overlap = (earliest_end_time - latest_start_time). Negative overlap is meaningless, so when it happens,
# treat it as zero overlap.
overlap_delta = (overlap_df.loc[:, [DATA_END,WINDOW_END]].min(axis='columns')
- overlap_df.loc[:, [DATA_START,WINDOW_START]].max(axis='columns'))
overlap_df.loc[:, OVERLAP_MINS] = (overlap_delta.transform(
lambda delta: delta.total_seconds()/float(60) if delta.total_seconds() > 0 else 0))
# (B) Divide the overlap by the total length of the time-window to get a proportion. Maximum overlap is 1.
overlap_df.loc[:, PROPORTION_OF_WINDOW] = (overlap_df.loc[:, OVERLAP_MINS].transform(
lambda overlap_mins : overlap_mins/float(window_mins) if overlap_mins < window_mins else 1))
# (C) Multiply the TWAs for each firefighter by the proportion for that firefighter.
# Also apply rounding at this point.
window_twa_df = window_twa_df.multiply(overlap_df.loc[:, PROPORTION_OF_WINDOW], axis='rows')
for gas in self.SUPPORTED_GASES :
window_twa_df.loc[:, gas] = np.round(window_twa_df.loc[:, gas], self.SAFE_ROUNDING_FACTORS[gas])
# Prepare the results for limit gauges and merging
window_twa_df = (window_twa_df
.assign(**{TIMESTAMP_COL: timestamp_key})
.reset_index()
.set_index([FIREFIGHTER_ID_COL, TIMESTAMP_COL]))
# Calculate gas limit gauge - percentage over / under the calculated TWA values
# (force gases and limits to have the same column order as each other before comparing)
gas_limits = [float(time_window[GAS_LIMITS_PROPERTY][gas]) for gas in self.SUPPORTED_GASES]
window_gauge_df = ((window_twa_df.loc[:, self.SUPPORTED_GASES] * 100 / gas_limits)
.round(0)) # we don't need decimal precision for percentages
# Update column titles - add the time period over which we're averaging, so we can merge dataframes later
# without column name conflicts.
window_twa_df = window_twa_df.add_suffix((TWA_SUFFIX + MIN_SUFFIX) % (str(time_window[WINDOW_MINS_PROPERTY])))
window_gauge_df = window_gauge_df.add_suffix((GAUGE_SUFFIX + MIN_SUFFIX) % (str(time_window[WINDOW_MINS_PROPERTY])))
# Now save the results from this time window as a single merged dataframe (TWAs and Limit Gauges)
calculations_for_all_windows.append(pd.concat([window_twa_df, window_gauge_df], axis='columns'))
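# A minimal standalone sketch of the proportion-adjusted time-weighted average described
# above (toy numbers, not Prometeo data): average the available 1-minute readings, then
# scale by the fraction of the time-window that the data actually covers.
import pandas as pd  # repeated so the sketch runs on its own
toy_readings = pd.Series([1.0] * 30 + [25.0] * 30)  # 60 one-minute CO readings, in ppm
window_mins = 60
observed_mins = len(toy_readings)  # here the data happens to span the whole window
twa = toy_readings.mean() * min(observed_mins / float(window_mins), 1.0)
print(round(twa, 1))  # -> 13.0 ppm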
'''
Created with love by Sigmoid
@Author - <NAME> - <EMAIL>
'''
import numpy as np
import pandas as pd
import random
import sys
from random import randrange
from .SMOTE import SMOTE
from sklearn.mixture import GaussianMixture
from .erorrs import NotBinaryData, NoSuchColumn
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
class SCUT:
def __init__(self,k: "int > 0" = 5, seed: float = 42, binary_columns : list = None) -> None:
'''
Setting up the algorithm
:param k: int, k>0, default = 5
Number of neighbours which will be considered when looking for similar data points
:param seed: int, default = 42
Seed for the random number generators.
:param binary_columns: list, default = None
The list of columns that should have binary values after balancing.
'''
self.__k = k
if binary_columns is None:
self.__binarize = False
self.__binary_columns = None
else:
self.__binarize = True
self.__binary_columns = binary_columns
self.__seed = seed
np.random.seed(self.__seed)
random.seed(self.__seed)
def __to_binary(self) -> None:
'''
If :param binary_columns: was provided, the intermediate values in those columns are rounded back to binary values.
'''
for column_name in self.__binary_columns:
serie = self.synthetic_df[column_name].values
threshold = (self.df[column_name].max() + self.df[column_name].min()) / 2
for i in range(len(serie)):
if serie[i] >= threshold:
serie[i] = self.df[column_name].max()
else:
serie[i] = self.df[column_name].min()
self.synthetic_df[column_name] = serie
def __infinity_check(self, matrix : 'np.array') -> 'np.array':
'''
This function replaces the infinity and -infinity values with the minimal and maximal float python values.
:param matrix: 'np.array'
The numpy array that was generated my the algorithm.
:return: 'np.array'
The numpy array with the infinity replaced values.
'''
matrix[matrix == -np.inf] = sys.float_info.min
matrix[matrix == np.inf] = sys.float_info.max
return matrix
def balance(self, df : pd.DataFrame, target : str):
'''
Balance the class distribution of the data set
:param df: pandas DataFrame
Data Frame on which the algorithm is applied
:param target: string
The name of the target column that we have to predict
'''
if target not in df.columns:
raise NoSuchColumn(f"{target} isn't a column of the passed data frame")
#get unique values from target column
unique = df[target].unique()
self.target= target
self.df = df.copy()
#training columns
self.X_columns = [column for column in self.df.columns if column != target]
class_samples = []
for clas in unique:
class_samples.append(self.df[self.df[self.target]==clas][self.X_columns].values)
classes_nr_samples = []
for clas in unique:
classes_nr_samples.append(len(self.df[self.df[self.target]==clas]))
#getting mean number of samples of all classes
mean = np.mean(classes_nr_samples)
#undersampling by SCUT algorithm
for i,class_sample in enumerate(class_samples):
#checking whether this class is over-represented (above the mean sample count)
if classes_nr_samples[i]>mean:
clusters = []
selected_samples = []
#getting nr of samples to take
difference = classes_nr_samples[i] - mean
#cluster every class
gmm = GaussianMixture(3)
gmm.fit(class_sample)
#getting predictions
labels = gmm.predict(class_sample)
clusters_df = pd.DataFrame(np.array(class_sample), columns=self.X_columns)
clusters_df.loc[:, target] = labels
unique_clusters = clusters_df[self.target].unique()
#Selecting random samples from every cluster, and repeat until we get the "difference" that we needed
for clas in unique_clusters:
#cluster sets
clusters.append(clusters_df[clusters_df[self.target]==clas].values)
while difference > 0: #difference can be fractional (mean is a float), so compare with > rather than !=
for cluster in clusters:
#selecting random sample and add it to list
index = randrange(len(cluster))
example = cluster[index]
selected_samples.append(example)
difference-=1
if difference <= 0:
break
#create the new dataset including selected samples
cluster_df = pd.DataFrame(np.array(selected_samples), columns=self.df.columns)
cluster_df.loc[:, self.target] = unique[i]##
if i==0:
self.new_df = cluster_df
else:
self.new_df = pd.concat([cluster_df,self.new_df],axis=0)
else:
cluster_df = pd.DataFrame(np.array(class_sample), columns=self.X_columns)
cluster_df.loc[:, self.target] = unique[i]##
if i==0:
self.new_df = cluster_df
else:
self.new_df = pd.concat([cluster_df,self.new_df],axis=0)
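# A minimal usage sketch for the class above (synthetic data, hypothetical column names),
# kept as comments because the module's relative imports only resolve inside its package:
# toy = pd.DataFrame({'f1': np.random.rand(300), 'f2': np.random.rand(300),
#                     'label': [0] * 200 + [1] * 60 + [2] * 40})
# scut = SCUT(k=5, seed=42)
# scut.balance(toy, target='label')
# print(scut.new_df['label'].value_counts())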
"""
Copyright 2021 <NAME>
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this work except in compliance with the License. You may obtain a copy of the
License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import pandas as pd
import tkinter as tk
from tkinter import *
from tkinter import filedialog
from datetime import datetime
from tkinter.messagebox import showinfo
from tkinter.messagebox import showwarning
from tkinter.font import Font
from os import path
import sys
if getattr(sys, 'frozen', False) and hasattr(sys, '_MEIPASS'):
print('running in a PyInstaller bundle')
else:
print('running in a normal Python process')
# ----------------------------------------------- GLOBAL VARIABLES
REF_FILE_NAME = ""
TEST_FILE_NAME = ""
MAIN_FILE_ONE_NAME = ""
MAIN_FILE_TWO_NAME = ""
SEQ_ONE_GAPS = []
SEQ_TWO_GAPS = []
SEQ_THREE_GAPS = []
SEQ_FOUR_GAPS = []
FOUR_SEQ_ALIGN = False
ALIGNMENT_WARNING = False
THRESHOLD = 1
LVL_SEL = "L1&L2"
PEP_COLUMNS = ["peptide", "Peptide", "Peptide sequence"]
START_COLUMNS = ["start", "Start", "Peptide start"]
REF_PEPTIDE_MAX_LENGTH = 50
TEST_PEPTIDE_MAX_LENGTH = 50
MAIN_PEPTIDE_MAX_LENGTH = 50
# ----------------------------------------------- CLASSES
class MainApplication:
def __init__(self, master):
self.master = master
self.canvas = tk.Canvas(master, width=550, height=690) # width=550, height=690
# to make a frame
self.frame = tk.Frame(master, bg='white')
############################################################################################
# Frame Input
# this frame is placed in the original frame
title_font = Font(family="Calibri", size=12, weight="bold")
self.frame_input = tk.Frame(self.frame, bd='10', padx=3, pady=3)
self.label_input_files = tk.Label(self.frame_input, text='Input File Paths', bd='3', fg='blue', font=title_font)
self.label_epitope_predictions = tk.Label(self.frame_input, text='Epitope Predictions', bd='3', fg='blue')
self.label_ref = tk.Label(self.frame_input, text='Sequence A', bd='3')
self.label_test = tk.Label(self.frame_input, text='Sequence B', bd='3')
self.label_database_searches = tk.Label(self.frame_input, text='Database Searches', bd='3', fg='blue')
self.label_main_one = tk.Label(self.frame_input, text='Sequence A', bd='3')
self.label_main_two = tk.Label(self.frame_input, text='Sequence B', bd='3')
self.entry_ref = tk.Entry(self.frame_input, bd='3', justify="center")
self.entry_test = tk.Entry(self.frame_input, bd='3', justify="center")
self.entry_main_one = tk.Entry(self.frame_input, bd='3', justify="center")
self.entry_main_two = tk.Entry(self.frame_input, bd='3', justify="center")
self.button_ref = tk.Button(self.frame_input, text='Browse', command=self.browse_ref)
self.button_test = tk.Button(self.frame_input, text='Browse', command=self.browse_test)
self.button_main_one = tk.Button(self.frame_input, text='Browse', command=self.browse_main_one)
self.button_main_two = tk.Button(self.frame_input, text='Browse', command=self.browse_main_two)
self.label_indels_title = tk.Label(self.frame_input, text='CAVES Indel Search', bd='3', fg='blue')
self.label_indels_alignment = tk.Label(self.frame_input, text='Alignment', bd='3')
self.entry_indels_alignment = tk.Entry(self.frame_input, bd='3', justify="center")
self.button_indels_alignment = tk.Button(self.frame_input, text='Browse', command=self.browse_alignment)
self.label_threshold_title = tk.Label(self.frame_input, text='Minimum Peptide Length', bd='3', fg='blue',
font=title_font)
self.entry_threshold = tk.Entry(self.frame_input, bd='3', justify="center")
self.label_threshold_helper = tk.Label(self.frame_input,
text='Default minimum is 1 amino acid',
bd='3', fg='red')
self.label_radio_title = tk.Label(self.frame_input, text='Level Selection', bd='3', fg='blue',
font=title_font)
self.frame_radio_buttons = tk.Frame(self.frame_input, bd='0', padx=3, pady=3)
self.level_selection = IntVar()
self.level_selection.set(1)
self.radio_both_lvls = Radiobutton(self.frame_radio_buttons, text="Level 1 and 2",
command=self.config_L1L2_entries,
variable=self.level_selection, value=1).grid(row=0, column=1, padx=50)
self.radio_lvl_one_only = Radiobutton(self.frame_radio_buttons, text="Level 1 only",
command=self.config_L1_only_entries,
variable=self.level_selection, value=2).grid(row=0, column=2)
self.radio_lvl_two_only = Radiobutton(self.frame_radio_buttons, text="Level 2 only",
command=self.config_L2_only_entries,
variable=self.level_selection, value=3).grid(row=0, column=3, padx=50)
self.label_result_file_title = tk.Label(self.frame_input, text='Results File', bd='3', fg='blue',
font=title_font)
self.entry_result_file = tk.Entry(self.frame_input, bd='3', justify="center")
self.button_result_path = tk.Button(self.frame_input, text='Browse', command=self.browse_result_path)
# place used to place the widgets in the frame
self.label_input_files.place(relx=-0.005, rely=-0.01, relheight=0.05)
self.label_epitope_predictions.place(relx=0.025, rely=0.06, relheight=0.035)
self.label_ref.place(relx=0.05, rely=0.12, relheight=0.035)
self.entry_ref.place(relx=0.20, rely=0.12, relwidth=0.55, relheight=0.035)
self.button_ref.place(relx=0.80, rely=0.12, relheight=0.030)
self.label_test.place(relx=0.05, rely=0.18, relheight=0.035)
self.entry_test.place(relx=0.20, rely=0.18, relwidth=0.55, relheight=0.035)
self.button_test.place(relx=0.80, rely=0.18, relheight=0.030)
self.label_database_searches.place(relx=0.025, rely=0.26, relheight=0.035)
self.label_main_one.place(relx=0.05, rely=0.32, relheight=0.035)
self.entry_main_one.place(relx=0.20, rely=0.32, relwidth=0.55, relheight=0.035)
self.button_main_one.place(relx=0.80, rely=0.32, relheight=0.030)
self.label_main_two.place(relx=0.05, rely=0.38, relheight=0.035)
self.entry_main_two.place(relx=0.20, rely=0.38, relwidth=0.55, relheight=0.035)
self.button_main_two.place(relx=0.80, rely=0.38, relheight=0.030)
self.label_indels_title.place(relx=0.025, rely=0.46, relheight=0.035)
self.label_indels_alignment.place(relx=0.06, rely=0.52, relheight=0.035)
self.entry_indels_alignment.place(relx=0.20, rely=0.52, relwidth=0.55, relheight=0.035)
self.button_indels_alignment.place(relx=0.80, rely=0.52, relheight=0.030)
self.label_threshold_title.place(relx=-0.005, rely=0.60, relheight=0.05)
self.entry_threshold.place(relx=0.10, rely=0.69, relwidth=0.05, relheight=0.030)
self.label_threshold_helper.place(relx=0.175, rely=0.69, relheight=0.030)
self.label_radio_title.place(relx=-0.005, rely=0.76, relheight=0.05)
# Radio buttons are placed in their own frame (self.frame_radio_buttons)
self.label_result_file_title.place(relx=-0.005, rely=0.90, relheight=0.035)
self.entry_result_file.place(relx=0.20, rely=0.955, relwidth=0.55, relheight=0.035)
self.button_result_path.place(relx=0.80, rely=0.955, relheight=0.030)
############################################################################################
# placing the buttons below
submit_font = Font(family="Calibri", size=12)
self.frame_button = tk.Frame(self.frame, bd='3', padx=3, pady=3)
self.button_start = tk.Button(self.frame_button, text='Compare', font=submit_font, command=self.start_clicked)
self.button_cancel = tk.Button(self.frame_button, text='Cancel', font=submit_font, command=master.destroy)
self.button_cancel.place(relx=0.6, rely=0.22, relheight=0.6, relwidth=0.18)
self.button_start.place(relx=0.8, rely=0.22, relheight=0.6, relwidth=0.18)
###############################################################################################
# all the frames are placed in their respective positions
self.frame_input.place(relx=0.005, rely=0.005, relwidth=0.99, relheight=0.906)
self.frame_radio_buttons.place(relx=0.005, rely=0.8275, relwidth=1, relheight=1)
self.frame_button.place(relx=0.005, rely=0.915, relwidth=0.99, relheight=0.08)
self.frame.place(relx=0.02, rely=0.02, relwidth=0.96, relheight=0.96)
self.canvas.pack()
##############################################################################################
def start_clicked(self):
print("Compare Start")
init_objects(self.level_selection.get())
global LVL_SEL
print("Reading epitope predictions: Sequence A file")
ref_raw = init_ref_raw(self.entry_ref.get().strip())
if ref_raw is None:
return
if LVL_SEL != "L2Only":
print("Reading epitope predictions: Sequence B file")
test_raw = init_test_raw(self.entry_test.get().strip())
if test_raw is None:
return
if LVL_SEL != "L1Only":
print("Reading database searches: Sequence A file")
main_raw_one = init_main_raw(self.entry_main_one.get().strip())
if main_raw_one is None:
print("Unable to read database searches: Sequence A file")
return
global MAIN_FILE_ONE_NAME
MAIN_FILE_ONE_NAME = self.entry_main_one.get().split("/").pop()
if LVL_SEL == "L1&L2":
print("Reading database searches: Sequence B file")
main_raw_two = init_main_raw(self.entry_main_two.get().strip())
if main_raw_two is None:
print("Unable to read database searches: Sequence B file")
return
global MAIN_FILE_TWO_NAME
MAIN_FILE_TWO_NAME = self.entry_main_two.get().split("/").pop()
if self.entry_indels_alignment.get().strip() != "":
print("Reading alignment file")
if not init_alignment(self.entry_indels_alignment.get().strip()):
print("Unable to create gap character lists")
return
else:
print("Empty alignment file path")
return
if not init_threshold(self.entry_threshold.get().strip()):
print("Minimum peptide length input error: minimum length set to 1")
result_file = generate_result_file(self.entry_result_file.get())
ref_dictionary = create_test_comparison_dict(ref_raw.to_dict('split'), REF_FILE_NAME)
if LVL_SEL == "L1&L2":
test_dictionary = create_test_comparison_dict(test_raw.to_dict('split'), TEST_FILE_NAME)
main_dict_one = create_main_comparison_dict(main_raw_one.to_dict('split'), MAIN_FILE_ONE_NAME)
main_dict_two = create_main_comparison_dict(main_raw_two.to_dict('split'), MAIN_FILE_TWO_NAME)
generate_test_comparison_results(ref_dictionary, test_dictionary)
generate_main_comparison_results(L1_matched_dict, "L1m", main_dict_one, main_dict_two)
generate_main_comparison_results(L1_partial_dict, "L1p", main_dict_one, main_dict_two)
generate_main_comparison_results(L1_novel_dict, "L1n", main_dict_one, main_dict_two)
finalize_L1L2_results(result_file)
if LVL_SEL == "L1Only":
test_dictionary = create_test_comparison_dict(test_raw.to_dict('split'), TEST_FILE_NAME)
generate_test_comparison_results(ref_dictionary, test_dictionary)
finalize_L1Only_results(result_file)
if LVL_SEL == "L2Only":
main_dict_one = create_main_comparison_dict(main_raw_one.to_dict('split'), MAIN_FILE_ONE_NAME)
generate_main_comparison_results(ref_dictionary, "L2", main_dict_one)
finalize_L2Only_results(result_file)
print("Compared")
showinfo("CAVES", "Comparison Complete!")
def browse_ref(self):
filename = filedialog.askopenfilename(title="Select a File", filetypes=[("CSV files", "*.csv")])
self.entry_ref.delete(0, tk.END)
self.entry_ref.insert(0, filename)
def browse_test(self):
filename = filedialog.askopenfilename(title="Select a File", filetypes=[("CSV files", "*.csv")])
self.entry_test.delete(0, tk.END)
self.entry_test.insert(0, filename)
def browse_main_one(self):
filename = filedialog.askopenfilename(title="Select a File", filetypes=[("CSV files", "*.csv")])
self.entry_main_one.delete(0, tk.END)
self.entry_main_one.insert(0, filename)
def browse_main_two(self):
filename = filedialog.askopenfilename(title="Select a File", filetypes=[("CSV files", "*.csv")])
self.entry_main_two.delete(0, tk.END)
self.entry_main_two.insert(0, filename)
def browse_alignment(self):
fasta_exts = [("FASTA files", "*.fasta"), ("FASTA files", "*.fna"), ("FASTA files", "*.ffn"),
("FASTA files", "*.faa"), ("FASTA files", "*.frn"), ("FASTA files", "*.fa"),
("FASTA files", "*.fsa")]
filename = filedialog.askopenfilename(title="Select a File", filetypes=fasta_exts)
self.entry_indels_alignment.delete(0, tk.END)
self.entry_indels_alignment.insert(0, filename)
def browse_result_path(self):
time = datetime.now().strftime("%Y-%m-%d_%H%M%S")
filename = filedialog.asksaveasfilename(initialfile="results_"+time, title="Results File",
filetypes=[("Excel files", "*.xlsx")])
self.entry_result_file.delete(0, tk.END)
self.entry_result_file.insert(0, filename)
def config_L1L2_entries(self):
self.entry_ref.config(state='normal')
self.entry_test.config(state='normal')
self.entry_main_one.config(state='normal')
self.entry_main_two.config(state='normal')
def config_L1_only_entries(self):
self.entry_main_one.delete(0, tk.END)
self.entry_main_two.delete(0, tk.END)
self.entry_ref.config(state='normal')
self.entry_test.config(state='normal')
self.entry_main_one.config(state='disabled')
self.entry_main_two.config(state='disabled')
def config_L2_only_entries(self):
self.entry_test.delete(0, tk.END)
self.entry_main_two.delete(0, tk.END)
self.entry_ref.config(state='normal')
self.entry_test.config(state='disabled')
self.entry_main_one.config(state='normal')
self.entry_main_two.config(state='disabled')
class ResultSheetObject:
def __init__(self):
self.origin_file_one = []
self.peptide_one = []
self.start_one = []
self.end_one = []
self.length_one = []
self.letters_matched = []
self.letters_matched_length = []
self.origin_file_two = []
self.peptide_two = []
self.start_two = []
self.end_two = []
self.length_two = []
self.mutated_pos = []
class PeptideObject:
def __init__(self, new_file, new_pep, new_start, new_end, new_length, new_suffix):
self.origin_file = new_file
self.peptide = new_pep
self.start = new_start
self.end = new_end
self.length = new_length
self.suffix = new_suffix
# ----------------------------------------------- RESULT OBJECTS
L1_novel = ResultSheetObject()
L1_partial = ResultSheetObject()
L1_matched = ResultSheetObject()
L2_novel = ResultSheetObject()
L2_partial = ResultSheetObject()
L2_matched = ResultSheetObject()
L1_novel_L2_novel = ResultSheetObject()
L1_novel_L2_partial = ResultSheetObject()
L1_novel_L2_matched = ResultSheetObject()
L1_partial_L2_novel = ResultSheetObject()
L1_partial_L2_partial = ResultSheetObject()
L1_partial_L2_matched = ResultSheetObject()
L1_matched_L2_novel = ResultSheetObject()
L1_matched_L2_partial = ResultSheetObject()
L1_matched_L2_matched = ResultSheetObject()
# ----------------------------------------------- LEVEL 1 DICTIONARIES
L1_novel_dict = {}
L1_partial_dict = {}
L1_matched_dict = {}
# ----------------------------------------------- FUNCTIONS
def init_objects(lvl_sel):
global REF_FILE_NAME
REF_FILE_NAME = ""
global TEST_FILE_NAME
TEST_FILE_NAME = ""
global MAIN_FILE_ONE_NAME
MAIN_FILE_ONE_NAME = ""
global MAIN_FILE_TWO_NAME
MAIN_FILE_TWO_NAME = ""
global SEQ_ONE_GAPS
SEQ_ONE_GAPS = []
global SEQ_TWO_GAPS
SEQ_TWO_GAPS = []
global SEQ_THREE_GAPS
SEQ_THREE_GAPS = []
global SEQ_FOUR_GAPS
SEQ_FOUR_GAPS = []
global FOUR_SEQ_ALIGN
FOUR_SEQ_ALIGN = False
global LVL_SEL
if lvl_sel == 1:
LVL_SEL = "L1&L2"
elif lvl_sel == 2:
LVL_SEL = "L1Only"
else:
LVL_SEL = "L2Only"
global L1_novel
L1_novel = ResultSheetObject()
global L1_partial
L1_partial = ResultSheetObject()
global L1_matched
L1_matched = ResultSheetObject()
global L2_novel
L2_novel = ResultSheetObject()
global L2_partial
L2_partial = ResultSheetObject()
global L2_matched
L2_matched = ResultSheetObject()
global L1_novel_L2_novel
L1_novel_L2_novel = ResultSheetObject()
global L1_novel_L2_partial
L1_novel_L2_partial = ResultSheetObject()
global L1_novel_L2_matched
L1_novel_L2_matched = ResultSheetObject()
global L1_partial_L2_novel
L1_partial_L2_novel = ResultSheetObject()
global L1_partial_L2_partial
L1_partial_L2_partial = ResultSheetObject()
global L1_partial_L2_matched
L1_partial_L2_matched = ResultSheetObject()
global L1_matched_L2_novel
L1_matched_L2_novel = ResultSheetObject()
global L1_matched_L2_partial
L1_matched_L2_partial = ResultSheetObject()
global L1_matched_L2_matched
L1_matched_L2_matched = ResultSheetObject()
global L1_novel_dict
L1_novel_dict = {}
global L1_partial_dict
L1_partial_dict = {}
global L1_matched_dict
L1_matched_dict = {}
def init_ref_raw(file_path):
if not path.exists(file_path):
print("Unable to find predictions file: " + file_path)
return None
global REF_FILE_NAME
REF_FILE_NAME = file_path.strip().split("/").pop() # gives last item in list which is file
ref_raw = None
for pep_col in PEP_COLUMNS:
for start_col in START_COLUMNS:
try:
ref_raw = pd.read_csv(file_path, index_col=False, usecols={start_col, pep_col})
break
except ValueError:
ref_raw = None
else:
continue
break
if ref_raw is None:
print("Unable to read epitope predictions: Sequence A file")
print("Value Error: Check to make sure the column names are among the following:")
print("Start Columns:", START_COLUMNS)
print("Peptide Columns:", PEP_COLUMNS)
return ref_raw
def init_test_raw(file_path):
if not path.exists(file_path):
print("Unable to find predictions file: " + file_path)
return None
global TEST_FILE_NAME
TEST_FILE_NAME = file_path.strip().split("/").pop() # gives last item in list which is file
test_raw = None
for pep_col in PEP_COLUMNS:
for start_col in START_COLUMNS:
try:
test_raw = pd.read_csv(file_path, index_col=False, usecols={start_col, pep_col})
break
except ValueError:
test_raw = None
else:
continue
break
if test_raw is None:
print("Unable to read epitope predictions: Sequence B file")
print("Value Error: Check to make sure the column names are among the following:")
print("Start Columns:", START_COLUMNS)
print("Peptide Columns:", PEP_COLUMNS)
return test_raw
def init_main_raw(file_path):
if not path.exists(file_path):
print("Unable to find database search file: " + file_path)
return None
try:
main_raw = pd.read_csv(file_path, index_col=False, skiprows=1, usecols={"Description", "Starting Position"})
return main_raw
except ValueError:
print("Value Error: Check to make sure the column names are: 'Description' and 'Starting Position'")
return None
def init_alignment(file_path):
if not path.exists(file_path):
print("Unable to find alignment file from path: " + file_path)
return False
result = init_gap_chars(file_path)
return result
def init_gap_chars(file_path):
try:
with open(file_path) as my_file:
sequences = build_sequences(my_file)
global ALIGNMENT_WARNING
ALIGNMENT_WARNING = False
global SEQ_ONE_GAPS
SEQ_ONE_GAPS = find_gap_chars(sequences[0])
if LVL_SEL != "L2Only":
global SEQ_TWO_GAPS
SEQ_TWO_GAPS = find_gap_chars(sequences[1])
if LVL_SEL != "L1Only":
global SEQ_THREE_GAPS
SEQ_THREE_GAPS = find_gap_chars(sequences[2])
if sequences[3] and LVL_SEL == "L1&L2":
global SEQ_FOUR_GAPS
SEQ_FOUR_GAPS = find_gap_chars(sequences[3])
global FOUR_SEQ_ALIGN
FOUR_SEQ_ALIGN = True
if ALIGNMENT_WARNING:
showwarning("WARNING", "CAVES has detected a large amount of successive gap characters in your alignment "
"file. Epitopes predicted from highly dissimilar sequences are unlikely to produce "
"biologically relevant matches when compared due to inherent differences in the "
"amino acid composition. \n\nCAVES will still run but we caution against using "
"these results.")
except:
print("Alignment file processing error")
return False
return True
def build_sequences(file):
sequences = ["", "", "", ""]
curr_seq = -1
for line in file:
if line[0] == ">":
curr_seq += 1
else:
line = line.rstrip("\n")
sequences[curr_seq] += line
return sequences
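# e.g. a FASTA file containing ">seqA", "MKT", ">seqB", "M-T" yields ["MKT", "M-T", "", ""]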
def find_gap_chars(seq):
gaps = []
amino_acid_count = 1
row = 0
for char in seq:
if char == '-':
row += 1
if row >= 10:
global ALIGNMENT_WARNING
ALIGNMENT_WARNING = True
gaps.append(amino_acid_count)
else:
row = 0
amino_acid_count += 1
return gaps
def init_threshold(threshold_entry):
global THRESHOLD
if not threshold_entry:
THRESHOLD = 1
return True
try:
if not str.isdigit(threshold_entry):
raise ValueError("Minimum peptide length must be a positive integer")
THRESHOLD = int(threshold_entry)
except:
THRESHOLD = 1
return False
return True
def generate_result_file(file_path):
try:
filename, file_extension = path.splitext(file_path)
if file_extension and file_extension == ".xlsx":
result_file = pd.ExcelWriter(file_path)
else:
result_file = pd.ExcelWriter(filename + ".xlsx")
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/1/26 13:10
Desc: Shenwan (SW) indices - SW level-1, level-2 and level-3 industry indices
http://www.swsindex.com/IdxMain.aspx
https://legulegu.com/stockdata/index-composition?industryCode=851921.SI
"""
import time
import json
import pandas as pd
from akshare.utils import demjson
import requests
from bs4 import BeautifulSoup
from akshare.index.cons import sw_headers, sw_payload, sw_url
def sw_index_representation_spot() -> pd.DataFrame:
"""
SW market-representation indices - real-time quotes
http://www.swsindex.com/idx0120.aspx?columnid=8831
:return: real-time quotes for the market-representation indices
:rtype: pandas.DataFrame
"""
url = "http://www.swsindex.com/handler.aspx"
params = {
"tablename": "swzs",
"key": "L1",
"p": "1",
"where": "L1 in('801001','801002','801003','801005','801300','801901','801903','801905','801250','801260','801270','801280','802613')",
"orderby": "",
"fieldlist": "L1,L2,L3,L4,L5,L6,L7,L8,L11",
"pagecount": "9",
"timed": "1632300641756",
}
r = requests.get(url, params=params)
data_json = demjson.decode(r.text)
temp_df = pd.DataFrame(data_json["root"])
temp_df.columns = ["指数代码", "指数名称", "昨收盘", "今开盘", "成交额", "最高价", "最低价", "最新价", "成交量"]
temp_df["昨收盘"] = pd.to_numeric(temp_df["昨收盘"])
temp_df["今开盘"] = pd.to_numeric(temp_df["今开盘"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最高价"] = pd.to_numeric(temp_df["最高价"])
temp_df["最低价"] = pd.to_numeric(temp_df["最低价"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
return temp_df
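# Hedged usage sketch (assumes live network access to swsindex.com; the
# function below is illustrative and not called on import):
def _demo_sw_index_representation_spot():
    df = sw_index_representation_spot()
    print(df.head())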
def sw_index_spot() -> pd.DataFrame:
"""
    SWS (Shenwan) level-1 industry indices - real-time quotes
    http://www.swsindex.com/idx0120.aspx?columnid=8832
    :return: real-time quotes for the SWS level-1 industry indices
:rtype: pandas.DataFrame
"""
url = "http://www.swsindex.com/handler.aspx"
result = []
for i in range(1, 3):
payload = sw_payload.copy()
payload.update({"p": i})
payload.update({"timed": int(time.time() * 1000)})
r = requests.post(url, headers=sw_headers, data=payload)
data = r.content.decode()
data = data.replace("'", '"')
data = json.loads(data)
result.extend(data["root"])
temp_df = pd.DataFrame(result)
temp_df["L2"] = temp_df["L2"].str.strip()
temp_df.columns = ["指数代码", "指数名称", "昨收盘", "今开盘", "成交额", "最高价", "最低价", "最新价", "成交量"]
temp_df["昨收盘"] = pd.to_numeric(temp_df["昨收盘"])
temp_df["今开盘"] = pd.to_numeric(temp_df["今开盘"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最高价"] = pd.to_numeric(temp_df["最高价"])
temp_df["最低价"] = pd.to_ | numeric(temp_df["最低价"]) | pandas.to_numeric |
"""Auxiliary functions to generate dataframes.
"""
##########
# Imports
##########
import pandas as pd
import random
##########
# Int df Size
##########
def create_int_df_size(cols: int, rows: int) -> pd.DataFrame:
"""Returns test dataframe with passed number of columns and rows.
"""
df_dict = {
chr(65 + i) : range(0, rows) for i in range(cols)
}
return pd.DataFrame(data = df_dict)
##########
# Str df Size
##########
def create_mixed_df_size(cols: int, rows: int) -> pd.DataFrame:
"""Returns test df with mixed number of columns and rows
"""
str_data = ['a', 'b', 'c', 'd']
data = dict()
for i in range(cols):
if i % 2:
val = [ random.choice(str_data) * random.randint(0, 2) for _ in range(rows)]
else:
val = range(0, rows)
data[chr(65 + i)] = val
return pd.DataFrame(data)
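# Hedged usage sketch of the two size helpers above (argument values are arbitrary):
def _demo_size_helpers():
    print(create_int_df_size(cols=3, rows=4))    # columns A, B, C holding 0..3
    print(create_mixed_df_size(cols=4, rows=4))  # alternating int / short-string columns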
##########
# String dfs
##########
def create_str_df1() -> pd.DataFrame:
"""Returns test dataframe.
"""
data = [
['Cat', 'Python', 'The'],
['doG', 'r', 'A'],
['biRd', 'SQL', None]
]
columns = ('animals', 'languages', 'determiners')
df = pd.DataFrame(data=data, columns=columns)
return df
def create_str_df2() -> pd.DataFrame:
"""Creates string dataframe
"""
states = [
## Regular word
'california',
'colorado',
'utah',
'indiana',
'texas',
'oklahoma',
'Nevada',
## Random space in word
'cali fornia',
'colo rado',
'ut ah',
'i ndiana',
'te xas',
'okla homa',
'Neva da',
]
data = {
'a': range(100),
'b': range(100),
'c': [random.choice(['a', 'b', 'c'])* random.randint(0, 5) for _ in range(100)],
'states': [random.choice(states) + ('s' if random.random() < 0.1 else '') for _ in range(100)] # Change for s
}
return | pd.DataFrame(data) | pandas.DataFrame |
import os
import pandas as pd
import sys
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
import random
import statistics
import itertools
JtokWh = 2.7778e-7
weight_factor = [1.50558832,0.35786005,1.0]
path_test = os.path.join(sys.path[0])
representative_days_path= os.path.join(path_test,'ScenarioReduction')
sizing_path = os.path.join(path_test, 'Design_results')
operation_path = os.path.join(path_test, 'Operation_results')
editable_data_path =os.path.join(path_test, 'editable_values.csv')
editable_data = pd.read_csv(editable_data_path, header=None, index_col=0).to_dict()[1]
editable_data_sizing_path =os.path.join(path_test, 'editable_values_design.csv')
editable_data_sizing = pd.read_csv(editable_data_sizing_path, header=None, index_col=0).to_dict()[1]
num_scenarios = int(editable_data['num_scenarios'])
num_clusters= int(editable_data['Cluster numbers'])+2
population_size = int(editable_data['population_size'])
population_size_sizing = int(editable_data_sizing['population_size'])
idf_names = ['ASHRAE901_OfficeMedium_STD2019_Denver','ASHRAE901_Hospital_STD2019_Denver','ASHRAE901_RetailStandalone_STD2019_Denver']
thermal_eff_dict = {idf_names[0]:0.8,idf_names[1]:0.8125,idf_names[2]:0.82}
city=editable_data['city']
lat = float(editable_data['Latitude'])
lon = float(editable_data['Longitude'])
start_year = int(editable_data['starting_year'])
end_year = int(editable_data['ending_year'])
epw_names = []
for i_temp in range(num_scenarios):
for i_solar in range(num_scenarios):
epw_names.append('T_'+str(i_temp)+'_S_'+str(i_solar))
demand_directory = os.path.join(path_test, 'IDFBuildingsFiles')
# epw main files
weather_data = []
weather_data_names =[]
weather_data_bar_names =[]
for year in range(start_year,end_year+1):
weather_data.append(city+'_'+str(lat)+'_'+str(lon)+'_psm3_60_'+str(year))
weather_data_names.append('AMY '+str(year))
weather_data_bar_names.append('AMY \n'+str(year))
dict_EPWs= {}
dict_EPWs['AMYs']=weather_data
dict_EPWs['TMYs']=['USA_UT_Salt.Lake.City.Intl.AP.725720_TMY','USA_UT_Salt.Lake.City.725720_TMY2','USA_UT_Salt.Lake.City.Intl.AP.725720_TMY3']
#dict_EPWs['TMYs']=['USA_UT_Salt.Lake.City.Intl.AP.725720_TMY3']
dict_EPWs['FMYs']=['USA_Salt Lake City Intl AP_HadCM3-A2-'+str(2050),'USA_Salt Lake City Intl AP_HadCM3-A2-'+str(2080)]
dict_EPWs_names= {}
dict_EPWs_names['AMYs']=weather_data_names
dict_EPWs_names['TMYs']=['TMY','TMY2','TMY3']
#dict_EPWs_names['TMYs']=['TMY3']
dict_EPWs_names['FMYs']=['FMY '+str(2050),'FMY '+str(2080)]
dict_EPWs_bar_names= {}
dict_EPWs_bar_names['AMYs']=weather_data_bar_names
dict_EPWs_bar_names['TMYs']=['TMY \n','TMY2 \n','TMY3 \n']
dict_EPWs_bar_names['FMYs']=['FMY \n'+str(2050),'FMY \n'+str(2080)]
main_weather_epw = {}
output_directory = os.path.join(path_test, 'IDFBuildingsFiles')
results_compare = os.path.join(path_test, 'Results')
if not os.path.exists(results_compare):
os.makedirs(results_compare)
years=list(range(1998,2020))
years= ['AMY \n'+str(i) for i in years]
years.append('TMY')
years.append('TMY2')
years.append('TMY3')
years.append('FMY \n'+str(2050))
years.append('FMY \n'+str(2080))
### Representative Days ###
def representative_day_function():
global representative_days,weight_representative_day_main,weight_representative_day_scenario
representative_days = defaultdict(list)
weight_representative_day_scenario = defaultdict(list)
weight_representative_day_main = defaultdict(list)
for key in dict_EPWs.keys():
for epw_file_name in dict_EPWs[key]:
output_prefix = 'total_'+epw_file_name+'_'
for representative_day in range(num_clusters):
rep_day= pd.read_csv(os.path.join(representative_days_path,output_prefix + 'Represent_days_modified_'+str(representative_day)+ '.csv'))
representative_days[output_prefix].append(rep_day)
weight_representative_day_main[output_prefix].append(rep_day['Percent %']/100*365)
for scenario in range(len(epw_names)):
output_prefix = 'total_'+epw_names[scenario]+'_'
for representative_day in range(num_clusters):
rep_day= pd.read_csv(os.path.join(representative_days_path,output_prefix + 'Represent_days_modified_'+str(representative_day)+ '.csv'))
representative_days[output_prefix].append(rep_day)
weight_representative_day_scenario[output_prefix].append(rep_day['Percent %']/100*365)
### Energy Demands ###
def energy_demands():
global elect_buildings,gas_buildings,cool_buildings,elect_annual,gas_annual,cool_annual,total_elect_buildings,total_gas_buildings,total_cool_buildings,total_elect_annual,total_gas_annual,total_cool_annual
elect_buildings = defaultdict(list)
gas_buildings = defaultdict(list)
cool_buildings = defaultdict(list)
elect_annual= defaultdict(list)
gas_annual = defaultdict(list)
cool_annual = defaultdict(list)
total_elect_buildings= []
total_gas_buildings = []
total_cool_buildings = []
total_elect_annual= []
total_gas_annual = []
total_cool_annual = []
for scenario in range(len(epw_names)):
sum_electricity_buildings = []
sum_heating_buildings = []
sum_cooling_buildings = []
sum_electricity_annual = []
sum_heating_annual = []
sum_cooling_annual = []
for building_type in idf_names:
output_prefix = building_type+'_'+epw_names[scenario]+'_mtr.csv'
demand_data_path = os.path.join(demand_directory, output_prefix)
data = pd.read_csv(demand_data_path)
elect_data = (data['Electricity:Facility [J](Hourly)']-data['Heating:Electricity [J](Hourly)'] - data['Cooling:Electricity [J](Hourly)'])*JtokWh
heat_data = (data['Gas:Facility [J](Hourly)']*thermal_eff_dict[building_type]+data['Heating:Electricity [J](Hourly)'])*JtokWh
cool_data = (data['Cooling:Electricity [J](Hourly)'])*JtokWh
elect_buildings[building_type].append(elect_data)
gas_buildings[building_type].append(heat_data)
cool_buildings[building_type].append(cool_data)
elect_annual[building_type].append(sum(elect_data))
gas_annual[building_type].append(sum(heat_data))
cool_annual[building_type].append(sum(cool_data))
sum_electricity_buildings.append(elect_data*weight_factor[idf_names.index(building_type)])
sum_heating_buildings.append(heat_data*weight_factor[idf_names.index(building_type)])
sum_cooling_buildings.append(cool_data*weight_factor[idf_names.index(building_type)])
sum_electricity_annual.append(sum(elect_data*weight_factor[idf_names.index(building_type)]))
sum_heating_annual.append(sum(heat_data*weight_factor[idf_names.index(building_type)]))
sum_cooling_annual.append(sum(cool_data*weight_factor[idf_names.index(building_type)]))
total_elect_buildings.append(sum(sum_electricity_buildings))
total_gas_buildings.append(sum(sum_heating_buildings))
total_cool_buildings.append(sum(sum_cooling_buildings))
total_elect_annual.append(sum(sum_electricity_annual))
total_gas_annual.append(sum(sum_heating_annual))
total_cool_annual.append(sum(sum_cooling_annual))
global elect_buildings_main,gas_buildings_main,cool_buildings_main,elect_annual_main,gas_annual_main,cool_annual_main,total_elect_buildings_main,total_gas_buildings_main,total_cool_buildings_main,total_elect_annual_main,total_gas_annual_main,total_cool_annual_main
elect_buildings_main = defaultdict(list)
gas_buildings_main = defaultdict(list)
cool_buildings_main = defaultdict(list)
elect_annual_main = defaultdict(list)
gas_annual_main = defaultdict(list)
cool_annual_main = defaultdict(list)
total_elect_annual_main = []
total_gas_annual_main = []
total_cool_annual_main = []
total_elect_buildings_main = []
total_gas_buildings_main = []
total_cool_buildings_main = []
global output_prefix_short
output_prefix_short ={}
for key in dict_EPWs.keys():
for epw_file_name in dict_EPWs[key]:
output_prefix = 'total_'+epw_file_name+'_'
output_prefix_short[output_prefix] = dict_EPWs_names[key][dict_EPWs[key].index(epw_file_name)]
sum_electricity_buildings_main = []
sum_heating_buildings_main = []
sum_cooling_buildings_main = []
sum_electricity_annual_main = []
sum_heating_annual_main = []
sum_cooling_annual_main = []
for building_type in idf_names:
output_prefix = building_type+'_'+epw_file_name+'_mtr.csv'
demand_data_path = os.path.join(demand_directory, output_prefix)
data = pd.read_csv(demand_data_path)
elect_data = (data['Electricity:Facility [J](Hourly)']-data['Heating:Electricity [J](Hourly)'] - data['Cooling:Electricity [J](Hourly)'])*JtokWh
heat_data = (data['Gas:Facility [J](Hourly)']*thermal_eff_dict[building_type]+data['Heating:Electricity [J](Hourly)'])*JtokWh
cool_data = (data['Cooling:Electricity [J](Hourly)'])*JtokWh
elect_buildings_main[building_type].append(elect_data)
gas_buildings_main[building_type].append(heat_data)
cool_buildings_main[building_type].append(cool_data)
elect_annual_main[building_type].append(sum(elect_data))
gas_annual_main[building_type].append(sum(heat_data))
cool_annual_main[building_type].append(sum(cool_data))
sum_electricity_buildings_main.append(elect_data*weight_factor[idf_names.index(building_type)])
sum_heating_buildings_main.append(heat_data*weight_factor[idf_names.index(building_type)])
sum_cooling_buildings_main.append(cool_data*weight_factor[idf_names.index(building_type)])
sum_electricity_annual_main.append(sum(elect_data*weight_factor[idf_names.index(building_type)]))
sum_heating_annual_main.append(sum(heat_data*weight_factor[idf_names.index(building_type)]))
sum_cooling_annual_main.append(sum(cool_data*weight_factor[idf_names.index(building_type)]))
total_elect_buildings_main.append(sum(sum_electricity_buildings_main))
total_gas_buildings_main.append(sum(sum_heating_buildings_main))
total_cool_buildings_main.append(sum(sum_cooling_buildings_main))
total_elect_annual_main.append(sum(sum_electricity_annual_main))
total_gas_annual_main.append(sum(sum_heating_annual_main))
total_cool_annual_main.append(sum(sum_cooling_annual_main))
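# Hedged sketch (illustration only, never called): the "total_*" quantities
# assembled above are weighted sums over the three prototype buildings,
# i.e. total(t) = sum_b weight_factor[b] * demand_b(t).
def _demo_weighted_total(per_building_series, weights=weight_factor):
    return sum(w * s for w, s in zip(weights, per_building_series))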
j = 0
def generate_combo_plots(mode):
SMALL_SIZE = 30
MEDIUM_SIZE = 32
BIGGER_SIZE = 38
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
plt.rcParams['axes.facecolor'] = 'white'
plt.rcParams['axes.grid'] = False
plt.rcParams['axes.edgecolor'] = 'black'
cmap = plt.cm.RdYlGn
plt.rcParams["figure.figsize"] = (30,20)
color = ["#"+''.join([random.choice('0123456789ABCDEF') for j in range(6)])
for i in range(27)]
if mode=='seperate':
marker = itertools.cycle(('v','+','s','^','o','x','*'))
for building_type in idf_names:
plt.figure()
j=0
for key in dict_EPWs.keys():
for epw_file_name in dict_EPWs[key]:
output_prefix = 'total_'+epw_file_name+'_'
label = output_prefix_short[output_prefix]
if key=='AMYs':
year_selected_number=int(label.replace('AMY',''))
if year_selected_number==2019 or year_selected_number==2018 or year_selected_number==2016 or year_selected_number==2014 or year_selected_number==2012:
plt.scatter(cool_annual_main[building_type][j]/1000,gas_annual_main[building_type][j]/1000,s=400, cmap=cmap, label = label,marker=next(marker))
plt.annotate(label,xy=(cool_annual_main[building_type][j]/1000,gas_annual_main[building_type][j]/1000),xytext=(cool_annual_main[building_type][j]/1000*1.0005,gas_annual_main[building_type][j]/1000*1.0005),
arrowprops=dict(arrowstyle="-"),fontsize=MEDIUM_SIZE)
j = j+1
elif key=='TMYs':
if label=='TMY3':
plt.scatter(cool_annual_main[building_type][j]/1000,gas_annual_main[building_type][j]/1000,s=400, cmap=cmap, label = label,marker=next(marker))
plt.annotate(label,xy=(cool_annual_main[building_type][j]/1000,gas_annual_main[building_type][j]/1000),xytext=(cool_annual_main[building_type][j]/1000*1.0005,gas_annual_main[building_type][j]/1000*1.0005),
arrowprops=dict(arrowstyle="-"),fontsize=MEDIUM_SIZE)
j = j+1
elif key=='FMYs':
plt.scatter(cool_annual_main[building_type][j]/1000,gas_annual_main[building_type][j]/1000,s=400, cmap=cmap, label = label,marker=next(marker))
plt.annotate(label,xy=(cool_annual_main[building_type][j]/1000,gas_annual_main[building_type][j]/1000),xytext=(cool_annual_main[building_type][j]/1000*1.0005,gas_annual_main[building_type][j]/1000*1.0005),
arrowprops=dict(arrowstyle="-"),fontsize=MEDIUM_SIZE)
j = j+1
plt.xlabel('Total Cooling Demand (MWh)')
plt.ylabel('Total Hot Water Demand (MWh)')
plt.savefig(os.path.join(results_compare,building_type+'_annual_main_combo_demands_WC'+'.png'),dpi=100,facecolor='w',bbox_inches='tight')
plt.close()
marker_list = ['v','+','s','^','o','x','*','s','>','<']
color_list= [ 'tab:blue', 'tab:orange','tab:green','black','yellow','tab:red','tab:cyan','tab:olive','peru','tab:purple']
for building_type in idf_names:
plt.figure()
label_dict = {}
for scenario in range(len(epw_names)):
key_label = round(cool_annual[building_type][scenario]/1000,0)
#if key_label not in label_dict.keys():
#label_dict[key_label] = epw_names[scenario]
label_short= epw_names[scenario].replace('_','')
marker = marker_list[int(label_short[1])]
color = color_list[int(label_short[3])]
if (int(label_short[1])==0 or int(label_short[1])==5 or int(label_short[1])==9) and (int(label_short[3])==0 or int(label_short[3])==5 or int(label_short[3])==9):
if int(label_short[1])==0:
label_T='Tmin'
elif int(label_short[1])==5:
label_T='Tmed'
elif int(label_short[1])==9:
label_T='Tmax'
if int(label_short[3])==0:
label_S='Smin'
elif int(label_short[3])==5:
label_S='Smed'
elif int(label_short[3])==9:
label_S='Smax'
label = label_T + label_S
if building_type==idf_names[1]:
weight_factor_pareto_front =0.9955
else:
weight_factor_pareto_front = 1
plt.scatter(cool_annual[building_type][scenario]/1000,gas_annual[building_type][scenario]/1000,color=color,marker=marker,s=300, cmap=cmap, label=label_short)
plt.annotate(label,xy=(cool_annual[building_type][scenario]/1000,gas_annual[building_type][scenario]/1000),xytext=(cool_annual[building_type][scenario]/1000*1.005*weight_factor_pareto_front,gas_annual[building_type][scenario]/1000),
arrowprops=dict(arrowstyle="-"),fontsize=MEDIUM_SIZE)
plt.xlabel('Total Cooling Demand (MWh)')
plt.ylabel('Total Hot Water Demand (MWh)')
plt.savefig(os.path.join(results_compare,building_type+'_annual_scenario_combo_demands_WC'+'.png'),dpi=100,facecolor='w',bbox_inches='tight')
plt.close()
elif mode =='total':
marker = itertools.cycle(('v','+','s','^','o','x','*'))
plt.figure()
j=0
for key in dict_EPWs.keys():
for epw_file_name in dict_EPWs[key]:
output_prefix = 'total_'+epw_file_name+'_'
label = output_prefix_short[output_prefix]
if key=='AMYs':
year_selected_number=int(label.replace('AMY',''))
if year_selected_number==2019 or year_selected_number==2018 or year_selected_number==2016 or year_selected_number==2014 or year_selected_number==2012:
plt.scatter(total_cool_annual_main[j]/1000,total_gas_annual_main[j]/1000,s=400, cmap=cmap, label = label,marker=next(marker))
plt.annotate(label,xy=(total_cool_annual_main[j]/1000,total_gas_annual_main[j]/1000),xytext=(total_cool_annual_main[j]/1000*1.0005,total_gas_annual_main[j]/1000*1.0005),
arrowprops=dict(arrowstyle="-"),fontsize=MEDIUM_SIZE)
j = j+1
elif key=='TMYs':
if label=='TMY3':
plt.scatter(total_cool_annual_main[j]/1000,total_gas_annual_main[j]/1000,s=400, cmap=cmap, label = label,marker=next(marker))
plt.annotate(label,xy=(total_cool_annual_main[j]/1000,total_gas_annual_main[j]/1000),xytext=(total_cool_annual_main[j]/1000*1.0005,total_gas_annual_main[j]/1000*1.0005),
arrowprops=dict(arrowstyle="-"),fontsize=MEDIUM_SIZE)
j = j+1
elif key=='FMYs':
plt.scatter(total_cool_annual_main[j]/1000,total_gas_annual_main[j]/1000,s=400, cmap=cmap, label = label,marker=next(marker))
plt.annotate(label,xy=(total_cool_annual_main[j]/1000,total_gas_annual_main[j]/1000),xytext=(total_cool_annual_main[j]/1000*1.0005,total_gas_annual_main[j]/1000*1.0005),
arrowprops=dict(arrowstyle="-"),fontsize=MEDIUM_SIZE)
j = j+1
plt.xlabel('Total Cooling Demand (MWh)')
plt.ylabel('Total Hot Water Demand (MWh)')
plt.savefig(os.path.join(results_compare,'total_annual_main_combo_demands_WC'+'.png'),dpi=100,facecolor='w',bbox_inches='tight')
plt.close()
marker_list = ['v','+','s','^','o','x','*','s','>','<']
color_list= [ 'tab:blue', 'tab:orange','tab:green','black','yellow','tab:red','tab:cyan','tab:olive','peru','tab:purple']
label_dict = {}
for scenario in range(len(epw_names)):
key_label = round(total_cool_annual[scenario]/1000,0)
#if key_label not in label_dict.keys():
# label_dict[key_label] = epw_names[scenario]
label_short= epw_names[scenario].replace('_','')
marker = marker_list[int(label_short[1])]
color = color_list[int(label_short[3])]
if (int(label_short[1])==0 or int(label_short[1])==5 or int(label_short[1])==9) and (int(label_short[3])==0 or int(label_short[3])==5 or int(label_short[3])==9):
if int(label_short[1])==0:
label_T='Tmin'
elif int(label_short[1])==5:
label_T='Tmed'
elif int(label_short[1])==9:
label_T='Tmax'
if int(label_short[3])==0:
label_S='Smin'
elif int(label_short[3])==5:
label_S='Smed'
elif int(label_short[3])==9:
label_S='Smax'
label = label_T + label_S
plt.scatter(total_cool_annual[scenario]/1000,total_gas_annual[scenario]/1000,s=300,c=color,marker=marker, cmap=cmap, label=label_short)
plt.annotate(label,xy=(total_cool_annual[scenario]/1000,total_gas_annual[scenario]/1000),xytext=(total_cool_annual[scenario]/1000*1.001,total_gas_annual[scenario]/1000*1.001),
arrowprops=dict(arrowstyle="-"),fontsize=MEDIUM_SIZE)
plt.xlabel('Total Cooling Demand (MWh)')
plt.ylabel('Total Hot Water Demand (MWh)')
plt.savefig(os.path.join(results_compare,'total_annual_scenario_combo_demands_WC'+'.png'),dpi=100,facecolor='w',bbox_inches='tight')
plt.close()
def stats_energy_demands():
cols_revised = ['Office Medium','Hospital','Retail stand-alone', 'Total']
weight_factor_dict = {idf_names[0]:weight_factor[0],idf_names[1]:weight_factor[1],idf_names[2]:weight_factor[2]}
stats_table_seperate = defaultdict(list)
k=0
for building_type in idf_names:
#stats_table_seperate[k].append(round(np.mean(elect_annual_main[building_type])*weight_factor_dict[building_type]/1000,2))
#stats_table_seperate[k].append(round(np.std(elect_annual_main[building_type])*weight_factor_dict[building_type]/1000,2))
#stats_table_seperate[k].append(round(np.std(elect_annual_main[building_type])*100/np.mean(elect_annual_main[building_type]),2))
stats_table_seperate[k].append(round(np.mean(gas_annual_main[building_type])*weight_factor_dict[building_type]/1000,2))
stats_table_seperate[k].append(round(np.std(gas_annual_main[building_type])*weight_factor_dict[building_type]/1000,2))
stats_table_seperate[k].append(round(np.std(gas_annual_main[building_type])*100/np.mean(gas_annual_main[building_type]),2))
stats_table_seperate[k].append(round(np.mean(cool_annual_main[building_type])*weight_factor_dict[building_type]/1000,2))
stats_table_seperate[k].append(round(np.std(cool_annual_main[building_type])*weight_factor_dict[building_type]/1000,2))
stats_table_seperate[k].append(round(np.std(cool_annual_main[building_type])*100/np.mean(cool_annual_main[building_type]),2))
k = k+1
stats_table_total = []
#stats_table_total.append(round(np.mean(total_elect_annual_main)/1000,2))
#stats_table_total.append(round(np.std(total_elect_annual_main)/1000,2))
#stats_table_total.append(round(np.std(total_elect_annual_main)*100/np.mean(total_elect_annual_main),2))
stats_table_total.append(round(np.mean(total_gas_annual_main)/1000,2))
stats_table_total.append(round(np.std(total_gas_annual_main)/1000,2))
stats_table_total.append(round(np.std(total_gas_annual_main)*100/np.mean(total_gas_annual_main),2))
stats_table_total.append(round(np.mean(total_cool_annual_main)/1000,2))
stats_table_total.append(round(np.std(total_cool_annual_main)/1000,2))
stats_table_total.append(round(np.std(total_cool_annual_main)*100/np.mean(total_cool_annual_main),2))
statistics_table = {#'Elect Mean': [stats_table_seperate[0][0],stats_table_seperate[1][0],stats_table_seperate[2][0],stats_table_total[0]],
#'Elect STD': [stats_table_seperate[0][1],stats_table_seperate[1][1],stats_table_seperate[2][1],stats_table_total[1]],
#'CV \% Elect': [stats_table_seperate[0][2],stats_table_seperate[1][2],stats_table_seperate[2][2],stats_table_total[2]],
'Heat Mean': [stats_table_seperate[0][3],stats_table_seperate[1][3],stats_table_seperate[2][3],stats_table_total[3]],
'Heat STD': [stats_table_seperate[0][4],stats_table_seperate[1][4],stats_table_seperate[2][4],stats_table_total[4]],
'CV \% Heat': [stats_table_seperate[0][5],stats_table_seperate[1][5],stats_table_seperate[2][5],stats_table_total[5]],
'Cool Mean': [stats_table_seperate[0][6],stats_table_seperate[1][6],stats_table_seperate[2][6],stats_table_total[6]],
'Cool STD': [stats_table_seperate[0][7],stats_table_seperate[1][7],stats_table_seperate[2][7],stats_table_total[7]],
'CV \% Cool': [stats_table_seperate[0][8],stats_table_seperate[1][8],stats_table_seperate[2][8],stats_table_total[8]]}
df_statistics_table= pd.DataFrame(statistics_table)
df_statistics_table.insert(0, "", cols_revised, True)
for i in range(1,len(df_statistics_table.columns)*2):
if i%2!=0:
df_statistics_table.insert(i, "&", ["&"]*len(df_statistics_table), True)
df_statistics_table.insert(i, "\\\\ \hline", ["\\\\ \hline"]*len(df_statistics_table), True)
df_statistics_table.to_csv(os.path.join(results_compare,'stats_main_seperate_demand_WC_table.csv'))
stats_table_seperate = defaultdict(list)
weight_factor_dict = {idf_names[0]:weight_factor[0],idf_names[1]:weight_factor[1],idf_names[2]:weight_factor[2]}
k=0
for building_type in idf_names:
#print(
#building_type,np.std(elect_annual[building_type])*weight_factor_dict[building_type]/1000)
#stats_table_seperate[k].append(round(np.mean(elect_annual[building_type])*weight_factor_dict[building_type]/1000,2))
#stats_table_seperate[k].append(round(np.std(elect_annual[building_type])*weight_factor_dict[building_type]/1000,2))
#stats_table_seperate[k].append(round(np.std(elect_annual[building_type])*100/np.mean(elect_annual[building_type]),2))
stats_table_seperate[k].append(round(np.mean(gas_annual[building_type])*weight_factor_dict[building_type]/1000,2))
stats_table_seperate[k].append(round(np.std(gas_annual[building_type])*weight_factor_dict[building_type]/1000,2))
stats_table_seperate[k].append(round(np.std(gas_annual[building_type])*100/np.mean(gas_annual[building_type]),2))
stats_table_seperate[k].append(round(np.mean(cool_annual[building_type])*weight_factor_dict[building_type]/1000,2))
stats_table_seperate[k].append(round(np.std(cool_annual[building_type])*weight_factor_dict[building_type]/1000,2))
stats_table_seperate[k].append(round(np.std(cool_annual[building_type])*100/np.mean(cool_annual[building_type]),2))
k = k+1
stats_table_total = []
#stats_table_total.append(round(np.mean(total_elect_annual)/1000,2))
#stats_table_total.append(round(np.std(total_elect_annual)/1000,2))
#stats_table_total.append(round(np.std(total_elect_annual)*100/np.mean(total_elect_annual),2))
stats_table_total.append(round(np.mean(total_gas_annual)/1000,2))
stats_table_total.append(round(np.std(total_gas_annual)/1000,2))
stats_table_total.append(round(np.std(total_gas_annual)*100/np.mean(total_gas_annual),2))
stats_table_total.append(round(np.mean(total_cool_annual)/1000,2))
stats_table_total.append(round(np.std(total_cool_annual)/1000,2))
stats_table_total.append(round(np.std(total_cool_annual)*100/np.mean(total_cool_annual),2))
statistics_table = {#'Elect Mean': [stats_table_seperate[0][0],stats_table_seperate[1][0],stats_table_seperate[2][0],stats_table_total[0]],
#'Elect STD': [stats_table_seperate[0][1],stats_table_seperate[1][1],stats_table_seperate[2][1],stats_table_total[1]],
#'CV \% Elect': [stats_table_seperate[0][2],stats_table_seperate[1][2],stats_table_seperate[2][2],stats_table_total[2]],
'Heat Mean': [stats_table_seperate[0][3],stats_table_seperate[1][3],stats_table_seperate[2][3],stats_table_total[3]],
'Heat STD': [stats_table_seperate[0][4],stats_table_seperate[1][4],stats_table_seperate[2][4],stats_table_total[4]],
'CV \% Heat': [stats_table_seperate[0][5],stats_table_seperate[1][5],stats_table_seperate[2][5],stats_table_total[5]],
'Cool Mean': [stats_table_seperate[0][6],stats_table_seperate[1][6],stats_table_seperate[2][6],stats_table_total[6]],
'Cool STD': [stats_table_seperate[0][7],stats_table_seperate[1][7],stats_table_seperate[2][7],stats_table_total[7]],
'CV \% Cool': [stats_table_seperate[0][8],stats_table_seperate[1][8],stats_table_seperate[2][8],stats_table_total[8]]}
df_statistics_table= pd.DataFrame(statistics_table)
df_statistics_table.insert(0, "", cols_revised, True)
for i in range(1,len(df_statistics_table.columns)*2):
if i%2!=0:
df_statistics_table.insert(i, "&", ["&"]*len(df_statistics_table), True)
df_statistics_table.insert(i, "\\\\ \hline", ["\\\\ \hline"]*len(df_statistics_table), True)
df_statistics_table.to_csv(os.path.join(results_compare,'stats_scenario_seperate_WC_demand_table.csv'))
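# Hedged helper sketch: the coefficient of variation reported in the tables
# above is CV% = 100 * std / mean (numpy's population std, as used above).
def _demo_cv_percent(values):
    return round(np.std(values) * 100 / np.mean(values), 2)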
def bar_energy_demands(mode):
SMALL_SIZE = 30
MEDIUM_SIZE = 32
BIGGER_SIZE = 38
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
plt.rcParams['axes.facecolor'] = 'white'
plt.rcParams['axes.grid'] = False
plt.rcParams['axes.edgecolor'] = 'black'
cmap = plt.cm.RdYlGn
plt.rcParams["figure.figsize"] = (45,15)
if mode=='seperate':
for building_type in idf_names:
print(building_type, 'highest elect', years[elect_annual_main[building_type].index(np.max(elect_annual_main[building_type]))],
' heat', years[gas_annual_main[building_type].index(np.max(gas_annual_main[building_type]))],
' cool', years[cool_annual_main[building_type].index(np.max(cool_annual_main[building_type]))],
)
print(building_type, 'lowest elect', years[elect_annual_main[building_type].index(np.min(elect_annual_main[building_type]))],
' heat', years[gas_annual_main[building_type].index(np.min(gas_annual_main[building_type]))],
' cool', years[cool_annual_main[building_type].index(np.min(cool_annual_main[building_type]))],
)
            n = len(gas_annual_main[building_type])
            r = np.arange(n)
width = 0.25
plt.figure()
#plt.bar(r,[number/1000 for number in elect_annual_main[building_type]],width = width,color='darkorange', edgecolor = 'black',label='Annual Electricity')
plt.bar(r,[number/1000 for number in gas_annual_main[building_type]],width = width,color='darkred', edgecolor = 'black',label = 'Annual Hot Water')
plt.bar(r+width,[number/1000 for number in cool_annual_main[building_type]],width = width,color='darkblue', edgecolor = 'black',label = 'Annual Cooling')
plt.xlabel('Weather Files')
plt.ylabel('Energy Demands (MWh)')
plt.xticks(r + width/2,years)
#plt.yticks
plt.legend(loc='center left')
plt.ticklabel_format(style='plain', axis='y')
#plt.title('annual energy demands of' + building_type)
plt.savefig(os.path.join(results_compare,building_type+'_bar_annual_main_demands_WC'+'.png'),dpi=100,facecolor='w',bbox_inches='tight')
plt.close()
elif mode =='total':
print(#'total', 'highest elect', years[total_elect_annual_main.index(np.max(total_elect_annual_main))],
' heat', years[total_gas_annual_main.index(np.max(total_gas_annual_main))],
' cool', years[total_cool_annual_main.index(np.max(total_cool_annual_main))],
)
print(#'total', 'lowest elect', years[total_elect_annual_main.index(np.min(total_elect_annual_main))],
' heat', years[total_gas_annual_main.index(np.min(total_gas_annual_main))],
' cool', years[total_cool_annual_main.index(np.min(total_cool_annual_main))],
)
print(#'total range','elect', (np.max(total_elect_annual_main)-np.min(total_elect_annual_main))/1000,
'heat',(np.max(total_gas_annual_main)-np.min(total_gas_annual_main))/1000,
'cool', (np.max(total_cool_annual_main)-np.min(total_cool_annual_main))/1000)
n=len(total_elect_annual_main)
r = np.arange(n)
width = 0.25
plt.figure()
#plt.bar(r,[number/1000 for number in total_elect_annual_main],width = width,color='darkorange', edgecolor = 'black',label='Annual Electricity')
plt.bar(r,[number/1000 for number in total_gas_annual_main],width = width,color='darkred', edgecolor = 'black',label = 'Annual Hot Water')
plt.bar(r+width,[number/1000 for number in total_cool_annual_main],width = width,color='darkblue', edgecolor = 'black',label = 'Annual Cooling')
plt.xlabel('Weather Files')
plt.ylabel('Energy Demands (MWh)')
plt.xticks(r + width/2,years)
#plt.yticks(fontsize=BIGGER_SIZE)
plt.legend(loc='center left')
plt.ticklabel_format(style='plain', axis='y')
#plt.title('Total annual energy demands')
plt.savefig(os.path.join(results_compare,'total_annual_main_demands_WC'+'.png'),dpi=100,facecolor='w',bbox_inches='tight')
plt.close()
def hist_scenarios(mode):
SMALL_SIZE = 20
MEDIUM_SIZE = 24
BIGGER_SIZE = 28
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
plt.rcParams['axes.facecolor'] = 'white'
plt.rcParams['axes.grid'] = False
plt.rcParams['axes.edgecolor'] = 'black'
cmap = plt.cm.RdYlGn
plt.rcParams["figure.figsize"] = (10,8)
if mode=='seperate':
for building_type in idf_names:
#plt.ylabel('Percentage %')
#plt.hist([number/1000 for number in elect_annual[building_type]],color='darkorange',bins=10,weights=np.ones(len([number/1000 for number in elect_annual[building_type]]))*100 / len([number/1000 for number in elect_annual[building_type]]))
#plt.xlabel('Total Electricity Demand (MWh)')
#plt.savefig(os.path.join(results_compare,'hist_'+building_type+'_annual_main_electricity_WC_demand'+'.png'),dpi=100,facecolor='w')
plt.close()
plt.ylabel('Percentage %')
plt.hist([number/1000 for number in gas_annual[building_type]],color='darkred',bins=10,weights=np.ones(len([number/1000 for number in gas_annual[building_type]]))*100 / len([number/1000 for number in gas_annual[building_type]]))
plt.xlabel('Total Heating Demand (MWh)')
plt.savefig(os.path.join(results_compare,'hist_'+building_type+'_annual_main_heating_WC_demand'+'.png'),dpi=100,facecolor='w')
plt.close()
plt.ylabel('Percentage %')
plt.hist([number/1000 for number in cool_annual[building_type]],color='darkblue',bins=10,weights=np.ones(len([number/1000 for number in cool_annual[building_type]]))*100 / len([number/1000 for number in cool_annual[building_type]]))
plt.xlabel('Total Cooling Demand (MWh)')
plt.savefig(os.path.join(results_compare,'hist_'+building_type+'_annual_main_cooling_WC_demand'+'.png'),dpi=100,facecolor='w')
plt.close()
elif mode =='total':
#plt.ylabel('Percentage %')
#plt.hist([number/1000 for number in total_elect_annual],color='darkorange',bins=10,weights=np.ones(len([number/1000 for number in total_elect_annual]))*100 / len([number/1000 for number in total_elect_annual]))
#plt.xlabel('Total Electricity Demand (MWh)')
#plt.savefig(os.path.join(results_compare,'hist_total_annual_main_electricity_WC_demand'+'.png'),dpi=100,facecolor='w')
plt.close()
plt.hist([number/1000 for number in total_gas_annual],color='darkred',bins=10,weights=np.ones(len([number/1000 for number in total_gas_annual]))*100 / len([number/1000 for number in total_gas_annual]))
plt.ylabel('Percentage %')
plt.xlabel('Total Heating Demand (MWh)')
plt.savefig(os.path.join(results_compare,'hist_total_annual_main_heating_WC_demand'+'.png'),dpi=100,facecolor='w')
plt.close()
plt.hist([number/1000 for number in total_gas_annual],color='darkblue',bins=10,weights=np.ones(len([number/1000 for number in total_gas_annual]))*100 / len([number/1000 for number in total_gas_annual]))
plt.ylabel('Percentage %')
plt.xlabel('Total Cooling Demand (MWh)')
plt.savefig(os.path.join(results_compare,'hist_total_annual_main_cooling_WC_demand'+'.png'),dpi=100,facecolor='w')
plt.close()
energy_demands()
generate_combo_plots('seperate')
generate_combo_plots('total')
#bar_energy_demands('seperate')
#bar_energy_demands('total')
#hist_scenarios('total')
#hist_scenarios('seperate')
#stats_energy_demands()
### Sizing of DES ###
def sizing():
global annual_df_object_sizing_main,annual_df_operation_sizing_main
annual_df_object_sizing_main= {}
annual_df_operation_sizing_main = {}
for key in dict_EPWs.keys():
for epw_file_name in dict_EPWs[key]:
output_prefix = 'total_'+epw_file_name+'_'
file_name = output_prefix+city+'_Discrete_EF_'+str(float(editable_data_sizing['renewable percentage']) )+'_design_'+str(int(editable_data_sizing['num_iterations']))+'_'+str(editable_data_sizing['population_size'])+'_'+str(editable_data_sizing['num_processors'])+'_processors'
annual_df_object_sizing_main[output_prefix]=pd.read_csv(os.path.join(sizing_path,file_name, 'objectives.csv'))
annual_df_operation_sizing_main[output_prefix]=pd.read_csv(os.path.join(sizing_path,file_name, 'sizing_all.csv'))
global annual_df_object_sizing_scenario, annual_df_operation_sizing_scenario
annual_df_object_sizing_scenario= {}
annual_df_operation_sizing_scenario = {}
for scenario in range(len(epw_names)):
output_prefix = 'total_'+epw_names[scenario]+'_'
file_name = output_prefix+city+'_Discrete_EF_'+str(float(editable_data_sizing['renewable percentage']) )+'_design_'+str(int(editable_data_sizing['num_iterations']))+'_'+str(editable_data_sizing['population_size'])+'_'+str(editable_data_sizing['num_processors'])+'_processors'
annual_df_object_sizing_scenario[output_prefix]=pd.read_csv(os.path.join(sizing_path,file_name , 'objectives.csv'))
annual_df_operation_sizing_scenario[output_prefix]=pd.read_csv(os.path.join(sizing_path,file_name, 'sizing_all.csv'))
def main_paretofront_sizing():
global sorted_annual_df_object_sizing_main,output_prefix_short
SMALL_SIZE = 22
MEDIUM_SIZE = 24
BIGGER_SIZE = 28
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
plt.rcParams['axes.facecolor'] = 'white'
plt.rcParams['axes.grid'] = False
plt.rcParams['axes.edgecolor'] = 'black'
cmap = plt.cm.RdYlGn
plt.rcParams["figure.figsize"] = (30,15)
plt.figure()
j=0
color = ["#"+''.join([random.choice('0123456789ABCDEF') for j in range(6)])
for i in range(27)]
sorted_cost = []
output_prefix_short ={}
marker = itertools.cycle(('v','+','s','^','o','x','*'))
for key in dict_EPWs.keys():
for epw_file_name in dict_EPWs[key]:
output_prefix = 'total_'+epw_file_name+'_'
output_prefix_short[output_prefix] = dict_EPWs_names[key][dict_EPWs[key].index(epw_file_name)]
annual_df_object_sizing_main[output_prefix]=annual_df_object_sizing_main[output_prefix].sort_values('Cost ($)')
annual_df_object_sizing_main[output_prefix]=annual_df_object_sizing_main[output_prefix].reset_index()
            if key == 'AMYs':
year_selected_number=int(output_prefix_short[output_prefix].replace('AMY',''))
if year_selected_number==2019 or year_selected_number==2018 or year_selected_number==2016 or year_selected_number==2014 or year_selected_number==2012:
sorted_cost.append(annual_df_object_sizing_main[output_prefix]['Cost ($)'][0]/10**6)
            elif key == 'TMYs':
if epw_file_name=='USA_UT_Salt.Lake.City.Intl.AP.725720_TMY3':
sorted_cost.append(annual_df_object_sizing_main[output_prefix]['Cost ($)'][0]/10**6)
else:
sorted_cost.append(annual_df_object_sizing_main[output_prefix]['Cost ($)'][0]/10**6)
sorted_cost = sorted(sorted_cost)
sorted_annual_df_object_sizing_main = {}
for i in sorted_cost:
for key in dict_EPWs.keys():
for epw_file_name in dict_EPWs[key]:
output_prefix = 'total_'+epw_file_name+'_'
if annual_df_object_sizing_main[output_prefix]['Cost ($)'][0]/10**6 == i:
sorted_annual_df_object_sizing_main[output_prefix] =annual_df_object_sizing_main[output_prefix]
sorted_cost_scenario = []
for scenario in range(len(epw_names)):
output_prefix = 'total_'+epw_names[scenario]+'_'
annual_df_object_sizing_scenario[output_prefix]=annual_df_object_sizing_scenario[output_prefix].sort_values('Cost ($)')
annual_df_object_sizing_scenario[output_prefix]=annual_df_object_sizing_scenario[output_prefix].reset_index()
sorted_cost_scenario.append(annual_df_object_sizing_scenario[output_prefix]['Cost ($)'][0]/10**6)
sorted_cost_scenario = sorted(sorted_cost_scenario)
sorted_annual_df_object_sizing_scenario = {}
for i in sorted_cost_scenario:
for scenario in range(len(epw_names)):
output_prefix = 'total_'+epw_names[scenario]+'_'
if annual_df_object_sizing_scenario[output_prefix]['Cost ($)'][0]/10**6 == i:
sorted_annual_df_object_sizing_scenario[output_prefix] =annual_df_object_sizing_scenario[output_prefix]
j=0
#fig, ax = plt.subplots()
for key in sorted_annual_df_object_sizing_main:
output_prefix = key
cost = [i/10**6 for i in sorted_annual_df_object_sizing_main[output_prefix]['Cost ($)']]
emissions = [j/10**6 for j in sorted_annual_df_object_sizing_main[output_prefix]['Emission (kg CO2)']]
label = output_prefix_short[output_prefix]
#plt.scatter(cost,emissions,c=color[j], s=100, cmap=cmap,marker=next(marker))
#plt.title('Cost and emissions trade-off')
if j==0:
plt.annotate(label,xy=(cost[-1], emissions[-1]),xytext=(cost[-1]*1.05, emissions[-1]),
arrowprops=dict(arrowstyle="->"),fontsize=MEDIUM_SIZE)
color = 'tab:blue'
elif j==1:
plt.annotate(label,xy=(cost[0], emissions[0]),xytext=(cost[0]*0.9, emissions[0]*1.15),
arrowprops=dict(arrowstyle="->"),fontsize=MEDIUM_SIZE)
color = 'tab:orange'
elif j==2:
plt.annotate(label,xy=(cost[-1], emissions[-1]),xytext=(cost[-1]*1.05, emissions[-1]*0.8),
arrowprops=dict(arrowstyle="->"),fontsize=MEDIUM_SIZE)
color = 'tab:green'
elif j==3:
plt.annotate(label,xy=(cost[0], emissions[0]),xytext=(cost[0]*0.85, emissions[0]*1.1),
arrowprops=dict(arrowstyle="->"),fontsize=MEDIUM_SIZE)
color = 'tab:purple'
elif j==4:
plt.annotate(label,xy=(cost[-1], emissions[-1]),xytext=(cost[-1]*1.05, emissions[-1]*0.8),
arrowprops=dict(arrowstyle="->"),fontsize=MEDIUM_SIZE)
color = 'black'
elif j==5:
plt.annotate(label,xy=(cost[0], emissions[0]),xytext=(cost[0]*0.85, emissions[0]*1.1),
arrowprops=dict(arrowstyle="->"),fontsize=MEDIUM_SIZE)
color = 'tab:red'
elif j==6:
plt.annotate(label,xy=(cost[-1], emissions[-1]),xytext=(cost[-1]*0.9, emissions[-1]*1.4),
arrowprops=dict(arrowstyle="->"),fontsize=MEDIUM_SIZE)
color = 'tab:cyan'
elif j==7:
plt.annotate(label,xy=(cost[0], emissions[0]),xytext=(cost[0]*0.85, emissions[0]*1.2),
arrowprops=dict(arrowstyle="->"),fontsize=MEDIUM_SIZE)
color = 'tab:olive'
plt.scatter(cost,emissions,c=color, s=100, cmap=cmap,marker=next(marker))
j = j+1
j=0
#plt.legend()
plt.xlabel("Cost (million $)")
plt.ylabel("Emissions (million kg $CO_2$)")
plt.savefig(os.path.join(results_compare ,'ParetoFront_sizing.png'),dpi=100,facecolor='w',bbox_inches='tight')
def scenario_paretofront_sizing():
SMALL_SIZE = 22
MEDIUM_SIZE = 24
BIGGER_SIZE = 28
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
plt.rcParams['axes.facecolor'] = 'white'
plt.rcParams['axes.grid'] = False
plt.rcParams['axes.edgecolor'] = 'black'
cmap = plt.cm.RdYlGn
plt.rcParams["figure.figsize"] = (30,15)
plt.figure()
j=0
sorted_cost = []
output_prefix_short ={}
sorted_cost_scenario = []
for scenario in range(len(epw_names)):
output_prefix = 'total_'+epw_names[scenario]+'_'
annual_df_object_sizing_scenario[output_prefix]=annual_df_object_sizing_scenario[output_prefix].sort_values('Cost ($)')
annual_df_object_sizing_scenario[output_prefix]=annual_df_object_sizing_scenario[output_prefix].reset_index()
sorted_cost_scenario.append(annual_df_object_sizing_scenario[output_prefix]['Cost ($)'][0]/10**6)
sorted_cost_scenario = sorted(sorted_cost_scenario)
sorted_annual_df_object_sizing_scenario = {}
for i in sorted_cost_scenario:
for scenario in range(len(epw_names)):
output_prefix = 'total_'+epw_names[scenario]+'_'
label = output_prefix.replace('_','').replace('total','')
if annual_df_object_sizing_scenario[output_prefix]['Cost ($)'][0]/10**6 == i and (int(label[1])==0 or int(label[1])==5 or int(label[1])==9) and (int(label[3])==0 or int(label[3])==5 or int(label[3])==9):
sorted_annual_df_object_sizing_scenario[output_prefix] =annual_df_object_sizing_scenario[output_prefix]
j=0
marker_list = ['v','+','s','^','o','x','*','s','>','<']
color_list= [ 'tab:blue', 'tab:orange','tab:green','black','yellow','tab:red','tab:cyan','tab:olive','peru','tab:purple']
for key in sorted_annual_df_object_sizing_scenario:
output_prefix = key
cost = [i/10**6 for i in sorted_annual_df_object_sizing_scenario[output_prefix]['Cost ($)']]
emissions = [j/10**6 for j in sorted_annual_df_object_sizing_scenario[output_prefix]['Emission (kg CO2)']]
label = key.replace('_','').replace('total','')
int_marker= int(label[1])
int_color = int(label[3])
#print(int_marker, type(int_marker), marker[int_marker],len( marker))
marker = marker_list[int_marker]
color = color_list[int_color]
plt.scatter(cost,emissions,c=color, s=300, cmap=cmap, label = label,marker=marker)
#plt.title('Cost and emissions trade-off')
plt.xlabel("Cost (million $)")
plt.ylabel("Emissions (million kg $CO_2$)")
if int(label[1])==0:
label_T='Tmin'
elif int(label[1])==5:
label_T='Tmed'
elif int(label[1])==9:
label_T='Tmax'
if int(label[3])==0:
label_S='Smin'
elif int(label[3])==5:
label_S='Smed'
elif int(label[3])==9:
label_S='Smax'
label = label_T + label_S
if j == 0:
plt.annotate(label,xy=(cost[0], emissions[0]),xytext=(cost[0]*0.90, emissions[0]*1.1),
arrowprops=dict(arrowstyle="->"),fontsize=MEDIUM_SIZE)
else:
plt.annotate(label,xy=(cost[0], emissions[0]),xytext=(cost[0]*0.88, emissions[0]*1.1),
arrowprops=dict(arrowstyle="->"),fontsize=MEDIUM_SIZE)
j=j+1
#plt.legend()
plt.savefig(os.path.join(results_compare ,'scenario_ParetoFront_sizing.png'),dpi=100,facecolor='w',bbox_inches='tight')
def stats_scenario_sizing():
global sorted_annual_df_operation_sizing_scenario
statistics_table = {}
mean_table = defaultdict(list)
std_table = defaultdict(list)
CV_table = defaultdict(list)
cost_points= defaultdict(list)
emissions_points=defaultdict(list)
label_points=defaultdict(lambda: defaultdict(list))
sorted_cost = []
output_prefix_short ={}
for scenario in range(len(epw_names)):
output_prefix = 'total_'+epw_names[scenario]+'_'
annual_df_operation_sizing_scenario[output_prefix]=annual_df_operation_sizing_scenario[output_prefix].sort_values('Cost ($)')
annual_df_operation_sizing_scenario[output_prefix]=annual_df_operation_sizing_scenario[output_prefix].reset_index()
sorted_cost.append(annual_df_operation_sizing_scenario[output_prefix]['Cost ($)'][0]/10**6)
sorted_cost = sorted(sorted_cost)
sorted_annual_df_operation_sizing_scenario = {}
for i in sorted_cost:
for scenario in range(len(epw_names)):
output_prefix = 'total_'+epw_names[scenario]+'_'
if annual_df_operation_sizing_scenario[output_prefix]['Cost ($)'][0]/10**6 == i:
sorted_annual_df_operation_sizing_scenario[output_prefix] =annual_df_operation_sizing_scenario[output_prefix]
cols = ['Boilers Capacity (kW)', 'CHP Electricty Capacity (kW)', 'Battery Capacity (kW)','Solar Area (m^2)','Swept Area (m^2)','Emission (kg CO2)','Cost ($)']
cols_revised = ['Boilers (kW)', 'CHP (kW)', 'Battery (kW)','Solar (m^2)','Wind (m^2)','Emissions (million ton)','Cost (million \$)']
for point in range(population_size_sizing):
for scenario in range(len(epw_names)):
output_prefix = 'total_'+epw_names[scenario]+'_'
cost_points[point].append(sorted_annual_df_operation_sizing_scenario[output_prefix]['Cost ($)'][point])
emissions_points[point].append(sorted_annual_df_operation_sizing_scenario[output_prefix]['Emission (kg CO2)'][point])
for component in cols:
label_points[point][component].append(sorted_annual_df_operation_sizing_scenario[output_prefix][component][point])
for point in range(population_size_sizing):
for component in cols:
if len(label_points[point][component])!=0:
if component=='Emission (kg CO2)' or component=='Cost ($)':
std_table[point].append(round(statistics.stdev(label_points[point][component])/10**6,2))
mean_table[point].append(round(np.mean(label_points[point][component])/10**6,2))
else:
std_table[point].append(round(statistics.stdev(label_points[point][component]),2))
mean_table[point].append(round(np.mean(label_points[point][component]),2))
if np.mean(label_points[point][component])!=0:
CV_table[point].append(round(statistics.stdev(label_points[point][component])*100/np.mean(label_points[point][component]),2))
else:
CV_table[point].append(0)
statistics_table = {'Mean PP1': mean_table[0], 'STD PP1': std_table[0], 'CV \% PP1': CV_table[0],
'Mean medium cost': mean_table[24], 'STD medium cost': std_table[24], 'CV \% PP5': CV_table[24],
'Mean max cost': mean_table[49], 'STD max cost': std_table[49], 'CV \% PP9': CV_table[49]
}
df_statistics_table= | pd.DataFrame(statistics_table) | pandas.DataFrame |
from __future__ import division
import numpy as np
import pandas as pd
from sklearn.dummy import DummyClassifier
from sklearn.model_selection import StratifiedShuffleSplit, LeavePGroupsOut
from sklearn.utils import resample, check_X_y
from sklearn.utils.validation import check_is_fitted
from prep import SITES
from metrics import (
calc_ccc,
calc_csmf_accuracy_from_csmf,
correct_csmf_accuracy
)
class RandomClassifier(DummyClassifier):
"""Classifier to generate predictions uniformly at random
This subclasses sklearn.dummy.DummyClassifier and overrides the predict
method.
"""
def __init__(self, random_state=None, **kwargs):
self.strategy = 'uniform'
self.constant = 1
self.random_state = random_state
for arg, val in kwargs.items():
setattr(self, arg, val)
def predict(self, X):
"""Perform classification on test X.
        This overrides the default behavior of sklearn classifiers by
        returning both individual- and population-level predictions. This is
necessary because other classifiers estimate population distributions
in a manner slightly de-coupled from individual predictions.
Args:
X (dataframe): samples by features to test
Returns:
tuple:
* predictions (series): individual level prediction
* csmf: (series): population level predictions
"""
        # This is a hack that lets this classifier be used to test the
        # configuration where the default setup is used. With the default,
        # None is passed to ``clf.fit()``, so ``clf.predict()`` does not know
        # which classes to predict.
        if not hasattr(self, 'classes_'):
            self.fit(X, X.index)
pred = super(RandomClassifier, self).predict(X)
indiv = pd.Series(pred, index=X.index)
csmf = indiv.value_counts() / len(indiv)
return indiv, csmf
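# Hedged usage sketch (toy data; names are illustrative only):
def _demo_random_classifier():
    rng = np.random.RandomState(0)
    X = pd.DataFrame(rng.rand(10, 3), index=['id%d' % i for i in range(10)])
    y = pd.Series(rng.choice(['cause_a', 'cause_b'], size=10), index=X.index)
    indiv, csmf = RandomClassifier(random_state=0).fit(X, y).predict(X)
    print(indiv.head())
    print(csmf)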
def prediction_accuracy(clf, X_train, y_train, X_test, y_test,
resample_test=True, resample_size=1):
"""Mesaure prediction accuracy of a classifier.
Args:
clf: sklearn-like classifier object. It must implement a fit method
with the signature ``(X, y) --> self`` and a predict method with
a signature ``(X) --> (y, csmf)``
X_train (dataframe): samples by features matrix used for training
y_train (series): target values used for training
X_test (dataframe): samples by features matrix used for testing
y_test (series): target values to compare predictions against
resample_test (bool): resample test data to a dirichlet distribution
resample_size (float): scalar applied to n of samples to determine
output resample size.
Returns:
tuple:
* preds (dataframe): two column dataframe with actual and predicted
values for all observations
* csmfs (dataframe): two column dataframe with actual and predicted
cause-specific mortality fraction for each cause
* trained (dataframe): matrix of learned associations between
cause/symptom pairs from the training data
* ccc (dataframe): chance-correct concordance for each cause in one row
* accuracy (dataframe): summary accuracy measures in one row
"""
y_pred, csmf_pred = clf.fit(X_train, y_train).predict(X_test)
    # All the outputs should be dataframes which can be concatenated and
# saved without the index
preds = pd.concat([y_test, y_pred], axis=1)
preds.index.name = 'ID'
preds.columns = ['actual', 'prediction']
preds.reset_index(inplace=True)
# Only calculate CCC for real causes. The classifier may predict causes
# which are not in the set of true causes. This primarily occurs when
# the classifier is run using default settings and no training or when it
# isn't properly learning impossible causes.
ccc = pd.DataFrame([{cause: calc_ccc(cause, y_test, y_pred)
for cause in y_test.unique()}])
# It's possible for some classes predictions not to occur
# These would result in missingness when aligning the csmf series
csmf_actual = y_test.value_counts(dropna=False, normalize=True)
csmf = | pd.concat([csmf_actual, csmf_pred], axis=1) | pandas.concat |
# Author: <NAME>
# Homework 1
# CAP 5610: Machine Learning
# required libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import random
from scipy.stats import chi2_contingency
# import data
train_df = pd.read_csv('/Users/mdjibanulhaquejiban/PhD_CRCV/Semesters/Spring2021/ML/HW/HW1/Titanic/train.csv')
test_df = pd.read_csv('/Users/mdjibanulhaquejiban/PhD_CRCV/Semesters/Spring2021/ML/HW/HW1/Titanic/test.csv')
combine = [train_df, test_df]
#######################
## Q1
print(train_df)
print(train_df.describe())
print(train_df.info())
## Q2-Q4
print(train_df.info())
## Q5
print(train_df.info())
print(test_df.info())
## Q6
print(train_df.head())
## Q7
# create a sub-dataframe with only numerical features
train_df_num = train_df[['Age', 'SibSp', 'Parch', 'Fare']]
print(train_df_num.describe())
## Q8
# create a sub-dataframe with only categorical features
train_df_cat = train_df[['Survived', 'Pclass', 'Name', 'Sex', 'Ticket', 'Cabin', 'Embarked', 'PassengerId']]
train_df_cat = train_df_cat.astype('object')
# print(train_df_cat.info())
train_df_cat.describe(include=[object])
## Q9
contigency_table= pd.crosstab(train_df['Pclass'], train_df['Survived'])
print('The contigency table:')
print('\n')
print(contigency_table)
# Chi-Sq test
chi2, p_value, deg_freedom, expected = chi2_contingency(contigency_table)
print('\n')
print('The test statistic is', chi2)
print('\n')
print('The p-value of the test is', p_value)
print('\n')
print('Degrees of freedom is', deg_freedom)
print('\n')
print('The expected frequencies, based on the marginal sums of the table. \n', expected)
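# Hedged follow-up (illustration): the usual read-out of the test at the 5% level
print('Reject independence of Pclass and Survived (alpha = 0.05):', p_value < 0.05)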
## Q10
female = np.where((train_df['Sex']=='female'))
female_survived = np.where((train_df['Sex']=='female') & (train_df['Survived'] == 1))
print("The ratio of female survivals in training set is", len(female_survived[0])/len(female[0]))
## Chi-Sq
contigency_table= pd.crosstab(train_df['Sex'], train_df['Survived'])
print('The contigency table:')
print('\n')
print(contigency_table)
# Chi-Sq test
chi2, p_value, deg_freedom, expected = chi2_contingency(contigency_table)
print('\n')
print('The test statistic is', chi2)
print('\n')
print('The p-value of the test is', p_value)
## Q11
survived_age = train_df.loc[np.where((train_df['Survived'] == 1))]['Age']
not_survived_age = train_df.loc[np.where((train_df['Survived'] == 0))]['Age']
# survived histogram
survived_age.hist(bins=21, color='orange')
plt.xlabel('Age')
plt.ylabel('Frequency')
plt.title('Distribution of Age of People who survived (= 1)')
plt.show()
plt.close()
# not survived histogram
not_survived_age.hist(bins=21, color='tomato')
plt.xlabel('Age')
plt.ylabel('Frequency')
plt.title('Distribution of Age of People who did not survive (= 0)')
plt.show()
plt.close()
## Q12
# Create data
not_survived_pclass1_age = train_df.loc[np.where((train_df['Pclass'] == 1) & (train_df['Survived'] == 0))]['Age']
not_survived_pclass2_age = train_df.loc[np.where((train_df['Pclass'] == 2) & (train_df['Survived'] == 0))]['Age']
not_survived_pclass3_age = train_df.loc[np.where((train_df['Pclass'] == 3) & (train_df['Survived'] == 0))]['Age']
survived_pclass1_age = train_df.loc[np.where((train_df['Pclass'] == 1) & (train_df['Survived'] == 1))]['Age']
survived_pclass2_age = train_df.loc[np.where((train_df['Pclass'] == 2) & (train_df['Survived'] == 1))]['Age']
survived_pclass3_age = train_df.loc[np.where((train_df['Pclass'] == 3) & (train_df['Survived'] == 1))]['Age']
# plot figures
fig, axs = plt.subplots(3,2,figsize=(12,12))
fig.suptitle('Distributions of Age by Pclass and Survived')
axs[0,0].hist(not_survived_pclass1_age, bins=21, color='tomato')
axs[0,0].set_title('Pclass = 1 | Survived = 0')
axs[1,0].hist(not_survived_pclass2_age, bins=21, color='tomato')
axs[1,0].set_title('Pclass = 2 | Survived = 0')
axs[2,0].hist(not_survived_pclass3_age, bins=21, color='tomato')
axs[2,0].set_title('Pclass = 3 | Survived = 0')
axs[0,1].hist(survived_pclass1_age, bins=21, color='orange')
axs[0,1].set_title('Pclass = 1 | Survived = 1')
axs[1,1].hist(survived_pclass2_age, bins=21, color='orange')
axs[1,1].set_title('Pclass = 2 | Survived = 1')
axs[2,1].hist(survived_pclass3_age, bins=21, color='orange')
axs[2,1].set_title('Pclass = 3 | Survived = 1')
plt.show()
plt.close()
# Count number of passengers by pclass
train_df.groupby(['Pclass'])['PassengerId'].count()
train_df.groupby(['Pclass', 'Survived'])['PassengerId'].count()
## Q13
train_df_q13 = train_df.groupby(['Embarked', 'Survived', 'Sex'])['Fare'].mean()
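# Note: train_df_q13 has a (Embarked, Survived, Sex) MultiIndex sorted as
# C/Q/S x 0/1 x female/male, so positions 0-11 run
# (C,0,female), (C,0,male), (C,1,female), ..., (S,1,male);
# the hard-coded slices below rely on that ordering.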
# plot figures
fig, axs = plt.subplots(3,2,figsize=(11,11))
fig.suptitle('Distributions of Average Fare by Embarked, Survived and Sex')
axs[0,0].bar(['female', 'male'],train_df_q13[8:10].values, color='tomato')
axs[0,0].set_title('Embarked = S | Survived = 0')
axs[0,0].set_ylabel('Average Fare')
axs[1,0].bar(['female', 'male'],train_df_q13[:2].values, color='tomato')
axs[1,0].set_title('Embarked = C | Survived = 0')
axs[1,0].set_ylabel('Average Fare')
axs[2,0].bar(['female', 'male'],train_df_q13[4:6].values, color='tomato')
axs[2,0].set_title('Embarked = Q | Survived = 0')
axs[2,0].set_ylabel('Average Fare')
axs[0,1].bar(['female', 'male'],train_df_q13[10:12].values, color='orange')
axs[0,1].set_title('Embarked = S | Survived = 1')
axs[1,1].bar(['female', 'male'],train_df_q13[2:4].values, color='orange')
axs[1,1].set_title('Embarked = C | Survived = 1')
axs[2,1].bar(['female', 'male'],train_df_q13[6:8].values, color='orange')
axs[2,1].set_title('Embarked = Q | Survived = 1')
plt.show()
plt.close()
train_df.groupby(['Embarked', 'Survived', 'Sex'])['Fare'].mean()
train_df.groupby(['Embarked', 'Survived', 'Sex'])['PassengerId'].count()
## Q14
train_df.Ticket.duplicated().value_counts()
## Q15
train_df.Cabin.describe()
test_df.Cabin.describe()
## Q16
train_df['Gender'] = np.where(train_df['Sex']== 'male', 0, 1)
train_df.head(10)
## Q17
# calculate mean and standard deviation
mean = train_df['Age'].mean()
std = train_df['Age'].std()
print('Mean', mean)
print('Standard Deviation', std)
print('\n')
print('Estimating missing values in the Age feature.')
# impute missing Age values by randomly drawing a number between the standard
# deviation and the mean from a uniform distribution
def missing_value_imputation(value):
    if np.isnan(value):
        value = random.uniform(std, mean)
    return value
# call the above function
train_df['Age'] = train_df['Age'].apply(missing_value_imputation)
## Q18
# find the most frequent value
most_frequent_value = train_df['Embarked'].value_counts().idxmax()
print('The most frequent value in Embarked:', most_frequent_value)
print('\n')
print('The training set with missing Embarked records')
is_na = train_df["Embarked"].isna()
print(train_df[is_na]["Embarked"])
# fill the missing records by the most frequent value
train_df["Embarked"] = train_df["Embarked"].fillna(most_frequent_value)
print('\n')
print('The training set without missing Embarked records')
print(train_df[is_na]["Embarked"])
## Q19
# find the most frequent value
mode = test_df['Fare'].mode()
print('The most frequent value in Fare:', mode)
print('\n')
print('The test set with missing Fare records')
is_na = test_df["Fare"].isna()
print(test_df[is_na]["Fare"])
# fill the missing records by the most frequent value
test_df["Fare"] = test_df["Fare"].fillna(mode[0])
print('\n')
print('The test set without missing Fare records')
print(test_df[is_na]["Fare"])
## Q20
train_df['ordinal_fare'] = np.where(train_df['Fare'] <= 7.91, 0,
(np.where(train_df['Fare'] <= 14.454, 1,
(np.where(train_df['Fare'] <= 31.0, 2, 3)))))
# print first 10 rows
# print(train_df.head(10))
train_df[['PassengerId','Fare','ordinal_fare']].head(10)
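# A minimal alternative sketch: the thresholds above appear to be the Fare
# quartiles, so pd.qcut should reproduce essentially the same bins without
# hard-coded edges ('ordinal_fare_qcut' is an illustrative column name).
train_df['ordinal_fare_qcut'] = pd.qcut(train_df['Fare'], 4, labels=False)
train_df[['Fare', 'ordinal_fare', 'ordinal_fare_qcut']].head(10)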
# reproduce the table in the question
Avg = pd.DataFrame(train_df.groupby(['ordinal_fare'])['Survived'].mean())
Avg = pd.DataFrame(Avg)
from datetime import datetime
from flask import render_template, flash, redirect, url_for, request, g, \
jsonify, current_app, make_response
from flask_login import current_user, login_required
from app import db
#from app.main.forms import EditProfileForm, PostForm, SearchForm, MessageForm
from app.models import User, Demand
from app.main import bp
from io import TextIOWrapper, StringIO
import csv
from ..utils import timestamp
from pyords.cluster.implementations import create_dbscan_expanded_clusters
from pyords.distance.haversine import create_haversine_matrix
import pyords as pyr
import pandas as pd
import numpy as np
ALLOWED_EXTENSIONS = {'csv'}
def init_vrp_w_df(dataframe:pd.DataFrame):
lats, lons = dataframe.latitude, dataframe.longitude
origins = [(41.4191, -87.7748)]
matrix = create_haversine_matrix(origins, lats, lons)
# unit is load for each node with demand (in this case
# only destinations). inserting 0 at the front of the array
demand = np.insert(dataframe.pallets.values, 0, 0)
return matrix, demand
def process_vrp(dataframe:pd.DataFrame):
# simplify euclidean distance calculation by projecting to positive vals
x = dataframe.latitude.values + 90
y = dataframe.longitude.values + 180
dataframe['cluster'] = create_dbscan_expanded_clusters(x, y)
results = pd.DataFrame(columns=dataframe.columns.tolist())
for cluster in dataframe.cluster.unique():
clustered_df = dataframe[dataframe.cluster == cluster].copy().reset_index(drop=True)
matrix, demand = init_vrp_w_df(clustered_df)
bndl = pyr.VrpBundle(matrix=matrix, demand=demand)
clustered_df = bndl.run().cast_solution_to_df(clustered_df)
clustered_df.vehicle = str(int(cluster)) + '-' + clustered_df.vehicle.astype(int)\
.astype(str)
results = results.append(clustered_df, sort=False)
return results
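# The frame returned by process_vrp carries the uploaded columns plus the
# solver's vehicle/sequence/stop_distance/stop_load columns, with vehicle ids
# prefixed by their cluster (e.g. '3-1' = cluster 3, vehicle 1); the upload
# view below persists those fields to the Demand table.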
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@bp.before_app_request
def before_request():
if current_user.is_authenticated:
current_user.last_seen = datetime.utcnow()
db.session.commit()
@bp.route('/', methods=['GET', 'POST'])
@bp.route('/index', methods=['GET', 'POST'])
@login_required
def index():
return render_template('index.html', title='Home')
@bp.route('/upload', methods=['GET', 'POST'])
@login_required
def upload():
if request.method == 'POST':
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['file']
if file.filename == '':
flash('No selected file')
return redirect(request.url)
if file and allowed_file(file.filename):
user_id = current_user.get_id()
current_time = timestamp()
Demand.query.filter_by(user_id=user_id).delete() # TODO: for demo
df = process_vrp(pd.read_csv(TextIOWrapper(file, encoding='utf-8')))
flash('optimization complete!')
# get position of uploaded fields for more dynamic storage population
for i in range(len(df)):
if i > 0: # upload values only (field names are first row)
demand = Demand(
latitude=df.latitude.iloc[i],
longitude=df.longitude.iloc[i],
weight=df.weight.iloc[i],
pallets=df.pallets.iloc[i],
upload_date=timestamp(),
user_id=user_id,
vehicle_id=df.vehicle.iloc[i],
sequence_num=df.sequence.iloc[i],
stop_distance=df.stop_distance.iloc[i],
stop_load=df.stop_load.iloc[i]
)
db.session.add(demand)
db.session.commit()
return redirect(url_for('main.cvrp'))
return render_template('upload.html')
@bp.route('/cvrp', methods=['GET', 'POST'])
@login_required
def cvrp():
if request.method == 'GET':
user_id = current_user.get_id()
demand = db.engine.execute('select * from demand '
' where demand.user_id = %s' % user_id).fetchall()
data = [dict(row) for row in demand]
df = pd.DataFrame(data)
"""A dataframe concatenation function for PySpark."""
from collections import abc
import functools
from typing import (
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Union,
)
import warnings
import pandas as pd
from pyspark.sql import (
DataFrame as SparkDF,
functions as F,
)
from ons_utils.generic import list_convert
Key = Sequence[Union[str, Sequence[str]]]
# The order of these is important, big ---> small.
SPARK_NUMBER_TYPES = (
'decimal(10,0)',
'double',
'float',
'bigint',
'int',
'smallint',
'tinyint',
)
def concat(
frames: Union[Iterable[SparkDF], Mapping[Key, SparkDF]],
keys: Optional[Key] = None,
names: Optional[Union[str, Sequence[str]]] = None,
) -> SparkDF:
"""
Concatenate pyspark DataFrames with additional key columns.
Will attempt to cast column data types where schemas are mismatched
and fill empty columns with Nulls:
* upcasts to largest number data type present (for that column)
* casts to string if there is at least one dtype of 'string' for a
given column
Parameters
----------
frames : a sequence or mapping of SparkDF
If a mapping is passed, then the sorted keys will be used as the
`keys` argument, unless it is passed, in which case the values
will be selected.
keys : a sequence of str or str sequences, optional
The keys to differentiate child dataframes in the concatenated
dataframe. Each key can have multiple parts but each key should
have an equal number of parts. The length of `names` should be
equal to the number of parts. Keys must be passed if `frames` is
a sequence.
names : str or list of str, optional
The name or names to give each new key column. Must match the
size of each key.
Returns
-------
SparkDF
A single DataFrame combining the given frames with a
``unionByName()`` call. The resulting DataFrame has new columns
for each given name, that contains the keys which identify the
child frames.
Notes
-----
This code is mostly adapted from :func:`pandas.concat`.
"""
if isinstance(frames, (SparkDF, str)):
raise TypeError(
"first argument must be an iterable of pyspark DataFrames,"
f" you passed an object of type '{type(frames)}'"
)
if len(frames) == 0:
raise ValueError("No objects to concatenate")
if isinstance(frames, abc.Sequence):
if keys and (len(frames) != len(keys)):
raise ValueError(
"keys must be same length as frames"
" when frames is a list or tuple"
)
if isinstance(frames, abc.Mapping):
if names is None:
raise ValueError(
"when the first argument is a mapping,"
" the names argument must be given"
)
if keys is None:
keys = list(frames.keys())
# If keys are passed with a mapping, then the mapping is subset
# using the keys. This also ensures the order is correct.
frames = [frames[k] for k in keys]
else:
frames = list(frames)
for frame in frames:
if not isinstance(frame, SparkDF):
raise TypeError(
f"cannot concatenate object of type '{type(frame)}'; "
"only pyspark.sql.DataFrame objs are valid"
)
schemas_df = _get_schemas_df(frames, keys, names)
schemas_are_equal = _compare_schemas(schemas_df)
# Allows dataframes with inconsistent schemas to be concatenated by
# filling empty columns with Nulls and casting some column data
# types where appropriate.
#
# Potentially remove when Spark 3.1.0 available.
if not schemas_are_equal:
frames = [
_ensure_consistent_schema(frame, schemas_df)
for frame in frames
]
# Potentially update with commented line when Spark 3.1.0 available.
# union = functools.partial(SparkDF.unionByName, allowMissingColumns=True)
union = SparkDF.unionByName
# If no keys or names are given then simply union the DataFrames.
if not names and not keys:
return functools.reduce(union, frames)
# Convert names and keys elements to a list if not already, so they
# can be iterated over in the next step.
names = list_convert(names)
keys = [list_convert(key) for key in keys]
if not all([len(key) == len(names) for key in keys]):
raise ValueError(
"the length of each key must equal the length of names"
)
if not all([len(key) == len(keys[0]) for key in keys]):
raise ValueError(
"all keys must be of equal length"
)
frames_to_concat = []
# Loop through each frame, and add each part in the keys to a new
# column defined by name.
for parts, frame in zip(keys, frames):
for name, part in reversed(tuple(zip(names, parts))):
frame = frame.select(F.lit(part).alias(name), '*')
frames_to_concat.append(frame)
return functools.reduce(union, frames_to_concat)
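# Illustrative usage sketch (not part of the module API): mapping keys become
# values of the new key column. Assumes a live SparkSession is supplied; the
# key values and column names below are hypothetical.
def _example_concat_usage(spark) -> SparkDF:
    """Concatenate two frames, tagging rows with a new 'period' column."""
    df_a = spark.createDataFrame([(1, "a")], ["id", "val"])
    df_b = spark.createDataFrame([(2, "b")], ["id", "val"])
    # Result columns: period, id, val.
    return concat({("2021",): df_a, ("2022",): df_b}, names=["period"])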
def _ensure_consistent_schema(
frame: SparkDF,
schemas_df: pd.DataFrame,
) -> SparkDF:
"""Ensure the dataframe is consistent with the schema.
If there are column data type mismatches, (more than one data type
for a column name in the column schemas) then will try to convert
the data type if possible:
* if they are all number data types, then picks the largest number
type present
* if one of the types is string, then ensures it casts the column to
string type
Also fills any missing columns with Null values, ensuring correct
dtype.
Parameters
----------
frame : SparkDF
schemas_df : pandas DataFrame
    A dataframe of column schemas (dtypes by column name, one column per
    input frame) for all dataframes set to be concatenated, as produced by
    :func:`_get_schemas_df`.
Returns
-------
SparkDF
Input dataframe with consistent schema.
"""
final_schema = _get_final_schema(schemas_df)
missing_fields = [f for f in final_schema if f not in frame.dtypes]
for column, dtype in missing_fields:
# If current frame missing the column in the schema, then
# set values to Null.
vals = (
F.lit(None) if column not in frame.columns
else F.col(column)
)
# Cast the values with the correct dtype.
frame = frame.withColumn(column, vals.cast(dtype))
return frame
def _get_final_schema(
schemas_df: pd.DataFrame
) -> Sequence[Tuple[str, str]]:
"""Get the final schema by coercing the types."""
# For a given column, if one of the types is string coerce all types
# to string.
schemas_df = schemas_df.mask(
schemas_df.eq('string').any(axis=1),
'string',
)
# For a given column, if all types are number types coerce all types
# to the largest spark number type present.
number_types = (
schemas_df
.fillna('int')
.isin(SPARK_NUMBER_TYPES)
.all(axis=1)
)
largest_num_types = schemas_df[number_types].apply(
lambda row: _get_largest_number_dtype(row.to_list()),
axis=1,
)
schemas_df = schemas_df.mask(number_types, largest_num_types, axis=0)
if not _check_equal_schemas(schemas_df).all():
raise TypeError(
"Spark column data type mismatch, can't auto-convert between"
f" types. \n\n{str(schemas_df[~_check_equal_schemas(schemas_df)])}"
)
# Return the final schema.
return [
(name, dtype)
# Only need the first two columns.
for name, dtype, *_ in schemas_df.reset_index().to_numpy()
]
def _get_largest_number_dtype(dtypes: Sequence[str]) -> str:
"""Return the largest Spark number data type in the input."""
return next((
dtype for dtype in SPARK_NUMBER_TYPES
if dtype in dtypes
))
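# Examples (SPARK_NUMBER_TYPES is ordered big -> small, so the first match wins):
#   _get_largest_number_dtype(['int', 'double'])      -> 'double'
#   _get_largest_number_dtype(['smallint', 'bigint']) -> 'bigint'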
def _compare_schemas(schemas_df: pd.DataFrame) -> bool:
"""Return True if schemas are equal, else throw warning.
If unequal, throws a warning that displays the schemas for all the
unequal columns.
Parameters
----------
schemas_df : pandas DataFrame
A dataframe of schemas with columns along the index, dataframe
name across the columns and the dtypes as the values. Create
with :func:`_get_schemas_df`.
Returns
-------
bool
True if column schemas are equal, else False.
"""
equal_schemas = _check_equal_schemas(schemas_df)
# Fill types across missing columns. We only want to raise a warning
# if the types are different.
schemas_df_filled = schemas_df.bfill(1).ffill(1)
equal_ignoring_missing_cols = _check_equal_schemas(schemas_df_filled)
if not equal_ignoring_missing_cols.all():
warnings.warn(
"column dtypes in the schemas are not equal, attempting to coerce"
f"\n\n{str(schemas_df.loc[~equal_schemas])}",
UnequalSchemaWarning,
)
return False
elif not equal_schemas.all():
return False
else:
return True
def _check_equal_schemas(df: pd.DataFrame) -> pd.Series:
    """Return, per column, whether every frame's dtype matches the first frame's."""
    return df.apply(lambda col: col.eq(df.iloc[:, 0])).all(axis=1)
def _get_schemas_df(
frames: Sequence[pd.DataFrame],
keys: Optional[Key] = None,
names: Optional[Union[str, Sequence[str]]] = None,
) -> pd.DataFrame:
"""Return dataframe of column schemas for given frames."""
schemas_df = pd.DataFrame()
for df in frames:
col_names, dtypes = zip(*df.dtypes)
schema = pd.Series(dtypes, index=col_names)
import importlib.util
spec = importlib.util.spec_from_file_location("BoundaryLayerToolbox", "/Users/claudiopierard/VC/BoundaryLayerToolbox.py")
blt = importlib.util.module_from_spec(spec)
spec.loader.exec_module(blt)
import matplotlib
import numpy as np
import h5py
import matplotlib.pyplot as plt
import scipy as spy
import os
import sys
import pandas as pd
import datetime
import matplotlib.colors as colors
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
xlat = np.loadtxt("/Users/claudiopierard/VC/datos/xlat_d02_interpolado.txt")
xlong = np.loadtxt("/Users/claudiopierard/VC/datos/xlong_d02_interpolado.txt")
hgt = np.loadtxt("/Users/claudiopierard/VC/datos/hgt_d02_interpolado.txt")
months = {1:'jan', 2:'feb', 3:'mar',4: 'apr', 5:'may', 6:'jun', 7:'jul', 8:'aug', 9:'sep', 10:'oct', 11:'nov', 12:'dic'}
path2datosVC = "../datos/dataframes_VC/cca/"
path2pollutants = "../datos/contaminantes/2015/CCA/"
path2graphs = "../graficas/cca/"
months = {'jan': '01', 'feb': '02', 'mar': '03', 'apr': '04', 'may': '05', 'jun': '06', 'jul': '07', 'aug': '08', 'sep': '09', 'oct':'10', 'nov':'11', 'dic': '12'}
# Import station locations into a dictionary
path2estaciones = "../datos/loc_estaciones/air_quality_stn.xy"
estaciones = pd.read_table(path2estaciones, index_col=0, names=['long','lat', 'height'])
estaciones = estaciones.transpose().to_dict()
print("*** START ***")
# Import 2015 pollutant data at CCA
o3_2015 = pd.read_csv(path2pollutants + "CCA_o3_2015.csv", index_col=0)
o3_2015.index = pd.to_datetime(o3_2015.index)
pm25_2015 = pd.read_csv(path2pollutants + "CCA_pm25_2015.csv", index_col=0)
pm25_2015.index = pd.to_datetime(pm25_2015.index)
co_2015 = pd.read_csv(path2pollutants + "CCA_co_2015.csv", index_col=0)
co_2015.index = pd.to_datetime(co_2015.index)
# Import 2015 VC data at CCA for a given month
sys.argv.pop(0)
for mm in sys.argv:
print('\U0001F4C8', "Plotting", mm)
local_path = path2graphs + mm + '/' + 'vc_contaminantes_series/'
os.makedirs(local_path)
month_vc = pd.read_csv(path2datosVC + mm + "_dataframe_cca.csv", index_col=0)
month_vc.index = pd.to_datetime(month_vc.index)
date_beg = "2015-" + months[mm] + '-01'
date_end = "2015-" + months[mm] + '-' + blt.E1or30(mm)
#date_beg:date_end
############### THE WEEKEND BANDS #################
def dayOfWeek_array(datetime_arr):
y = datetime_arr.year
m = datetime_arr.month
d = datetime_arr.day
t = [0, 3, 2, 5, 0, 3, 5, 1, 4, 6, 2, 4]
new_m = []
for i in m:
new_m.append(t[i-1])
new_m = np.array(new_m)
y -= m < 3
return np.trunc((y + y/4 - y/100 + y/400 + new_m + d) % 7).astype(int)
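# Sanity check (assumption: the helper follows Sakamoto's 0=Sunday ... 6=Saturday
# convention, which the .loc[6]/.loc[0] weekend lookups below rely on). pandas'
# DatetimeIndex.dayofweek uses 0=Monday, so over the dates handled here:
#   dayOfWeek_array(idx) == (idx.dayofweek + 1) % 7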
A = o3_2015[date_beg:date_end].index.date
B = o3_2015[date_beg:date_end].index.time
C = dayOfWeek_array(o3_2015[date_beg:date_end].index)
DF = pd.DataFrame({'date': A, 'time': B, 'week_index': C})
DF.index = DF.week_index
DF = DF.drop('week_index', 1)
sat = DF.loc[6].drop_duplicates(subset = 'date', keep = 'first')
sun = DF.loc[0].drop_duplicates(subset = 'date', keep = 'last')
new_sun = []
new_sat = []
for i in range(0, len(sun.date.values)):
dt = datetime.datetime.combine(sun['date'].values[i], sun['time'].values[i])
new_sun.append(dt)
for i in range(0, len(sat.date.values)):
dt = datetime.datetime.combine(sat['date'].values[i], sat['time'].values[i])
new_sat.append(dt)
sat = new_sat
sun = new_sun
weekend = []
for j in range(0, len(sun)):
if sun[j].day == 1:
weekend.append([sun[j] - datetime.timedelta(hours = 23), sun[j]])
for i in range(0, len(sat)):
for j in range(0, len(sun)):
if sat[i].day == sun[j].day - 1:
weekend.append([sat[i], sun[j]])
if sat[i].day == 31:
weekend.append([sat[i] , sat[i] + datetime.timedelta(hours = 23)])
#### Contingencies
# March
cont_mar = [[pd.to_datetime('2015-03-03 16:00:00'), pd.to_datetime('2015-03-04 00:00:00')]]
cont_apr = [[pd.to_datetime('2015-04-08 16:00:00')
'''
Urban-PLUMBER processing code
Associated with the manuscript: Harmonized, gap-filled dataset from 20 urban flux tower sites
Copyright (c) 2021 <NAME>
Licensed under the Apache License, Version 2.0 (the "License").
You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0
'''
__title__ = "site-specific processing wrapper"
__version__ = "2021-09-20"
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__description__ = 'Wrapper for processing individual sites. Includes setting site-specific information, importing raw site data, calling pipeline functions, creating site plots and webpages etc.'
import numpy as np
import xarray as xr
import pandas as pd
import matplotlib.pyplot as plt
import os
import sys
import argparse
import importlib
import glob
# paths
oshome=os.getenv('HOME')
projpath = f'{oshome}/git/urban-plumber_pipeline' # root of repository
datapath = f'{oshome}/git/urban-plumber_pipeline/input_data' # raw data path (site data, global data)
sys.path.append(projpath)
import pipeline_functions
importlib.reload(pipeline_functions)
##########################################################################
# MANUAL USER INPUTS
##########################################################################
# these are overridden with --existing flag (i.e. python create_dataset_XX.py --existing)
create_raw_obs_nc = True # create obs nc from original format
create_rain_file = True # find and load nearest GHCND
qcplotdetail = True # plot quality control diurnal and timeseries
forcingplots = True # plot forcing and analysis period obs and gap-filling
create_outofsample_obs = True # for testing bias-correction on half of available obs
fullpipeline = True # undertake full pipeline e.g. cleaning, bias correction, data creation
##########################################################################
# COMMAND LINE ARGUMENT PARSING
##########################################################################
# Initiate the parser
parser = argparse.ArgumentParser()
parser.add_argument('--log', help='log print statements to file', action='store_true')
parser.add_argument('--projpath', help='replaces projpath with new path')
parser.add_argument('--datapath', help='replaces datapath with new path')
parser.add_argument('--existing', help='use existing outputs (processing already run)', action='store_true')
parser.add_argument('--globaldata',help='output site characteristics from global dataset (if available)', action='store_true')
args = parser.parse_args()
log_stout = False
if args.log:
log_stout = True
if args.projpath:
print(f'updating projpath to {projpath}')
projpath = args.projpath
if args.datapath:
print(f'updating datapath to {datapath}')
datapath = args.datapath
if args.existing:
print('using existing files')
create_raw_obs_nc = False
create_rain_file = False
qcplotdetail = False
forcingplots = False
create_outofsample_obs = False
fullpipeline = False
##########################################################################
# SITE SPECIFIC INFORMATION
##########################################################################
sitename = 'PL-Lipowa'
out_suffix = 'v0.9'
sitedata_suffix = 'v1'
local_utc_offset_hours = 1
long_sitename = 'Lipowa Street, Łódź, Poland'
obs_contact = '<NAME> (<EMAIL>) <NAME> (<EMAIL>)'
obs_reference = 'Fortuniak, Pawlak and Siedlecki (2013): https://doi.org/10.1007/s10546-012-9762-1; Pawlak, Fortuniak, Siedlecki (2011): https://doi.org/10.1002/joc.2247; <NAME> (2006): https://doi.org/10.1175/JAM2319.1'
obs_comment = 'Missing forcing filled with PL-Narutowicza tower site where available. Precipitation from IMGW Łódź Lublinek. Subset of available years included here.'
photo_source = 'Włodzimierz Pawlak'
history = 'v0.9 (2021-09-08): beta issue'
##########################################################################
# MAIN
##########################################################################
def main():
sitepath = f'{projpath}/sites/{sitename}'
print('preparing site data and attributes')
sitedata, siteattrs = pipeline_functions.prep_site(
sitename, sitepath, out_suffix, sitedata_suffix, long_sitename,
local_utc_offset_hours, obs_contact, obs_reference, obs_comment,
history, photo_source, args.globaldata, datapath)
print('getting observation netcdf\n')
if create_raw_obs_nc:
print(f'creating observational NetCDF in ALMA format\n')
raw_ds = import_obs(sitedata,siteattrs)
raw_ds = pipeline_functions.set_raw_attributes(raw_ds, siteattrs)
else:
fpath = f'{sitepath}/timeseries/{sitename}_raw_observations_{siteattrs["out_suffix"]}.nc'
raw_ds = xr.open_dataset(fpath)
if create_rain_file:
syear, eyear = raw_ds.time.dt.year.values[0] - 10, raw_ds.time.dt.year.values[-1]
nearest = pipeline_functions.find_ghcnd_closest_stations(syear,eyear,sitedata,datapath,nshow=4)
print('nearest stations, see: https://www.ncdc.noaa.gov/cdo-web/search:\n',nearest)
rain_sites = ['PLM00012375', # OKECIE, PL 52.166 20.967
'PLM00012566', # BALICE, PL 50.078 19.785
'PLM00012160', # <NAME>, PL 54.167 19.433
'LOE00116364'] # <NAME>, LO 49.3667 19.1667
# NO REASONABLY CLOSE GHCND SITES
rain_obs = pipeline_functions.get_ghcnd_precip(sitepath,datapath,syear,eyear,rain_sites)
pipeline_functions.write_ghcnd_precip(sitepath,sitename,rain_obs)
############################################
############ pipeline MAIN call ############
raw_ds, clean_ds, watch_ds, era_ds, corr_ds, lin_ds, forcing_ds = pipeline_functions.main(
datapath = datapath,
sitedata = sitedata,
siteattrs = siteattrs,
raw_ds = raw_ds,
fullpipeline = fullpipeline,
qcplotdetail = qcplotdetail)
############################################
print('post processing, plotting and checking')
pipeline_functions.post_process_site(sitedata,siteattrs,datapath,
raw_ds,forcing_ds,clean_ds,era_ds,watch_ds,corr_ds,lin_ds,
forcingplots,create_outofsample_obs)
print(f'{sitename} done!')
return raw_ds, clean_ds, watch_ds, era_ds, corr_ds, forcing_ds,
##########################################################################
# specific functinos
##########################################################################
def import_obs(sitedata,siteattrs):
sitepath = siteattrs["sitepath"]
# read data csv
print('reading raw data file')
raw1 = pd.read_csv(f'{datapath}/{sitename}/01_PL-LODZ_LIPOWA_STREET_2006-2015_forcing_data.dat',
na_values='-999', delim_whitespace=True, comment='#', header=None,
names=['datetime','SWdown','LWdown','Tair','Qair','pressure','Wind_N','Wind_E'])
raw2 = pd.read_csv(f'{datapath}/{sitename}/02_PL-LODZ_LIPOWA_STREET_2006-2015_evaluation_data.dat',
na_values='-999', delim_whitespace=True, comment='#', header=None,
names=['datetime','SWup','LWup','Qle','Qh'])
raw3 = pd.read_csv(f'{datapath}/{sitename}/01_PL-LODZ_LIPOWA_STREET_2006-2015_forcing_data_RH.dat',
na_values='-999', delim_whitespace=True)
# get times from data and reindex
times = pd.date_range(start='2006-07-11 01:00:00', end='2015-09-25 00:00:00', freq='60Min')
raw1.index = times
raw2.index = times
raw3.index = times
# rain from IMGW Łódź Lublinek (hourly)
rain = pd.read_csv(f'{datapath}/{sitename}/Lodz_Lublinek_2006-2015_precipitation.txt', delim_whitespace=True)
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 20 02:13:41 2019
@author: islam
"""
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score, roc_auc_score,f1_score,recall_score
import heapq # for retrieval topK
from utilities import get_instances_with_random_neg_samples, get_test_instances_with_random_samples
from performance_and_fairness_measures import getHitRatio, getNDCG, differentialFairnessMultiClass, computeEDF, computeAbsoluteUnfairness
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import torch.nn.functional as F
from collaborative_models import neuralCollabFilter
#%%The function below ensures that we seed all random generators with the same value to get reproducible results
def set_random_seed(state=1):
gens = (np.random.seed, torch.manual_seed, torch.cuda.manual_seed)
for set_state in gens:
set_state(state)
RANDOM_STATE = 1
set_random_seed(RANDOM_STATE)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#%% fine-tuning pre-trained model with user-career pairs
def fine_tune_model(model,df_train, epochs, lr,batch_size,num_negatives,num_items, unsqueeze=False):
criterion = nn.BCELoss()
optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=1e-6)
model.train()
for i in range(epochs):
j = 0
for batch_i in range(0,np.int64(np.floor(len(df_train)/batch_size))*batch_size,batch_size):
data_batch = (df_train[batch_i:(batch_i+batch_size)]).reset_index(drop=True)
train_user_input, train_item_input, train_ratings = get_instances_with_random_neg_samples(data_batch, num_items, num_negatives,device)
if unsqueeze:
train_ratings = train_ratings.unsqueeze(1)
y_hat = model(train_user_input, train_item_input)
loss = criterion(y_hat, train_ratings)
optimizer.zero_grad()
loss.backward()
optimizer.step()
print('epoch: ', i, 'batch: ', j, 'out of: ',np.int64(np.floor(len(df_train)/batch_size)), 'average loss: ',loss.item())
j = j+1
#%% model evaluation: hit rate and NDCG
def evaluate_fine_tune(model,df_val,top_K,random_samples, num_items):
model.eval()
avg_HR = np.zeros((len(df_val),top_K))
avg_NDCG = np.zeros((len(df_val),top_K))
for i in range(len(df_val)):
test_user_input, test_item_input = get_test_instances_with_random_samples(df_val[i], random_samples,num_items,device)
y_hat = model(test_user_input, test_item_input)
y_hat = y_hat.cpu().detach().numpy().reshape((-1,))
test_item_input = test_item_input.cpu().detach().numpy().reshape((-1,))
map_item_score = {}
for j in range(len(y_hat)):
map_item_score[test_item_input[j]] = y_hat[j]
for k in range(top_K):
# Evaluate top rank list
ranklist = heapq.nlargest(k, map_item_score, key=map_item_score.get)
gtItem = test_item_input[0]
avg_HR[i,k] = getHitRatio(ranklist, gtItem)
avg_NDCG[i,k] = getNDCG(ranklist, gtItem)
avg_HR = np.mean(avg_HR, axis = 0)
avg_NDCG = np.mean(avg_NDCG, axis = 0)
return avg_HR, avg_NDCG
#%%
def fairness_measures(model,df_val,num_items,protectedAttributes):
model.eval()
user_input = torch.LongTensor(df_val['user_id'].values).to(device)
item_input = torch.LongTensor(df_val['like_id'].values).to(device)
y_hat = model(user_input, item_input)
avg_epsilon = computeEDF(protectedAttributes,y_hat,num_items,item_input,device)
U_abs = computeAbsoluteUnfairness(protectedAttributes,y_hat,num_items,item_input,device)
avg_epsilon = avg_epsilon.cpu().detach().numpy().reshape((-1,)).item()
print(f"average differential fairness: {avg_epsilon: .3f}")
U_abs = U_abs.cpu().detach().numpy().reshape((-1,)).item()
print(f"absolute unfairness: {U_abs: .3f}")
#%% load data
train_users= pd.read_csv("train-test/train_usersID.csv",names=['user_id'])
test_users = pd.read_csv("train-test/test_usersID.csv",names=['user_id'])
train_careers = pd.read_csv("train-test/train_concentrationsID.csv", names=['like_id'])
import numpy as np
import pandas as pd
from popmon.hist.histogram import (
HistogramContainer,
project_on_x,
project_split2dhist_on_axis,
sum_entries,
sum_over_x,
)
from popmon.hist.patched_histogrammer import histogrammar as hg
def get_test_data():
df = pd.util.testing.makeMixedDataFrame()
df["date"] = df["D"].apply(lambda x: pd.to_datetime(x).value)
return df
def unit(x):
return x
def get_histograms():
df = get_test_data()
hist1 = hg.Categorize(unit("C"))
hist2 = hg.Bin(5, 0, 5, unit("A"), value=hist1)
hist3 = hg.SparselyBin(
origin=pd.Timestamp("2009-01-01").value,
binWidth=pd.Timedelta(days=1).value,
quantity=unit("date"),
value=hist2,
)
for hist in [hist1, hist2, hist3]:
hist.fill.numpy(df)
return hist1, hist2, hist3
def test_histogrammar():
hist1, hist2, hist3 = get_histograms()
assert hist1.entries == 5
assert hist1.n_dim == 1
assert hist1.size == 5
assert hist2.entries == 5
assert hist2.n_dim == 2
assert hist2.num == 5
assert hist3.entries == 5
assert hist3.n_dim == 3
assert hist3.num == 7
def test_histogram_attributes():
hist1, hist2, hist3 = get_histograms()
hist_obj1 = HistogramContainer(hist1)
hist_obj2 = HistogramContainer(hist2)
hist_obj3 = HistogramContainer(hist3)
assert hist_obj1.is_num is False
assert hist_obj1.is_ts is False
assert hist_obj2.is_num is True
assert hist_obj2.is_ts is False
assert hist_obj3.is_num is True
assert hist_obj3.is_ts is True
def test_sparse_bin_centers_x():
hist1, hist2, hist3 = get_histograms()
hist_obj3 = HistogramContainer(hist3)
centers3, values3 = hist_obj3.sparse_bin_centers_x()
np.testing.assert_array_equal(
centers3, [1.2308112e18, 1.2308976e18, 1.2311568e18, 1.2312432e18, 1.2313296e18]
)
def test_split_hist_along_first_dimension():
hist1, hist2, hist3 = get_histograms()
hist_obj1 = HistogramContainer(hist1)
hist_obj2 = HistogramContainer(hist2)
hist_obj3 = HistogramContainer(hist3)
split3a = hist_obj3.split_hist_along_first_dimension(
xname="x", yname="y", short_keys=True, convert_time_index=True
)
split3b = hist_obj3.split_hist_along_first_dimension(
xname="x", yname="y", short_keys=True, convert_time_index=False
)
split3c = hist_obj3.split_hist_along_first_dimension(
xname="x", yname="y", short_keys=False, convert_time_index=True
)
keys3a = list(split3a.keys())
keys3b = list(split3b.keys())
keys3c = list(split3c.keys())
check3a = [
pd.Timestamp("2009-01-01 12:00:00"),
pd.Timestamp("2009-01-02 12:00:00"),
pd.Timestamp("2009-01-05 12:00:00"),
pd.Timestamp("2009-01-06 12:00:00"),
pd.Timestamp("2009-01-07 12:00:00"),
]
check3b = [1.2308112e18, 1.2308976e18, 1.2311568e18, 1.2312432e18, 1.2313296e18]
check3c = [
"y[x=2009-01-01 12:00:00]",
"y[x=2009-01-02 12:00:00]",
"y[x=2009-01-05 12:00:00]",
"y[x=2009-01-06 12:00:00]",
"y[x=2009-01-07 12:00:00]",
]
np.testing.assert_array_equal(keys3a, check3a)
np.testing.assert_array_equal(keys3b, check3b)
np.testing.assert_array_equal(keys3c, check3c)
split2a = hist_obj2.split_hist_along_first_dimension(
xname="x", yname="y", short_keys=True, convert_time_index=True
)
split2b = hist_obj2.split_hist_along_first_dimension(
xname="x", yname="y", short_keys=True, convert_time_index=False
)
split2c = hist_obj2.split_hist_along_first_dimension(
xname="x", yname="y", short_keys=False, convert_time_index=False
)
keys2a = list(split2a.keys())
keys2b = list(split2b.keys())
keys2c = list(split2c.keys())
check2a = [0.5, 1.5, 2.5, 3.5, 4.5]
check2b = [0.5, 1.5, 2.5, 3.5, 4.5]
check2c = ["y[x=0.5]", "y[x=1.5]", "y[x=2.5]", "y[x=3.5]", "y[x=4.5]"]
np.testing.assert_array_equal(keys2a, check2a)
np.testing.assert_array_equal(keys2b, check2b)
np.testing.assert_array_equal(keys2c, check2c)
split1a = hist_obj1.split_hist_along_first_dimension(
xname="x", yname="y", short_keys=True, convert_time_index=True
)
split1b = hist_obj1.split_hist_along_first_dimension(
xname="x", yname="y", short_keys=True, convert_time_index=False
)
split1c = hist_obj1.split_hist_along_first_dimension(
xname="x", yname="y", short_keys=False, convert_time_index=False
)
keys1a = list(split1a.keys())
keys1b = list(split1b.keys())
keys1c = list(split1c.keys())
check1a = ["foo1", "foo2", "foo3", "foo4", "foo5"]
check1b = ["foo1", "foo2", "foo3", "foo4", "foo5"]
check1c = ["x=foo1", "x=foo2", "x=foo3", "x=foo4", "x=foo5"]
np.testing.assert_array_equal(keys1a, check1a)
np.testing.assert_array_equal(keys1b, check1b)
np.testing.assert_array_equal(keys1c, check1c)
# look at the split hists
hs3 = split3a[keys3a[0]]
hs2 = split2a[keys2a[0]]
hs1 = split1a[keys1a[0]]
assert hs3.n_dim == 2
assert hs2.n_dim == 1
assert isinstance(hs3, hg.Bin)
assert isinstance(hs2, hg.Categorize)
assert isinstance(hs1, hg.Count)
assert hs3.contentType == "Categorize"
assert hs2.contentType == "Count"
def test_sum_entries():
hist1, hist2, hist3 = get_histograms()
assert sum_entries(hist1) == 5
assert sum_entries(hist2) == 5
assert sum_entries(hist3) == 5
def test_sum_over_x():
df = get_test_data()
hist1 = hg.Categorize(unit("C"))
hist2 = hg.Bin(5, 0, 5, unit("A"), value=hist1)
hist3 = hg.Bin(5, 0, 5, unit("A"))
hist4 = hg.Categorize(unit("C"), value=hist3)
for hist in [hist1, hist2, hist3, hist4]:
hist.fill.numpy(df)
histC = sum_over_x(hist2)
histA = sum_over_x(hist4)
bin_edgesA = histA.bin_edges()
bin_entriesA = histA.bin_entries()
bin_edges3 = hist3.bin_edges()
bin_entries3 = hist3.bin_entries()
bin_labelsC = histC.bin_labels()
bin_entriesC = histC.bin_entries()
bin_labels1 = hist1.bin_labels()
bin_entries1 = hist1.bin_entries(bin_labelsC) # match order of labels
np.testing.assert_array_equal(bin_edgesA, bin_edges3)
np.testing.assert_array_equal(bin_entriesA, bin_entries3)
np.testing.assert_array_equal(sorted(bin_labelsC), sorted(bin_labels1))
np.testing.assert_array_equal(bin_entriesC, bin_entries1)
def test_project_on_x():
df = get_test_data()
hist1 = hg.Categorize(unit("C"))
hist2 = hg.Bin(5, 0, 5, unit("A"), value=hist1)
hist3 = hg.Bin(5, 0, 5, unit("A"))
hist4 = hg.Categorize(unit("C"), value=hist3)
for hist in [hist1, hist2, hist3, hist4]:
hist.fill.numpy(df)
histA = project_on_x(hist2)
histC = project_on_x(hist4)
bin_edgesA = histA.bin_edges()
bin_entriesA = histA.bin_entries()
bin_edges3 = hist3.bin_edges()
bin_entries3 = hist3.bin_entries()
bin_labelsC = histC.bin_labels()
bin_entriesC = histC.bin_entries()
bin_labels1 = hist1.bin_labels()
bin_entries1 = hist1.bin_entries(bin_labelsC) # match order of labels
np.testing.assert_array_equal(bin_edgesA, bin_edges3)
np.testing.assert_array_equal(bin_entriesA, bin_entries3)
np.testing.assert_array_equal(sorted(bin_labelsC), sorted(bin_labels1))
np.testing.assert_array_equal(bin_entriesC, bin_entries1)
def test_project_split2dhist_on_axis():
df = get_test_data()
histA = hg.Bin(5, 0, 5, unit("A"))
histC = hg.Categorize(unit("C"))
hist1 = hg.Categorize(unit("C"), value=histA)
hist2 = hg.Bin(5, 0, 5, unit("A"), value=histC)
histDCA = hg.SparselyBin(
origin=pd.Timestamp("2009-01-01").value,
binWidth=pd.Timedelta(days=1).value,
quantity=unit("date"),
value=hist1,
)
histDAC = hg.SparselyBin(
origin=pd.Timestamp("2009-01-01").value,
binWidth=pd.Timedelta(days=1).value,
quantity=unit("date"),
value=hist2,
)
histDA = hg.SparselyBin(
origin=pd.Timestamp("2009-01-01").value,
binWidth=pd.Timedelta(days=1).value,
import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import plotly.graph_objs as go
from dash.dependencies import Input, Output
import datetime as dt
import pandas_datareader as web
app = dash.Dash()
server = app.server
start = dt.datetime(2000,1,1)
end = dt.datetime.now()
df = web.DataReader('AAPL','yahoo', start, end)
df=df.reset_index()
df["Date"]=pd.to_datetime(df.Date,format="%Y-%m-%d")
df.index=df['Date']
data=df.sort_index(ascending=True,axis=0)
new_data=pd.DataFrame(index=range(0,len(df)),columns=['Date','Close'])
for i in range(0,len(data)):
new_data["Date"][i]=data['Date'][i]
new_data["Close"][i]=data["Close"][i]
new_data=new_data.set_index('Date')
dataset=new_data.values
tickers = ['TSLA','AAPL','FB','MSFT','SBUX']
df1 = web.DataReader(tickers, data_source='yahoo', start='2017-01-01', end=dt.datetime.now())
df=df1.stack().reset_index().rename(index=str, columns={"level_1": "Symbols"}).sort_values(['Symbols','Date'])
df["Date"]=pd.to_datetime(df.Date,format="%Y-%m-%d")
df.index=df['Date']
D_validationData= pd.read_csv("LSTM_validation.csv")
D_train_data= pd.read_csv("LSTM_train.csv")
fig2 = go.Figure()
fig2.add_trace(go.Scatter(x=D_validationData["Date"], y=D_validationData["Close"],
mode='lines',name='Validation',line=dict(color="blue",width=4)))
fig2.add_trace(go.Scatter(x=D_validationData["Date"], y=D_validationData["Predictions"],
mode='lines',name='Stock Price Predicted ',line=dict(color="red",width=4)))
fig2.add_trace(go.Scatter(x=D_train_data["Date"], y=D_train_data["Close"],
mode='lines', name='Train',line=dict(color="darkblue",width=4)))
fig2.update_layout(hovermode='x unified',
showlegend=True,
plot_bgcolor="white",
paper_bgcolor = "rgba(0,0,0,0)",
xaxis_title="Date",
yaxis_title="Closing Rate",
legend_title="Data:",
margin=dict(t=50,l=200,b=50,r=200),
)
fig2.update_xaxes(showline=True, linewidth=2, linecolor='white', gridcolor='lightgray')
fig2.update_yaxes(showline=True, linewidth=2, linecolor='white', gridcolor='lightgray')
moving_avg= pd.read_csv("test_mov_avg.csv")
fig3 = go.Figure()
fig3.add_trace(go.Scatter(x=moving_avg["date"], y=moving_avg["close"],
mode='lines',
name='Test',
line=dict(color="darkblue",width=3)))
fig3.add_trace(go.Scatter(x=moving_avg["date"], y=moving_avg["est_N2"],
mode='lines',
name='Stock Price Predicted',
line=dict(color="red",width=3)))
fig3.update_layout(hovermode='x unified',
showlegend=True,
plot_bgcolor="white",
paper_bgcolor = "rgba(0,0,0,0)",
xaxis_title="Date",
yaxis_title="Closing Rate",
legend_title="Data:",
margin=dict(t=50,l=200,b=50,r=200),
)
fig3.update_xaxes(showline=True, linewidth=1, linecolor='white', gridcolor='lightgray')
fig3.update_yaxes(showline=True, linewidth=1, linecolor='white', gridcolor='lightgray')
# Import CSV tree training data
Tree_training= pd.read_csv("Tree_training_data.csv")
Tree_prediction= pd.read_csv("Tree_model_prediction.csv")
figt = go.Figure()
figt.add_trace(go.Scatter(x=Tree_training["Date"], y=Tree_training["Close"],
mode='lines',
name='Training ',
line=dict(color="darkblue",width=3)))
figt.add_trace(go.Scatter(x=Tree_prediction["Date"], y=Tree_prediction["Close"],
mode='lines',
name='Validation',
line=dict(color="blue",width=4)))
figt.add_trace(go.Scatter(x=Tree_prediction["Date"], y=Tree_prediction["Predictions"],
mode='lines', name='Stock Price Predicted',
line=dict(color="red",width=2)))
figt.update_layout(hovermode='x unified',
showlegend=True,
plot_bgcolor="white",
paper_bgcolor = "rgba(0,0,0,0)",
xaxis_title="Date",
yaxis_title="Closing Rate",
legend_title="Data:",
margin=dict(t=50,l=200,b=50,r=200),
)
figt.update_xaxes(showline=True, linewidth=2, linecolor='white', gridcolor='lightgray')
figt.update_yaxes(showline=True, linewidth=2, linecolor='white', gridcolor='lightgray')
# Linear Regression Model data
LR_train= pd.read_csv("LR_train.csv")
LR_prediction= pd.read_csv("LR_prediction.csv")
# Create figure lines for Linear Regression Model
figLR = go.Figure()
figLR.add_trace(go.Scatter(x=LR_train["Date"], y=LR_train["Close"],
mode='lines',
name='Training ',
line=dict(color="darkblue",width=3)))
figLR.add_trace(go.Scatter(x=LR_prediction["Date"], y=LR_prediction["Close"],
mode='lines',
name='Validation',
line=dict(color="blue",width=3)))
figLR.add_trace(go.Scatter(x=LR_prediction["Date"], y=LR_prediction["Predictionslr"],
mode='lines', name='Stock Price Predicted',
line=dict(color="red",width=3)))
figLR.update_layout(hovermode='x unified',
showlegend=True,
plot_bgcolor="white",
paper_bgcolor = "rgba(0,0,0,0)",
xaxis_title="Date",
yaxis_title="Closing Rate",
legend_title="Data:",
margin=dict(t=50,l=200,b=50,r=200),
)
figLR.update_xaxes(showline=True, linewidth=2, linecolor='white', gridcolor='lightgray')
figLR.update_yaxes(showline=True, linewidth=2, linecolor='white', gridcolor='lightgray')
# Getting Info for all Models comparison
l30=dt.datetime.now()- dt.timedelta(30)
m_actual = web.DataReader('AAPL','yahoo', l30, dt.datetime.now())
m_actual=m_actual.reset_index()
# COMPLETE code to get models table
l30=dt.datetime.now()- dt.timedelta(30)
# Actual
m_actual_df = web.DataReader('AAPL','yahoo', l30, dt.datetime.now())
m_actual_df=m_actual_df.reset_index()
m_actual=m_actual_df[['Date','Close']]
m_actual["Model"]="Actual Close Price"
m_actual.rename(columns={'Close':'Predictions'}, inplace=True)
# LR
m_LR=LR_prediction[['Date','Predictionslr']]
m_LR["Model"]="Linear Regression Model"
m_LR.rename(columns={'Predictionslr':'Predictions'}, inplace=True)
# Tree Prediction
m_tree=Tree_prediction[['Date','Predictions']]
m_tree["Model"]="Tree Model"
# Moving Average
m_MA=moving_avg[['date','est_N2']]
m_MA["Model"]="Moving Average Model"
m_MA.rename(columns={'est_N2':'Predictions','date':"Date"}, inplace=True)
m_MA["Date"]=pd.to_datetime(m_MA.Date,format="%Y-%m-%d")
m_MA1 = m_MA[(m_MA['Date']>(dt.datetime.now()- dt.timedelta(30))) & (m_MA['Date']<dt.datetime.now())]
# Long short-term memory
D_validationData["Date"]= | pd.to_datetime(D_validationData.Date,format="%Y-%m-%d") | pandas.to_datetime |
"""Risk Premiums from Fama-Macbeth Cross-sectional Regression
- pandas datareader, Fama French data library
<NAME>
License: MIT
"""
import os
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
import matplotlib.pyplot as plt
import statsmodels.formula.api as smf
import pandas_datareader as pdr
from pandas_datareader.data import DataReader
from pandas_datareader.famafrench import FamaFrenchReader
from finds.database import SQL, Redis
from finds.structured import CRSP, Signals, Benchmarks
from finds.busday import BusDay
from finds.backtesting import RiskPremium
from finds.solve import winsorized
from settings import settings
LAST_DATE = settings['crsp_date']
sql = SQL(**settings['sql'])
user = SQL(**settings['user'])
rdb = Redis(**settings['redis'])
bd = BusDay(sql)
crsp = CRSP(sql, bd, rdb)
bench = Benchmarks(sql, bd)
signals = Signals(user)
logdir = os.path.join(settings['images'], 'fm')
def least_squares(data=None, y=['y'], x=['x'], stdres=False):
"""Helper to compute least square coefs, supports groupby().apply"""
X = data[x].to_numpy()
Y = data[y].to_numpy()
X = np.hstack([np.ones((X.shape[0], 1)), X])
x = ['Intercept'] + x
b = np.dot(np.linalg.inv(np.dot(X.T, X)), np.dot(X.T, Y)).T
if stdres:
b = np.hstack([b, np.std(Y-(X @ b.T), axis=0).reshape(-1,1)])
x = x + ['stdres']
return DataFrame(b, columns=x, index=y)
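# Illustrative sketch of the groupby().apply usage mentioned in the docstring
# (synthetic numbers; names are hypothetical and shown only for the calling convention):
_demo = DataFrame({'permno': [1, 1, 1, 2, 2, 2],
                   'ret': [0.01, 0.02, 0.03, 0.00, 0.01, 0.02],
                   'beta': [0.9, 1.0, 1.1, 1.2, 1.3, 1.4]})
_demo_coefs = _demo.groupby('permno').apply(least_squares, y=['ret'], x=['beta'])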
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2021, earthobservations developers.
# Distributed under the MIT License. See LICENSE for more info.
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
import pytz
from freezegun import freeze_time
from pandas import Timestamp
from pandas._testing import assert_frame_equal
from wetterdienst.exceptions import StartDateEndDateError
from wetterdienst.metadata.period import Period
from wetterdienst.metadata.resolution import Resolution
from wetterdienst.metadata.timezone import Timezone
from wetterdienst.provider.dwd.observation import (
DwdObservationDataset,
DwdObservationPeriod,
DwdObservationResolution,
)
from wetterdienst.provider.dwd.observation.api import DwdObservationRequest
from wetterdienst.provider.dwd.observation.metadata.parameter import (
DwdObservationParameter,
)
from wetterdienst.settings import Settings
def test_dwd_observation_data_api():
request = DwdObservationRequest(
parameter=["precipitation_height"],
resolution="daily",
period=["recent", "historical"],
)
assert request == DwdObservationRequest(
parameter=[DwdObservationParameter.DAILY.PRECIPITATION_HEIGHT],
resolution=Resolution.DAILY,
period=[Period.HISTORICAL, Period.RECENT],
start_date=None,
end_date=None,
)
assert request.parameter == [
(
DwdObservationParameter.DAILY.CLIMATE_SUMMARY.PRECIPITATION_HEIGHT,
DwdObservationDataset.CLIMATE_SUMMARY,
)
]
@pytest.mark.remote
def test_dwd_observation_data_dataset():
"""Request a parameter set"""
expected = DwdObservationRequest(
parameter=["kl"],
resolution="daily",
period=["recent", "historical"],
).filter_by_station_id(station_id=(1,))
given = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
period=[DwdObservationPeriod.HISTORICAL, DwdObservationPeriod.RECENT],
start_date=None,
end_date=None,
).filter_by_station_id(
station_id=(1,),
)
assert given == expected
expected = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
period=[DwdObservationPeriod.HISTORICAL, DwdObservationPeriod.RECENT],
).filter_by_station_id(
station_id=(1,),
)
given = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
period=[DwdObservationPeriod.HISTORICAL, DwdObservationPeriod.RECENT],
start_date=None,
end_date=None,
).filter_by_station_id(
station_id=(1,),
)
assert expected == given
assert expected.parameter == [
(
DwdObservationDataset.CLIMATE_SUMMARY,
DwdObservationDataset.CLIMATE_SUMMARY,
)
]
def test_dwd_observation_data_parameter():
"""Test parameter given as single value without dataset"""
request = DwdObservationRequest(
parameter=["precipitation_height"],
resolution="daily",
period=["recent", "historical"],
)
assert request.parameter == [
(
DwdObservationParameter.DAILY.CLIMATE_SUMMARY.PRECIPITATION_HEIGHT,
DwdObservationDataset.CLIMATE_SUMMARY,
)
]
request = DwdObservationRequest(
parameter=["climate_summary"],
resolution="daily",
period=["recent", "historical"],
)
assert request.parameter == [(DwdObservationDataset.CLIMATE_SUMMARY, DwdObservationDataset.CLIMATE_SUMMARY)]
def test_dwd_observation_data_parameter_dataset_pairs():
"""Test parameters given as parameter - dataset pair"""
request = DwdObservationRequest(
parameter=[("climate_summary", "climate_summary")],
resolution="daily",
period=["recent", "historical"],
)
assert request.parameter == [(DwdObservationDataset.CLIMATE_SUMMARY, DwdObservationDataset.CLIMATE_SUMMARY)]
request = DwdObservationRequest(
parameter=[("precipitation_height", "precipitation_more")],
resolution="daily",
period=["recent", "historical"],
)
assert request.parameter == [
(
DwdObservationParameter.DAILY.PRECIPITATION_MORE.PRECIPITATION_HEIGHT,
DwdObservationDataset.PRECIPITATION_MORE,
)
]
@pytest.mark.remote
def test_dwd_observation_data_fails():
# station id
assert (
DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
period=[DwdObservationPeriod.HISTORICAL],
resolution=DwdObservationResolution.DAILY,
)
.filter_by_station_id(
station_id=["test"],
)
.df.empty
)
with pytest.raises(StartDateEndDateError):
DwdObservationRequest(
parameter=["abc"],
resolution=DwdObservationResolution.DAILY,
start_date="1971-01-01",
end_date="1951-01-01",
)
def test_dwd_observation_data_dates():
# time input
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1971-01-01",
).filter_by_station_id(
station_id=[1],
)
assert request == DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
period=[
DwdObservationPeriod.HISTORICAL,
],
start_date=datetime(1971, 1, 1),
end_date=datetime(1971, 1, 1),
).filter_by_station_id(
station_id=[1],
)
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
period=[DwdObservationPeriod.HISTORICAL],
end_date="1971-01-01",
).filter_by_station_id(
station_id=[1],
)
assert request == DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
period=[
DwdObservationPeriod.HISTORICAL,
],
start_date=datetime(1971, 1, 1),
end_date=datetime(1971, 1, 1),
).filter_by_station_id(
station_id=[1],
)
with pytest.raises(StartDateEndDateError):
DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1971-01-01",
end_date="1951-01-01",
)
def test_request_period_historical():
# Historical period expected
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1971-01-01",
)
assert request.period == [
Period.HISTORICAL,
]
def test_request_period_historical_recent():
# Historical and recent period expected
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1971-01-01",
end_date=pd.Timestamp(datetime.utcnow()) - pd.Timedelta(days=400),
)
assert request.period == [
Period.HISTORICAL,
Period.RECENT,
]
def test_request_period_historical_recent_now():
# Historical, recent and now period expected
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1971-01-01",
end_date=pd.Timestamp(datetime.utcnow()),
)
assert request.period == [
Period.HISTORICAL,
Period.RECENT,
Period.NOW,
]
@freeze_time(datetime(2022, 1, 29, 1, 30, tzinfo=pytz.timezone(Timezone.GERMANY.value)))
def test_request_period_recent_now():
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date=pd.Timestamp(datetime.utcnow()) - pd.Timedelta(hours=2),
)
assert request.period == [Period.RECENT, Period.NOW]
@freeze_time(datetime(2022, 1, 29, 2, 30, tzinfo=pytz.timezone(Timezone.GERMANY.value)))
def test_request_period_now():
# Now period
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date=pd.Timestamp(datetime.utcnow()) - pd.Timedelta(hours=2),
)
assert request.period == [Period.NOW]
@freeze_time("2021-03-28T18:38:00+02:00")
def test_request_period_now_fixeddate():
# Now period
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date=pd.Timestamp(datetime.utcnow()) - pd.Timedelta(hours=2),
)
assert Period.NOW in request.period
def test_request_period_empty():
# No period (for example in future)
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date=pd.Timestamp(datetime.utcnow()) + pd.Timedelta(days=720),
)
assert request.period == []
@pytest.mark.remote
def test_dwd_observation_data_result_missing_data():
"""Test for DataFrame having empty values for dates where the station should not
have values"""
Settings.tidy = True
Settings.humanize = True
Settings.si_units = True
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1933-12-27", # few days before official start
end_date="1934-01-04", # few days after official start,
).filter_by_station_id(
station_id=[1048],
)
# Leave only one column to potentially contain NaN which is VALUE
df = request.values.all().df.drop("quality", axis=1)
df_1933 = df[df["date"].dt.year == 1933]
df_1934 = df[df["date"].dt.year == 1934]
assert not df_1933.empty and df_1933.dropna().empty
assert not df_1934.empty and not df_1934.dropna().empty
request = DwdObservationRequest(
parameter=DwdObservationParameter.HOURLY.TEMPERATURE_AIR_MEAN_200,
resolution=DwdObservationResolution.HOURLY,
start_date="2020-06-09 12:00:00", # no data at this time (reason unknown)
end_date="2020-06-09 12:00:00",
).filter_by_station_id(
station_id=["03348"],
)
df = request.values.all().df
assert_frame_equal(
df,
pd.DataFrame(
{
"station_id": pd.Categorical(["03348"]),
"dataset": pd.Categorical(["temperature_air"]),
"parameter": pd.Categorical(["temperature_air_mean_200"]),
"date": [datetime(2020, 6, 9, 12, 0, 0, tzinfo=pytz.UTC)],
"value": pd.Series([pd.NA], dtype=pd.Float64Dtype()).astype(float),
"quality": pd.Series([pd.NA], dtype=pd.Float64Dtype()).astype(float),
}
),
check_categorical=False,
)
@pytest.mark.remote
def test_dwd_observation_data_result_tabular():
"""Test for actual values (tabular)"""
Settings.tidy = False
Settings.humanize = False
Settings.si_units = False
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1933-12-31", # few days before official start
end_date="1934-01-01", # few days after official start,
).filter_by_station_id(
station_id=[1048],
)
df = request.values.all().df
assert list(df.columns.values) == [
"station_id",
"dataset",
"date",
"qn_3",
"fx",
"fm",
"qn_4",
"rsk",
"rskf",
"sdk",
"shk_tag",
"nm",
"vpm",
"pm",
"tmk",
"upm",
"txk",
"tnk",
"tgk",
]
assert_frame_equal(
df,
pd.DataFrame(
{
"station_id": pd.Categorical(["01048"] * 2),
"dataset": pd.Categorical(["climate_summary"] * 2),
"date": [
datetime(1933, 12, 31, tzinfo=pytz.UTC),
datetime(1934, 1, 1, tzinfo=pytz.UTC),
],
"qn_3": pd.Series([pd.NA, pd.NA], dtype=pd.Int64Dtype()),
"fx": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
"fm": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
"qn_4": pd.Series([pd.NA, 1], dtype=pd.Int64Dtype()),
"rsk": pd.to_numeric([pd.NA, 0.2], errors="coerce"),
"rskf": pd.to_numeric([pd.NA, 8], errors="coerce"),
"sdk": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
"shk_tag": pd.Series([pd.NA, 0], dtype=pd.Int64Dtype()),
"nm": pd.to_numeric([pd.NA, 8.0], errors="coerce"),
"vpm": pd.to_numeric([pd.NA, 6.4], errors="coerce"),
"pm": pd.to_numeric([pd.NA, 1008.60], errors="coerce"),
"tmk": pd.to_numeric([pd.NA, 0.5], errors="coerce"),
"upm": pd.to_numeric([pd.NA, 97.00], errors="coerce"),
"txk": pd.to_numeric([pd.NA, 0.7], errors="coerce"),
"tnk": pd.to_numeric([pd.NA, 0.2], errors="coerce"),
"tgk": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
}
),
check_categorical=False,
)
@pytest.mark.remote
def test_dwd_observation_data_result_tabular_metric():
"""Test for actual values (tabular) in metric units"""
Settings.tidy = False
Settings.humanize = False
Settings.si_units = True
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1933-12-31", # few days before official start
end_date="1934-01-01", # few days after official start,
).filter_by_station_id(
station_id=[1048],
)
df = request.values.all().df
assert list(df.columns.values) == [
"station_id",
"dataset",
"date",
"qn_3",
"fx",
"fm",
"qn_4",
"rsk",
"rskf",
"sdk",
"shk_tag",
"nm",
"vpm",
"pm",
"tmk",
"upm",
"txk",
"tnk",
"tgk",
]
assert_frame_equal(
df,
pd.DataFrame(
{
"station_id": pd.Categorical(["01048"] * 2),
"dataset": pd.Categorical(["climate_summary"] * 2),
"date": [
datetime(1933, 12, 31, tzinfo=pytz.UTC),
datetime(1934, 1, 1, tzinfo=pytz.UTC),
],
"qn_3": pd.Series([pd.NA, pd.NA], dtype=pd.Int64Dtype()),
"fx": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
"fm": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
"qn_4": pd.Series([pd.NA, 1], dtype=pd.Int64Dtype()),
"rsk": pd.to_numeric([pd.NA, 0.2], errors="coerce"),
"rskf": pd.to_numeric([pd.NA, 8], errors="coerce"),
"sdk": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
"shk_tag": pd.Series([pd.NA, 0], dtype=pd.Int64Dtype()),
"nm": pd.to_numeric([pd.NA, 100.0], errors="coerce"),
"vpm": pd.to_numeric([pd.NA, 640.0], errors="coerce"),
"pm": pd.to_numeric([pd.NA, 100860.0], errors="coerce"),
"tmk": pd.to_numeric([pd.NA, 273.65], errors="coerce"),
"upm": pd.to_numeric([pd.NA, 97.00], errors="coerce"),
"txk": pd.to_numeric([pd.NA, 273.84999999999997], errors="coerce"),
"tnk": pd.to_numeric([pd.NA, 273.34999999999997], errors="coerce"),
"tgk": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
}
),
check_categorical=False,
)
@pytest.mark.remote
def test_dwd_observation_data_result_tidy_metric():
"""Test for actual values (tidy) in metric units"""
Settings.tidy = True
Settings.humanize = False
Settings.si_units = True
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1933-12-31", # few days before official start
end_date="1934-01-01", # few days after official start,
).filter_by_station_id(
station_id=(1048,),
)
df = request.values.all().df
assert list(df.columns.values) == [
"station_id",
"dataset",
"parameter",
"date",
"value",
"quality",
]
assert_frame_equal(
df,
pd.DataFrame(
{
"station_id": | pd.Categorical(["01048"] * 28) | pandas.Categorical |
# Utility functions
import re
import pandas as pd
from collections import Counter
from nltk.tokenize import wordpunct_tokenize
from nltk.corpus import stopwords
import requests
import simplejson
def my_replacements(text):
"""
Quick function to clean up some of my review text. It clears HTML and some extra characters.
Also removes my frequent mentions of full reviews on my blog.
:param text:
Text to process
:return:
Processed text
"""
text = re.sub(r'<(.*?)>', ' ', text) # removing HTML code encapsulated within <>
text = re.sub(r'\n', ' ', text) # removing newline characters
text = re.sub(r'&nbsp;', ' ', text) # removing leftover &nbsp; HTML entities
text = re.sub(r'\"','', text) # removing explicit quotation marks
text = re.sub(r"\'", '', text) # removing explicit single quotation marks
# Text replacement
stop_text = ["For my full review", "For a full review", "check out my blog", "Read my full review at my blog",
"review can be found in my blog", "A full review is available on my blog", "review is up on my blog",
"full review", "my blog"]
for elem in stop_text:
text = re.sub(elem, '', text)
return text
def get_sentiment(text, more_stop_words=['']):
# Load up the NRC emotion lexicon
filename = 'data/NRC-emotion-lexicon-wordlevel-alphabetized-v0.92.txt'
# Add to stop_words
stop_words = set(stopwords.words('english'))
stop_words.update(more_stop_words)
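# The NRC lexicon file is whitespace-delimited; each row gives a word, an affect category, and a 0/1 association flag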
emotion_data = | pd.read_csv(filename, delim_whitespace=True, skiprows=45, header=None, names=['word', 'affect', 'flag']) | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import itertools
from tqdm import tqdm
from lidopt.model import evaluate, calculate_metrics
from lidopt import PARAM_GRID, METRICS, EXP, SIM, MODE
from lidopt.parsers import parse_experiment
def run(event=None, event_name=None, path='./data/output/results.csv'):
col_names = list(PARAM_GRID.keys())
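# Expand the parameter grid into its full Cartesian product; each combination is evaluated as one simulation run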
all_combinations = list(itertools.product(*[PARAM_GRID[k] for k in PARAM_GRID.keys()]))
df = pd.DataFrame.from_records(all_combinations, columns=col_names)
for metric in METRICS:
df[metric] = 0
for i in tqdm(range(df.shape[0])):
if event_name is not None:
results = evaluate(reportfile='./data/output/reports/parameters_RE_{0}_{1}.txt'.format(event_name+1,i+1), experiment=event, params=df.loc[i, col_names])
else:
results = evaluate(reportfile='./data/output/reports/parameters_{}.txt'.format(str(i+1)), experiment=event, params=df.loc[i, col_names])
for metric in METRICS:
df.loc[i, metric] = results[metric]
if event_name is not None:
df.to_csv(path.format(event_name+1), index_label='simulation_number')
else:
idx = pd.Series([i+1 for i in range(len(df))])
df.set_index(idx, inplace=True)
df.to_csv(path, index_label='simulation_number')
return df
def run_per_event():
rain_events = pd.read_csv(EXP['rain_events'], parse_dates=True)
total_rain_events = rain_events.shape[0]
event_path = "./data/output/events/event_CAL_{}_RE_{}.csv"
df = run()
for i,row in tqdm(df.iterrows(), total = df.shape[0]):
for j in range(total_rain_events):
# Initialize metrics object
col_names = list(PARAM_GRID.keys())
all_combinations = list(itertools.product(*[PARAM_GRID[k] for k in PARAM_GRID.keys()]))
metrics_df = | pd.DataFrame.from_records(all_combinations, columns=col_names) | pandas.DataFrame.from_records |
import re
import datetime as dt
import numpy as np
import pandas as pd
from path import Path
from PIL import Image
import base64
from io import BytesIO
import plotly
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from skimage import io
import onion_trees as ot
import visualize as bv
import json
import statsmodels as sm
from statsmodels.formula.api import ols
from data import STATE2ABBREV, COUNTY_CORRECTIONS
import bjorn_support as bs
import mutations as bm
def load_img(img_filepath):
img = io.imread(img_filepath)
pil_img = Image.fromarray(img) # PIL image object
prefix = "data:image/png;base64,"
with BytesIO() as stream:
pil_img.save(stream, format="png")
base64_string = prefix + base64.b64encode(stream.getvalue()).decode("utf-8")
fig = go.Figure(go.Image(source=base64_string))
fig.update_layout(margin=dict(l=0, r=0, b=0, t=0),
coloraxis_showscale=False, template='plotly_white', autosize=True)
fig.update_xaxes(showticklabels=False).update_yaxes(showticklabels=False)
return fig
def world_time_relative(data, feature, values, res, strain='B117', vocs=['B.1.1.7', 'B.1.1.70']):
if len(values)==1:
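# Snap each date to the Monday of its week so counts are aggregated weekly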
data.loc[:, 'weekday'] = data['date'].dt.weekday
data.loc[:, 'date'] = data['date'] - data['weekday'] * dt.timedelta(days=1)
results = (data.loc[(data[feature]==values[0])]
.drop_duplicates(subset=['date', 'strain']))
total_samples = (data[(~data['pangolin_lineage'].isin(vocs))]
.groupby('date')
.agg(total_samples=('strain', 'nunique')))
else:
res = res.copy()
# res.loc[:, 'tmp'] = res['date'].str.split('-')
# res = res[res['tmp'].str.len()>=3]
# res.loc[:, 'date'] = pd.to_datetime(res['date'], errors='coerce')
res.loc[:, 'weekday'] = res['date'].dt.weekday
res.loc[:, 'date'] = res['date'] - res['weekday'] * dt.timedelta(days=1)
total_samples = (res[(~res['pangolin_lineage'].isin(vocs))]
.groupby('date')
.agg(total_samples=('strain', 'nunique'))
.reset_index())
results = res[(res['is_vui']==True)].drop_duplicates(subset=['date', 'strain'])
b117_world_time = (results.groupby('date')
.agg(num_samples=('strain', 'nunique'),
country_counts=('country',
lambda x: np.unique(x,
return_counts=True)),
divisions=('division', 'unique'),
locations=('location', 'unique'))
.reset_index())
b117_world_time.loc[:, 'countries'] = b117_world_time['country_counts'].apply(lambda x: list(x[0]))
b117_world_time.loc[:, 'country_counts'] = b117_world_time['country_counts'].apply(lambda x: list(x[1]))
b117_world_time = pd.merge(b117_world_time, total_samples, on='date', how='right')
b117_world_time.loc[:, ['countries', 'divisions', 'locations']] = b117_world_time[['countries', 'divisions', 'locations']].fillna('')
b117_world_time.loc[:, ['num_samples', 'total_samples']] = b117_world_time[['num_samples', 'total_samples']].fillna(0)
first_detected = b117_world_time.loc[b117_world_time['num_samples']>0]['date'].min()
first_countries = b117_world_time.loc[b117_world_time['date']==first_detected, 'countries'].values[0]
b117_world_time = b117_world_time[b117_world_time['date']>=first_detected]
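# Running relative frequency: cumulative variant samples over cumulative non-VOC samples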
b117_world_time['cum_num_samples'] = b117_world_time['num_samples'].cumsum()
b117_world_time.loc[:, 'cum_total_samples'] = b117_world_time['total_samples'].cumsum()
b117_world_time.loc[:, 'rel_freq'] = b117_world_time['cum_num_samples'] / b117_world_time['cum_total_samples']
fig = go.Figure(data=go.Scatter(y=b117_world_time['rel_freq'],
x=b117_world_time['date'],
name='B.1.1.7 samples', mode='markers+lines',
line_color='rgba(220,20,60,.6)',
text=b117_world_time[['num_samples', 'countries', 'country_counts',
'divisions', 'locations',
'date']],
hovertemplate="<b>Number of cases: %{text[0]}</b><br>" +
"<b>Country(s) Reported: %{text[1]}</b><br>" +
"<b>Cases Per Country: %{text[2]}</b><br>" +
"<b>State(s) Reported: %{text[3]}</b><br>" +
"<b>County(s) Reported: %{text[4]}</b><br>" +
"<b>Date: %{text[5]}</b><br>"))
fig.add_annotation(x=first_detected,
y=b117_world_time.loc[b117_world_time['date']==first_detected, 'rel_freq'].values[0],
text=f"On Earth, {strain} 1st detected in <br> {', '.join(first_countries)} <br> on week of <br> {first_detected.date()}",
showarrow=True,
arrowhead=1, yshift=10, arrowsize=2, ay=-250, ax=100)
fig.update_layout(yaxis_title=f'Relative cumulative frequency of {strain} on Earth',
xaxis_title='Collection Date',
template='plotly_white', autosize=True)#, height=850,
fig.update_yaxes(side = 'right')
return fig
def world_time(data, feature, values, res, strain='B117', sampling_type='random'):
if len(values)==1:
results = (data.loc[(data[feature]==values[0])]
.drop_duplicates(subset=['date', 'strain']))
else:
# results = (data.groupby(['date', 'country', 'division', 'purpose_of_sequencing',
# 'location', 'pangolin_lineage', 'strain'])
# .agg(mutations=('mutation', 'unique')).reset_index())
# results['is_vui'] = results['mutations'].apply(is_vui, args=(set(values),))
results = (res[(res['is_vui']==True)]
.drop_duplicates(subset=['date', 'strain']))
b117_world_time = (results.groupby('date')
.agg(num_samples=('strain', 'nunique'),
country_counts=('country',
lambda x: np.unique(x,
return_counts=True)),
divisions=('division', 'unique'),
locations=('location', 'unique'))
.reset_index())
b117_world_time.loc[:, 'countries'] = b117_world_time['country_counts'].apply(lambda x: list(x[0]))
b117_world_time.loc[:, 'country_counts'] = b117_world_time['country_counts'].apply(lambda x: list(x[1]))
b117_world_time.loc[:, 'date'] = pd.to_datetime(b117_world_time['date'],
errors='coerce')
b117_world_time['cum_num_samples'] = b117_world_time['num_samples'].cumsum()
first_detected = b117_world_time['date'].min()
first_countries = b117_world_time.loc[b117_world_time['date']==first_detected, 'countries']
fig = go.Figure(data=go.Scatter(y=b117_world_time['cum_num_samples'],
x=b117_world_time['date'],
name='B.1.1.7 samples', mode='markers+lines',
line_color='rgba(220,20,60,.6)',
text=b117_world_time[['num_samples', 'countries', 'country_counts',
'divisions', 'locations',
'date']],
hovertemplate="<b>Number of cases: %{text[0]}</b><br>" +
"<b>Country(s) Reported: %{text[1]}</b><br>" +
"<b>Cases Per Country: %{text[2]}</b><br>" +
"<b>State(s) Reported: %{text[3]}</b><br>" +
"<b>County(s) Reported: %{text[4]}</b><br>" +
"<b>Date: %{text[5]}</b><br>"))
fig.add_annotation(x=first_detected,
y=b117_world_time.loc[b117_world_time['date']==first_detected, 'cum_num_samples'].values[0],
text=f"On Earth, {strain} 1st detected in <br> {', '.join(first_countries.values[0])} <br> on <br> {first_detected.date()}",
showarrow=True,
arrowhead=1, yshift=10, arrowsize=2, ay=-250, ax=100)
fig.update_layout(yaxis_title='Global cumulative number of cases over time',
xaxis_title='Collection Date',
template='plotly_white', autosize=True)#, height=850,
fig.update_yaxes(side = 'right')
return fig
def us_time_relative(data, feature, values, res, strain='B117', country='USA', vocs=['B.1.1.7', 'B.1.1.70']):
if len(values)==1:
data.loc[:, 'weekday'] = data['date'].dt.weekday
data.loc[:, 'date'] = data['date'] - data['weekday'] * dt.timedelta(days=1)
results = (data.loc[(data[feature]==values[0])&
(data['country']=='United States of America')]
.drop_duplicates(subset=['date', 'strain']))
total_samples = (data[(~data['pangolin_lineage'].isin(vocs))&
(data['country']=='United States of America')]
.groupby('date')
.agg(total_samples=('strain', 'nunique')))
else:
# results = (data.groupby(['date', 'country', 'division', 'purpose_of_sequencing',
# 'location', 'pangolin_lineage', 'strain'])
# .agg(mutations=('mutation', 'unique')).reset_index())
# results['is_vui'] = results['mutations'].apply(is_vui, args=(set(values),))
res = res.copy()
# res.loc[:, 'tmp'] = res['date'].str.split('-')
# res = res[res['tmp'].str.len()>=3]
# res.loc[:, 'date'] = pd.to_datetime(res['date'], errors='coerce')
res.loc[:, 'weekday'] = res['date'].dt.weekday
res.loc[:, 'date'] = res['date'] - res['weekday'] * dt.timedelta(days=1)
total_samples = (res[(~res['pangolin_lineage'].isin(vocs))
&(res['country']=='United States of America')]
.groupby('date')
.agg(total_samples=('strain', 'nunique'))
.reset_index())
results = (res[(res['is_vui']==True)
& (res['country']=='United States of America')]
.drop_duplicates(subset=['date', 'strain']))
results['purpose_of_sequencing'] = '?'
random = results[results['purpose_of_sequencing']=='?']
biased = results[results['purpose_of_sequencing']!='?']
b117_us_time = (random.groupby('date')
.agg(
num_samples=('strain', 'nunique'),
state_counts=('division',
lambda x: np.unique(x,
return_counts=True))
)
.reset_index())
b117_us_time.loc[:, 'states'] = b117_us_time['state_counts'].apply(lambda x: list(x[0]))
b117_us_time.loc[:, 'state_counts'] = b117_us_time['state_counts'].apply(lambda x: list(x[1]))
b117_us_time = pd.merge(b117_us_time, total_samples, on='date', how='right')
b117_us_time.loc[:, 'states'] = b117_us_time['states'].fillna('')
b117_us_time.loc[:, ['num_samples', 'total_samples']] = b117_us_time[['num_samples', 'total_samples']].fillna(0)
sdrop_us_time = (biased.groupby('date')
.agg(
num_samples=('strain', 'nunique'),
state_counts=('division',
lambda x: np.unique(x,
return_counts=True))
)
.reset_index())
sdrop_us_time.loc[:, 'states'] = sdrop_us_time['state_counts'].apply(lambda x: list(x[0]))
sdrop_us_time.loc[:, 'state_counts'] = sdrop_us_time['state_counts'].apply(lambda x: list(x[1]))
sdrop_us_time = pd.merge(sdrop_us_time, total_samples, on='date', how='right')
sdrop_us_time.loc[:, 'states'] = sdrop_us_time['states'].fillna('')
sdrop_us_time.loc[:, ['num_samples', 'total_samples']] = sdrop_us_time[['num_samples', 'total_samples']].fillna(0)
fig = go.Figure()
if b117_us_time[b117_us_time['num_samples']>0].shape[0] > 0:
first_detected = b117_us_time.loc[b117_us_time['num_samples']>0]['date'].min()
first_states = b117_us_time.loc[b117_us_time['date']==first_detected, 'states'].values[0]
b117_us_time = b117_us_time[b117_us_time['date']>=first_detected]
b117_us_time.loc[:, 'cum_num_samples'] = b117_us_time['num_samples'].cumsum()
b117_us_time.loc[:, 'cum_total_samples'] = b117_us_time['total_samples'].cumsum()
b117_us_time.loc[:, 'rel_freq'] = b117_us_time['cum_num_samples'] / b117_us_time['cum_total_samples']
fig.add_trace(
go.Scatter(y=b117_us_time['rel_freq'],
x=b117_us_time['date'],
name=f'{strain} samples',
mode='markers+lines',
line_color='rgba(220,20,60,.6)',
text=b117_us_time[['num_samples', 'states',
'state_counts', 'date']],
hovertemplate="<b>Number of cases: %{text[0]}</b><br>" +
"<b>State(s) Reported: %{text[1]}</b><br>" +
"<b>Cases per State: %{text[2]}</b><br>" +
"<b>Date: %{text[3]}</b><br>"))
fig.add_annotation(x=first_detected,
y=b117_us_time.loc[b117_us_time['date']==first_detected, 'rel_freq'].values[0],
text=f"In US, {strain} 1st detected in <br> {', '.join(first_states)} <br> on week of <br> {first_detected.date()}",
showarrow=True,
arrowhead=1, yshift=10, arrowsize=2, ay=-100)
if sdrop_us_time[sdrop_us_time['num_samples']>0].shape[0] > 0:
first_detected = sdrop_us_time.loc[sdrop_us_time['num_samples']>0]['date'].min()
first_states = sdrop_us_time.loc[sdrop_us_time['date']==first_detected, 'states'].values[0]
sdrop_us_time = sdrop_us_time[sdrop_us_time['date']>=first_detected]
sdrop_us_time.loc[:, 'cum_num_samples'] = sdrop_us_time['num_samples'].cumsum()
sdrop_us_time.loc[:, 'cum_total_samples'] = sdrop_us_time['total_samples'].cumsum()
sdrop_us_time.loc[:, 'rel_freq'] = sdrop_us_time['cum_num_samples'] / sdrop_us_time['cum_total_samples']
fig.add_trace(
go.Scatter(y=sdrop_us_time['rel_freq'],
x=sdrop_us_time['date'],
name='biased sampling <br> (more info in a later section)',
mode='markers+lines',
line_color='rgba(30,144,255,.6)',
text=sdrop_us_time[['num_samples', 'states',
'state_counts', 'date']],
hovertemplate="<b>Number of cases: %{text[0]}</b><br>" +
"<b>State(s) Reported: %{text[1]}</b><br>" +
"<b>Cases per State: %{text[2]}</b><br>" +
"<b>Date: %{text[3]}</b><br>"))
fig.add_annotation(x=first_detected,
y=sdrop_us_time.loc[sdrop_us_time['date']==first_detected, 'rel_freq'].values[0],
text=f"In US, {strain} 1st detected in <br> {', '.join(first_states)} <br> on week of <br> {first_detected.date()}",
showarrow=True,
arrowhead=1, yshift=10, arrowsize=2, ay=-100)
fig.update_yaxes(side = 'right')
fig.update_layout(yaxis_title=f'Relative cumulative frequency of {strain} in USA',
xaxis_title='Collection Date',
template='plotly_white', autosize=True, showlegend=True,
legend=dict(
yanchor="top",
y=0.99,
xanchor="left",
x=0.01
))#, height=850,
return fig
def us_time(data, feature, values, res, strain='B117', country='USA', sampling_type='random'):
if len(values)==1:
results = (data.loc[(data[feature]==values[0]) &
(data['country']=='United States of America')]
.drop_duplicates(subset=['date', 'strain']))
else:
# results = (data.groupby(['date', 'country', 'division', 'purpose_of_sequencing',
# 'location', 'pangolin_lineage', 'strain'])
# .agg(mutations=('mutation', 'unique')).reset_index())
# results['is_vui'] = results['mutations'].apply(is_vui, args=(set(values),))
results = (res[(res['is_vui']==True)
& (res['country']=='United States of America')]
.drop_duplicates(subset=['date', 'strain']))
if sampling_type!='random':
results['purpose_of_sequencing'] = 'S'
else:
results['purpose_of_sequencing'] = '?'
random = results[results['purpose_of_sequencing']=='?']
biased = results[results['purpose_of_sequencing']!='?']
b117_us_time = (random.groupby('date')
.agg(
num_samples=('strain', 'nunique'),
state_counts=('division',
lambda x: np.unique(x,
return_counts=True))
)
.reset_index())
b117_us_time.loc[:, 'states'] = b117_us_time['state_counts'].apply(lambda x: list(x[0]))
b117_us_time.loc[:, 'state_counts'] = b117_us_time['state_counts'].apply(lambda x: list(x[1]))
b117_us_time.loc[:, 'date'] = pd.to_datetime(b117_us_time['date'],
errors='coerce')
b117_us_time['cum_num_samples'] = b117_us_time['num_samples'].cumsum()
sdrop_us_time = (biased.groupby('date')
.agg(
num_samples=('strain', 'nunique'),
state_counts=('division',
lambda x: np.unique(x,
return_counts=True))
)
.reset_index())
sdrop_us_time.loc[:, 'states'] = sdrop_us_time['state_counts'].apply(lambda x: list(x[0]))
sdrop_us_time.loc[:, 'state_counts'] = sdrop_us_time['state_counts'].apply(lambda x: list(x[1]))
sdrop_us_time.loc[:, 'date'] = pd.to_datetime(sdrop_us_time['date'],
errors='coerce')
sdrop_us_time['cum_num_samples'] = sdrop_us_time['num_samples'].cumsum()
fig = go.Figure()
if b117_us_time.shape[0] > 0:
fig.add_trace(
go.Scatter(y=b117_us_time['cum_num_samples'],
x=b117_us_time['date'],
name=f'{strain} samples',
mode='markers+lines',
line_color='rgba(220,20,60,.6)',
text=b117_us_time[['num_samples', 'states',
'state_counts', 'date']],
hovertemplate="<b>Number of cases: %{text[0]}</b><br>" +
"<b>State(s) Reported: %{text[1]}</b><br>" +
"<b>Cases per State: %{text[2]}</b><br>" +
"<b>Date: %{text[3]}</b><br>"))
first_detected = b117_us_time['date'].min()
first_states = b117_us_time.loc[b117_us_time['date']==first_detected, 'states']
fig.add_annotation(x=first_detected,
y=b117_us_time.loc[b117_us_time['date']==first_detected, 'cum_num_samples'].values[0],
text=f"In US, {strain} 1st detected in <br> {', '.join(first_states.values[0])} <br> on <br> {first_detected.date()}",
showarrow=True,
arrowhead=1, yshift=10, arrowsize=2, ay=-100)
if sdrop_us_time.shape[0] > 0:
fig.add_trace(
go.Scatter(y=sdrop_us_time['cum_num_samples'],
x=sdrop_us_time['date'],
name='biased sampling <br> (see notes on sampling)',
mode='markers+lines',
line_color='rgba(30,144,255,.6)',
text=sdrop_us_time[['num_samples', 'states',
'state_counts', 'date']],
hovertemplate="<b>Number of cases: %{text[0]}</b><br>" +
"<b>State(s) Reported: %{text[1]}</b><br>" +
"<b>Cases per State: %{text[2]}</b><br>" +
"<b>Date: %{text[3]}</b><br>"))
first_detected = sdrop_us_time['date'].min()
first_states = sdrop_us_time.loc[sdrop_us_time['date']==first_detected, 'states']
fig.add_annotation(x=first_detected,
y=sdrop_us_time.loc[sdrop_us_time['date']==first_detected, 'cum_num_samples'].values[0],
text=f"In US, {strain} 1st detected in <br> {', '.join(first_states.values[0])} <br> on <br> {first_detected.date()}",
showarrow=True,
arrowhead=1, yshift=10, arrowsize=2, ay=-100)
fig.update_yaxes(side = 'right')
fig.update_layout(yaxis_title=f'Cumulative number of cases over time in {country}',
xaxis_title='Collection Date',
template='plotly_white', autosize=True, showlegend=True,
legend=dict(
yanchor="top",
y=0.99,
xanchor="left",
x=0.01
))#, height=850,
return fig
def ca_time_relative(data, feature, values, res,
strain='B117', state='California',
vocs=['B.1.1.7', 'B.1.1.70']):
if len(values)==1:
data.loc[:, 'weekday'] = data['date'].dt.weekday
data.loc[:, 'date'] = data['date'] - data['weekday'] * dt.timedelta(days=1)
results = (data.loc[(data[feature]==values[0])&
(data['division']==state)]
.drop_duplicates(subset=['date', 'strain']))
total_samples = (data[(~data['pangolin_lineage'].isin(vocs))&
(data['division']==state)]
.groupby('date')
.agg(total_samples=('strain', 'nunique')))
else:
res = res.copy()
# res.loc[:, 'tmp'] = res['date'].str.split('-')
# res = res[res['tmp'].str.len()>=3]
# res.loc[:, 'date'] = pd.to_datetime(res['date'], errors='coerce')
res.loc[:, 'weekday'] = res['date'].dt.weekday
res.loc[:, 'date'] = res['date'] - res['weekday'] * dt.timedelta(days=1)
total_samples = (res[(~res['pangolin_lineage'].isin(vocs))
&(res['division']==state)]
.groupby('date')
.agg(total_samples=('strain', 'nunique'))
.reset_index())
results = res[(res['is_vui']==True)
& (res['division']==state)].drop_duplicates(subset=['date', 'strain'])
results.loc[:, 'purpose_of_sequencing'] = '?'
random = results[results['purpose_of_sequencing']=='?']
biased = results[results['purpose_of_sequencing']!='?']
b117_ca_time = (random.groupby('date')
.agg(num_samples=('strain', 'nunique'),
county_counts=('location',
lambda x: np.unique(x, return_counts=True)))
.reset_index())
b117_ca_time.loc[:, 'counties'] = b117_ca_time['county_counts'].apply(lambda x: list(x[0]))
b117_ca_time.loc[:, 'county_counts'] = b117_ca_time['county_counts'].apply(lambda x: list(x[1]))
# b117_ca_time.loc[:, 'date'] = pd.to_datetime(b117_ca_time['date'],
# errors='coerce')
b117_ca_time = pd.merge(b117_ca_time, total_samples, on='date', how='right')
b117_ca_time.loc[:, 'counties'] = b117_ca_time['counties'].fillna('')
b117_ca_time.loc[:, ['num_samples', 'total_samples']] = b117_ca_time[['num_samples', 'total_samples']].fillna(0)
sdrop_ca_time = (biased.groupby('date')
.agg(
num_samples=('strain', 'nunique'),
county_counts=('location',
lambda x: np.unique(x, return_counts=True))
)
.reset_index())
sdrop_ca_time.loc[:, 'counties'] = sdrop_ca_time['county_counts'].apply(lambda x: list(x[0]))
sdrop_ca_time.loc[:, 'county_counts'] = sdrop_ca_time['county_counts'].apply(lambda x: list(x[1]))
# sdrop_ca_time.loc[:, 'date'] = pd.to_datetime(sdrop_ca_time['date'], errors='coerce')
sdrop_ca_time = pd.merge(sdrop_ca_time, total_samples, on='date', how='right')
sdrop_ca_time.loc[:, 'counties'] = sdrop_ca_time['counties'].fillna('')
sdrop_ca_time.loc[:, ['num_samples', 'total_samples']] = sdrop_ca_time[['num_samples', 'total_samples']].fillna(0)
fig = go.Figure()
if b117_ca_time[b117_ca_time['num_samples']>0].shape[0] > 0:
first_detected = b117_ca_time.loc[b117_ca_time['num_samples']>0]['date'].min()
first_counties = b117_ca_time.loc[b117_ca_time['date']==first_detected, 'counties'].values[0]
b117_ca_time = b117_ca_time[b117_ca_time['date']>=first_detected]
b117_ca_time.loc[:, 'cum_num_samples'] = b117_ca_time['num_samples'].cumsum()
b117_ca_time.loc[:, 'cum_total_samples'] = b117_ca_time['total_samples'].cumsum()
b117_ca_time.loc[:, 'rel_freq'] = b117_ca_time['cum_num_samples'] / b117_ca_time['cum_total_samples']
fig.add_trace(
go.Scatter(y=b117_ca_time['rel_freq'],
x=b117_ca_time['date'],
name=f'{strain} samples', mode='markers+lines',
line_color='rgba(220,20,60,.6)',
text=b117_ca_time[['num_samples', 'counties',
'county_counts', 'date']],
hovertemplate="<b>Number of cases: %{text[0]}</b><br>" +
"<b>County(s) Reported: %{text[1]}</b><br>" +
"<b>Cases per County: %{text[2]}</b><br>" +
"<b>Date: %{text[3]}</b><br>"))
fig.add_annotation(x=first_detected,
y=b117_ca_time.loc[b117_ca_time['date']==first_detected, 'rel_freq'].values[0],
text=f"In CA, {strain} 1st detected in <br> {', '.join(first_counties)} county(s) <br> on week of <br> {first_detected.date()}",
showarrow=True,
arrowhead=1, yshift=10, arrowsize=2, ay=-50)
if sdrop_ca_time[sdrop_ca_time['num_samples']>0].shape[0] > 0:
first_detected = sdrop_ca_time.loc[sdrop_ca_time['num_samples']>0]['date'].min()
first_counties = sdrop_ca_time.loc[sdrop_ca_time['date']==first_detected, 'counties'].values[0]
sdrop_ca_time = sdrop_ca_time[sdrop_ca_time['date']>=first_detected]
sdrop_ca_time.loc[:, 'cum_num_samples'] = sdrop_ca_time['num_samples'].cumsum()
sdrop_ca_time.loc[:, 'cum_total_samples'] = sdrop_ca_time['total_samples'].cumsum()
sdrop_ca_time.loc[:, 'rel_freq'] = sdrop_ca_time['cum_num_samples'] / sdrop_ca_time['cum_total_samples']
fig.add_trace(
go.Scatter(y=sdrop_ca_time['rel_freq'],
x=sdrop_ca_time['date'],
name='biased sampling (read next section)',
mode='markers+lines',
line_color='rgba(30,144,255,.6)',
text=sdrop_ca_time[['num_samples', 'counties',
'county_counts', 'date']],
hovertemplate="<b>Number of cases: %{text[0]}</b><br>" +
"<b>State(s) Reported: %{text[1]}</b><br>" +
"<b>Cases per State: %{text[2]}</b><br>" +
"<b>Date: %{text[3]}</b><br>"
)
)
fig.add_annotation(x=first_detected,
y=sdrop_ca_time.loc[sdrop_ca_time['date']==first_detected, 'rel_freq'].values[0],
text=f"""In CA, {strain} 1st detected in <br> {', '.join(first_counties)} county(s) <br> on week of <br> {first_detected.date()}""",
showarrow=True,
arrowhead=1, yshift=10, arrowsize=2, ay=-50)
fig.update_yaxes(side = 'right')
fig.update_layout(yaxis_title=f'Relative cumulative frequency of {strain} in CA',
xaxis_title='Collection Date',
template='plotly_white', showlegend=True,
legend=dict(
yanchor="top",
y=0.99,
xanchor="left",
x=0.01
),
autosize=True#, autosize=True
)#, height=850,
return fig
def ca_time(data, feature, values, res, strain='B117', state='California', sampling_type='random'):
if len(values)==1:
results = (data.loc[(data[feature]==values[0]) &
(data['division']==state)]
.drop_duplicates(subset=['date', 'strain']))
else:
# results = (data.groupby(['date', 'country', 'division',
# 'location', 'pangolin_lineage', 'strain'])
# .agg(mutations=('mutation', 'unique')).reset_index())
# results['is_vui'] = results['mutations'].apply(is_vui, args=(set(values),))
results = res[(res['is_vui']==True)
&(res['division']==state)].drop_duplicates(subset=['date', 'strain'])
if sampling_type!='random':
results['purpose_of_sequencing'] = 'S'
else:
results['purpose_of_sequencing'] = '?'
random = results[results['purpose_of_sequencing']=='?']
biased = results[results['purpose_of_sequencing']!='?']
b117_ca_time = (random.groupby('date')
.agg(num_samples=('strain', 'nunique'),
county_counts=('location',
lambda x: np.unique(x, return_counts=True)))
.reset_index())
b117_ca_time.loc[:, 'counties'] = b117_ca_time['county_counts'].apply(lambda x: list(x[0]))
b117_ca_time.loc[:, 'county_counts'] = b117_ca_time['county_counts'].apply(lambda x: list(x[1]))
b117_ca_time.loc[:, 'date'] = pd.to_datetime(b117_ca_time['date'],
errors='coerce')
b117_ca_time.loc[:, 'cum_num_samples'] = b117_ca_time['num_samples'].cumsum()
sdrop_ca_time = (biased.groupby('date')
.agg(
num_samples=('strain', 'nunique'),
county_counts=('location',
lambda x: np.unique(x, return_counts=True))
)
.reset_index())
sdrop_ca_time.loc[:, 'counties'] = sdrop_ca_time['county_counts'].apply(lambda x: list(x[0]))
sdrop_ca_time.loc[:, 'county_counts'] = sdrop_ca_time['county_counts'].apply(lambda x: list(x[1]))
sdrop_ca_time.loc[:, 'date'] = pd.to_datetime(sdrop_ca_time['date'], errors='coerce')
sdrop_ca_time['cum_num_samples'] = sdrop_ca_time['num_samples'].cumsum()
fig = go.Figure()
if b117_ca_time.shape[0] > 0:
fig.add_trace(
go.Scatter(y=b117_ca_time['cum_num_samples'],
x=b117_ca_time['date'],
name=f'{strain} samples', mode='markers+lines',
line_color='rgba(220,20,60,.6)',
text=b117_ca_time[['num_samples', 'counties',
'county_counts', 'date']],
hovertemplate="<b>Number of cases: %{text[0]}</b><br>" +
"<b>County(s) Reported: %{text[1]}</b><br>" +
"<b>Cases per County: %{text[2]}</b><br>" +
"<b>Date: %{text[3]}</b><br>"))
first_detected = b117_ca_time['date'].min()
first_counties = b117_ca_time.loc[b117_ca_time['date']==first_detected, 'counties']
fig.add_annotation(x=first_detected,
y=b117_ca_time.loc[b117_ca_time['date']==first_detected, 'cum_num_samples'].values[0],
text=f"In CA, {strain} 1st detected in <br> {', '.join(first_counties.values[0])} <br> on {first_detected.date()}",
showarrow=True,
arrowhead=1, yshift=10, arrowsize=2, ay=-50)
if sdrop_ca_time.shape[0] > 0:
fig.add_trace(
go.Scatter(y=sdrop_ca_time['cum_num_samples'],
x=sdrop_ca_time['date'],
name='biased sampling <br> (see notes on sampling)',
mode='markers+lines',
line_color='rgba(30,144,255,.6)',
text=sdrop_ca_time[['num_samples', 'counties',
'county_counts', 'date']],
hovertemplate="<b>Number of cases: %{text[0]}</b><br>" +
"<b>State(s) Reported: %{text[1]}</b><br>" +
"<b>Cases per State: %{text[2]}</b><br>" +
"<b>Date: %{text[3]}</b><br>"
)
)
first_detected = sdrop_ca_time['date'].min()
first_counties = sdrop_ca_time.loc[sdrop_ca_time['date']==first_detected, 'counties']
fig.add_annotation(x=first_detected,
y=sdrop_ca_time.loc[sdrop_ca_time['date']==first_detected, 'cum_num_samples'].values[0],
text=f"In CA, {strain} 1st detected in <br> {', '.join(first_counties.values[0])} <br> on {first_detected.date()}",
showarrow=True,
arrowhead=1, yshift=10, arrowsize=2, ay=-50)
fig.update_yaxes(side = 'right')
fig.update_layout(yaxis_title=f'Cumulative number of {strain} in CA',
xaxis_title='Collection Date',
template='plotly_white', autosize=True, showlegend=True,
legend=dict(
yanchor="top",
y=0.99,
xanchor="left",
x=0.01
))#, height=850,
return fig
def strain_nt_distance(data, feature, values, strain='B117', sample_sz=250, vocs=['B.1.1.7', 'B.1.351', 'B.1.1.70']):
clock_rate = 8e-4
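# Fixed molecular clock of ~8e-4 substitutions/site/year, a commonly assumed rate for SARS-CoV-2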
if feature=='pangolin_lineage':
dists_df = create_lineage_data(data, feature, values, strain=strain, sample_sz=sample_sz, vocs=vocs)
elif feature=='mutation':
dists_df = create_distance_data(data, mutations=set(values), strain=strain, sample_sz=sample_sz, vocs=vocs)
else:
raise ValueError(f"Feature of type {feature} is not yet available for analysis. Aborting...")
dists_df['num_subs'] = dists_df['mutations'].str.len() / 29903  # normalise by the SARS-CoV-2 genome length, as in b117_nt_distance
# ignore seqs with unexpectedly high dists
dists_df = dists_df[dists_df['num_subs']<=0.0013]
dists_df = dists_df[~dists_df['date'].isna()]
dists_df.loc[:, 'date'] = pd.to_datetime(dists_df['date'], errors='coerce')
dists_df['time'] = dists_df['date'].astype(str).apply(bv.decimal_date)
b117_model = ols('num_subs ~ time', data=dists_df[dists_df['group']!='outgroup']).fit()
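# Fix the regression slope to the assumed clock rate and re-estimate only the intercept from the residuals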
b117_model.params['time'] = clock_rate
b117_preds = dists_df[dists_df['group']!='outgroup'].copy()
b117_model.params['Intercept'] = np.mean(b117_preds['num_subs'] - (clock_rate*b117_preds['time']))
b117_preds.loc[:, 'predictions'] = b117_model.predict(b117_preds['time'])
b117_n = int(b117_preds.shape[0] / 2)
outgrp_model = ols('num_subs ~ time',
data=dists_df[dists_df['group']=='outgroup']).fit()
outgrp_model.params['time'] = clock_rate
outgrp_preds = dists_df[dists_df['group']=='outgroup'].copy()
outgrp_model.params['Intercept'] = np.mean(outgrp_preds['num_subs'] - (clock_rate*outgrp_preds['time']))
outgrp_preds.loc[:, 'predictions'] = outgrp_model.predict(outgrp_preds['time'])
outgrp_n = int(outgrp_preds.shape[0] / 3)
fig = go.Figure(
data=go.Scatter(y=dists_df[dists_df['group']==f'Lineage {strain} in US']['num_subs'],
x=dists_df[dists_df['group']==f'Lineage {strain} in US']['date'],
name=f'{strain} (US)', mode='markers',
text=dists_df[dists_df['group']==f'Lineage {strain} in US']['strain'],
hovertemplate =
'Sample: %{text}',
marker_color='rgba(220,20,60,.6)'))
fig.add_trace(
go.Scatter(y=dists_df[dists_df['group']==f'Lineage {strain}']['num_subs'],
x=dists_df[dists_df['group']==f'Lineage {strain}']['date'],
mode='markers', marker_color='rgba(30,144,255,.6)',
name=f'{strain} (non-US)'
))
# fig.add_trace(go.Scatter(y=b117_preds['predictions'],
# x=b117_preds['date'], name='OLS (B.1.1.7)',
# mode='lines', line_color='rgba(0,0,0,1.)'))
fig.add_annotation(x=b117_preds.iloc[b117_n]['date'],
y=b117_preds.iloc[b117_n]['predictions'],
text=f"{strain} Lineage",
showarrow=True,
arrowhead=1, yshift=10, arrowsize=2, ay=-80)
fig.add_trace(
go.Scatter(y=dists_df[dists_df['group']=='outgroup']['num_subs'],
x=dists_df[dists_df['group']=='outgroup']['date'],
mode='markers', marker_color='rgba(211,211,211,.6)',
name='outgroup'
))
# fig.add_trace(go.Scatter(y=outgrp_preds['predictions'],
# x=outgrp_preds['date'], name='OLS (outgroup)',
# mode='lines', line_color='rgba(0,0,0,1.)'))
fig.add_annotation(x=outgrp_preds.iloc[outgrp_n]['date'],
y=outgrp_preds.iloc[outgrp_n]['predictions'],
text=f"outgroup",
showarrow=True,
arrowhead=1, yshift=10, arrowsize=2, ay=-80)
fig.update_layout(yaxis_title='Genetic Distance (root-to-tip)',
xaxis_title='Collection Date',
template='plotly_white', autosize=True,
margin={"l":1},
legend=dict(
yanchor="top",
y=0.99,
xanchor="left",
x=0.01
)
)#, height=850,
fig.update_yaxes(side = 'right')
return fig
def create_lineage_data(data, feature, values, strain, sample_sz=250, vocs=['B.1.1.7', 'B.1.1.70', 'B.1.351']):
data = (data.groupby(['date', 'country', 'division',
'location', 'pangolin_lineage', 'strain'])
.agg(mutations=('mutation', 'unique')).reset_index())
first_detected = data.loc[data[feature].isin(values), 'date'].min()
mutations = set(data.loc[(data[feature].isin(values))
&(data['date']==first_detected), 'mutations'].explode().unique())
data['d_w'] = data['mutations'].apply(compute_similarity, args=(mutations,))
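# Outgroup: non-VOC, non-lineage sequences sharing the most mutations with the lineage's founding set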
outgroup = (data[(~data[feature].isin(values))
&(~data['pangolin_lineage'].isin(vocs))]
.nlargest(sample_sz, 'd_w')['strain']
.unique())
try:
ingroup = data.loc[(data[feature].isin(values))].sample(sample_sz)['strain'].unique()
except:
ingroup = data.loc[(data[feature].isin(values))]['strain'].unique()
usgroup = data.loc[(data[feature].isin(values)) & (data['country']=='United States of America'), 'strain'].unique()
data = data.loc[(data['strain'].isin(ingroup)) | (data['strain'].isin(outgroup)) | (data['strain'].isin(usgroup))]
data.loc[:, 'group'] = 'nan'
data.loc[data['strain'].isin(outgroup), 'group'] = 'outgroup'
data.loc[(data['strain'].isin(ingroup)), 'group'] = f'Lineage {strain}'
data.loc[(data['strain'].isin(usgroup)), 'group'] = f'Lineage {strain} in US'
return data
def create_distance_data(data: pd.DataFrame, mutations: set, strain: str,
sample_sz: int=250, vocs: list=['B.1.1.7', 'B.1.351']):
data = (data.groupby(['date', 'country', 'division',
'location', 'pangolin_lineage', 'strain'])
.agg(mutations=('mutation', 'unique')).reset_index())
data['is_vui'] = data['mutations'].apply(is_vui, args=(mutations,))
ref_muts = extract_mutations(data)
data['d_w'] = data['mutations'].apply(compute_similarity, args=(ref_muts,))
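# Here the outgroup is a random sample of non-VOC, non-VUI sequences; d_w is computed but not used for selection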
outgroup = (data[(data['is_vui']==False)
&(~data['pangolin_lineage'].isin(vocs))]
.sample(sample_sz)['strain']
.unique())
try:
ingroup = data.loc[(data['is_vui']==True)].sample(sample_sz)['strain'].unique()
except:
ingroup = data.loc[(data['is_vui']==True)]['strain'].unique()
usgroup = data.loc[(data['is_vui']==True) & (data['country']=='United States of America'), 'strain'].unique()
data = data.loc[(data['strain'].isin(ingroup)) | (data['strain'].isin(outgroup)) | (data['strain'].isin(usgroup))]
data['group'] = 'outgroup'
data.loc[(data['strain'].isin(ingroup)), 'group'] = f'Lineage {strain}'
data.loc[(data['strain'].isin(usgroup)), 'group'] = f'Lineage {strain} in US'
return data
def is_vui(x, mutations: set):
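# A sequence counts as the variant under investigation only if it carries every defining mutation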
return mutations.issubset(set(x))
def extract_mutations(data: pd.DataFrame):
first_detected = data.loc[data['is_vui']==True, 'date'].min()
mutations = data.loc[(data['is_vui']==True)
&(data['date']==first_detected), 'mutations'].explode().unique()
return set(mutations)
def compute_similarity(x, reference_mutations: set):
common_mutations = set(x) & reference_mutations
return len(common_mutations)
def b117_nt_distance(gisaid_data, tree_fp, b117_meta, sample_sz=250, clock_rate=8e-4):
# nabla_symbol = u"\u2207"
croft_meta = pd.read_csv(b117_meta, sep='\t')
croft_meta = croft_meta[croft_meta['Country']!='USA'].copy()
# extract B117 samples from Emma Croft's build
b117_meta = croft_meta[croft_meta['Pangolin Lineage']=='B.1.1.7'].sample(sample_sz)
# extract outgroup samples from Emma Croft's build
outgrp_meta = croft_meta[croft_meta['Pangolin Lineage']!='B.1.1.7'].sample(sample_sz)
# extract B117 US samples from GISAID
us_b117 = gisaid_data[(gisaid_data['country']=='United States of America')
& (gisaid_data['pangolin_lineage']=='B.1.1.7')].copy()
# consolidate data and analyze
b117_data = gisaid_data[(gisaid_data['strain'].isin(b117_meta['Strain'].unique()))
|(gisaid_data['strain'].isin(outgrp_meta['Strain'].unique()))
|(gisaid_data['strain'].isin(us_b117['strain'].unique()))].copy()
b117_data.drop_duplicates(subset=['strain', 'pos', 'alt_codon'], inplace=True)
# b117_data = b117_data[b117_data['gene']=='S']
dists_df = (b117_data.groupby(['strain', 'date'])
.agg(num_nt_subs=('strain', 'count'))
.reset_index())
dists_df['num_nt_subs'] = dists_df['num_nt_subs'] / 29903
dists_df = dists_df[~dists_df['date'].isna()]
dists_df.loc[:, 'group'] = 'outgroup'
dists_df.loc[dists_df['strain'].isin(b117_meta['Strain'].unique()), 'group'] = 'B.1.1.7 (non-US)'
dists_df.loc[dists_df['strain'].isin(us_b117['strain'].unique()), 'group'] = 'B.1.1.7 (US)'
dists_df = dists_df.loc[~((dists_df['group']=='outgroup') & (dists_df['num_nt_subs']>=0.001))]
dists_df.loc[:, 'date'] = pd.to_datetime(dists_df['date'], errors='coerce')
dists_df['time'] = dists_df['date'].astype(str).apply(bv.decimal_date)
b117_model = ols('num_nt_subs ~ time', data=dists_df[dists_df['group']!='outgroup']).fit()
b117_model.params['time'] = clock_rate
b117_preds = dists_df[dists_df['group']!='outgroup'].copy()
b117_model.params['Intercept'] = np.mean(b117_preds['num_nt_subs'] - (clock_rate*b117_preds['time']))
b117_preds.loc[:, 'predictions'] = b117_model.predict(b117_preds['time'])
b117_n = int(b117_preds.shape[0] / 2)
outgrp_model = ols('num_nt_subs ~ time',
data=dists_df[dists_df['group']=='outgroup']).fit()
outgrp_model.params['time'] = clock_rate
outgrp_preds = dists_df[dists_df['group']=='outgroup'].copy()
outgrp_model.params['Intercept'] = np.mean(outgrp_preds['num_nt_subs'] - (clock_rate*outgrp_preds['time']))
outgrp_preds.loc[:, 'predictions'] = outgrp_model.predict(outgrp_preds['time'])
outgrp_n = int(outgrp_preds.shape[0] / 3)
fig = go.Figure(
data=go.Scatter(y=dists_df[dists_df['group']=='B.1.1.7 (US)']['num_nt_subs'],
x=dists_df[dists_df['group']=='B.1.1.7 (US)']['date'],
name='B.1.1.7 (US)', mode='markers',
text=dists_df[dists_df['group']=='B.1.1.7 (US)']['strain'],
hovertemplate =
'Sample: %{text}',
marker_color='rgba(220,20,60,.6)'))
fig.add_trace(
go.Scatter(y=dists_df[dists_df['group']=='B.1.1.7 (non-US)']['num_nt_subs'],
x=dists_df[dists_df['group']=='B.1.1.7 (non-US)']['date'],
mode='markers', marker_color='rgba(30,144,255,.6)',
name='B.1.1.7 (non-US)'
))
fig.add_trace(go.Scatter(y=b117_preds['predictions'],
x=b117_preds['date'], name='OLS (B.1.1.7)',
mode='lines', line_color='rgba(0,0,0,1.)'))
fig.add_annotation(x=b117_preds.iloc[b117_n]['date'],
y=b117_preds.iloc[b117_n]['predictions'],
text=f"B117 Lineage",
showarrow=True,
arrowhead=1, yshift=10, arrowsize=2, ay=-80)
fig.add_trace(
go.Scatter(y=dists_df[dists_df['group']=='outgroup']['num_nt_subs'],
x=dists_df[dists_df['group']=='outgroup']['date'],
mode='markers', marker_color='rgba(211,211,211,.6)',
name='outgroup'
))
fig.add_trace(go.Scatter(y=outgrp_preds['predictions'],
x=outgrp_preds['date'], name='OLS (outgroup)',
mode='lines', line_color='rgba(0,0,0,1.)'))
fig.add_annotation(x=outgrp_preds.iloc[outgrp_n]['date'],
y=outgrp_preds.iloc[outgrp_n]['predictions'],
text=f"outgroup",
showarrow=True,
arrowhead=1, yshift=10, arrowsize=2, ay=-80)
fig.update_layout(yaxis_title='Genetic Distance (root-to-tip)',
xaxis_title='Collection Date',
template='plotly_white', autosize=True)#, height=850,
return fig
def b117_aa_distance(gisaid_data, b117_meta, sample_sz=250):
croft_meta = pd.read_csv(b117_meta, sep='\t')
croft_meta = croft_meta[croft_meta['Country']!='USA'].copy()
# extract B117 samples from Emma Croft's build
b117_meta = croft_meta[croft_meta['Pangolin Lineage']=='B.1.1.7'].sample(sample_sz)
# extract outgroup samples from Emma Croft's build
outgrp_meta = croft_meta[croft_meta['Pangolin Lineage']!='B.1.1.7'].sample(sample_sz)
# extract B117 US samples from GISAID
us_b117 = gisaid_data[(gisaid_data['country']=='United States of America')
& (gisaid_data['pangolin_lineage']=='B.1.1.7')].copy()
# consolidate data and analyze
b117_data = gisaid_data[(gisaid_data['strain'].isin(b117_meta['Strain'].unique()))
|(gisaid_data['strain'].isin(outgrp_meta['Strain'].unique()))
|(gisaid_data['strain'].isin(us_b117['strain'].unique()))]
b117_data.loc[:, 'nonsyn'] = False
b117_data.loc[b117_data['ref_aa']!=b117_data['alt_aa'],
'nonsyn'] = True
b117_data.loc[:, 'S_nonsyn'] = False
b117_data.loc[(b117_data['gene']=='S') &
(b117_data['ref_aa']!=b117_data['alt_aa']),
'S_nonsyn'] = True
dists_df = (b117_data.groupby(['strain', 'date'])
.agg(num_nonsyn_muts=('nonsyn', 'sum'),
num_S_nonsyn_muts=('S_nonsyn', 'sum'))
.reset_index())
dists_df = dists_df[~dists_df['date'].isna()]
dists_df.loc[:, 'group'] = 'outgroup'
dists_df.loc[dists_df['strain'].isin(b117_meta['Strain'].unique()), 'group'] = 'B.1.1.7 (non-US)'
dists_df.loc[dists_df['strain'].isin(us_b117['strain'].unique()), 'group'] = 'B.1.1.7 (US)'
dists_df.loc[:, 'date'] = pd.to_datetime(dists_df['date'], errors='coerce')
dists_df.loc[:, 'month'] = dists_df['date'].dt.month
dists_df.loc[:, 'doy'] = dists_df['date'].dt.dayofyear
dists_df.loc[:, 'time'] = dists_df['date'].astype(int)/1e12
dists_df = dists_df.loc[~dists_df['doy'].isna()].copy()
b117_model = ols('num_nonsyn_muts ~ time', data=dists_df[dists_df['group']!='outgroup']).fit()
b117_preds = dists_df[dists_df['group']!='outgroup'].copy()
b117_preds.loc[:, 'predictions'] = b117_model.predict(b117_preds['time'])
outgrp_model = ols('num_nonsyn_muts ~ time',
data=dists_df[dists_df['group']=='outgroup']).fit()
outgrp_preds = dists_df[dists_df['group']=='outgroup'].copy()
outgrp_preds.loc[:, 'predictions'] = outgrp_model.predict(outgrp_preds['time'])
fig = go.Figure(
data=go.Scatter(y=dists_df[dists_df['group']=='B.1.1.7 (US)']['num_nonsyn_muts'],
x=dists_df[dists_df['group']=='B.1.1.7 (US)']['date'],
name='B.1.1.7 (US)', mode='markers',
marker_color='rgba(220,20,60,.6)'))
fig.add_trace(
go.Scatter(y=dists_df[dists_df['group']=='B.1.1.7 (non-US)']['num_nonsyn_muts'],
x=dists_df[dists_df['group']=='B.1.1.7 (non-US)']['date'],
mode='markers', marker_color='rgba(30,144,255,.6)',
name='B.1.1.7 (non-US)'
))
fig.add_trace(go.Scatter(y=b117_preds['predictions'],
x=b117_preds['date'], name='OLS (B.1.1.7)',
mode='lines', line_color='rgba(30,144,255,.6)'))
fig.add_trace(
go.Scatter(y=dists_df[dists_df['group']=='outgroup']['num_nonsyn_muts'],
x=dists_df[dists_df['group']=='outgroup']['date'],
mode='markers', marker_color='rgba(0,0,0,.6)',
name='outgroup'
))
fig.add_trace(go.Scatter(y=outgrp_preds['predictions'],
x=outgrp_preds['date'], name='OLS (outgroup)',
mode='lines', line_color='rgba(0,0,0,1.)'))
fig.update_layout(yaxis_title='Amino Acid Changes (root-to-tip)',
xaxis_title='Collection Date',
template='plotly_white', autosize=True)#, height=850,
return fig
def aa_distance(subs_fp, meta_fp, alpha=0.05):
alab_subs = pd.read_csv(subs_fp)
alab_subs.loc[:, 'nonsyn'] = False
alab_subs.loc[alab_subs['ref_aa']!=alab_subs['alt_aa'], 'nonsyn'] = True
alab_subs.loc[:, 'S_nonsyn'] = False
alab_subs.loc[(alab_subs['gene']=='S') & (alab_subs['ref_aa']!=alab_subs['alt_aa']), 'S_nonsyn'] = True
dists_df = (alab_subs.groupby('fasta_hdr')
.agg(num_nonsyn_muts=('nonsyn', 'sum'), num_S_nonsyn_muts=('S_nonsyn', 'sum'))
.reset_index())
meta = pd.read_csv(meta_fp)
sd_meta = meta[meta['location'].str.contains('San Diego')]
df = pd.merge(dists_df, sd_meta, on='fasta_hdr')
df.loc[:, 'date'] = pd.to_datetime(df['collection_date'], errors='coerce')
df.loc[:, 'month'] = df['date'].dt.month
df.loc[:, 'doy'] = df['date'].dt.dayofyear
df = df.loc[~df['doy'].isna()].copy()
model = ols('num_nonsyn_muts ~ doy', data=df).fit()
df.loc[:, 'predict'] = model.predict(df['doy'])
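# Flag sequences whose nonsynonymous mutation count deviates from the temporal trend (Benjamini-Hochberg adjusted outlier test)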
df.loc[:, 'p'] = model.outlier_test(method='fdr_bh')['fdr_bh(p)']
df.loc[:, 'outlier'] = False
df.loc[df['p']<alpha, 'outlier'] = True
fig = go.Figure(
data=go.Scatter(y=df[df['outlier']==False]['num_nonsyn_muts'],
x=df[df['outlier']==False]['date'],
name='samples', mode='markers',
marker_color='rgba(30,144,255,.6)'))
fig.add_trace(go.Scatter(y=df[df['outlier']==True]['num_nonsyn_muts'],
x=df[df['outlier']==True]['date'],
mode='markers',
marker_color='rgba(220,20,60,.6)', name='SoIs',
text=df[df['outlier']==True][['ID', 'date']],
hovertemplate =
"<b>%{text[0]}</b><br>" +
"<b>%{text[1]}</b><br>"))
fig.add_trace(go.Scatter(y=df['predict'], x=df['date'],
name='OLS', mode='lines',
line_color='rgba(0,0,0,1.)'))
fig.update_layout(yaxis_title='Amino Acid Changes (root-to-tip)', xaxis_title='Collection Date',
template='plotly_white', autosize=True)#, height=850, width=800)
return fig
def fetch_s_muts(series):
muts = [m for m in series.unique() if m[0]=='S']
return muts
def s_aa_distance(subs_fp, meta_fp, alpha=0.05):
alab_subs = pd.read_csv(subs_fp)
alab_subs.loc[:, 'mutation'] = alab_subs['gene']+':'+alab_subs['codon_num'].astype(str)+alab_subs['alt_aa']
alab_subs.loc[:, 'nonsyn'] = False
alab_subs.loc[alab_subs['ref_aa']!=alab_subs['alt_aa'], 'nonsyn'] = True
alab_subs.loc[:, 'S_nonsyn'] = False
alab_subs.loc[(alab_subs['gene']=='S') & (alab_subs['ref_aa']!=alab_subs['alt_aa']), 'S_nonsyn'] = True
alab_subs = alab_subs[alab_subs['S_nonsyn']==True]
dists_df = (alab_subs.groupby('fasta_hdr')
.agg(num_nonsyn_muts=('nonsyn', 'sum'),
num_S_nonsyn_muts=('S_nonsyn', 'sum'),
S_nonsyn_muts=('mutation', fetch_s_muts))
.reset_index())
meta = pd.read_csv(meta_fp)
sd_meta = meta[meta['location'].str.contains('San Diego')]
df = | pd.merge(dists_df, sd_meta, on='fasta_hdr') | pandas.merge |
import os
from datetime import datetime
import pandas as pd
from pytest import fixture
from socceraction.data.opta import (
OptaCompetitionSchema,
OptaGameSchema,
OptaPlayerSchema,
OptaTeamSchema,
)
from socceraction.data.opta.parsers import MA1JSONParser
@fixture()
def ma1json_parser() -> MA1JSONParser:
path = os.path.join(
os.path.dirname(__file__),
os.pardir,
os.pardir,
os.pardir,
"datasets",
"opta",
"ma1_408bfjw6uz5k19zk4am50ykmh.json",
)
return MA1JSONParser(str(path))
def test_extract_competitions(ma1json_parser: MA1JSONParser) -> None:
competitions = ma1json_parser.extract_competitions()
assert len(competitions) == 1
assert competitions[("722fdbecxzcq9788l6jqclzlw", "408bfjw6uz5k19zk4am50ykmh")] == {
"competition_id": "722fdbecxzcq9788l6jqclzlw",
"season_id": "408bfjw6uz5k19zk4am50ykmh",
"competition_name": "2. Bundesliga",
"season_name": "2015/2016",
}
OptaCompetitionSchema.validate(pd.DataFrame.from_dict(competitions, orient="index"))
def test_extract_games(ma1json_parser: MA1JSONParser) -> None:
games = ma1json_parser.extract_games()
assert len(games) == 1
assert games["bsu6pjne1eqz2hs8r3685vbhl"] == {
"game_id": "bsu6pjne1eqz2hs8r3685vbhl",
"season_id": "408bfjw6uz5k19zk4am50ykmh",
"competition_id": "722fdbecxzcq9788l6jqclzlw",
"game_day": 22,
"game_date": datetime(2016, 2, 20, 12, 0),
"home_team_id": "aojwbjr39s1w2mcd9l2bf2dhk",
"away_team_id": "kxpw3rqn4ukt7nqmtjj62lbn",
"venue": "BBBank Wildpark",
"away_score": 2,
"home_score": 2,
"duration": 93,
"attendance": 12746,
"referee": "<NAME>",
}
OptaGameSchema.validate( | pd.DataFrame.from_dict(games, orient="index") | pandas.DataFrame.from_dict |
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, Series, date_range, offsets
import pandas._testing as tm
class TestDataFrameShift:
def test_shift(self, datetime_frame, int_frame):
# naive shift
shiftedFrame = datetime_frame.shift(5)
tm.assert_index_equal(shiftedFrame.index, datetime_frame.index)
shiftedSeries = datetime_frame["A"].shift(5)
tm.assert_series_equal(shiftedFrame["A"], shiftedSeries)
shiftedFrame = datetime_frame.shift(-5)
tm.assert_index_equal(shiftedFrame.index, datetime_frame.index)
shiftedSeries = datetime_frame["A"].shift(-5)
tm.assert_series_equal(shiftedFrame["A"], shiftedSeries)
# shift by 0
unshifted = datetime_frame.shift(0)
tm.assert_frame_equal(unshifted, datetime_frame)
# shift by DateOffset
shiftedFrame = datetime_frame.shift(5, freq=offsets.BDay())
assert len(shiftedFrame) == len(datetime_frame)
shiftedFrame2 = datetime_frame.shift(5, freq="B")
tm.assert_frame_equal(shiftedFrame, shiftedFrame2)
d = datetime_frame.index[0]
shifted_d = d + offsets.BDay(5)
tm.assert_series_equal(
datetime_frame.xs(d), shiftedFrame.xs(shifted_d), check_names=False
)
# shift int frame
int_shifted = int_frame.shift(1) # noqa
# Shifting with PeriodIndex
ps = tm.makePeriodFrame()
shifted = ps.shift(1)
unshifted = shifted.shift(-1)
tm.assert_index_equal(shifted.index, ps.index)
tm.assert_index_equal(unshifted.index, ps.index)
tm.assert_numpy_array_equal(
unshifted.iloc[:, 0].dropna().values, ps.iloc[:-1, 0].values
)
shifted2 = ps.shift(1, "B")
shifted3 = ps.shift(1, offsets.BDay())
tm.assert_frame_equal(shifted2, shifted3)
tm.assert_frame_equal(ps, shifted2.shift(-1, "B"))
msg = "does not match PeriodIndex freq"
with pytest.raises(ValueError, match=msg):
ps.shift(freq="D")
# shift other axis
# GH#6371
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat(
[DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]],
ignore_index=True,
axis=1,
)
result = df.shift(1, axis=1)
tm.assert_frame_equal(result, expected)
# shift named axis
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat(
[DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]],
ignore_index=True,
axis=1,
)
result = df.shift(1, axis="columns")
tm.assert_frame_equal(result, expected)
def test_shift_bool(self):
df = DataFrame({"high": [True, False], "low": [False, False]})
rs = df.shift(1)
xp = DataFrame(
np.array([[np.nan, np.nan], [True, False]], dtype=object),
columns=["high", "low"],
)
tm.assert_frame_equal(rs, xp)
def test_shift_categorical(self):
# GH#9416
s1 = Series(["a", "b", "c"], dtype="category")
s2 = Series(["A", "B", "C"], dtype="category")
df = DataFrame({"one": s1, "two": s2})
rs = df.shift(1)
xp = DataFrame({"one": s1.shift(1), "two": s2.shift(1)})
tm.assert_frame_equal(rs, xp)
def test_shift_fill_value(self):
# GH#24128
df = DataFrame(
[1, 2, 3, 4, 5], index=date_range("1/1/2000", periods=5, freq="H")
)
exp = DataFrame(
[0, 1, 2, 3, 4], index=date_range("1/1/2000", periods=5, freq="H")
)
result = df.shift(1, fill_value=0)
tm.assert_frame_equal(result, exp)
exp = DataFrame(
[0, 0, 1, 2, 3], index=date_range("1/1/2000", periods=5, freq="H")
)
result = df.shift(2, fill_value=0)
tm.assert_frame_equal(result, exp)
def test_shift_empty(self):
# Regression test for GH#8019
df = DataFrame({"foo": []})
rs = df.shift(-1)
tm.assert_frame_equal(df, rs)
def test_shift_duplicate_columns(self):
# GH#9092; verify that position-based shifting works
# in the presence of duplicate columns
column_lists = [list(range(5)), [1] * 5, [1, 1, 2, 2, 1]]
data = np.random.randn(20, 5)
shifted = []
for columns in column_lists:
df = pd.DataFrame(data.copy(), columns=columns)
for s in range(5):
df.iloc[:, s] = df.iloc[:, s].shift(s + 1)
df.columns = range(5)
shifted.append(df)
# sanity check the base case
nulls = shifted[0].isna().sum()
tm.assert_series_equal(nulls, Series(range(1, 6), dtype="int64"))
# check all answers are the same
tm.assert_frame_equal(shifted[0], shifted[1])
tm.assert_frame_equal(shifted[0], shifted[2])
def test_shift_axis1_multiple_blocks(self):
# GH#35488
df1 = pd.DataFrame(np.random.randint(1000, size=(5, 3)))
df2 = pd.DataFrame(np.random.randint(1000, size=(5, 2)))
df3 = pd.concat([df1, df2], axis=1)
assert len(df3._mgr.blocks) == 2
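# pd.concat along axis=1 does not consolidate, so df3 keeps two separate internal blocks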
result = df3.shift(2, axis=1)
expected = df3.take([-1, -1, 0, 1, 2], axis=1)
expected.iloc[:, :2] = np.nan
expected.columns = df3.columns
tm.assert_frame_equal(result, expected)
# Case with periods < 0
# rebuild df3 because `take` call above consolidated
df3 = pd.concat([df1, df2], axis=1)
assert len(df3._mgr.blocks) == 2
result = df3.shift(-2, axis=1)
expected = df3.take([2, 3, 4, -1, -1], axis=1)
expected.iloc[:, -2:] = np.nan
expected.columns = df3.columns
tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings("ignore:tshift is deprecated:FutureWarning")
def test_tshift(self, datetime_frame):
# TODO: remove this test when tshift deprecation is enforced
# PeriodIndex
ps = tm.makePeriodFrame()
shifted = ps.tshift(1)
unshifted = shifted.tshift(-1)
tm.assert_frame_equal(unshifted, ps)
shifted2 = ps.tshift(freq="B")
tm.assert_frame_equal(shifted, shifted2)
shifted3 = ps.tshift(freq=offsets.BDay())
tm.assert_frame_equal(shifted, shifted3)
msg = "Given freq M does not match PeriodIndex freq B"
with pytest.raises(ValueError, match=msg):
ps.tshift(freq="M")
# DatetimeIndex
shifted = datetime_frame.tshift(1)
unshifted = shifted.tshift(-1)
tm.assert_frame_equal(datetime_frame, unshifted)
shifted2 = datetime_frame.tshift(freq=datetime_frame.index.freq)
tm.assert_frame_equal(shifted, shifted2)
inferred_ts = DataFrame(
datetime_frame.values,
Index(np.asarray(datetime_frame.index)),
columns=datetime_frame.columns,
)
shifted = inferred_ts.tshift(1)
expected = datetime_frame.tshift(1)
expected.index = expected.index._with_freq(None)
tm.assert_frame_equal(shifted, expected)
unshifted = shifted.tshift(-1)
tm.assert_frame_equal(unshifted, inferred_ts)
no_freq = datetime_frame.iloc[[0, 5, 7], :]
msg = "Freq was not set in the index hence cannot be inferred"
with pytest.raises(ValueError, match=msg):
no_freq.tshift()
def test_tshift_deprecated(self, datetime_frame):
# GH#11631
with tm.assert_produces_warning(FutureWarning):
datetime_frame.tshift()
def test_period_index_frame_shift_with_freq(self):
ps = tm.makePeriodFrame()
shifted = ps.shift(1, freq="infer")
unshifted = shifted.shift(-1, freq="infer")
tm.assert_frame_equal(unshifted, ps)
shifted2 = ps.shift(freq="B")
tm.assert_frame_equal(shifted, shifted2)
shifted3 = ps.shift(freq=offsets.BDay())
tm.assert_frame_equal(shifted, shifted3)
def test_datetime_frame_shift_with_freq(self, datetime_frame):
shifted = datetime_frame.shift(1, freq="infer")
unshifted = shifted.shift(-1, freq="infer")
tm.assert_frame_equal(datetime_frame, unshifted)
shifted2 = datetime_frame.shift(freq=datetime_frame.index.freq)
tm.assert_frame_equal(shifted, shifted2)
inferred_ts = DataFrame(
datetime_frame.values,
Index(np.asarray(datetime_frame.index)),
columns=datetime_frame.columns,
)
shifted = inferred_ts.shift(1, freq="infer")
expected = datetime_frame.shift(1, freq="infer")
expected.index = expected.index._with_freq(None)
tm.assert_frame_equal(shifted, expected)
unshifted = shifted.shift(-1, freq="infer")
tm.assert_frame_equal(unshifted, inferred_ts)
def test_period_index_frame_shift_with_freq_error(self):
ps = tm.makePeriodFrame()
msg = "Given freq M does not match PeriodIndex freq B"
with pytest.raises(ValueError, match=msg):
ps.shift(freq="M")
def test_datetime_frame_shift_with_freq_error(self, datetime_frame):
no_freq = datetime_frame.iloc[[0, 5, 7], :]
msg = "Freq was not set in the index hence cannot be inferred"
with pytest.raises(ValueError, match=msg):
no_freq.shift(freq="infer")
def test_shift_dt64values_int_fill_deprecated(self):
# GH#31971
        ser = Series([pd.Timestamp("2020-01-01")])
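
# Illustrative sketch (not part of the original test module): a minimal, self-contained
# demonstration of the shift(axis=1) behaviour exercised in test_shift_axis1_multiple_blocks
# above; it assumes only numpy and pandas are available.
import numpy as np
import pandas as pd

_demo = pd.DataFrame(np.arange(12).reshape(3, 4))
_shifted = _demo.shift(2, axis=1)  # data moves two columns to the right
assert _shifted.iloc[:, :2].isna().all().all()          # vacated columns are filled with NaN
assert (_shifted.iloc[:, 2] == _demo.iloc[:, 0]).all()  # column 2 now holds the original column 0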
#!/usr/bin/env python
import os, sys
import pandas as pd
import subprocess as sp
from pdb import set_trace
sOutput_dir = sys.argv[1]
def Parsing_summary():
if not os.path.isdir("{outdir}/Summary_result".format(outdir=sOutput_dir)):
os.mkdir("{outdir}/Summary_result".format(outdir=sOutput_dir))
sp.call('cat {outdir}/Summary/*.txt > {outdir}/Summary/Summary_all.txt'.format(outdir=sOutput_dir), shell=True)
dfSummary = pd.read_table('{outdir}/Summary/Summary_all.txt'.format(outdir=sOutput_dir), header=None)
dfSummary.columns = ['Barcode', 'Total', 'Insertion', 'Deletion', 'Complex']
dfSummary = dfSummary.groupby(['Barcode']).sum()
dfSummary['Total_indel'] = dfSummary['Insertion'] + dfSummary['Deletion'] + dfSummary['Complex']
dfSummary['IND/TOT'] = dfSummary['Total_indel'] / dfSummary['Total']
dfSummary['IND/TOT'].fillna(0, inplace=True)
dfSummary.to_csv('{outdir}/Summary_result/Summary_result.tsv'.format(outdir=sOutput_dir), sep='\t')
def Annotate_final_result():
dfCount_INDEL = pd.read_table('{outdir}/result/freq/freq_result/Indel_summary.txt'.format(outdir=sOutput_dir), header=None)
dfSummary = pd.read_table('{outdir}/Summary_result/Summary_result.tsv'.format(outdir=sOutput_dir), index_col='Barcode')
dfCount_INDEL[0] = dfCount_INDEL[0].str.replace('.INDEL_freq.txt', '')
dfCount_INDEL.set_index(0, inplace=True)
    dfConcat_result = pd.concat([dfCount_INDEL, dfSummary.loc[:, ['Total_indel', 'Total', 'IND/TOT']]], axis=1)
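
# Illustrative usage note (hypothetical, not part of the original script): the two helpers above
# would typically be invoked in order once the upstream pipeline has populated the output
# directory, e.g.:
#   Parsing_summary()
#   Annotate_final_result()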
# -*- coding: utf-8 -*-
# Arithmetc tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import timedelta
import operator
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.core import ops
from pandas.errors import NullFrequencyError
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
Timedelta, Timestamp, NaT, Series, TimedeltaIndex, DatetimeIndex)
# ------------------------------------------------------------------
# Fixtures
@pytest.fixture
def tdser():
"""
Return a Series with dtype='timedelta64[ns]', including a NaT.
"""
return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]')
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Timedelta Scalar
class TestNumericArraylikeArithmeticWithTimedeltaScalar(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="block.eval incorrect",
strict=True))
])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_mul_tdscalar(self, scalar_td, index, box):
# GH#19333
if (box is Series and
type(scalar_td) is timedelta and index.dtype == 'f8'):
raise pytest.xfail(reason="Cannot multiply timedelta by float")
expected = pd.timedelta_range('1 days', '10 days')
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = index * scalar_td
tm.assert_equal(result, expected)
commute = scalar_td * index
tm.assert_equal(commute, expected)
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 3)),
pd.UInt64Index(range(1, 3)),
pd.Float64Index(range(1, 3)),
pd.RangeIndex(1, 3)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_rdiv_tdscalar(self, scalar_td, index, box):
if box is Series and type(scalar_td) is timedelta:
raise pytest.xfail(reason="TODO: Figure out why this case fails")
if box is pd.DataFrame and isinstance(scalar_td, timedelta):
raise pytest.xfail(reason="TODO: Figure out why this case fails")
expected = TimedeltaIndex(['1 Day', '12 Hours'])
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = scalar_td / index
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
index / scalar_td
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedeltaArraylikeAddSubOps(object):
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# -------------------------------------------------------------
# Invalid Operations
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def test_td64arr_add_str_invalid(self, box):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize('op', [operator.add, ops.radd,
operator.sub, ops.rsub],
ids=lambda x: x.__name__)
def test_td64arr_add_sub_float(self, box, op, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdi = tm.box_expected(tdi, box)
if box is pd.DataFrame and op in [operator.add, operator.sub]:
pytest.xfail(reason="Tries to align incorrectly, "
"raises ValueError")
with pytest.raises(TypeError):
op(tdi, other)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to cast df to "
"Period",
strict=True,
raises=IncompatibleFrequency))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
raises=ValueError,
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def test_td64arr_sub_timestamp_raises(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box)
msg = "cannot subtract a datelike from|Could not operate"
with tm.assert_raises_regex(TypeError, msg):
idx - Timestamp('2011-01-01')
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx + Timestamp('2011-01-01')
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64_radd_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
# TODO: parametrize over scalar datetime types?
result = Timestamp('2011-01-01') + idx
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# Operations with int-like others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser + Series([2, 3, 4])
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="GH#19123 integer "
"interpreted as "
"nanoseconds",
strict=True)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_radd_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
Series([2, 3, 4]) + tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_sub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser - Series([2, 3, 4])
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
@pytest.mark.xfail(reason='GH#19123 integer interpreted as nanoseconds',
strict=True)
def test_td64arr_rsub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
with pytest.raises(TypeError):
Series([2, 3, 4]) - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_intlike(self, box):
# GH#19123
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = tm.box_expected(tdi, box)
err = TypeError if box is not pd.Index else NullFrequencyError
other = Series([20, 30, 40], dtype='uint8')
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box, scalar, tdser):
if box is pd.DataFrame and isinstance(scalar, np.ndarray):
# raises ValueError
pytest.xfail(reason="DataFrame to broadcast incorrectly")
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vec', [
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
], ids=lambda x: type(x).__name__)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype, tdser):
if type(vec) is Series and not dtype.startswith('float'):
pytest.xfail(reason='GH#19123 integer interpreted as nanos')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith('float'):
err = NullFrequencyError
vector = vec.astype(dtype)
# TODO: parametrize over these four ops?
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with datetime-like others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype "
"instead of "
"datetime64[ns]",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_sub_timestamp(self, box):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdser = Series(pd.timedelta_range('1 day', periods=3))
expected = Series(pd.date_range('2012-01-02', periods=3))
tdser = tm.box_expected(tdser, box)
expected = tm.box_expected(expected, box)
tm.assert_equal(ts + tdser, expected)
tm.assert_equal(tdser + ts, expected)
expected2 = Series(pd.date_range('2011-12-31',
periods=3, freq='-1D'))
expected2 = tm.box_expected(expected2, box)
tm.assert_equal(ts - tdser, expected2)
tm.assert_equal(ts + (-tdser), expected2)
with pytest.raises(TypeError):
tdser - ts
# ------------------------------------------------------------------
# Operations with timedelta-like others (including DateOffsets)
# TODO: parametrize over [add, sub, radd, rsub]?
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly leading "
"to alignment error",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_add_sub_tdi(self, box, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
tdi = TimedeltaIndex(['0 days', '1 day'], name=names[0])
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)],
name=names[2])
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = tdi + ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser + tdi
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)],
name=names[2])
expected = tm.box_expected(expected, box)
result = tdi - ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser - tdi
tm.assert_equal(result, -expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def test_td64arr_sub_NaT(self, box):
# GH#18808
ser = Series([NaT, Timedelta('1s')])
expected = Series([NaT, NaT], dtype='timedelta64[ns]')
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
res = ser - NaT
tm.assert_equal(res, expected)
class TestTimedeltaArraylikeMulDivOps(object):
# Tests for timedelta64[ns]
# __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
# ------------------------------------------------------------------
# __floordiv__, __rfloordiv__
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Incorrectly returns "
"m8[ns] instead of f8",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('scalar_td', [
timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()])
def test_td64arr_floordiv_tdscalar(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([0, 0, np.nan])
td1 = tm.box_expected(td1, box)
expected = tm.box_expected(expected, box)
result = td1 // scalar_td
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Incorrectly casts to f8",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('scalar_td', [
timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()])
def test_td64arr_rfloordiv_tdscalar(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box)
expected = tm.box_expected(expected, box)
result = scalar_td // td1
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns m8[ns] dtype "
"instead of f8",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('scalar_td', [
timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()])
def test_td64arr_rfloordiv_tdscalar_explicit(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box)
expected = tm.box_expected(expected, box)
# We can test __rfloordiv__ using this syntax,
# see `test_timedelta_rfloordiv`
result = td1.__rfloordiv__(scalar_td)
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# Operations with timedelta-like others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="__mul__ op treats "
"timedelta other as i8; "
"rmul OK",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('scalar_td', [
timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()])
def test_td64arr_mul_tdscalar_invalid(self, box, scalar_td):
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 = tm.box_expected(td1, box)
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
pattern = 'operate|unsupported|cannot|not supported'
with tm.assert_raises_regex(TypeError, pattern):
td1 * scalar_td
with tm.assert_raises_regex(TypeError, pattern):
scalar_td * td1
# ------------------------------------------------------------------
# Operations with numeric others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object-dtype",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('one', [1, np.array(1), 1.0, np.array(1.0)])
def test_td64arr_mul_numeric_scalar(self, box, one, tdser):
# GH#4521
# divide/multiply by integers
expected = Series(['-59 Days', '-59 Days', 'NaT'],
dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box)
expected = tm.box_expected(expected, box)
result = tdser * (-one)
tm.assert_equal(result, expected)
result = (-one) * tdser
tm.assert_equal(result, expected)
expected = Series(['118 Days', '118 Days', 'NaT'],
dtype='timedelta64[ns]')
expected = tm.box_expected(expected, box)
result = tdser * (2 * one)
tm.assert_equal(result, expected)
result = (2 * one) * tdser
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object-dtype",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('two', [2, 2.0, np.array(2), np.array(2.0)])
def test_td64arr_div_numeric_scalar(self, box, two, tdser):
# GH#4521
# divide/multiply by integers
expected = Series(['29.5D', '29.5D', 'NaT'], dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box)
expected = tm.box_expected(expected, box)
result = tdser / two
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vector', [np.array([20, 30, 40]),
pd.Index([20, 30, 40]),
Series([20, 30, 40])])
def test_td64arr_mul_numeric_array(self, box, vector, dtype, tdser):
# GH#4521
# divide/multiply by integers
vector = vector.astype(dtype)
expected = Series(['1180 Days', '1770 Days', 'NaT'],
dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box)
# TODO: Make this up-casting more systematic?
box = Series if (box is pd.Index and type(vector) is Series) else box
expected = tm.box_expected(expected, box)
result = tdser * vector
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vector', [np.array([20, 30, 40]),
pd.Index([20, 30, 40]),
Series([20, 30, 40])],
ids=lambda x: type(x).__name__)
def test_td64arr_rmul_numeric_array(self, box, vector, dtype, tdser):
# GH#4521
# divide/multiply by integers
vector = vector.astype(dtype)
expected = Series(['1180 Days', '1770 Days', 'NaT'],
dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box)
box = Series if (box is pd.Index and type(vector) is Series) else box
expected = tm.box_expected(expected, box)
result = vector * tdser
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vector', [np.array([20, 30, 40]),
                                        pd.Index([20, 30, 40]),
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 2 16:39:25 2019
@author: Shane
"""
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
import scipy
import scipy.stats as stats
import glob
import statsmodels.stats.api as sms
#import matplotlib for plotting
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.ticker import ScalarFormatter, LogFormatter
import seaborn as sns
import math
from scipy.spatial import distance
#import os to handle operating system
import os
#=============================================================================
#import files to analyze
datadir = "D:\\Goode_Lab\\Projects\\actin_cables\\data\\summary_data\\"
#initalize data frame to append all data
df = pd.DataFrame()
#import data to dataframe
df = pd.read_csv(datadir + '201210_cdc28-13ts_t-8_t1_yBG12_yBG9_all_cable_analysis.csv')
#=============================================================================
#parse the data into the necessary strain types for plotting
#setup df with only yBG12 cells
df_hap = pd.DataFrame()
df_hap = df.loc[(df['strain']=='yBG12')].reset_index()
#setup df with only yBG9 cells
df_dip = pd.DataFrame()
df_dip = df.loc[(df['strain']=='yBG9')].reset_index()
#setup df with only uninduced cdc28 cells
df_cdcu = pd.DataFrame()
df_cdcu = df.loc[(df['strain']=='cdc28-13ts, t0')].reset_index()
#setup df with only induced cdc28 cells
df_cdci = pd.DataFrame()
df_cdci = df.loc[(df['strain']=='cdc28-13ts, t8')].reset_index()
#==============================================================================
# Bin the data frame by "cell diameter" with 10 bins...
d_bins = np.linspace(df.cell_diameter.min(), df.cell_diameter.max(), 11)
# Get the mean of parameters, binned by the values in cell diameter
d_binned_data = pd.DataFrame()
d_binned_data = df.groupby(pd.cut(df.cell_diameter, d_bins)).mean()
d_binned_err = pd.DataFrame()
d_binned_err = df.groupby(pd.cut(df.cell_diameter, d_bins)).std()
# Bin the data frame by "cell volume" with 10 bins...
v_bins = np.linspace(df.cell_volume.min(), df.cell_volume.max(), 11)
# Get the mean of parameters, binned by the values in cell diameter
v_binned_data = pd.DataFrame()
v_binned_data = df.groupby(pd.cut(df.cell_volume, v_bins)).mean()
v_binned_err = pd.DataFrame()
v_binned_err = df.groupby(pd.cut(df.cell_volume, v_bins)).std()
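
# Illustrative sketch (hypothetical, not part of the original analysis): one way the binned
# summaries computed above could be visualised with the already-imported matplotlib, assuming
# the column names used earlier in this script.
# fig, ax = plt.subplots()
# ax.errorbar(d_binned_data['cell_diameter'], d_binned_data['cell_volume'],
#             yerr=d_binned_err['cell_volume'], fmt='o', capsize=3)
# ax.set_xlabel('Cell diameter (binned mean)')
# ax.set_ylabel('Cell volume (binned mean)')
# plt.show()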
import itertools
import json
import logging
import os
import traceback
import uuid
from copy import deepcopy
from typing import Union, List, Dict
import genet.auxiliary_files as auxiliary_files
import genet.exceptions as exceptions
import genet.modify.change_log as change_log
import genet.modify.graph as modify_graph
import genet.modify.schedule as modify_schedule
import genet.outputs_handler.geojson as geojson
import genet.outputs_handler.matsim_xml_writer as matsim_xml_writer
import genet.outputs_handler.sanitiser as sanitiser
import genet.schedule_elements as schedule_elements
import genet.utils.dict_support as dict_support
import genet.utils.graph_operations as graph_operations
import genet.utils.pandas_helpers as pd_helpers
import genet.utils.parallel as parallel
import genet.utils.persistence as persistence
import genet.utils.plot as plot
import genet.utils.simplification as simplification
import genet.utils.spatial as spatial
import genet.validate.network_validation as network_validation
import geopandas as gpd
import networkx as nx
import numpy as np
import pandas as pd
from pyproj import Transformer
from s2sphere import CellId
logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.INFO)
class Network:
def __init__(self, epsg):
self.epsg = epsg
self.transformer = Transformer.from_crs(epsg, 'epsg:4326', always_xy=True)
self.graph = nx.MultiDiGraph(name='Network graph', crs=self.epsg, simplified=False)
self.schedule = schedule_elements.Schedule(epsg)
self.change_log = change_log.ChangeLog()
self.auxiliary_files = {'node': {}, 'link': {}}
# link_id_mapping maps between (usually string literal) index per edge to the from and to nodes that are
# connected by the edge
self.link_id_mapping = {}
def __repr__(self):
return f"<{self.__class__.__name__} instance at {id(self)}: with \ngraph: {nx.info(self.graph)} and " \
f"\nschedule {self.schedule.info()}"
def __str__(self):
return self.info()
def add(self, other):
"""
        This lets you add another genet.Network (`other`) onto the network this method is called on.
        This is deliberately not a magic (dunder) method, to discourage `new_network = network_1 + network_2`
        (which would needlessly hold both networks and the combined result in memory at once).
        :param other: genet.Network to be added to this one
:return:
"""
if self.is_simplified() != other.is_simplified():
raise RuntimeError('You cannot add simplified and non-simplified networks together')
# consolidate coordinate systems
if other.epsg != self.epsg:
logging.info(f'Attempting to merge two networks in different coordinate systems. '
f'Reprojecting from {other.epsg} to {self.epsg}')
other.reproject(other.epsg)
# consolidate node ids
other = graph_operations.consolidate_node_indices(self, other)
# consolidate link ids
other = graph_operations.consolidate_link_indices(self, other)
# finally, once the node and link ids have been sorted, combine the graphs
# nx.compose(left, right) overwrites data in left with data in right under matching ids
self.graph = nx.compose(other.graph, self.graph)
# finally, combine link_id_mappings
self.link_id_mapping = {**other.link_id_mapping, **self.link_id_mapping}
# combine schedules
self.schedule.add(other.schedule)
# merge change_log DataFrames
self.change_log = self.change_log.merge_logs(other.change_log)
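
    # Illustrative usage (hypothetical, not part of the original source): merging two networks,
    # assuming `n1` and `n2` are genet.Network instances that have already been built or read in.
    #   n1.add(n2)   # n2's nodes, links and schedule are consolidated and folded into n1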
def print(self):
print(self.info())
def info(self):
return f"Graph info: {nx.info(self.graph)} \nSchedule info: {self.schedule.info()}"
def plot(self, output_dir='', data=False):
"""
Plots the network graph and schedule on kepler map.
Ensure all prerequisites are installed https://docs.kepler.gl/docs/keplergl-jupyter#install
:param output_dir: output directory for the image, if passed, will save plot to html
:param data: Defaults to False, only the geometry and ID will be visible.
True will visualise all data on the map (not suitable for large networks)
A set of keys e.g. {'freespeed', 'capacity'}
:return:
"""
if not self.schedule:
logging.warning('This Network does not have a PT schedule. Only the graph will be visualised.')
return self.plot_graph(output_dir=output_dir)
network_links = self.to_geodataframe()['links']
schedule_routes = self.schedule_network_routes_geodataframe()
if data is not True:
network_links = sanitiser._subset_plot_gdf(data, network_links, base_keys={'id', 'geometry'})
schedule_routes = sanitiser._subset_plot_gdf(data, schedule_routes, base_keys={'route_id', 'geometry'})
m = plot.plot_geodataframes_on_kepler_map(
{'network_links': sanitiser.sanitise_geodataframe(network_links),
'schedule_routes': sanitiser.sanitise_geodataframe(schedule_routes)},
kepler_config='network_with_pt'
)
if output_dir:
persistence.ensure_dir(output_dir)
m.save_to_html(file_name=os.path.join(output_dir, 'network_with_pt_routes.html'))
return m
def plot_graph(self, output_dir='', data=False):
"""
Plots the network graph only on kepler map.
Ensure all prerequisites are installed https://docs.kepler.gl/docs/keplergl-jupyter#install
:param output_dir: output directory for the image, if passed, will save plot to html
:param data: Defaults to False, only the geometry and ID will be visible.
True will visualise all data on the map (not suitable for large networks)
A set of keys e.g. {'freespeed', 'capacity'}
:return:
"""
network_links = self.to_geodataframe()['links']
if data is not True:
network_links = sanitiser._subset_plot_gdf(data, network_links, base_keys={'id', 'geometry'})
m = plot.plot_geodataframes_on_kepler_map(
{'network_links': sanitiser.sanitise_geodataframe(network_links)},
kepler_config='network_with_pt'
)
if output_dir:
persistence.ensure_dir(output_dir)
m.save_to_html(file_name=os.path.join(output_dir, 'network_graph.html'))
return m
def plot_schedule(self, output_dir='', data=False):
"""
Plots original stop connections in the network's schedule over the network graph on kepler map.
Ensure all prerequisites are installed https://docs.kepler.gl/docs/keplergl-jupyter#install
:param output_dir: output directory for the image, if passed, will save plot to html
:param data: Defaults to False, only the geometry and ID will be visible.
True will visualise all data on the map (not suitable for large networks)
A set of keys e.g. {'freespeed', 'capacity'}
:return:
"""
network_links = self.to_geodataframe()['links']
schedule_gdf = self.schedule.to_geodataframe()
if data is not True:
network_links = sanitiser._subset_plot_gdf(data, network_links, base_keys={'id', 'geometry'})
schedule_gdf['links'] = sanitiser._subset_plot_gdf(data, schedule_gdf['links'],
base_keys={'route_id', 'geometry'})
schedule_gdf['nodes'] = sanitiser._subset_plot_gdf(data, schedule_gdf['nodes'],
base_keys={'id', 'geometry'})
m = plot.plot_geodataframes_on_kepler_map(
{'network_links': sanitiser.sanitise_geodataframe(network_links),
'schedule_links': sanitiser.sanitise_geodataframe(schedule_gdf['links']),
'schedule_stops': sanitiser.sanitise_geodataframe(schedule_gdf['nodes'])},
kepler_config='network_and_schedule'
)
if output_dir:
persistence.ensure_dir(output_dir)
m.save_to_html(file_name=os.path.join(output_dir, 'network_and_schedule.html'))
return m
def reproject(self, new_epsg, processes=1):
"""
Changes projection of the network to new_epsg
:param new_epsg: 'epsg:1234'
:param processes: max number of process to split computation across
:return:
"""
# reproject nodes
nodes_attribs = dict(self.nodes())
new_nodes_attribs = parallel.multiprocess_wrap(
data=nodes_attribs, split=parallel.split_dict, apply=modify_graph.reproj, combine=parallel.combine_dict,
processes=processes, from_proj=self.epsg, to_proj=new_epsg)
self.apply_attributes_to_nodes(new_nodes_attribs)
# reproject geometries
gdf_geometries = gpd.GeoDataFrame(self.link_attribute_data_under_keys(['geometry']), crs=self.epsg)
gdf_geometries = gdf_geometries.to_crs(new_epsg)
new_link_attribs = gdf_geometries.T.to_dict()
self.apply_attributes_to_links(new_link_attribs)
if self.schedule:
self.schedule.reproject(new_epsg, processes)
self.initiate_crs_transformer(new_epsg)
self.graph.graph['crs'] = self.epsg
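
    # Illustrative usage (hypothetical, assuming an existing Network instance `n`): reprojecting
    # a network to WGS84.
    #   n.reproject('epsg:4326', processes=1)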
def initiate_crs_transformer(self, epsg):
self.epsg = epsg
if epsg != 'epsg:4326':
self.transformer = Transformer.from_crs(epsg, 'epsg:4326', always_xy=True)
else:
self.transformer = None
def simplify(self, no_processes=1):
if self.is_simplified():
raise RuntimeError('This network has already been simplified. You cannot simplify the graph twice.')
simplification.simplify_graph(self, no_processes)
# mark graph as having been simplified
self.graph.graph["simplified"] = True
def is_simplified(self):
return self.graph.graph["simplified"]
def node_attribute_summary(self, data=False):
"""
Parses through data stored on nodes and gives a summary tree of the data stored on the nodes.
If data is True, shows also up to 5 unique values stored under such keys.
:param data: bool, False by default
:return:
"""
root = graph_operations.get_attribute_schema(self.nodes(), data=data)
graph_operations.render_tree(root, data)
def node_attribute_data_under_key(self, key):
"""
Generates a pandas.Series object indexed by node ids, with data stored on the nodes under `key`
:param key: either a string e.g. 'x', or if accessing nested information, a dictionary
e.g. {'attributes': {'osm:way:name': 'text'}}
:return: pandas.Series
"""
data = graph_operations.get_attribute_data_under_key(self.nodes(), key)
return pd.Series(data, dtype=pd_helpers.get_pandas_dtype(data))
def node_attribute_data_under_keys(self, keys: Union[list, set], index_name=None):
"""
        Generates a pandas.DataFrame object indexed by node ids, with data stored on the nodes under the given keys
:param keys: list of either a string e.g. 'x', or if accessing nested information, a dictionary
e.g. {'attributes': {'osm:way:name': 'text'}}
:param index_name: optional, gives the index_name to dataframes index
:return: pandas.DataFrame
"""
return graph_operations.build_attribute_dataframe(self.nodes(), keys=keys, index_name=index_name)
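
    # Illustrative usage (hypothetical, assuming a Network instance `n` with MATSim-style nested
    # attributes), reusing the key formats described in the docstring above:
    #   df = n.node_attribute_data_under_keys(['x', 'y', {'attributes': {'osm:way:name': 'text'}}])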
def link_attribute_summary(self, data=False):
"""
Parses through data stored on links and gives a summary tree of the data stored on the links.
If data is True, shows also up to 5 unique values stored under such keys.
:param data: bool, False by default
:return:
"""
root = graph_operations.get_attribute_schema(self.links(), data=data)
graph_operations.render_tree(root, data)
def link_attribute_data_under_key(self, key: Union[str, dict]):
"""
Generates a pandas.Series object indexed by link ids, with data stored on the links under `key`
:param key: either a string e.g. 'modes', or if accessing nested information, a dictionary
e.g. {'attributes': {'osm:way:name': 'text'}}
:return: pandas.Series
"""
return pd.Series(graph_operations.get_attribute_data_under_key(self.links(), key))
def link_attribute_data_under_keys(self, keys: Union[list, set], index_name=None):
"""
Generates a pandas.DataFrame object indexed by link ids, with data stored on the links under `key`
:param keys: list of either a string e.g. 'modes', or if accessing nested information, a dictionary
e.g. {'attributes': {'osm:way:name': 'text'}}
:param index_name: optional, gives the index_name to dataframes index
:return: pandas.DataFrame
"""
return graph_operations.build_attribute_dataframe(self.links(), keys=keys, index_name=index_name)
def extract_nodes_on_node_attributes(self, conditions: Union[list, dict], how=any, mixed_dtypes=True):
"""
Extracts graph node IDs based on values of attributes saved on the nodes. Fails silently,
        assumes not all nodes have all of the attributes. Where a stored attribute value is a list or set
        (as in a simplified network, where scalar and set values are mixed), the condition is evaluated against
        the stored values and a non-empty set of satisfying values is deemed a match by default.
        To disable this behaviour set mixed_dtypes to False.
:param conditions: {'attribute_key': 'target_value'} or nested
{'attribute_key': {'another_key': {'yet_another_key': 'target_value'}}}, where 'target_value' could be
- single value, string, int, float, where the edge_data[key] == value
(if mixed_dtypes==True and in case of set/list edge_data[key], value is in edge_data[key])
- list or set of single values as above, where edge_data[key] in [value1, value2]
(if mixed_dtypes==True and in case of set/list edge_data[key],
set(edge_data[key]) & set([value1, value2]) is non-empty)
- for int or float values, two-tuple bound (lower_bound, upper_bound) where
lower_bound <= edge_data[key] <= upper_bound
(if mixed_dtypes==True and in case of set/list edge_data[key], at least one item in
edge_data[key] satisfies lower_bound <= item <= upper_bound)
- function that returns a boolean given the value e.g.
def below_exclusive_upper_bound(value):
return value < 100
(if mixed_dtypes==True and in case of set/list edge_data[key], at least one item in
edge_data[key] returns True after applying function)
:param how : {all, any}, default any
The level of rigour used to match conditions
* all: means all conditions need to be met
* any: means at least one condition needs to be met
:param mixed_dtypes: True by default, used if values under dictionary keys queried are single values or lists of
values e.g. as in simplified networks.
:return: list of node ids in the network satisfying conditions
"""
return graph_operations.extract_on_attributes(
self.nodes(), conditions=conditions, how=how, mixed_dtypes=mixed_dtypes)
def extract_links_on_edge_attributes(self, conditions: Union[list, dict], how=any, mixed_dtypes=True):
"""
Extracts graph link IDs based on values of attributes saved on the edges. Fails silently,
        assumes not all links have those attributes. Where a stored attribute value is a list or set
        (as in a simplified network, where scalar and set values are mixed), the condition is evaluated against
        the stored values and a non-empty set of satisfying values is deemed a match by default.
        To disable this behaviour set mixed_dtypes to False.
:param conditions: {'attribute_key': 'target_value'} or nested
{'attribute_key': {'another_key': {'yet_another_key': 'target_value'}}}, where 'target_value' could be
- single value, string, int, float, where the edge_data[key] == value
(if mixed_dtypes==True and in case of set/list edge_data[key], value is in edge_data[key])
- list or set of single values as above, where edge_data[key] in [value1, value2]
(if mixed_dtypes==True and in case of set/list edge_data[key],
set(edge_data[key]) & set([value1, value2]) is non-empty)
- for int or float values, two-tuple bound (lower_bound, upper_bound) where
lower_bound <= edge_data[key] <= upper_bound
(if mixed_dtypes==True and in case of set/list edge_data[key], at least one item in
edge_data[key] satisfies lower_bound <= item <= upper_bound)
- function that returns a boolean given the value e.g.
def below_exclusive_upper_bound(value):
return value < 100
(if mixed_dtypes==True and in case of set/list edge_data[key], at least one item in
edge_data[key] returns True after applying function)
:param how : {all, any}, default any
The level of rigour used to match conditions
* all: means all conditions need to be met
* any: means at least one condition needs to be met
:param mixed_dtypes: True by default, used if values under dictionary keys queried are single values or lists of
values e.g. as in simplified networks.
:return: list of link ids in the network satisfying conditions
"""
return graph_operations.extract_on_attributes(
self.links(), conditions=conditions, how=how, mixed_dtypes=mixed_dtypes)
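
    # Illustrative usage (hypothetical attribute values, assuming a Network instance `n`): combining
    # a categorical condition with a numeric bound and requiring both to hold on each link.
    #   fast_car_links = n.extract_links_on_edge_attributes(
    #       conditions=[{'modes': 'car'}, {'freespeed': (20, 50)}],
    #       how=all)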
def links_on_modal_condition(self, modes: Union[str, list]):
"""
Finds link IDs with modes or singular mode given in `modes`
:param modes: string mode e.g. 'car' or a list of such modes e.g. ['car', 'walk']
:return: list of link IDs
"""
return self.extract_links_on_edge_attributes(conditions={'modes': modes}, mixed_dtypes=True)
def nodes_on_modal_condition(self, modes: Union[str, list]):
"""
Finds node IDs with modes or singular mode given in `modes`
:param modes: string mode e.g. 'car' or a list of such modes e.g. ['car', 'walk']
        :return: list of node IDs
"""
links = self.links_on_modal_condition(modes)
nodes = {self.link(link)['from'] for link in links} | {self.link(link)['to'] for link in links}
return list(nodes)
def modal_subgraph(self, modes: Union[str, set, list]):
return self.subgraph_on_link_conditions(conditions={'modes': modes}, mixed_dtypes=True)
def nodes_on_spatial_condition(self, region_input):
"""
Returns node IDs which intersect region_input
:param region_input:
- path to a geojson file, can have multiple features
- string with comma separated hex tokens of Google's S2 geometry, a region can be covered with cells and
the tokens string copied using http://s2.sidewalklabs.com/regioncoverer/
e.g. '89c25985,89c25987,89c2598c,89c25994,89c25999ffc,89c2599b,89c259ec,89c259f4,89c25a1c,89c25a24'
- shapely.geometry object, e.g. Polygon or a shapely.geometry.GeometryCollection of such objects
:return: node IDs
"""
if not isinstance(region_input, str):
# assumed to be a shapely.geometry input
gdf = self.to_geodataframe()['nodes'].to_crs("epsg:4326")
return self._find_ids_on_shapely_geometry(gdf, how='intersect', shapely_input=region_input)
elif persistence.is_geojson(region_input):
gdf = self.to_geodataframe()['nodes'].to_crs("epsg:4326")
return self._find_ids_on_geojson(gdf, how='intersect', geojson_input=region_input)
else:
# is assumed to be hex
return self._find_node_ids_on_s2_geometry(region_input)
def links_on_spatial_condition(self, region_input, how='intersect'):
"""
Returns link IDs which intersect region_input
:param region_input:
- path to a geojson file, can have multiple features
- string with comma separated hex tokens of Google's S2 geometry, a region can be covered with cells and
the tokens string copied using http://s2.sidewalklabs.com/regioncoverer/
e.g. '89c25985,89c25987,89c2598c,89c25994,89c25999ffc,89c2599b,89c259ec,89c259f4,89c25a1c,89c25a24'
- shapely.geometry object, e.g. Polygon or a shapely.geometry.GeometryCollection of such objects
:param how:
            - 'intersect' default, will return IDs of the links whose geometry intersects the region_input
            - 'within' will return IDs of the links whose geometry is fully contained within the region_input
:return: link IDs
"""
gdf = self.to_geodataframe()['links'].to_crs("epsg:4326")
if not isinstance(region_input, str):
# assumed to be a shapely.geometry input
return self._find_ids_on_shapely_geometry(gdf, how, region_input)
elif persistence.is_geojson(region_input):
return self._find_ids_on_geojson(gdf, how, region_input)
else:
# is assumed to be hex
return self._find_link_ids_on_s2_geometry(gdf, how, region_input)
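
    # Illustrative usage (hypothetical path and S2 tokens, assuming a Network instance `n`):
    #   links_in_area = n.links_on_spatial_condition('path/to/region.geojson', how='within')
    #   links_touched = n.links_on_spatial_condition('89c25985,89c25987', how='intersect')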
def subnetwork(self, links: Union[list, set], services: Union[list, set] = None,
strongly_connected_modes: Union[list, set] = None, n_connected_components: int = 1):
"""
Subset a Network object using a collection of link IDs and (optionally) service IDs
:param links: Link IDs to be retained in the new Network
:param services: optional, collection of service IDs in the Schedule for subsetting.
:param strongly_connected_modes: modes in the network that need to be strongly connected. For MATSim those
are modes that agents are allowed to route on. Defaults to {'car', 'walk', 'bike'}
:param n_connected_components: number of expected strongly connected components for
            the `strongly_connected_modes`. Defaults to 1, as that is what MATSim expects. A larger number may be
            used if disconnected islands are expected, which can then be connected up using the `connect_components`
            method.
:return: A new Network object that is a subset of the original
"""
logging.info('Subsetting a Network will likely result in a disconnected network graph. A cleaner will be ran '
'that will remove links to make the resulting Network strongly connected for modes: '
'car, walk, bike.')
subnetwork = Network(epsg=self.epsg)
links = set(links)
if self.schedule:
if services:
logging.info(
f'Schedule will be subsetted using given services: {services}. Links pertaining to their '
'network routes will also be retained.')
subschedule = self.schedule.subschedule(services)
routes = subschedule.route_attribute_data(keys=['route'])
links = links | set(np.concatenate(routes['route'].values))
subnetwork.schedule = subschedule
subnetwork.graph = self.subgraph_on_link_conditions(conditions={'id': links})
subnetwork.link_id_mapping = {k: v for k, v in self.link_id_mapping.items() if k in links}
if strongly_connected_modes is None:
logging.info("Param: strongly_connected_modes is defaulting to `{'car', 'walk', 'bike'}` "
"You can change this behaviour by passing the parameter.")
strongly_connected_modes = {'car', 'walk', 'bike'}
for mode in strongly_connected_modes:
if not subnetwork.is_strongly_connected(modes=mode):
logging.warning(f'The graph for mode {mode} is not strongly connected. '
f'The largest {n_connected_components} connected components will be extracted.')
if n_connected_components > 1:
logging.info('Number of requested connected components is larger than 1. Consider using '
'`connect_components` method to create modal graphs that are strongly connected.')
subnetwork.retain_n_connected_subgraphs(n=n_connected_components, mode=mode)
# TODO Inherit and subset Auxiliary files
logging.info('Subsetted Network is ready - do not forget to validate and visualise your subset!')
return subnetwork
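
    # Illustrative usage (hypothetical IDs, assuming a Network instance `n` with a Schedule):
    #   sub = n.subnetwork(links={'link_1', 'link_2'}, services={'service_a'},
    #                      strongly_connected_modes={'car', 'walk', 'bike'})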
def subnetwork_on_spatial_condition(self, region_input, how='intersect',
strongly_connected_modes: Union[list, set] = None,
n_connected_components: int = 1):
"""
Subset a Network object using a spatial bound
:param region_input:
- path to a geojson file, can have multiple features
- string with comma separated hex tokens of Google's S2 geometry, a region can be covered with cells and
the tokens string copied using http://s2.sidewalklabs.com/regioncoverer/
e.g. '89c25985,89c25987,89c2598c,89c25994,89c25999ffc,89c2599b,89c259ec,89c259f4,89c25a1c,89c25a24'
- shapely.geometry object, e.g. Polygon or a shapely.geometry.GeometryCollection of such objects
:param how:
            - 'intersect' default, retains links that intersect the region_input, and Services with at least one
              Stop intersecting the region_input
            - 'within' retains links fully contained within the region_input, and Services whose Stops are all
              contained within the region_input
:param strongly_connected_modes: modes in the network that need to be strongly connected. For MATSim those
are modes that agents are allowed to route on. Defaults to {'car', 'walk', 'bike'}
:param n_connected_components: number of expected strongly connected components for
            the `strongly_connected_modes`. Defaults to 1, as that is what MATSim expects. A larger number may be
            used if disconnected islands are expected, which can then be connected up using the `connect_components`
            method.
:return: A new Network object that is a subset of the original
"""
if self.schedule:
services_to_keep = self.schedule.services_on_spatial_condition(region_input=region_input, how=how)
else:
services_to_keep = None
subset_links = set(self.links_on_spatial_condition(region_input=region_input, how=how))
return self.subnetwork(links=subset_links, services=services_to_keep,
strongly_connected_modes=strongly_connected_modes,
n_connected_components=n_connected_components)
def remove_mode_from_links(self, links: Union[set, list], mode: Union[set, list, str]):
"""
Method to remove modes from links. Deletes links which have no mode left after the process.
:param links: collection of link IDs to remove the mode from
:param mode: which mode to remove
:return: updates graph
"""
def empty_modes(mode_attrib):
if not mode_attrib:
return True
return False
links = self._setify(links)
mode = self._setify(mode)
df = self.link_attribute_data_under_keys(['modes'])
extra = links - set(df.index)
if extra:
logging.warning(f'The following links are not present: {extra}')
df['modes'] = df['modes'].apply(lambda x: self._setify(x))
df = df.loc[links & set(df.index)][df['modes'].apply(lambda x: bool(mode & x))]
df['modes'] = df['modes'].apply(lambda x: x - mode)
self.apply_attributes_to_links(df.T.to_dict())
# remove links without modes
no_mode_links = graph_operations.extract_on_attributes(
self.links(),
{'modes': empty_modes},
mixed_dtypes=False
)
self.remove_links(no_mode_links)
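
    # Illustrative usage (hypothetical link IDs, assuming a Network instance `n`): stripping the
    # 'bike' mode from two links; a link is deleted outright if no modes remain on it afterwards.
    #   n.remove_mode_from_links({'link_12', 'link_47'}, 'bike')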
def retain_n_connected_subgraphs(self, n: int, mode: str):
"""
        Method to remove the given mode from links which do not belong to the largest n connected components.
        Deletes links which have no mode left after the process.
:param n: number of components to retain
:param mode: which mode to consider
:return: updates graph
"""
modal_subgraph = self.modal_subgraph(mode)
# calculate how many connected subgraphs there are
connected_components = network_validation.find_connected_subgraphs(modal_subgraph)
connected_components_nodes = []
for i in range(0, n):
connected_components_nodes += connected_components[i][0]
connected_subgraphs_to_extract = modal_subgraph.subgraph(connected_components_nodes).copy().edges.data('id')
diff_links = set([e[2] for e in modal_subgraph.edges.data('id')]) - set(
[e[2] for e in connected_subgraphs_to_extract])
logging.info(f'Extracting largest connected components resulted in mode: {mode} being deleted from '
f'{len(diff_links)} edges')
self.remove_mode_from_links(diff_links, mode)
def _find_ids_on_geojson(self, gdf, how, geojson_input):
shapely_input = spatial.read_geojson_to_shapely(geojson_input)
return self._find_ids_on_shapely_geometry(gdf=gdf, how=how, shapely_input=shapely_input)
def _find_ids_on_shapely_geometry(self, gdf, how, shapely_input):
if how == 'intersect':
return list(gdf[gdf.intersects(shapely_input)]['id'])
if how == 'within':
return list(gdf[gdf.within(shapely_input)]['id'])
else:
            raise NotImplementedError('Only `intersect` and `within` options for `how` param.')
def _find_node_ids_on_s2_geometry(self, s2_input):
cell_union = spatial.s2_hex_to_cell_union(s2_input)
return [_id for _id, s2_id in self.graph.nodes(data='s2_id') if cell_union.intersects(CellId(s2_id))]
def _find_link_ids_on_s2_geometry(self, gdf, how, s2_input):
gdf['geometry'] = gdf['geometry'].apply(lambda x: spatial.swap_x_y_in_linestring(x))
gdf['s2_geometry'] = gdf['geometry'].apply(lambda x: spatial.generate_s2_geometry(x))
gdf = gdf.set_index('id')
links = gdf['s2_geometry'].T.to_dict()
cell_union = spatial.s2_hex_to_cell_union(s2_input)
if how == 'intersect':
return [_id for _id, s2_geom in links.items() if
any([cell_union.intersects(CellId(s2_id)) for s2_id in s2_geom])]
elif how == 'within':
return [_id for _id, s2_geom in links.items() if
all([cell_union.intersects(CellId(s2_id)) for s2_id in s2_geom])]
else:
raise NotImplementedError('Only `intersect` and `within` options for `how` param.')
def add_node(self, node: Union[str, int], attribs: dict = None, silent: bool = False):
"""
Adds a node.
:param node:
        :param attribs: should include spatial information x,y in an epsg consistent with the network, or lat lon
            in epsg:4326
:param silent: whether to mute stdout logging messages
:return:
"""
if attribs is not None:
self.graph.add_node(node, **attribs)
else:
self.graph.add_node(node)
self.change_log.add(object_type='node', object_id=node, object_attributes=attribs)
if not silent:
logging.info(f'Added Node with index `{node}` and data={attribs}')
return node
def add_nodes(self, nodes_and_attribs: dict, silent: bool = False, ignore_change_log: bool = False):
"""
Adds nodes, reindexes if indices are clashing with nodes already in the network
:param nodes_and_attribs: {index_for_node: {attribute dictionary for that node}}
:param silent: whether to mute stdout logging messages
:param ignore_change_log: whether to ignore logging changes to the network in the changelog. False by default
and not recommended. Only used when an alternative changelog event is being produced (e.g. simplification) to
reduce changelog bloat.
:return:
"""
# check for clashing nodes
clashing_node_ids = set(dict(self.nodes()).keys()) & set(nodes_and_attribs.keys())
df_nodes = pd.DataFrame(nodes_and_attribs).T
reindexing_dict = {}
if df_nodes.empty:
df_nodes = pd.DataFrame({'id': list(nodes_and_attribs.keys())})
elif ('id' not in df_nodes.columns) or (df_nodes['id'].isnull().any()):
df_nodes['id'] = df_nodes.index
if clashing_node_ids:
reindexing_dict = dict(
zip(clashing_node_ids, self.generate_indices_for_n_nodes(
len(nodes_and_attribs), avoid_keys=set(nodes_and_attribs.keys()))))
clashing_mask = df_nodes['id'].isin(reindexing_dict.keys())
df_nodes.loc[clashing_mask, 'id'] = df_nodes.loc[clashing_mask, 'id'].map(reindexing_dict)
df_nodes = df_nodes.set_index('id', drop=False)
nodes_and_attribs_to_add = df_nodes.T.to_dict()
self.graph.add_nodes_from([(node_id, attribs) for node_id, attribs in nodes_and_attribs_to_add.items()])
if not ignore_change_log:
self.change_log = self.change_log.add_bunch(object_type='node',
id_bunch=list(nodes_and_attribs_to_add.keys()),
attributes_bunch=list(nodes_and_attribs_to_add.values()))
if not silent:
logging.info(f'Added {len(nodes_and_attribs)} nodes')
return reindexing_dict, nodes_and_attribs_to_add
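
    # Illustrative usage (hypothetical node data, assuming a Network instance `n`; x/y should be
    # given in the network's projection):
    #   reindexing, added = n.add_nodes({
    #       'node_1': {'x': 528704.1, 'y': 182068.3},
    #       'node_2': {'x': 528835.2, 'y': 182006.0},
    #   })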
def add_edge(self, u: Union[str, int], v: Union[str, int], multi_edge_idx: int = None, attribs: dict = None,
silent: bool = False):
"""
        Adds an edge between u and v. If an edge between u and v already exists, adds an additional one. Generates
        a link id. If you already have a link id, use the add_link method instead.
:param u: node in the graph
:param v: node in the graph
:param multi_edge_idx: you can specify which multi index to use if there are other edges between u and v.
Will generate new index if already used.
:param attribs:
:param silent: whether to mute stdout logging messages
:return:
"""
link_id = self.generate_index_for_edge(silent=silent)
self.add_link(link_id, u, v, multi_edge_idx, attribs, silent)
if not silent:
logging.info(f'Added edge from `{u}` to `{v}` with link_id `{link_id}`')
return link_id
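
    # Illustrative usage (hypothetical, assuming nodes 'node_1' and 'node_2' already exist in `n`):
    #   link_id = n.add_edge('node_1', 'node_2', attribs={'modes': ['car'], 'length': 130.0})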
def add_edges(self, edges_attributes: List[dict], silent: bool = False, ignore_change_log: bool = False):
"""
Adds multiple edges, generates their unique link ids
:param edges_attributes: List of edges, each item in list is a dictionary defining the edge attributes,
contains at least 'from': node_id and 'to': node_id entries,
:param silent: whether to mute stdout logging messages
:param ignore_change_log: whether to ignore logging changes to the network in the changelog. False by default
and not recommended. Only used when an alternative changelog event is being produced (e.g. simplification) to
reduce changelog bloat.
:return:
"""
# check for compulsory attribs
        df_edges = pd.DataFrame(edges_attributes)
import pandas as pd
import cv2
import pygame
import numpy as np
from movement_detector.detectors import AbstractMovementDetector
class Interface:
"""
    This class displays the video, overlays metadata, and enables user control.
"""
def __init__(self, detector: AbstractMovementDetector):
self.detector = detector
self._play_video = False
self._frame_index = 0
self._playback_frame_rate = self.detector.video.frame_rate
self._player = pygame.display.set_mode(
self.detector.video.frame_shape[1::-1],
pygame.RESIZABLE
)
self._clock = pygame.time.Clock()
self._space_pressed = False
self._key_repeat_buffer = 600
def display(self, stop_keys=('N', 'P', 27)):
vid_name = self.detector.video.vid_name
self._play_video = False
self._frame_index = 0
time_since_last_frame = 0
quit_ = False
keys = None
command_text = ''
command_print_count = 0
command_print_max = max(self._key_repeat_buffer, 10)
keys_pressed = []
time_since_key_press = 0
while True:
tick = self._clock.tick()
if self._frame_index == len(self.detector.video):
self._play_video = False
else:
frame = self._build_frame(action_text=command_text)
if command_text != '':
command_print_count += 1
if command_print_count == command_print_max:
command_text = ''
command_print_count = 0
pygame.display.set_caption(
f'{vid_name} - Frame {self._frame_index + 1}'
)
pygame.surfarray.blit_array(self._player, frame)
pygame.display.update()
keys_pressed = pygame.key.get_pressed()
if any(keys_pressed):
if (time_since_key_press == 0
or time_since_key_press >= self._key_repeat_buffer):
new_command_text = self._parse_command(keys=keys_pressed)
time_since_key_press += tick
if new_command_text != '':
command_text = new_command_text
else:
time_since_key_press = 0
if self._space_pressed:
self._space_pressed = False
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit_ = True
if quit_:
break
if self._play_video:
time_since_last_frame += tick
if time_since_last_frame >= 1/self._playback_frame_rate:
self._frame_index += 1
time_since_last_frame = 0
else:
time_since_last_frame = 0
return keys_pressed
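# Illustrative usage sketch (assumption: `detector` is a concrete
# AbstractMovementDetector subclass instance wrapping a loaded video); hypothetical,
# not part of the original module:
#     interface = Interface(detector)
#     pressed = interface.display()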
def _build_frame(self, action_text=''):
frame = self.detector.video[self._frame_index]
meta_data = self.detector.meta(
start=self._frame_index,
stop=self._frame_index + 1
)
self._add_moving_text(frame=frame, meta_data=meta_data)
self._add_outlier_text(frame=frame, meta_data=meta_data)
self._add_frame_rate_text(frame=frame)
self._add_action_text(frame=frame, action_text=action_text)
frame = np.flipud(np.rot90(frame))
# frame = pygame.surfarray.make_surface(frame)
return frame
@staticmethod
def _add_moving_text(frame, meta_data):
if pd.isna(meta_data['moving'].iloc[0]):
colour = (0, 0, 255)
status_text = 'Loading info'
elif meta_data['moving'].iloc[0]:
colour = (0, 0, 255)
status_text = 'Moving'
else:
colour = (0, 255, 0)
status_text = 'Freezing'
cv2.putText(
img=frame,
text=status_text,
org=(10, 20),
fontFace=cv2.FONT_HERSHEY_COMPLEX,
fontScale=.5,
color=colour,
thickness=2,
)
@staticmethod
def _add_outlier_text(frame, meta_data):
outlier_text = ''
if pd.isna(meta_data['outlier'].iloc[0]):
colour = (0, 0, 255)
outlier_text = 'Loading info'
elif meta_data['manual_set'].iloc[0]:
colour = (0, 255, 0)
outlier_text = 'User-verified'
elif meta_data['flagged'].iloc[0]:
colour = (0, 0, 255)
outlier_text = 'Flagged'
else:
colour = (255, 165, 0)
cv2.putText(
img=frame,
text=outlier_text,
org=(10, 40),
fontFace=cv2.FONT_HERSHEY_COMPLEX,
fontScale=.5,
color=colour,
thickness=2
)
def _add_frame_rate_text(self, frame):
frame_rate = np.round(self._playback_frame_rate, decimals=2)
frame_rate_text = f'Frame rate: {frame_rate}'
cv2.putText(
img=frame,
text=frame_rate_text,
org=(10, 60),
fontFace=cv2.FONT_HERSHEY_COMPLEX,
fontScale=.5,
color=(255, 165, 0),
thickness=2
)
def _add_action_text(self, frame, action_text):
cv2.putText(
img=frame,
text=action_text,
org=(10, 80),
fontFace=cv2.FONT_HERSHEY_COMPLEX,
fontScale=.5,
color=(255, 165, 0),
thickness=2
)
def _parse_command(self, keys):
command_text = ''
if not self._play_video:
if keys[pygame.K_LEFT]:
if self._frame_index != 0:
self._frame_index -= 1
command_text = 'Previous frame'
elif keys[pygame.K_RIGHT]:
if self._frame_index != len(self.detector.video) - 1:
self._frame_index += 1
command_text = 'Next frame'
elif keys[ord('f')]:
self.detector.set_freezing(self._frame_index)
if self._frame_index != len(self.detector.video) - 1:
self._frame_index += 1
command_text = 'Set freezing'
elif keys[ord('m')]:
self.detector.set_moving(self._frame_index)
if self._frame_index != len(self.detector.video) - 1:
self._frame_index += 1
command_text = 'Set moving'
elif keys[ord('n')]:
search_started = False
while True:
if self._frame_index == len(self.detector.video) - 1:
break
meta_data = self.detector.meta(self._frame_index)
if pd.isna(meta_data['flagged'].iloc[0]):
import pytest
import numpy as np
import pandas as pd
from rapidfuzz import fuzz
from polyfuzz.models import BaseMatcher
from tests.utils import get_test_strings
from_list, to_list = get_test_strings()
class MyIncorrectModel(BaseMatcher):
pass
class MyCorrectModel(BaseMatcher):
def match(self, from_list, to_list):
# Calculate distances
matches = [[fuzz.ratio(from_string, to_string) / 100 for to_string in to_list] for from_string in from_list]
# Get best matches
mappings = [to_list[index] for index in np.argmax(matches, axis=1)]
scores = np.max(matches, axis=1)
# Prepare dataframe
matches = pd.DataFrame({'From': from_list, 'To': mappings, 'Similarity': scores})
"""Tests for the sdv.constraints.tabular module."""
import uuid
from datetime import datetime
from unittest.mock import Mock
import numpy as np
import pandas as pd
import pytest
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.constraints.tabular import (
Between, ColumnFormula, CustomConstraint, GreaterThan, Negative, OneHotEncoding, Positive,
Rounding, Unique, UniqueCombinations)
def dummy_transform_table(table_data):
return table_data
def dummy_reverse_transform_table(table_data):
return table_data
def dummy_is_valid_table(table_data):
return [True] * len(table_data)
def dummy_transform_table_column(table_data, column):
return table_data
def dummy_reverse_transform_table_column(table_data, column):
return table_data
def dummy_is_valid_table_column(table_data, column):
return [True] * len(table_data[column])
def dummy_transform_column(column_data):
return column_data
def dummy_reverse_transform_column(column_data):
return column_data
def dummy_is_valid_column(column_data):
return [True] * len(column_data)
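# Illustrative sketch (not part of the original suite): the dummy helpers above are
# wired into ``CustomConstraint`` either as callables or as fully qualified names,
# for example:
#     constraint = CustomConstraint(
#         transform=dummy_transform_table,
#         reverse_transform=dummy_reverse_transform_table,
#         is_valid=__name__ + '.dummy_is_valid_table',
#     )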
class TestCustomConstraint():
def test___init__(self):
"""Test the ``CustomConstraint.__init__`` method.
The ``transform``, ``reverse_transform`` and ``is_valid`` methods
should be replaced by the given ones, importing them if necessary.
Setup:
- Create dummy functions (created above this class).
Input:
- dummy transform and reverse_transform functions + is_valid FQN
Output:
- Instance with all the methods replaced by the dummy versions.
"""
is_valid_fqn = __name__ + '.dummy_is_valid_table'
# Run
instance = CustomConstraint(
transform=dummy_transform_table,
reverse_transform=dummy_reverse_transform_table,
is_valid=is_valid_fqn
)
# Assert
assert instance._transform == dummy_transform_table
assert instance._reverse_transform == dummy_reverse_transform_table
assert instance._is_valid == dummy_is_valid_table
def test__run_transform_table(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy transform function with ``table_data`` argument.
Side Effects:
- Run transform function once with ``table_data`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_table,
return_value=table_data)
# Run
instance = CustomConstraint(transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args
dummy_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_table(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy reverse transform function with ``table_data`` argument.
Side Effects:
- Run reverse transform function once with ``table_data`` as input.
Output:
- applied identity transformation "table_data = reverse_transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_table,
return_value=table_data)
# Run
instance = CustomConstraint(reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args
dummy_reverse_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_table(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy is valid function with ``table_data`` argument.
Side Effects:
- Run is valid function once with ``table_data`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_table)
# Run
instance = CustomConstraint(is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args
dummy_is_valid_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
np.testing.assert_array_equal(is_valid, expected_out)
def test__run_transform_table_column(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy transform function with ``table_data`` and ``column`` arguments.
Side Effects:
- Run transform function once with ``table_data`` and ``column`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_table_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args
assert called[0][1] == 'a'
dummy_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_table_column(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy reverse transform function with ``table_data`` and ``column`` arguments.
Side Effects:
- Run reverse transform function once with ``table_data`` and ``column`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_table_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args
assert called[0][1] == 'a'
dummy_reverse_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_table_column(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy is valid function with ``table_data`` and ``column`` argument.
Side Effects:
- Run is valid function once with ``table_data`` and ``column`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_table_column)
# Run
instance = CustomConstraint(columns='a', is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args
assert called[0][1] == 'a'
dummy_is_valid_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
np.testing.assert_array_equal(is_valid, expected_out)
def test__run_transform_column(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy transform function with ``column_data`` argument.
Side Effects:
- Run transform function twice: first attempting ``table_data`` and
``column`` as input, then falling back to ``column_data``.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_column(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy reverse transform function with ``column_data`` argument.
Side Effects:
- Run reverse transform function twice: first attempting ``table_data`` and
``column`` as input, then falling back to ``column_data``.
Output:
- Applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_column(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy is valid function with ``column_data`` argument.
Side Effects:
- Run is valid function twice: first attempting ``table_data`` and
``column`` as input, then falling back to ``column_data``.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_column)
# Run
instance = CustomConstraint(columns='a', is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
np.testing.assert_array_equal(is_valid, expected_out)
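# Illustrative sketch (not part of the original suite): column-level functions are
# wired in the same way; ``CustomConstraint`` first tries the (table_data, column)
# signature and falls back to passing the single column when that raises TypeError:
#     constraint = CustomConstraint(columns='a', transform=dummy_transform_column)
#     constraint.transform(pd.DataFrame({'a': [1, 2, 3]}))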
class TestUniqueCombinations():
def test___init__(self):
"""Test the ``UniqueCombinations.__init__`` method.
It is expected to create a new Constraint instance and receive the names of
the columns that need to produce unique combinations.
Side effects:
- instance._columns == columns
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns)
# Assert
assert instance._columns == columns
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``UniqueCombinations.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns, handling_strategy='transform')
# Assert
assert instance.rebuild_columns == tuple(columns)
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``UniqueCombinations.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns, handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test___init__with_one_column(self):
"""Test the ``UniqueCombinations.__init__`` method with only one constraint column.
Expect a ``ValueError`` because UniqueCombinations requires at least two
constraint columns.
Side effects:
- A ValueError is raised
"""
# Setup
columns = ['c']
# Run and assert
with pytest.raises(ValueError):
UniqueCombinations(columns=columns)
def test_fit(self):
"""Test the ``UniqueCombinations.fit`` method.
The ``UniqueCombinations.fit`` method is expected to:
- Call ``UniqueCombinations._valid_separator``.
- Find a valid separator for the data and generate the joint column name.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
instance.fit(table_data)
# Asserts
expected_combinations = pd.DataFrame({
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
assert instance._separator == '#'
assert instance._joint_column == 'b#c'
pd.testing.assert_frame_equal(instance._combinations, expected_combinations)
def test_is_valid_true(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_false(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['D', 'E', 'F'],
'c': ['g', 'h', 'i']
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_true(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_false(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [6, 7, 8],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``UniqueCombinations.transform`` method.
It is expected to return the table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns concatenated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c'].items()]
except ValueError:
assert False
def test_transform_non_string(self):
"""Test the ``UniqueCombinations.transform`` method with non strings.
It is expected to return the table data with the constraint columns replaced by UUIDs.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns as UUIDs.
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c#d'].items()]
except ValueError:
assert False
def test_transform_not_all_columns_provided(self):
"""Test the ``UniqueCombinations.transform`` method.
If some of the columns needed for the transform are missing, and
``fit_columns_model`` is False, it will raise a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns, fit_columns_model=False)
instance.fit(table_data)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test_reverse_transform(self):
"""Test the ``UniqueCombinations.reverse_transform`` method.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_non_string(self):
"""Test the ``UniqueCombinations.reverse_transform`` method with a non string column.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
pd.testing.assert_frame_equal(expected_out, out)
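# Illustrative helper (not part of the original suite; not named ``test_*`` so pytest
# will not collect it): shows the fit / transform / reverse_transform round trip that
# the tests above exercise piece by piece.
def _example_unique_combinations_round_trip():
    data = pd.DataFrame({
        'a': ['a', 'b', 'c'],
        'b': ['d', 'e', 'f'],
        'c': ['g', 'h', 'i']
    })
    constraint = UniqueCombinations(columns=['b', 'c'])
    constraint.fit(data)
    recovered = constraint.reverse_transform(constraint.transform(data))
    pd.testing.assert_frame_equal(recovered, data)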
class TestGreaterThan():
def test__validate_scalar(self):
"""Test the ``_validate_scalar`` method.
This method validates the inputs and transforms them into
the correct format.
Input:
- scalar_column = 0
- column_names = 'b'
Output:
- column_names == ['b']
"""
# Setup
scalar_column = 0
column_names = 'b'
scalar = 'high'
# Run
out = GreaterThan._validate_scalar(scalar_column, column_names, scalar)
# Assert
assert out == ['b']
def test__validate_scalar_list(self):
"""Test the ``_validate_scalar`` method.
This method validates the inputs and transforms them into
the correct format.
Input:
- scalar_column = 0
- column_names = ['b']
Output:
- column_names == ['b']
"""
# Setup
scalar_column = 0
column_names = ['b']
scalar = 'low'
# Run
out = GreaterThan._validate_scalar(scalar_column, column_names, scalar)
# Assert
assert out == ['b']
def test__validate_scalar_error(self):
"""Test the ``_validate_scalar`` method.
This method raises an error when the scalar column is a list.
Input:
- scalar_column = 0
- column_names = 'b'
Side effect:
- Raise error since the scalar is a list
"""
# Setup
scalar_column = [0]
column_names = 'b'
scalar = 'high'
# Run / Assert
with pytest.raises(TypeError):
GreaterThan._validate_scalar(scalar_column, column_names, scalar)
def test__validate_inputs_high_is_scalar(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 3
- scalar = 'high'
Output:
- low == ['a']
- high == 3
- constraint_columns = ('a')
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=3, scalar='high', drop=None)
# Assert
assert low == ['a']
assert high == 3
assert constraint_columns == ('a',)
def test__validate_inputs_low_is_scalar(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 3
- high = 'b'
- scalar = 'low'
- drop = None
Output:
- low == 3
- high == ['b']
- constraint_columns = ('b')
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low=3, high='b', scalar='low', drop=None)
# Assert
assert low == 3
assert high == ['b']
assert constraint_columns == ('b',)
def test__validate_inputs_scalar_none(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 3 # where 3 is a column name
- scalar = None
- drop = None
Output:
- low == ['a']
- high == [3]
- constraint_columns = ('a', 3)
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=3, scalar=None, drop=None)
# Assert
assert low == ['a']
assert high == [3]
assert constraint_columns == ('a', 3)
def test__validate_inputs_scalar_none_lists(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = ['a']
- high = ['b', 'c']
- scalar = None
- drop = None
Output:
- low == ['a']
- high == ['b', 'c']
- constraint_columns = ('a', 'b', 'c')
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low=['a'], high=['b', 'c'], scalar=None, drop=None)
# Assert
assert low == ['a']
assert high == ['b', 'c']
assert constraint_columns == ('a', 'b', 'c')
def test__validate_inputs_scalar_none_two_lists(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = ['a', 0]
- high = ['b', 'c']
- scalar = None
- drop = None
Side effect:
- Raise error because both high and low are more than one column
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low=['a', 0], high=['b', 'c'], scalar=None, drop=None)
def test__validate_inputs_scalar_unknown(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 'b'
- scalar = 'unknown'
- drop = None
Side effect:
- Raise error because scalar is unknown
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low='a', high='b', scalar='unknown', drop=None)
def test__validate_inputs_drop_error_low(self):
"""Test the ``_validate_inputs`` method.
Make sure the method raises an error if ``drop``==``scalar``
when ``scalar`` is not ``None``.
Input:
- low = 2
- high = 'b'
- scalar = 'low'
- drop = 'low'
Side effect:
- Raise error because scalar is unknown
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low=2, high='b', scalar='low', drop='low')
def test__validate_inputs_drop_error_high(self):
"""Test the ``_validate_inputs`` method.
Make sure the method raises an error if ``drop``==``scalar``
when ``scalar`` is not ``None``.
Input:
- low = 'a'
- high = 3
- scalar = 'high'
- drop = 'high'
Side effect:
- Raise error because scalar is unknown
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low='a', high=3, scalar='high', drop='high')
def test__validate_inputs_drop_success(self):
"""Test the ``_validate_inputs`` method.
Make sure the method succeeds and returns the validated inputs when
``drop`` is different from ``scalar``.
Input:
- low = 'a'
- high = 0
- scalar = 'high'
- drop = 'low'
Output:
- low = ['a']
- high = 0
- constraint_columns == ('a')
"""
# Run / Assert
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=0, scalar='high', drop='low')
assert low == ['a']
assert high == 0
assert constraint_columns == ('a',)
def test___init___(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
Side effects:
- instance._low == 'a'
- instance._high == 'b'
- instance._strict == False
"""
# Run
instance = GreaterThan(low='a', high='b')
# Asserts
assert instance._low == ['a']
assert instance._high == ['b']
assert instance._strict is False
assert instance._scalar is None
assert instance._drop is None
assert instance.constraint_columns == ('a', 'b')
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``GreaterThan.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Run
instance = GreaterThan(low='a', high='b', handling_strategy='transform')
# Assert
assert instance.rebuild_columns == ['b']
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``GreaterThan.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Run
instance = GreaterThan(low='a', high='b', handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test___init___high_is_scalar(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes. Make sure ``scalar``
is set to ``'high'``.
Input:
- low = 'a'
- high = 0
- strict = True
- drop = 'low'
- scalar = 'high'
Side effects:
- instance._low == 'a'
- instance._high == 0
- instance._strict == True
- instance._drop = 'low'
- instance._scalar == 'high'
"""
# Run
instance = GreaterThan(low='a', high=0, strict=True, drop='low', scalar='high')
# Asserts
assert instance._low == ['a']
assert instance._high == 0
assert instance._strict is True
assert instance._scalar == 'high'
assert instance._drop == 'low'
assert instance.constraint_columns == ('a',)
def test___init___low_is_scalar(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes. Make sure ``scalar``
is set to ``'low'``.
Input:
- low = 0
- high = 'a'
- strict = True
- drop = 'high'
- scalar = 'low'
Side effects:
- instance._low == 0
- instance._high == 'a'
- instance._strict == True
- instance._drop = 'high'
- instance._scalar == 'low'
"""
# Run
instance = GreaterThan(low=0, high='a', strict=True, drop='high', scalar='low')
# Asserts
assert instance._low == 0
assert instance._high == ['a']
assert instance._strict is True
assert instance._scalar == 'low'
assert instance._drop == 'high'
assert instance.constraint_columns == ('a',)
def test___init___strict_is_false(self):
"""Test the ``GreaterThan.__init__`` method.
Ensure that ``operator`` is set to ``np.greater_equal``
when ``strict`` is set to ``False``.
Input:
- low = 'a'
- high = 'b'
- strict = False
"""
# Run
instance = GreaterThan(low='a', high='b', strict=False)
# Assert
assert instance.operator == np.greater_equal
def test___init___strict_is_true(self):
"""Test the ``GreaterThan.__init__`` method.
Ensure that ``operator`` is set to ``np.greater``
when ``strict`` is set to ``True``.
Input:
- low = 'a'
- high = 'b'
- strict = True
"""
# Run
instance = GreaterThan(low='a', high='b', strict=True)
# Assert
assert instance.operator == np.greater
def test__init__get_columns_to_reconstruct_default(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
Side effects:
- self._columns_to_reconstruct == ['b']
"""
# Setup
instance = GreaterThan(low='a', high='b')
assert instance._columns_to_reconstruct == ['b']
def test__init__get_columns_to_reconstruct_drop_high(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
- drop = 'high'
Side effects:
- self._columns_to_reconstruct == ['b']
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='high')
assert instance._columns_to_reconstruct == ['b']
def test__init__get_columns_to_reconstruct_drop_low(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
- drop = 'low'
Side effects:
- self._columns_to_reconstruct == ['a']
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='low')
assert instance._columns_to_reconstruct == ['a']
def test__init__get_columns_to_reconstruct_scalar_high(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 0
- scalar = 'high'
Side effects:
- self._columns_to_reconstruct == ['a']
"""
# Setup
instance = GreaterThan(low='a', high=0, scalar='high')
assert instance._columns_to_reconstruct == ['a']
def test__get_value_column_list(self):
"""Test the ``GreaterThan._get_value`` method.
This method returns a scalar or a ndarray of values
depending on the type of the ``field``.
Input:
- Table with given data.
- field = 'low'
"""
# Setup
instance = GreaterThan(low='a', high='b')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
out = instance._get_value(table_data, 'low')
# Assert
expected = table_data[['a']].values
np.testing.assert_array_equal(out, expected)
def test__get_value_scalar(self):
"""Test the ``GreaterThan._get_value`` method.
This method returns a scalar or a ndarray of values
depending on the type of the ``field``.
Input:
- Table with given data.
- field = 'low'
- scalar = 'low'
"""
# Setup
instance = GreaterThan(low=3, high='b', scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
out = instance._get_value(table_data, 'low')
# Assert
expected = 3
assert out == expected
def test__get_diff_columns_name_low_is_scalar(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should be the given column names with a '#' token appended.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=0, high=['a', 'b#'], scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b#': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a#', 'b##']
assert out == expected
def test__get_diff_columns_name_high_is_scalar(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should be the given column names with a '#' token appended.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=0, scalar='high')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a#', 'b#']
assert out == expected
def test__get_diff_columns_name_scalar_is_none(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should be the column names joined with a token between them.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='b#', scalar=None)
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b#': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['b##a']
assert out == expected
def test__get_diff_columns_name_scalar_is_none_multi_column_low(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should be the column names joined with a token between them.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=['a#', 'c'], high='b', scalar=None)
table_data = pd.DataFrame({
'a#': [1, 2, 4],
'b': [4, 5, 6],
'c#': [7, 8, 9]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a##b', 'c#b']
assert out == expected
def test__get_diff_columns_name_scalar_is_none_multi_column_high(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should be the column names joined with a token between them.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=0, high=['b', 'c'], scalar=None)
table_data = pd.DataFrame({
0: [1, 2, 4],
'b': [4, 5, 6],
'c#': [7, 8, 9]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['b#0', 'c#0']
assert out == expected
def test__check_columns_exist_success(self):
"""Test the ``GreaterThan._check_columns_exist`` method.
This method raises an error if the specified columns in
``low`` or ``high`` do not exist.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run / Assert
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
instance._check_columns_exist(table_data, 'low')
instance._check_columns_exist(table_data, 'high')
def test__check_columns_exist_error(self):
"""Test the ``GreaterThan._check_columns_exist`` method.
This method raises an error if the specified columns in
``low`` or ``high`` do not exist.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='c')
# Run / Assert
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
instance._check_columns_exist(table_data, 'low')
with pytest.raises(KeyError):
instance._check_columns_exist(table_data, 'high')
def test__fit_only_one_datetime_arg(self):
"""Test the ``Between._fit`` method by passing in only one arg as datetime.
If only one of the high / low args is a datetime type, expect a ValueError.
Input:
- low is an int column
- high is a datetime
Output:
- n/a
Side Effects:
- ValueError
"""
# Setup
instance = GreaterThan(low='a', high=pd.to_datetime('2021-01-01'), scalar='high')
# Run and assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(ValueError):
instance._fit(table_data)
def test__fit__low_is_not_found_and_scalar_is_none(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``low`` is set to a value not seen in ``table_data``.
Input:
- Table without ``low`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low=3, high='b')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__high_is_not_found_and_scalar_is_none(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``high`` is set to a value not seen in ``table_data``.
Input:
- Table without ``high`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low='a', high=3)
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__low_is_not_found_scalar_is_high(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``low`` is set to a value not seen in ``table_data``.
Input:
- Table without ``low`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low='c', high=3, scalar='high')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__high_is_not_found_scalar_is_high(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``high`` is set to a value not seen in ``table_data``.
Input:
- Table without ``high`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low=3, high='c', scalar='low')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__columns_to_reconstruct_drop_high(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to ``instance._high`` if ``instance_drop`` is `high`.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
def test__fit__columns_to_reconstruct_drop_low(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to ``instance._low`` if ``instance_drop`` is `low`.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['a']
def test__fit__columns_to_reconstruct_default(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `high` by default.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
def test__fit__columns_to_reconstruct_high_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `low` if ``instance._scalar`` is ``'high'``.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', scalar='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['a']
def test__fit__columns_to_reconstruct_low_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `high` if ``instance._scalar`` is ``'low'``.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b', scalar='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
def test__fit__diff_columns_one_column(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_diff_columns``
to the one column in ``instance.constraint_columns`` plus a
token if there is only one column in that set.
Input:
- Table with one column.
Side Effect:
- ``_diff_columns`` == ``['a#']``
"""
# Setup
instance = GreaterThan(low='a', high=3, scalar='high')
# Run
table_data = pd.DataFrame({'a': [1, 2, 3]})
instance._fit(table_data)
# Asserts
assert instance._diff_columns == ['a#']
def test__fit__diff_columns_multiple_columns(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_diff_columns``
to the two columns in ``instance.constraint_columns`` separated
by a token if both columns are in that set.
Input:
- Table with two column.
Side Effect:
- ``_diff_columns`` == ``['b#a']``
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._diff_columns == ['b#a']
def test__fit_int(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_low_is_scalar`` and ``high_is_scalar`` are ``False``.
Input:
- Table that contains two constrained columns with the high one
being made of integers.
Side Effect:
- The _dtype attribute gets `int` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'i' for dtype in instance._dtype])
def test__fit_float(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_low_is_scalar`` and ``high_is_scalar`` are ``False``.
Input:
- Table that contains two constrained columns with the high one
being made of float values.
Side Effect:
- The _dtype attribute gets `float` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'f' for dtype in instance._dtype])
def test__fit_datetime(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_low_is_scalar`` and ``high_is_scalar`` are ``False``.
Input:
- Table that contains two constrained columns of datetimes.
Side Effect:
- The _dtype attribute gets `datetime` as the value.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01']),
'b': pd.to_datetime(['2020-01-02'])
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'M' for dtype in instance._dtype])
def test__fit_type__high_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``low`` column as the ``_dtype`` attribute
if ``_scalar`` is ``'high'``.
Input:
- Table that contains two constrained columns with the low one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low='a', high=3, scalar='high')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'f' for dtype in instance._dtype])
def test__fit_type__low_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_scalar`` is ``'low'``.
Input:
- Table that contains two constrained columns with the high one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low=3, high='b', scalar='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'f' for dtype in instance._dtype])
def test__fit_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute.
Input:
- Table that contains two constrained columns with different dtype.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=0, scalar='high')
dtype_int = pd.Series([1]).dtype
dtype_float = np.dtype('float')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4., 5., 6.]
})
instance._fit(table_data)
# Assert
expected_diff_columns = ['a#', 'b#']
expected_dtype = pd.Series([dtype_int, dtype_float], index=table_data.columns)
assert instance._diff_columns == expected_diff_columns
pd.testing.assert_series_equal(instance._dtype, expected_dtype)
def test__fit_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute.
Input:
- Table that contains two constrained columns with different dtype.
"""
# Setup
instance = GreaterThan(low=0, high=['a', 'b'], scalar='low')
dtype_int = pd.Series([1]).dtype
dtype_float = np.dtype('float')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4., 5., 6.]
})
instance._fit(table_data)
# Assert
expected_diff_columns = ['a#', 'b#']
expected_dtype = pd.Series([dtype_int, dtype_float], index=table_data.columns)
assert instance._diff_columns == expected_diff_columns
pd.testing.assert_series_equal(instance._dtype, expected_dtype)
def test_is_valid_strict_false(self):
"""Test the ``GreaterThan.is_valid`` method with strict False.
If strict is False, equal values should count as valid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- False should be returned for the strictly invalid row and True
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=False)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_strict_true(self):
"""Test the ``GreaterThan.is_valid`` method with strict True.
If strict is True, equal values should count as invalid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- True should be returned for the strictly valid row and False
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, False, False]
np.testing.assert_array_equal(expected_out, out)
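# Illustrative sketch (not part of the original suite): the ``strict`` flag only
# changes how ties are judged. In the table above, the middle row has a == b == 2, so:
#     GreaterThan(low='a', high='b', strict=False).is_valid(table_data)  # middle row -> True
#     GreaterThan(low='a', high='b', strict=True).is_valid(table_data)   # middle row -> False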
def test_is_valid_low_is_scalar_high_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a scalar, and high is a column name, then
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low=3, high='b', strict=False, scalar='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, False, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_high_is_scalar_low_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a scalar, and low is a column name, then
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below high.
Output:
- True should be returned for the rows where the low
column is below high.
"""
# Setup
instance = GreaterThan(low='a', high=2, strict=False, scalar='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a scalar, and low is multi column, then
the values in those columns should all be lower than
``instance._high``.
Input:
- Table with values above and below high.
Output:
- True should be returned for the rows where the low
column is below high.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=2, strict=False, scalar='high')
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [False, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a scalar, and high is multi column, then
the values in those columns should all be higher than
``instance._low``.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low=2, high=['a', 'b'], strict=False, scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [False, True, True]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_scalar_is_none_multi_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If scalar is none, and high is multi column, then
the values in those columns should all be higher than
the values in the low column.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low='b', high=['a', 'c'], strict=False)
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = [False, True, True]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_high_is_datetime(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a datetime and low is a column,
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below `high`.
Output:
- True should be returned for the rows where the low
column is below `high`.
"""
# Setup
high_dt = pd.to_datetime('8/31/2021')
instance = GreaterThan(low='a', high=high_dt, strict=False, scalar='high')
table_data = pd.DataFrame({
'a': [datetime(2020, 5, 17), datetime(2020, 2, 1), datetime(2021, 9, 1)],
'b': [4, 2, 2],
})
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_low_is_datetime(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a datetime and high is a column,
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below `low`.
Output:
- True should be returned for the rows where the high
column is above `low`.
"""
# Setup
low_dt = pd.to_datetime('8/31/2021')
instance = GreaterThan(low=low_dt, high='a', strict=False, scalar='low')
table_data = pd.DataFrame({
'a': [datetime(2021, 9, 17), datetime(2021, 7, 1), datetime(2021, 9, 1)],
'b': [4, 2, 2],
})
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = [True, False, True]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_two_cols_with_nans(self):
"""Test the ``GreaterThan.is_valid`` method with nan values.
If there is a NaN row, expect that `is_valid` returns True.
Input:
- Table with a NaN row
Output:
- True should be returned for the NaN row.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, None, 3],
'b': [4, None, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_two_cols_with_one_nan(self):
"""Test the ``GreaterThan.is_valid`` method with nan values.
If there is a row in which we compare one NaN value with one
non-NaN value, expect that `is_valid` returns True.
Input:
- Table with a row that contains only one NaN value.
Output:
- True should be returned for the row with the NaN value.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, None, 3],
'b': [4, 5, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
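# Note on the _transform tests below: judging from the expected outputs, the
# diff column is assumed to hold np.log((high - low) + 1) per row. For example,
# with low=1 and high=4 the distance is 3, so the stored value is np.log(4).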
def test__transform_int_drop_none(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type int.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_int_drop_high(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type int.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the high column.
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the high column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_int_drop_low(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type int.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the low column.
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the low column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_float_drop_none(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type float.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_datetime_drop_none(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type datetime.
If the columns are of type datetime, ``_transform`` is expected
to convert the timedelta distance into numeric before applying
the +1 and logarithm.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with values at a distance of exactly 1 second.
Output:
- Same table with a diff column of the logarithms
of the distance in nanoseconds + 1, which is np.log(1_000_000_001).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_columns = ['a#b']
instance._is_datetime = True
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
pd.testing.assert_frame_equal(out, expected_out)
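# Note (illustrative): for datetime columns the distance is assumed to be taken
# in nanoseconds, so a 1 second gap becomes 1_000_000_000 ns and the stored
# value is np.log(1_000_000_000 + 1) == np.log(1_000_000_001).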
def test_transform_not_all_columns_provided(self):
"""Test the ``GreaterThan.transform`` method.
If some of the columns needed for the transform are missing, it will raise
a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, fit_columns_model=False)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test__transform_high_is_scalar(self):
"""Test the ``GreaterThan._transform`` method with high as scalar.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high scalar value and the low column and create a diff column
with the logarithm of the distance + 1.
Setup:
- ``_high`` is set to 5 and ``_scalar`` is ``'high'``.
Input:
- Table with one low column and two dummy columns.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which are np.log(5), np.log(4) and np.log(3).
"""
# Setup
instance = GreaterThan(low='a', high=5, strict=True, scalar='high')
instance._diff_columns = ['a#b']
instance.constraint_columns = ['a']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(5), np.log(4), np.log(3)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_low_is_scalar(self):
"""Test the ``GreaterThan._transform`` method with high as scalar.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high scalar value and the low column and create a diff column
with the logarithm of the distance + 1.
Setup:
- ``_high`` is set to 5 and ``_scalar`` is ``'low'``.
Input:
- Table with one low column and two dummy columns.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low=2, high='b', strict=True, scalar='low')
instance._diff_columns = ['a#b']
instance.constraint_columns = ['b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(3), np.log(4), np.log(5)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan._transform`` method.
The ``GreaterThan._transform`` method is expected to compute the distance
between the scalar high and each low column and store the logarithm of
that distance + 1 in the corresponding diff column.
Input:
- Table with given data.
Output:
- Same table with additional columns of the logarithms + 1.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=3, strict=True, scalar='high')
instance._diff_columns = ['a#', 'b#']
instance.constraint_columns = ['a', 'b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(3), np.log(2), np.log(1)],
'b#': [np.log(0), np.log(-1), np.log(-2)],
})
pd.testing.assert_frame_equal(out, expected)
def test__transform_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan._transform`` method.
The ``GreaterThan._transform`` method is expected to compute the distance
between each high column and the scalar low and store the logarithm of
that distance + 1 in the corresponding diff column.
Input:
- Table with given data.
Output:
- Same table with additional columns of the logarithms + 1.
"""
# Setup
instance = GreaterThan(low=3, high=['a', 'b'], strict=True, scalar='low')
instance._diff_columns = ['a#', 'b#']
instance.constraint_columns = ['a', 'b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(-1), np.log(0), np.log(1)],
'b#': [np.log(2), np.log(3), np.log(4)],
})
pd.testing.assert_frame_equal(out, expected)
def test__transform_scalar_is_none_multi_column(self):
"""Test the ``GreaterThan._transform`` method.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high column and each low column and store the logarithm of
that distance + 1 in the corresponding diff column.
Input:
- Table with given data.
Output:
- Same table with additional columns of the logarithms + 1.
"""
# Setup
instance = GreaterThan(low=['a', 'c'], high='b', strict=True)
instance._diff_columns = ['a#', 'c#']
instance.constraint_columns = ['a', 'c']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(4)] * 3,
'c#': [np.log(-2)] * 3,
})
pd.testing.assert_frame_equal(out, expected)
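# Note: some expected diff values above are np.log of zero or of a negative
# number, which evaluate to -inf and NaN respectively (with runtime warnings).
# pd.testing.assert_frame_equal treats matching -inf/NaN positions as equal,
# so these expectations still pass.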
def test_reverse_transform_int_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'b': [4, 5, 6],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_float_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype float.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to float values
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as float values
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = [np.dtype('float')]
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'b': [4.1, 5.2, 6.3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the high column replaced by the low one + one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = [np.dtype('<M8[ns]')]
instance._diff_columns = ['a#b']
instance._is_datetime = True
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01'])
})
pd.testing.assert_frame_equal(out, expected_out)
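# Note (illustrative): reversing the datetime diff is assumed to work as
# np.exp(np.log(1_000_000_001)) - 1 == 1_000_000_000 nanoseconds, i.e. a one
# second timedelta that is added back onto the low column.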
def test_reverse_transform_int_drop_low(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the low column replaced by the high one - 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['a']
# Run
transformed = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a': [1, 2, 3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_low(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- subtract from the high column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the low column replaced by the high one - one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._dtype = [np.dtype('<M8[ns]')]
instance._diff_columns = ['a#b']
instance._is_datetime = True
instance._columns_to_reconstruct = ['a']
# Run
transformed = pd.DataFrame({
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00'])
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_int_drop_none(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low column is
higher than the high column.
Output:
- Same table with the high column replaced by the low one + 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 1, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
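# Note: with _drop set to None, the expected outputs suggest that only rows
# where the constraint is violated (here a >= b) are rebuilt from the diff
# column; rows that already satisfy the constraint keep their original values.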
def test_reverse_transform_datetime_drop_none(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column when the row is invalid
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``None``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
The table should have one invalid row where the low column is
higher than the high column.
Output:
- Same table with the high column replaced by the low one + one second
for all invalid rows, and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._dtype = [np.dtype('<M8[ns]')]
instance._diff_columns = ['a#b']
instance._is_datetime = True
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-01T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2]
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_low_is_scalar(self):
"""Test the ``GreaterThan.reverse_transform`` method with low as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_low`` is set to an int and ``_scalar`` is ``'low'``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low value is
higher than the high column.
Output:
- Same table with the high column replaced by the low value + 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=3, high='b', strict=True, scalar='low')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 1, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 6, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_high_is_scalar(self):
"""Test the ``GreaterThan.reverse_transform`` method with high as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_high`` is set to an int and ``_scalar`` is ``'high'``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low column is
higher than the high value.
Output:
- Same table with the low column replaced by the high one - 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high=3, strict=True, scalar='high')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['a']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 0],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan.reverse_transform`` method with high as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_high`` is set to an int and ``_scalar`` is ``'high'``.
- ``_low`` is set to multiple columns.
Input:
- Table with a diff column that contains the constant np.log(4)/np.log(5).
The table should have one invalid row where the low column is
higher than the high value.
Output:
- Same table with the low column replaced by the high one - 3/-4 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=3, strict=True, scalar='high')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [0, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(4)] * 3,
'b#': [np.log(5)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 0, 0],
'b': [0, -1, -1],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
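# Note: the expected output above suggests that when any constrained column in
# a row violates the scalar bound, every constrained column in that row is
# rebuilt from its diff column (e.g. the second row: a=2 is valid on its own
# but is still replaced because b=5 exceeds high=3).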
def test_reverse_transform_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan.reverse_transform`` method with low as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_low`` is set to an int and ``_scalar`` is ``'low'``.
- ``_high`` is set to multiple columns.
Input:
- Table with a diff column that contains the constant np.log(4)/np.log(5).
The table should have one invalid row where the low value is
higher than the high column.
Output:
- Same table with the high column replaced by the low value +3/+4 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=3, high=['a', 'b'], strict=True, scalar='low')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(4)] * 3,
'b#': [np.log(5)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [6, 6, 4],
'b': [7, 7, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_scalar_is_none_multi_column(self):
"""Test the ``GreaterThan.reverse_transform`` method with low as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Setup:
- ``_low`` = ['a', 'c'].
- ``_high`` = ['b'].
Input:
- Table with diff columns that contain the constant np.log(1).
Output:
- Same table with the diff columns dropped and the original
columns left unchanged.
"""
# Setup
instance = GreaterThan(low=['a', 'c'], high=['b'], strict=True)
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'c#']
instance._columns_to_reconstruct = ['a', 'c']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(1)] * 3,
'c#': [np.log(1)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_multi_column_positive(self):
"""Test the ``GreaterThan.reverse_transform`` method for positive constraint.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Input:
- Table with given data.
Output:
- Same table with replaced rows and dropped columns.
"""
# Setup
instance = GreaterThan(low=0, high=['a', 'b'], strict=True, scalar='low')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, -1],
'c': [7, 8, 9],
'a#': [np.log(2), np.log(3), np.log(4)],
'b#': [np.log(5), np.log(6), np.log(0)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 0],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_multi_column_negative(self):
"""Test the ``GreaterThan.reverse_transform`` method for negative constraint.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Input:
- Table with given data.
Output:
- Same table with replaced rows and dropped columns.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=0, strict=True, scalar='high')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [-1, -2, 1],
'b': [-4, -5, -1],
'c': [7, 8, 9],
'a#': [np.log(2), np.log(3), np.log(0)],
'b#': [np.log(5), np.log(6), np.log(2)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [-1, -2, 0],
'b': [-4, -5, -1],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
class TestPositive():
def test__init__(self):
"""
Test the ``Positive.__init__`` method.
The method is expected to set the ``_low`` instance variable
to 0, the ``_scalar`` variable to ``'low'``. The rest of the
parameters should be passed. Check that ``_drop`` is set to
``None`` when ``drop`` is ``False``.
Input:
- strict = True
- columns = 'a'
- drop = False
Side effects:
- instance._low == 0
- instance._high == ['a']
- instance._strict == True
- instance._scalar == 'low'
- instance._drop = None
"""
# Run
instance = Positive(columns='a', strict=True, drop=False)
# Asserts
assert instance._low == 0
assert instance._high == ['a']
assert instance._strict is True
assert instance._scalar == 'low'
assert instance._drop is None
def test__init__drop_true(self):
"""
Test the ``Positive.__init__`` method with drop is ``True``.
Check that ``_drop`` is set to 'high' when ``drop`` is ``True``.
Input:
- strict = True
- columns = 'a'
- drop = True
Side effects:
- instance._low == 0
- instance._high == ['a']
- instance._strict == True
- instance._scalar == 'low'
- instance._drop = 'high'
"""
# Run
instance = Positive(columns='a', strict=True, drop=True)
# Asserts
assert instance._low == 0
assert instance._high == ['a']
assert instance._strict is True
assert instance._scalar == 'low'
assert instance._drop == 'high'
class TestNegative():
def test__init__(self):
"""
Test the ``Negative.__init__`` method.
The method is expected to set the ``_high`` instance variable
to 0, the ``_scalar`` variable to ``'high'``. The rest of the
parameters should be passed. Check that ``_drop`` is set to
``None`` when ``drop`` is ``False``.
Input:
- strict = True
- columns = 'a'
- drop = False
Side effects:
- instance._low == ['a']
- instance._high == 0
- instance._strict == True
- instance._scalar = 'high'
- instance._drop = None
"""
# Run
instance = Negative(columns='a', strict=True, drop=False)
# Asserts
assert instance._low == ['a']
assert instance._high == 0
assert instance._strict is True
assert instance._scalar == 'high'
assert instance._drop is None
def test__init__drop_true(self):
"""
Test the ``Negative.__init__`` method with drop is ``True``.
Check that ``_drop`` is set to 'low' when ``drop`` is ``True``.
Input:
- strict = True
- columns = 'a'
- drop = True
Side effects:
- instance._low == ['a']
- instance._high == 0
- instance._strict == True
- instance._scalar = 'high'
- instance._drop = 'low'
"""
# Run
instance = Negative(columns='a', strict=True, drop=True)
# Asserts
assert instance._low == ['a']
assert instance._high == 0
assert instance._strict is True
assert instance._scalar == 'high'
assert instance._drop == 'low'
def new_column(data):
"""Formula to be used for the ``TestColumnFormula`` class."""
if data['a'] is None or data['b'] is None:
return None
return data['a'] + data['b']
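# Illustrative check of the helper above: for a row with a=1 and b=4 the
# formula yields 5, which matches column 'c' in the valid-data fixtures used
# by TestColumnFormula.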
class TestColumnFormula():
def test___init__(self):
"""Test the ``ColumnFormula.__init__`` method.
It is expected to create a new Constraint instance,
import the formula to use for the computation, and
set the specified constraint column.
Input:
- column = 'col'
- formula = new_column
"""
# Setup
column = 'col'
# Run
instance = ColumnFormula(column=column, formula=new_column)
# Assert
assert instance._column == column
assert instance._formula == new_column
assert instance.constraint_columns == ('col', )
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``ColumnFormula.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Setup
column = 'col'
# Run
instance = ColumnFormula(column=column, formula=new_column, handling_strategy='transform')
# Assert
assert instance.rebuild_columns == (column,)
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``ColumnFormula.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Setup
column = 'col'
# Run
instance = ColumnFormula(column=column, formula=new_column,
handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test_is_valid_valid(self):
"""Test the ``ColumnFormula.is_valid`` method for a valid data.
If the data fulfills the formula, result is a series of ``True`` values.
Input:
- Table data fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``True`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_valid(self):
"""Test the ``ColumnFormula.is_valid`` method for a non-valid data.
If the data does not fulfill the formula, result is a series of ``False`` values.
Input:
- Table data not fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``False`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [1, 2, 3]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([False, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_with_nans(self):
"""Test the ``ColumnFormula.is_valid`` method for with a formula that produces nans.
If the formula produces NaNs for rows where the constraint column is also
NaN, those rows are considered valid and the result is a series of ``True`` values.
Input:
- Table data with NaNs that fulfills the formula (pandas.DataFrame)
Output:
- Series of ``True`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, None],
'c': [5, 7, None]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test__transform(self):
"""Test the ``ColumnFormula._transform`` method.
It is expected to drop the indicated column from the table.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data without the indicated column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_without_dropping_column(self):
"""Test the ``ColumnFormula._transform`` method without dropping the column.
If `drop_column` is false, expect to not drop the constraint column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with the indicated column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column, drop_column=False)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_missing_column(self):
"""Test the ``ColumnFormula._transform`` method when the constraint column is missing.
When ``_transform`` is called with data that does not contain the constraint column,
expect to return the data as-is.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data, unchanged (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'd': [5, 7, 9]
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'd': [5, 7, 9]
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform(self):
"""Test the ``ColumnFormula.reverse_transform`` method.
It is expected to compute the indicated column by applying the given formula.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [1, 1, 1]
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
pd.testing.assert_frame_equal(expected_out, out)
class TestRounding():
def test___init__(self):
"""Test the ``Rounding.__init__`` method.
It is expected to create a new Constraint instance
and set the rounding args.
Input:
- columns = ['b', 'c']
- digits = 2
"""
# Setup
columns = ['b', 'c']
digits = 2
# Run
instance = Rounding(columns=columns, digits=digits)
# Assert
assert instance._columns == columns
assert instance._digits == digits
def test___init__invalid_digits(self):
"""Test the ``Rounding.__init__`` method with an invalid argument.
Pass in an invalid ``digits`` argument, and expect a ValueError.
Input:
- columns = ['b', 'c']
- digits = 20
"""
# Setup
columns = ['b', 'c']
digits = 20
# Run
with pytest.raises(ValueError):
Rounding(columns=columns, digits=digits)
def test___init__invalid_tolerance(self):
"""Test the ``Rounding.__init__`` method with an invalid argument.
Pass in an invalid ``tolerance`` argument, and expect a ValueError.
Input:
- columns = ['b', 'c']
- digits = 2
- tolerance = 0.1
"""
# Setup
columns = ['b', 'c']
digits = 2
tolerance = 0.1
# Run
with pytest.raises(ValueError):
Rounding(columns=columns, digits=digits, tolerance=tolerance)
def test_is_valid_positive_digits(self):
"""Test the ``Rounding.is_valid`` method for a positive digits argument.
Input:
- Table data with and without the desired decimal places (pandas.DataFrame)
Output:
- Series of booleans indicating which rows are valid (pandas.Series)
"""
# Setup
columns = ['b', 'c']
digits = 2
tolerance = 1e-3
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.12, 5.51, None, 6.941, 1.129],
'c': [5.315, 7.12, 1.12, 9.131, 12.329],
'd': ['a', 'b', 'd', 'e', None],
'e': [123.31598, -1.12001, 1.12453, 8.12129, 1.32923]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([False, True, False, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_negative_digits(self):
"""Test the ``Rounding.is_valid`` method for a negative digits argument.
Input:
- Table data with and without the desired decimal places (pandas.DataFrame)
Output:
- Series of booleans indicating which rows are valid (pandas.Series)
"""
# Setup
columns = ['b']
digits = -2
tolerance = 1
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [401, 500, 6921, 799, None],
'c': [5.3134, 7.1212, 9.1209, 101.1234, None],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False, True, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_zero_digits(self):
"""Test the ``Rounding.is_valid`` method for a zero digits argument.
Input:
- Table data with and without the desired decimal places (pandas.DataFrame)
Output:
- Series of booleans indicating which rows are valid (pandas.Series)
"""
# Setup
columns = ['b', 'c']
digits = 0
tolerance = 1e-4
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, None, 3, 4],
'b': [4, 5.5, 1.2, 6.0001, 5.99999],
'c': [5, 7.12, 1.31, 9.00001, 4.9999],
'd': ['a', 'b', None, 'd', 'e'],
'e': [2.1254, 17.12123, 124.12, 123.0112, -9.129434]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_reverse_transform_positive_digits(self):
"""Test the ``Rounding.reverse_transform`` method with positive digits.
Expect that the columns are rounded to the specified number of digits.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
digits = 3
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, None, 4],
'b': [4.12345, None, 5.100, 6.0001, 1.7999],
'c': [1.1, 1.234, 9.13459, 4.3248, 6.1312],
'd': ['a', 'b', 'd', 'e', None]
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, None, 4],
'b': [4.123, None, 5.100, 6.000, 1.800],
'c': [1.100, 1.234, 9.135, 4.325, 6.131],
'd': ['a', 'b', 'd', 'e', None]
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_negative_digits(self):
"""Test the ``Rounding.reverse_transform`` method with negative digits.
Expect that the columns are rounded to the specified number of digits.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
columns = ['b']
digits = -3
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [41234.5, None, 5000, 6001, 5928],
'c': [1.1, 1.23423, 9.13459, 12.12125, 18.12152],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [41000.0, None, 5000.0, 6000.0, 6000.0],
'c': [1.1, 1.23423, 9.13459, 12.12125, 18.12152],
'd': ['a', 'b', 'd', 'e', 'f']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_zero_digits(self):
"""Test the ``Rounding.reverse_transform`` method with zero digits.
Expect that the columns are rounded to the specified number of digits.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
digits = 0
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.12345, None, 5.0, 6.01, 7.9],
'c': [1.1, 1.0, 9.13459, None, 8.89],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.0, None, 5.0, 6.0, 8.0],
'c': [1.0, 1.0, 9.0, None, 9.0],
'd': ['a', 'b', 'd', 'e', 'f']
})
pd.testing.assert_frame_equal(expected_out, out)
def transform(data, low, high):
"""Transform to be used for the TestBetween class."""
data = (data - low) / (high - low) * 0.95 + 0.025
return np.log(data / (1.0 - data))
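# Note on the helper above: it rescales `data` from the [low, high] interval
# into [0.025, 0.975] and then applies a logit. For example, with low=0.0 and
# high=1.0 a value of 0.5 maps to 0.5 and then to np.log(0.5 / 0.5) == 0.0.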
class TestBetween():
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``Between.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Setup
column = 'col'
# Run
instance = Between(column=column, low=10, high=20, handling_strategy='transform')
# Assert
assert instance.rebuild_columns == (column,)
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``Between.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Setup
column = 'col'
# Run
instance = Between(column=column, low=10, high=20, handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test_fit_only_one_datetime_arg(self):
"""Test the ``Between.fit`` method by passing in only one arg as datetime.
If only one of the bound parameters is a datetime type, expect a ValueError.
Input:
- low is a numeric scalar
- high is a datetime
Output:
- n/a
Side Effects:
- ValueError
"""
# Setup
column = 'a'
low = 0.0
high = pd.to_datetime('2021-01-01')
instance = Between(column=column, low=low, high=high)
# Run and assert
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [4, 5, 6],
})
with pytest.raises(ValueError):
instance.fit(table_data)
def test_transform_scalar_scalar(self):
"""Test the ``Between.transform`` method by passing ``low`` and ``high`` as scalars.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [4, 5, 6],
})
instance.fit(table_data)
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'a#0.0#1.0': transform(table_data[column], low, high)
})
pd.testing.assert_frame_equal(expected_out, out)
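# Note: the expected outputs in these transform tests assume that the original
# constraint column is replaced by a transformed column named
# '<column>#<low>#<high>', e.g. 'a#0.0#1.0' for scalar bounds.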
def test__transform_scalar_column(self):
"""Test the ``Between._transform`` method with ``low`` as scalar and ``high`` as a column.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0.5, 1, 6],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0.5, 1, 6],
'a#0.0#b': transform(table_data[column], low, table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_column_scalar(self):
"""Test the ``Between._transform`` method with ``low`` as a column and ``high`` as scalar.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0, -1, 0.5],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0, -1, 0.5],
'a#b#1.0': transform(table_data[column], table_data[low], high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_column_column(self):
"""Test the ``Between._transform`` method by passing ``low`` and ``high`` as columns.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0, -1, 0.5],
'c': [0.5, 1, 6]
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0, -1, 0.5],
'c': [0.5, 1, 6],
'a#b#c': transform(table_data[column], table_data[low], table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_datetime_datetime(self):
"""Test the ``Between._transform`` method by passing ``low`` and ``high`` as datetimes.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
- High and Low as datetimes
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = pd.to_datetime('1900-01-01')
high = pd.to_datetime('2021-01-01')
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
'b': [4, 5, 6],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'a#1900-01-01T00:00:00.000000000#2021-01-01T00:00:00.000000000': transform(
table_data[column], low, high)
})
pd.testing.assert_frame_equal(expected_out, out)
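# Note: when a bound is a datetime, the expected column names above render it
# as a nanosecond-precision ISO timestamp, e.g.
# 'a#1900-01-01T00:00:00.000000000#2021-01-01T00:00:00.000000000'.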
def test__transform_datetime_column(self):
"""Test the ``Between._transform`` method with ``low`` as datetime and ``high`` as a column.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = pd.to_datetime('1900-01-01')
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
'b': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
'a#1900-01-01T00:00:00.000000000#b': transform(
table_data[column], low, table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_column_datetime(self):
"""Test the ``Between._transform`` method with ``low`` as a column and ``high`` as datetime.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = pd.to_datetime('2021-01-01')
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'a#b#2021-01-01T00:00:00.000000000': transform(
table_data[column], table_data[low], high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_column_column_datetime(self):
"""Test the ``Between._transform`` method with ``low`` and ``high`` as datetime columns.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
# Run
table_data = pd.DataFrame({
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'c': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
]
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'c': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
'a#b#c': transform(table_data[column], table_data[low], table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_scalar_scalar(self):
"""Test ``Between.reverse_transform`` with ``low`` and ``high`` as scalars.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
table_data = pd.DataFrame({
'b': [4, 5, 6],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [4, 5, 6],
'a#0.0#1.0': transform(table_data[column], low, high)
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_scalar_column(self):
"""Test ``Between.reverse_transform`` with ``low`` as scalar and ``high`` as a column.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
table_data = pd.DataFrame({
'b': [0.5, 1, 6],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [0.5, 1, 6],
'a#0.0#b': transform(table_data[column], low, table_data[high])
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_column_scalar(self):
"""Test ``Between.reverse_transform`` with ``low`` as a column and ``high`` as scalar.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
table_data = pd.DataFrame({
'b': [0, -1, 0.5],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [0, -1, 0.5],
'a#b#1.0': transform(table_data[column], table_data[low], high)
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_column_column(self):
"""Test ``Between.reverse_transform`` with ``low`` and ``high`` as columns.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
table_data = pd.DataFrame({
'b': [0, -1, 0.5],
'c': [0.5, 1, 6],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [0, -1, 0.5],
'c': [0.5, 1, 6],
'a#b#c': transform(table_data[column], table_data[low], table_data[high])
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_datetime_datetime(self):
"""Test ``Between.reverse_transform`` with ``low`` and ``high`` as datetime.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
- High and low as datetimes
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = pd.to_datetime('1900-01-01')
high = pd.to_datetime('2021-01-01')
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
table_data = pd.DataFrame({
'b': [4, 5, 6],
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [4, 5, 6],
'a#1900-01-01T00:00:00.000000000#2021-01-01T00:00:00.000000000': transform(
table_data[column], low, high)
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_series_equal(expected_out['b'], out['b'])
pd.testing.assert_series_equal(expected_out['a'], out['a'].astype('datetime64[ms]'))
def test_reverse_transform_datetime_column(self):
"""Test ``Between.reverse_transform`` with ``low`` as datetime and ``high`` as a column.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = pd.to_datetime('1900-01-01')
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
table_data = pd.DataFrame({
'b': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-02'),
pd.to_datetime('2020-08-03'),
]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
'a#1900-01-01T00:00:00.000000000#b': transform(
table_data[column], low, table_data[high])
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_column_datetime(self):
"""Test ``Between.reverse_transform`` with ``low`` as a column and ``high`` as datetime.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = pd.to_datetime('2021-01-01')
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
table_data = pd.DataFrame({
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-03'),
pd.to_datetime('2020-08-04'),
],
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'a#b#2021-01-01T00:00:00.000000000': transform(
table_data[column], table_data[low], high)
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_series_equal(expected_out['b'], out['b'])
pd.testing.assert_series_equal(expected_out['a'], out['a'].astype('datetime64[ms]'))
def test_reverse_transform_column_column_datetime(self):
"""Test ``Between.reverse_transform`` with ``low`` and ``high`` as datetime columns.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
table_data = pd.DataFrame({
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'c': [
| pd.to_datetime('2020-10-03') | pandas.to_datetime |
#!/usr/bin/env python
### Up to date as of 10/2019 ###
'''Section 0: Import python libraries
This code has a number of dependencies, listed below.
They can be installed using the virtual environment "slab23"
that is setup using script 'library/setup3env.sh'.
Additional functions are housed in file 'slab2functions.py'
and imported below.
There are some additional dependencies used by the function file
that do not need to be installed separately.
'''
# stdlib imports
from datetime import datetime
import os.path
import argparse
import numpy as np
from pandas import DataFrame
import pandas as pd
import warnings
import slab2functions as s2f
import math
import mapio.gmt as gmt
from functools import partial
from multiprocess import Pool
import loops as loops
from scipy import ndimage
import psutil
import cProfile
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def main(args):
'''Section 1: Setup
In this section:
(1) Identify necessary input files
(2) Load parameters from '[slab]input.par'
(3) Define optional boxes for PDF/print testing
(4) Define output file names
(5) Gathering optional arguments, setting defaults
(6) Define search ellipsoid parameters
(7) Define Average active source profiles
(8) Define reference model (Slab1.0 and/or slab guides)
(9) Define Trench Locations
(10) Open and modify input dataset
(11) Calculate seismogenic zone thickness
(12) Record variable parameters used for this model
(13) Define search grid
(14) Identify tomography datasets
(15) Initialize arrays for Section 2 '''
print('Start Section 1 of 7: Setup')
print(' Loading inputs...')
''' ------ (1) Identify necessary input files ------ '''
trenches = 'library/misc/trenches_usgs_2017_depths.csv'
agesFile = 'library/misc/interp_age.3.2g.nc'
ageerrorsFile = 'library/misc/interp_ageerror.3.2g.nc'
polygonFile = 'library/misc/slab_polygons.txt'
addFile = 'library/misc/addagain.csv'
parFile = args.parFile
pd.options.mode.chained_assignment = None
warnings.filterwarnings("ignore", message="invalid value encountered in less")
warnings.filterwarnings("ignore", message="invalid value encountered in true_divide")
warnings.filterwarnings("ignore", message="invalid value encountered in greater")
warnings.filterwarnings("ignore", message="invalid value encountered in double_scalars")
''' ------ (2) Load parameters from '[slab]input.par' ------'''
for line in open(parFile):
plist = line.split()
if len(plist)>2:
if plist[0] == 'inFile':
inFile = plist[2]
if plist[0] == 'use_box':
use_box = plist[2]
if plist[0] == 'latmin':
latmin = np.float64(plist[2])
if plist[0] == 'latmax':
latmax = np.float64(plist[2])
if plist[0] == 'lonmin':
lonmin = np.float64(plist[2])
if plist[0] == 'lonmax':
lonmax = np.float64(plist[2])
if plist[0] == 'slab':
slab = plist[2]
if plist[0] == 'grid':
grid = np.float64(plist[2])
if plist[0] == 'radius1':
radius1 = np.float64(plist[2])
if plist[0] == 'radius2':
radius2 = np.float64(plist[2])
if plist[0] == 'sdr':
sdr = np.float64(plist[2])
if plist[0] == 'ddr':
ddr = np.float64(plist[2])
if plist[0] == 'taper':
taper = np.float64(plist[2])
if plist[0] == 'T':
T = np.float64(plist[2])
if plist[0] == 'node':
node = np.float64(plist[2])
if plist[0] == 'filt':
filt = np.float64(plist[2])
if plist[0] == 'maxdist':
maxdist = np.float64(plist[2])
if plist[0] == 'minunc':
minunc = np.float64(plist[2])
if plist[0] == 'mindip':
mindip = np.float64(plist[2])
if plist[0] == 'minstk':
minstk = np.float64(plist[2])
if plist[0] == 'maxthickness':
maxthickness = np.float64(plist[2])
if plist[0] == 'seismo_thick':
seismo_thick = np.float64(plist[2])
if plist[0] == 'dipthresh':
dipthresh = np.float64(plist[2])
if plist[0] == 'fracS':
fracS = np.float64(plist[2])
if plist[0] == 'kdeg':
kdeg = np.float64(plist[2])
if plist[0] == 'knot_no':
knot_no = np.float64(plist[2])
if plist[0] == 'rbfs':
rbfs = np.float64(plist[2])
# loop through to find latest slab input file if specified
polyname = slab
if slab == 'kur' or slab == 'izu':
polyname = 'jap'
if inFile == 'latest':
yearmax = 0
monthmax = 0
for filename in os.listdir('Input'):
if filename.endswith('.csv'):
try:
slabname,datei,instring = filename.split('_')
except:
continue
if slabname == polyname and instring == 'input.csv':
try:
monthi, yeari = datei.split('-')
except:
continue
yeari = int(yeari)
monthi = int(monthi)
if yeari >= yearmax:
yearmax = yeari
inFile = 'Input/%s'%filename
if monthi > monthmax:
monthmax = monthi
inFile = 'Input/%s'%filename
print (' using input file: %s'%inFile)
if slab == 'mue' or slab == 'phi' or slab == 'cot' or slab == 'sul' or slab == 'ryu':
if args.undergrid is None:
if slab == 'mue':
print ('This slab is truncated by the Caribbean (car) slab, argument -u cardepgrid is required')
print ('Exiting .... ')
exit()
if slab == 'cot':
print ('This slab is truncated by the Halmahera (hal) slab, argument -u haldepgrid is required')
print ('Exiting .... ')
exit()
if slab == 'sul':
print ('This slab is truncated by the Halmahera (hal) slab, argument -u haldepgrid is required')
print ('Exiting .... ')
exit()
if slab == 'phi':
print ('This slab is truncated by the Halmahera (hal) slab, argument -u haldepgrid is required')
print ('Exiting .... ')
exit()
if slab == 'ryu':
print ('This slab is truncated by the Kurils-Japan (kur) slab, argument -u kurdepgrid is required')
print ('Exiting .... ')
exit()
else:
undergrid = args.undergrid
''' ------ (4) Define output file names ------ '''
date = datetime.today().strftime('%m.%d.%y')
now = datetime.now()
time = '%s.%s' % (now.hour, now.minute)
folder = '%s_slab2_%s' % (slab, date)
os.system('mkdir Output/%s'%folder)
outFile = 'Output/%s/%s_slab2_res_%s.csv' % (folder, slab, date)
dataFile = 'Output/%s/%s_slab2_dat_%s.csv' % (folder, slab, date)
nodeFile = 'Output/%s/%s_slab2_nod_%s.csv' % (folder, slab, date)
fillFile = 'Output/%s/%s_slab2_fil_%s.csv' % (folder, slab, date)
rempFile = 'Output/%s/%s_slab2_rem_%s.csv' % (folder, slab, date)
clipFile = 'Output/%s/%s_slab2_clp_%s.csv' % (folder, slab, date)
these_params = 'Output/%s/%s_slab2_par_%s.csv' % (folder, slab, date)
datainfo = 'Output/%s/%s_slab2_din_%s.csv' % (folder, slab, date)
nodeinfo = 'Output/%s/%s_slab2_nin_%s.csv' % (folder, slab, date)
suppFile = 'Output/%s/%s_slab2_sup_%s.csv' % (folder, slab, date)
nodexFile = 'Output/%s/%s_slab2_nox_%s.csv' % (folder, slab, date)
nodeuFile = 'Output/%s/%s_slab2_nou_%s.csv' % (folder, slab, date)
depTextFile = 'Output/%s/%s_slab2_dep_%s.txt' % (folder, slab, date)
depGridFile = 'Output/%s/%s_slab2_dep_%s.grd' % (folder, slab, date)
strTextFile = 'Output/%s/%s_slab2_str_%s.txt' % (folder, slab, date)
strGridFile = 'Output/%s/%s_slab2_str_%s.grd' % (folder, slab, date)
dipTextFile = 'Output/%s/%s_slab2_dip_%s.txt' % (folder, slab, date)
dipGridFile = 'Output/%s/%s_slab2_dip_%s.grd' % (folder, slab, date)
uncTextFile = 'Output/%s/%s_slab2_unc_%s.txt' % (folder, slab, date)
uncGridFile = 'Output/%s/%s_slab2_unc_%s.grd' % (folder, slab, date)
thickTextFile = 'Output/%s/%s_slab2_thk_%s.txt' % (folder, slab, date)
thickGridFile = 'Output/%s/%s_slab2_thk_%s.grd' % (folder, slab, date)
savedir = 'Output/%s'%folder
''' ------ (3) Define optional boxes for PDF/print testing ------'''
if args.test is not None:
testlonmin = args.test[0]
testlonmax = args.test[1]
testlatmin = args.test[2]
testlatmax = args.test[3]
if testlonmin < 0:
testlonmin += 360
if testlonmax < 0:
testlonmax += 360
testarea = [testlonmin, testlonmax, testlatmin, testlatmax]
printtest = True
os.system('mkdir Output/PDF%s' % (slab))
os.system('mkdir Output/multitest_%s' % (slab))
f = open(datainfo, 'w+')
f.write('dataID, nodeID, used_or_where_filtered')
f.write('\n')
f.close()
f = open(nodeinfo, 'w+')
f.write('nodeID, len(df), status, details')
f.write('\n')
f.close()
else:
# an area not in range of any slab polygon
testarea = [220, 230, 15, 20]
printtest = False
''' --- (5) Gathering optional arguments, setting defaults ---'''
if use_box == 'yes':
check = 1
slab = s2f.rectangleIntersectsPolygon(lonmin, lonmax, latmin,
latmax, polygonFile)
if isinstance(slab, str):
slab = slab
else:
try:
slab = slab[0]
except:
print('System exit because box does not intersect slab polygon')
raise SystemExit()
elif use_box == 'no':
check = 0
lon1, lon2, lat1, lat2 = s2f.determine_polygon_extrema(slab,
polygonFile)
lonmin = float(lon1)
lonmax = float(lon2)
latmin = float(lat1)
latmax = float(lat2)
else:
print('use_box in slab2input.par must be "yes" or "no"')
raise SystemExit()
''' ------ (6) Define search ellipsoid parameters ------'''
alen = radius1
blen = radius2
ec = math.sqrt(1-((math.pow(blen, 2))/(math.pow(alen, 2))))
mdist = alen * ec
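# Geometry note: `ec` above is the eccentricity of an ellipse with semi-major axis
# `alen` (radius1) and semi-minor axis `blen` (radius2), ec = sqrt(1 - blen**2/alen**2);
# `mdist` = alen * ec is the linear eccentricity (centre-to-focus distance), used as a
# characteristic distance for the search ellipsoid.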
''' ------ (7) Define Average active source profiles ------'''
# Different because alu is variable E/W
if slab == 'alu':
AA_data = pd.read_csv('library/avprofiles/alu_av5.csv')
global_average = False
elif slab == 'him':
AA_data = pd.read_csv('library/avprofiles/him_av.csv')
global_average = False
elif slab == 'kur' or slab == 'izu':
AA_source = 'library/avprofiles/%s_av.txt' % 'jap'
AA_data = pd.read_table(AA_source, delim_whitespace=True,\
header=None, names=['dist', 'depth'])
AA_data = AA_data[AA_data.dist < 125]
global_average = False
# Use RF data like AA data to constrain flat slab in Mexico
elif slab == 'cam':
AA_source = 'library/avprofiles/%s_av.txt' % slab
AA_data = pd.read_table(AA_source, delim_whitespace=True,\
header=None, names=['dist', 'depth'])
RF_data = pd.read_csv('library/avprofiles/cam_RF_av.csv')
AA_data = pd.concat([AA_data,RF_data],sort=True)
global_average = False
else:
global_average = False
# See if there is an average active source profile for this slab
try:
AA_source = 'library/avprofiles/%s_av.txt' % slab
AA_data = pd.read_table(AA_source, delim_whitespace=True,\
header=None, names=['dist', 'depth'])
# If there is no profile for this slab, use the global profile
except:
AA_global = pd.read_csv('library/avprofiles/global_as_av2.csv')
AA_data = AA_global[['dist', 'depth']]
global_average = True
if slab == 'phi' or slab == 'mue':
AA_data = AA_data[AA_data.dist < 10]
if slab == 'cot':
AA_data = AA_data[AA_data.dist < 10]
if slab == 'ita' or slab == 'puy':
AA_data = AA_data[AA_data.dist < 1]
''' ------ (8) Define reference model (Slab1.0 and/or slab guides) ------'''
polyname = slab
if slab == 'kur' or slab == 'izu':
polyname = 'jap'
# Search for slab guides in library/slabguides
slabguide = None
slabguide2 = None
for SGfile in os.listdir('library/slabguides'):
if SGfile[0:3] == polyname:
SGfile1 = SGfile
slabguide = gmt.GMTGrid.load('library/slabguides/%s'%SGfile1)
# Find secondary slab guide for regions where there are two
if polyname == 'sum' or polyname == 'man' or polyname == 'phi' or polyname =='sam' or polyname == 'sco' or polyname == 'mak' or polyname == 'jap':
for f in os.listdir('library/slabguides'):
if f[0:3] == polyname and f != SGfile:
print ('f',f)
SGfile2 = f
slabguide2 = gmt.GMTGrid.load('library/slabguides/%s'%SGfile2)
break
break
# Get Slab1.0 grid where applicable
try:
depgrid = s2f.get_grid(slab, 'depth')
except:
print (' Slab1.0 does not exist in this region, using slab guide')
depgrid = gmt.GMTGrid.load('library/slabguides/%s'%SGfile1)
slabguide = None
# Calculate strike and dip grids
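# Conceptually (a sketch, not the exact implementation inside s2f.mkSDgrd):
#   dzdx, dzdy = np.gradient(depth_grid)                      # depth slope components
#   dip        = np.degrees(np.arctan(np.hypot(dzdx, dzdy)))  # steepest-descent angle
#   strike     = azimuth of the depth contours, 90 degrees from the max-gradient direction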
strgrid, dipgrid = s2f.mkSDgrd(depgrid)
slab1data = s2f.mkSlabData(depgrid, strgrid, dipgrid, printtest)
slab1data.to_csv('gradtest.csv',header=True,index=False)
# Add slab guide data to Slab1.0 grids where necessary
if slabguide is not None:
print ('slab guide for this model:',slabguide)
guidestr, guidedip = s2f.mkSDgrd(slabguide)
guidedata = s2f.mkSlabData(slabguide, guidestr, guidedip, printtest)
if SGfile1 == 'phi_SG_north':
guidedata = guidedata[guidedata.lat>14]
elif slab == 'ryu':
guidedata = guidedata[guidedata.lon>137]
slab1data = slab1data[slab1data.lat<=137]
slab1data = pd.concat([slab1data, guidedata],sort=True)
slab1data = slab1data.reset_index(drop=True)
if slabguide2 is not None:
print ('secondary slab guide for this model:',slabguide2)
guidestr, guidedip = s2f.mkSDgrd(slabguide2)
guidedata = s2f.mkSlabData(slabguide2, guidestr, guidedip, printtest)
if SGfile2 == 'phi_SG_north':
guidedata = guidedata[guidedata.lat>14]
slab1data = pd.concat([slab1data, guidedata],sort=True)
slab1data = slab1data.reset_index(drop=True)
#slab1data.to_csv('slab1data.csv',header=True,index=False)
''' ------ (9) Define Trench Locations ------'''
TR_data = pd.read_csv(trenches)
if slab == 'izu' or slab == 'kur':
TR_data = TR_data[TR_data.slab == 'jap']
else:
TR_data = TR_data[TR_data.slab == slab]
TR_data = TR_data.reset_index(drop=True)
TR_data.loc[TR_data.lon < 0, 'lon']+=360
''' ------ (10) Open and modify input dataset ------'''
eventlistALL = pd.read_table('%s' % inFile, sep=',', dtype={
'lon': np.float64, 'lat': np.float64,'depth': np.float64,
'unc': np.float64, 'etype': str, 'ID': np.int, 'mag': np.float64,
'S1': np.float64, 'D1': np.float64, 'R1': np.float64,
'S2': np.float64, 'D2': np.float64, 'R2': np.float64,
'src': str, 'time': str, 'mlon': np.float64, 'mlat': np.float64,
'mdep': np.float64})
ogcolumns = ['lat', 'lon', 'depth', 'unc', 'etype', 'ID', 'mag', 'time', \
'S1', 'D1', 'R1','S2', 'D2', 'R2', 'src']
kagancols = ['lat', 'lon', 'depth', 'unc', 'etype', 'ID', 'mag', 'time', \
'S1', 'D1', 'R1','S2', 'D2', 'R2', 'src', 'mlon', 'mlat', 'mdep']
eventlist = eventlistALL[kagancols]
if printtest:
lat65 = eventlist[eventlist.lat>65]
if len(lat65)>0:
s2f.addToDataInfo(lat65, 0, 'eventlist = eventlist[eventlist.lat <= 65]', datainfo,'df')
dataGP = eventlist[eventlist.etype == 'GP']
if len(dataGP) > 0:
s2f.addToDataInfo(dataGP, 0, 'eventlist = eventlist[eventlist.etype != GP]', datainfo,'df')
eventlist = eventlist[eventlist.lat <= 65]
eventlist = eventlist[eventlist.etype != 'GP']
maxID = eventlistALL['ID'].max()
# Add/Remove manually identified points that don't follow general rules
remdir = 'library/points_to_remove/current_files'
for badFile in os.listdir(remdir):
if badFile[0:3] == slab or badFile[0:3] == 'ALL' or ((slab == 'izu' or slab == 'kur') and badFile[0:3] == 'jap'):
print (' manually removing points listed in:',badFile)
donotuse = pd.read_csv('%s/%s'%(remdir,badFile))
eventlist = s2f.removePoints(donotuse, eventlist, lonmin,
lonmax, latmin, latmax, printtest, datainfo, True, slab)
doubleuse = pd.read_csv(addFile)
eventlist, maxID = s2f.doublePoints(doubleuse, eventlist, maxID)
if slab == 'kur':
eventlist.loc[eventlist.etype == 'TO', 'unc'] = 100
if slab == 'sul' or slab == 'man':
eventlist = eventlist[eventlist.etype != 'CP']
if slab == 'him':
eventlist = eventlist[eventlist.src != 'schulte']
if slab == 'sumz' or slab == 'kur' or slab == 'jap' or slab == 'izu':
if printtest:
lat65 = eventlist[eventlist.etype=='TO']
if len(lat65)>0:
s2f.addToDataInfo(lat65, 0, 'eventlist = eventlist[eventlist.etype != TO]', datainfo,'df')
eventlist = eventlist[eventlist.etype != 'TO']
if slab == 'kurz':
if printtest:
lat65 = eventlist[eventlist.etype=='ER']
if len(lat65)>0:
s2f.addToDataInfo(lat65, 0, 'eventlist = eventlist[eventlist.etype != ER]', datainfo,'df')
eventlist = eventlist[eventlist.etype != 'ER']
if slab == 'sol':
if printtest:
lat65 = eventlist[(eventlist.etype == 'BA') & (eventlist.lon <= 149)]
if len(lat65)>0:
s2f.addToDataInfo(lat65, 0, 'eventlist[(eventlist.etype != BA) | (eventlist.lon > 149)]', datainfo,'df')
eventlist = eventlist[(eventlist.etype != 'BA') | (eventlist.lon > 149)]
TR_data = TR_data[TR_data.lon>149]
if slab == 'man':
if printtest:
lat65 = eventlist[(eventlist.etype == 'BA') & (eventlist.lon >= 120)]
if len(lat65)>0:
s2f.addToDataInfo(lat65, 0, 'eventlist[(eventlist.etype == BA) & (eventlist.lon >= 120)]', datainfo,'df')
eventlist = eventlist[(eventlist.etype != 'BA') | ((eventlist.lon < 120)|(eventlist.lat > 15))]
if slab == 'sum':
if printtest:
lat65 = eventlist[(eventlist.etype == 'BA') & (eventlist.lat > 21)]
if len(lat65)>0:
s2f.addToDataInfo(lat65, 0, 'eventlist[(eventlist.etype != BA) | (eventlist.lon > 149)]', datainfo,'df')
eventlist = eventlist[(eventlist.etype != 'BA') | (eventlist.lat <= 21)]
if slab == 'ryu':
ryutodata = eventlist[(eventlist.etype == 'TO')&(eventlist.lon>133)]
if slab == 'hel':
eventlist.loc[eventlist.etype == 'RF', 'etype'] = 'CP'
if slab == 'puyz' or slab == 'mak':
eventlist = eventlist[eventlist.src != 'ddgap']
# Set default uncertainties for events without uncertainties
eventlist.loc[eventlist.etype == 'EQ', 'unc'] = 15.0
eventlist.loc[eventlist.etype == 'CP', 'unc'] = 5.0
eventlist.loc[eventlist.etype == 'BA', 'unc'] = 1.0
eventlist.loc[eventlist.etype == 'TO', 'unc'] = 40.0
eventlist.loc[(eventlist.etype == 'ER') & (eventlist.unc <5), 'unc'] = 5.0
if slab == 'puy':
eventlist.loc[(eventlist.etype == 'ER') & (eventlist.unc <15), 'unc'] = 15.0
eventlist.loc[eventlist.mlon < 0, 'mlon'] += 360
# Ensure all data are within longitudes 0-360
eventlist.loc[eventlist.lon < 0, 'lon']+=360
# Define mean depth of bathymetry (for constraining interp outboard trench)
meanBAlist = eventlist[eventlist.etype == 'BA']
meanBA = meanBAlist['depth'].mean()
del eventlistALL
''' ----- (11) Calculate seismogenic zone thickness ------ '''
# define seismogenic thickness parameters. change if needed
maxdep = 65
maxdepdiff = 20
origorcentl = 'c'
origorcentd = 'c'
slaborev = 'e'
lengthlim = -50
ogcolumns = eventlist.columns
eventlist = s2f.getReferenceKagan(slab1data, eventlist, origorcentl, origorcentd)
if slab != 'hin':
seismo_thick, taper_start = s2f.getSZthickness(eventlist,folder,slab,maxdep,maxdepdiff,origorcentl,origorcentd,slaborev,savedir,lengthlim)
else:
seismo_thick = 20
taper_start = 20
if slab == 'hel' or slab == 'car' or slab == 'mak':
seismo_thick = 40
if slab == 'sol':
seismo_thick = 40
if slab == 'alu' or slab == 'cot' or slab == 'sul':
seismo_thick = 10
if slab == 'sol':
eventlistE = eventlist[eventlist.lon>148]
eventlistW = eventlist[eventlist.lon<=148]
eventlistE = s2f.cmtfilter(eventlistE,seismo_thick,printtest,datainfo,slab)
eventlist = pd.concat([eventlistE,eventlistW],sort=True)
if slab == 'sum':
eventlistS = eventlist[eventlist.lat<=22]
eventlistN = eventlist[eventlist.lat>22]
eventlistS = s2f.cmtfilter(eventlistS,seismo_thick,printtest,datainfo,slab)
eventlist = pd.concat([eventlistS,eventlistN],sort=True)
if slab != 'hal' and slab != 'him' and slab != 'pam' and slab != 'hin' and slab != 'sol' and slab != 'sum' and slab != 'cas':
eventlist = s2f.cmtfilter(eventlist,seismo_thick,printtest,datainfo,slab)
eventlist = eventlist[ogcolumns]
''' ------ (12) Record variable parameters used for this model ------'''
f = open(these_params, 'w+')
f.write('Parameters used to create file for slab_Date_time: %s_%s_%s \n' \
%(slab, date, time))
f.write('\n')
f.close()
f = open(these_params, 'a')
f.write('inFile: %s \n' % inFile)
f.write('use_box: %s \n' % use_box)
f.write('latmin: %s \n' % str(latmin))
f.write('latmax: %s \n' % str(latmax))
f.write('lonmin: %s \n' % str(lonmin))
f.write('lonmax: %s \n' % str(lonmax))
f.write('slab: %s \n' % slab)
f.write('grid: %s \n' % str(grid))
f.write('radius1: %s \n' % str(radius1))
f.write('radius2: %s \n' % str(radius2))
f.write('alen: %s \n' % str(alen))
f.write('blen: %s \n' % str(blen))
f.write('sdr: %s \n' % str(sdr))
f.write('ddr: %s \n' % str(ddr))
f.write('taper: %s \n' % str(taper))
f.write('T: %s \n' % str(T))
f.write('node: %s \n' % str(node))
f.write('filt: %s \n' % str(filt))
f.write('maxdist: %s \n' % str(maxdist))
f.write('mindip: %s \n' % str(mindip))
f.write('minstk: %s \n' % str(minstk))
f.write('maxthickness: %s \n' % str(maxthickness))
f.write('seismo_thick: %s \n' % str(seismo_thick))
f.write('dipthresh: %s \n' % str(dipthresh))
f.write('fracS: %s \n' % str(fracS))
f.write('knot_no: %s \n' % str(knot_no))
f.write('kdeg: %s \n' % str(kdeg))
f.write('rbfs: %s \n' % str(rbfs))
if slab == 'mue' or slab == 'phi' or slab == 'cot' or slab == 'sul' or slab == 'ryu':
f.write('undergrid: %s \n' % str(undergrid))
f.close()
''' ------ (13) Define search grid ------ '''
print(' Creating search grid...')
#Creates a grid over the slab region
regular_grid = s2f.create_grid_nodes3(grid, lonmin, lonmax, latmin, latmax)
grid_in_polygon = s2f.createGridInPolygon2(regular_grid, slab, polygonFile)
lons = grid_in_polygon[:, 0]
lats = grid_in_polygon[:, 1]
lons = np.round(lons,decimals=1)
lats = np.round(lats,decimals=1)
lons[lons <0] += 360
slab1guide,slab1query = s2f.makeReference(slab1data,lons,lats,grid,printtest,slab)
''' ------ (14) Identify tomography datasets ------ '''
## Identify how many tomography datasets are included
tomo_data = eventlist[eventlist.etype == 'TO']
if len(tomo_data) > 0 and slab != 'sam':
sources = tomo_data.src
TOsrc = set()
for x in sources:
TOsrc.add(x)
tomo_sets = TOsrc
tomo = True
else:
tomo_sets = 0
tomo = False
premulti = pd.DataFrame()
postmulti = pd.DataFrame()
OGmulti = pd.DataFrame()
elistAA = pd.DataFrame()
loncuts,latcuts,elistcuts = s2f.getlatloncutoffs(lons,lats,eventlist,printtest)
''' ------ (15) Initialize arrays for Section 2 ------ '''
# Creates list of events that were used for the model based on ID
used_all = np.zeros((1, 2))
used_TO = np.zeros((1, 2))
warnings.filterwarnings('ignore', 'Mean of empty slice.')
pd.options.mode.chained_assignment = None
'''Section 2: First loop
This Accomplishes:
1) Calculate error for each used tomography model.
This is accomplished by determining the difference between measured
depths for tomography and earthquake data, which will be used
outside of the loop.
2) Identify data to constrain depth/coordinate of center of Benioff Zone.
2a) Identify local strike, dip, and depth of Slab1.0.
If Slab 1.0 does not exist, acquire strike from closest trench
location with a strike oriented perpendicularly to this lon/lat.
If extending beyond Slab1.0 depths perpendicularly, find nearest and
most perpendicular point on Slab1.0, and define depth to
search from based on dip and depth of that point on Slab1.0. The
dip is defined as the dip of the local Slab1.0 point.
If extending along strike from Slab1.0, define depth to search from
based on mean depth of data within defined radius of node. The
dip of the node is defined as 0.
2b) Filter by ellipsoid oriented perpendicularly to Slab1.0.
If the local dip is less than mindip, orient ellipsoid vertically
and along strike found in (2a).
If the local dip is greater than mindip, orient ellipsoid
perpendicular to strike/dip found in (2a).
The long axis of the ellipse is defined as radius1, the short axis
is defined as radius2.
The shallow extent of the ellipsoid is defined as sdr at depths
above seismo_thick, and is tapered to 3*sdr at depths greater
than seismo_thick.
The deep extent of the ellipsoid is defined as sdr at depths above
seismo_thick, and is tapered to ddr at depths greater than
seismo_thick.
2c) Nodes outboard of the trench are only constrained by bathymetry.
Nodes inboard of the trench are constrained by all but bathymetry.
2d) Conditionally add average active source/average receiver functions.
If within the distance of the longest AS profile from the trench
identify the average AS profile depth at that distance from
trench. If there is no active source point within the search
ellipsoid defined in (2b), add an average active source data
point to the set of data to constrain the depth at this node.
Receiver functions in cam and alu are being utilized similarly with
defined distances from trench and distances along strike from
key profiles that need to be utilized in the absence of
seismicity.
2e) If information other than tomography is available above 300 km
depth, all tomography is filtered at that node.
2f) If less than two data points are available to constrain a node, no
depth is resolved at that node.
2g) If |strike of Slab1.0 at node - strike of Slab1.0 at farthest data|
> minstrk, filter data at ends until < minstrk.
If this node is outside of Slab1.0, reduce long axis of search
ellipsoid prior to starting filters.
The output of this loop is two numpy arrays and a list of nodes with data:
used_TO: local difference between tomography and earthquake depths and
a tomography dataset identifier
used_all: indices for the data used and their associated nodes
This one is created to prevent the need for re-filtering
in later loops
'''
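# Conceptually, the ellipsoid filter described in (2b) keeps an event whose
# along-strike / down-dip offsets (dx, dy, in km) from a node satisfy the ellipse
# test below (a sketch; the actual filtering happens inside loops.loop1):
#   inside = (dx / alen) ** 2 + (dy / blen) ** 2 <= 1.0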
print("Start Section 2 of 7: First loop")
lons1 = (np.ones(len(lons))*-9999).astype(np.float64)
lats1 = (np.ones(len(lons))*-9999).astype(np.float64)
deps1 = (np.ones(len(lons))*-9999).astype(np.float64)
strs1 = (np.ones(len(lons))*-9999).astype(np.float64)
dips1 = (np.ones(len(lons))*-9999).astype(np.float64)
nIDs1 = (np.ones(len(lons))*-9999).astype(np.float64)
aleng = (np.ones(len(lons))*-9999).astype(np.float64)
bleng = (np.ones(len(lons))*-9999).astype(np.float64)
cleng = (np.ones(len(lons))*-9999).astype(np.float64)
sleng = (np.ones(len(lons))*-9999).astype(np.float64)
dleng = (np.ones(len(lons))*-9999).astype(np.float64)
elons1 = (np.ones(len(lons))*-9999).astype(np.float64)
elats1 = (np.ones(len(lons))*-9999).astype(np.float64)
edeps1 = (np.ones(len(lons))*-9999).astype(np.float64)
estrs1 = (np.ones(len(lons))*-9999).astype(np.float64)
edips1 = (np.ones(len(lons))*-9999).astype(np.float64)
enIDs1 = (np.ones(len(lons))*-9999).astype(np.float64)
ealeng = (np.ones(len(lons))*-9999).astype(np.float64)
ebleng = (np.ones(len(lons))*-9999).astype(np.float64)
ecleng = (np.ones(len(lons))*-9999).astype(np.float64)
esleng = (np.ones(len(lons))*-9999).astype(np.float64)
edleng = (np.ones(len(lons))*-9999).astype(np.float64)
if args.nCores is not None:
if args.nCores > 1 and args.nCores < 8:
pooling = True
elif args.nCores == 1:
pooling = False
else:
pooling = False
else:
pooling = False
cutcount = 1
allnewnodes = None
for cut in range(len(loncuts)):
theselats = latcuts[cut]
theselons = loncuts[cut]
theseevents = elistcuts[cut]
indices = range(len(theselats))
if cut == 0:
i2 = 0
cutcount+=1
if pooling:
pool1 = Pool(args.nCores)
partial_loop1 = partial(loops.loop1, theselons, theselats, testarea, slab,
depgrid, strgrid, dipgrid, slab1query, theseevents,
seismo_thick, alen, blen, mdist, sdr, ddr, mindip, maxID,
AA_data, TR_data, maxdist, maxthickness, minstk,
tomo_sets, meanBA,slab1guide,grid,slab1data,dipthresh,datainfo,nodeinfo)
pts = pool1.map(partial_loop1, indices) #$$#
pool1.close()
pool1.join()
for i in range(len(indices)):
thisnode = pts[i]
if thisnode[13]:
lons1[i2] = thisnode[0]
lats1[i2] = thisnode[1]
deps1[i2] = thisnode[2]
strs1[i2] = thisnode[3]
dips1[i2] = thisnode[4]
nIDs1[i2] = thisnode[5]
aleng[i2] = thisnode[6]
bleng[i2] = thisnode[7]
cleng[i2] = thisnode[8]
sleng[i2] = thisnode[14]
dleng[i2] = thisnode[15]
nused_TO = thisnode[9]
if len(nused_TO) > 0:
if used_TO is not None:
used_TO = np.vstack((used_TO, nused_TO))
nused_all = thisnode[10]
if len(nused_all) > 0:
if used_all is not None:
used_all = np.vstack((used_all, nused_all))
AAadd = thisnode[11]
if len(AAadd)>0:
if AAadd['unc'].mean() > 5:
AAadd['etype'] = 'RF'
elistAA = pd.concat([elistAA, AAadd],sort=True)
newnodes = thisnode[12]
if len(newnodes)>0:
if allnewnodes is not None:
allnewnodes = np.vstack((allnewnodes,newnodes))
else:
allnewnodes = newnodes
if not thisnode[13] and np.isfinite(thisnode[2]):
elons1[i2] = thisnode[0]
elats1[i2] = thisnode[1]
edeps1[i2] = thisnode[2]
estrs1[i2] = thisnode[3]
edips1[i2] = thisnode[4]
enIDs1[i2] = thisnode[5]
ealeng[i2] = thisnode[6]
ebleng[i2] = thisnode[7]
ecleng[i2] = thisnode[8]
esleng[i2] = thisnode[14]
edleng[i2] = thisnode[15]
i2 += 1
else:
for nodeno in range(len(theselons)):
alon, alat, alocdep, alocstr, alocdip, anID, aaleng, ableng, acleng, aused_TO, aused_tmp, atrimmedAA, newnodes, anydata, asleng, adleng = loops.loop1(theselons, theselats, testarea, slab, depgrid, strgrid, dipgrid, slab1query, theseevents, seismo_thick, alen, blen, mdist, sdr, ddr, mindip, maxID, AA_data, TR_data, maxdist, maxthickness, minstk, tomo_sets, meanBA, slab1guide, grid, slab1data, dipthresh, datainfo, nodeinfo, nodeno)
if anydata:
lons1[i2] = alon
lats1[i2] = alat
deps1[i2] = alocdep
strs1[i2] = alocstr
dips1[i2] = alocdip
nIDs1[i2] = anID
aleng[i2] = aaleng
bleng[i2] = ableng
cleng[i2] = acleng
sleng[i2] = asleng
dleng[i2] = adleng
nused_TO = aused_TO
if len(nused_TO) > 0:
if used_TO is not None:
used_TO = np.vstack((used_TO, nused_TO))
nused_all = aused_tmp
if len(nused_all) > 0:
if used_all is not None:
used_all = np.vstack((used_all, nused_all))
AAadd = atrimmedAA
if len(AAadd)>0:
if AAadd['unc'].mean() > 5:
AAadd['etype'] = 'RF'
elistAA = pd.concat([elistAA, AAadd],sort=True)
if len(newnodes)>0:
if allnewnodes is not None:
allnewnodes = np.vstack((allnewnodes,newnodes))
else:
allnewnodes = newnodes
if not anydata and np.isfinite(alocdep):
elons1[i2] = alon
elats1[i2] = alat
edeps1[i2] = alocdep
estrs1[i2] = alocstr
edips1[i2] = alocdip
enIDs1[i2] = anID
ealeng[i2] = aaleng
ebleng[i2] = ableng
ecleng[i2] = acleng
esleng[i2] = asleng
edleng[i2] = adleng
i2 += 1
lons1 = lons1[lons1>-999]
lats1 = lats1[lats1>-999]
deps1 = deps1[(deps1>-999)|np.isnan(deps1)]
strs1 = strs1[strs1>-999]
dips1 = dips1[dips1>-999]
nIDs1 = nIDs1[nIDs1>-999]
aleng = aleng[aleng>-999]
bleng = bleng[bleng>-999]
cleng = cleng[cleng>-999]
sleng = sleng[sleng>-999]
dleng = dleng[dleng>-999]
elons1 = elons1[edleng>-999]
elats1 = elats1[edleng>-999]
edeps1 = edeps1[(edeps1>-999)|np.isnan(edeps1)]
estrs1 = estrs1[edleng>-999]
edips1 = edips1[edleng>-999]
enIDs1 = enIDs1[edleng>-999]
ealeng = ealeng[edleng>-999]
ebleng = ebleng[edleng>-999]
ecleng = ecleng[edleng>-999]
esleng = esleng[edleng>-999]
edleng = edleng[edleng>-999]
testdf = pd.DataFrame({'lon':lons1,'lat':lats1,'depth':deps1,'strike':strs1,'dip':dips1,'id':nIDs1,'alen':aleng,'blen':bleng,'clen':cleng,'slen':sleng,'dlen':dleng})
testdf.to_csv('firstloop.csv',header=True,index=False,na_rep=np.nan)
if allnewnodes is not None:
theseIDs = []
for i in range(len(allnewnodes)):
if allnewnodes[i,1]>0:
thisnID = int('%i%i'%(allnewnodes[i,0]*10,allnewnodes[i,1]*10))
else:
thisnID = int('%i0%i'%(allnewnodes[i,0]*10,allnewnodes[i,1]*-10))
theseIDs.append(thisnID)
newlonsdf1 = pd.DataFrame({'lon':allnewnodes[:,0],'lat':allnewnodes[:,1],'nID':theseIDs})
newlonsdf = newlonsdf1.drop_duplicates(['nID'])
theselons = newlonsdf['lon'].values
theselats = newlonsdf['lat'].values
if grid == 0.2:
grid2 = 0.1
elif grid == 0.1:
grid2 = 0.05
else:
grid2 = grid
slab1guide,slab1query = s2f.makeReference(slab1data,theselons,theselats,grid2,printtest,slab)
newlats = []
newlons = []
newdeps = []
newstrs = []
newdips = []
newnIDs = []
newalen = []
newblen = []
newclen = []
newslen = []
newdlen = []
enewlats = []
enewlons = []
enewdeps = []
enewstrs = []
enewdips = []
enewnIDs = []
enewalen = []
enewblen = []
enewclen = []
enewslen = []
enewdlen = []
if pooling:
indices = range(len(theselons))
pool1 = Pool(args.nCores)
partial_loop1 = partial(loops.loop1, theselons, theselats, testarea, slab,
depgrid, strgrid, dipgrid, slab1query, eventlist,
seismo_thick, alen, blen, mdist, sdr, ddr, mindip, maxID,
AA_data, TR_data, maxdist, maxthickness, minstk,
tomo_sets, meanBA,slab1guide,grid,slab1data,dipthresh,datainfo,nodeinfo)
pts = pool1.map(partial_loop1, indices)
pool1.close()
pool1.join()
for i in range(len(indices)):
thisnode = pts[i]
if thisnode[13]:
newlons.append(thisnode[0])
newlats.append(thisnode[1])
newdeps.append(thisnode[2])
newstrs.append(thisnode[3])
newdips.append(thisnode[4])
newnIDs.append(thisnode[5])
newalen.append(thisnode[6])
newblen.append(thisnode[7])
newclen.append(thisnode[8])
newslen.append(thisnode[14])
newdlen.append(thisnode[15])
nused_TO = thisnode[9]
if len(nused_TO) > 0:
if used_TO is not None:
used_TO = np.vstack((used_TO, nused_TO))
nused_all = thisnode[10]
if len(nused_all) > 0:
if used_all is not None:
used_all = np.vstack((used_all, nused_all))
AAadd = thisnode[11]
if len(AAadd)>0:
if AAadd['unc'].mean() > 5:
AAadd['etype'] = 'RF'
elistAA = | pd.concat([elistAA, AAadd],sort=True) | pandas.concat |
import numpy as np
import warnings
warnings.filterwarnings("ignore")
import talib as ta
import yfinance as yf
import matplotlib.pyplot as plt
from datetime import datetime
import matplotlib.dates as mdates
from yahooquery import Ticker
import pandas as pd
import streamlit as st
from src.tools import functions as f0
plt.style.use('seaborn-talk')
sm, med, lg = 10, 15, 20
plt.rc("font", size=sm) # controls default text sizes
plt.rc("axes", titlesize=med) # fontsize of the axes title
plt.rc("axes", labelsize=med) # fontsize of the x & y labels
plt.rc("xtick", labelsize=sm) # fontsize of the tick labels
plt.rc("ytick", labelsize=sm) # fontsize of the tick labels
plt.rc("legend", fontsize=sm) # legend fontsize
plt.rc("figure", titlesize=lg) # fontsize of the figure title
plt.rc("axes", linewidth=2) # linewidth of plot lines
plt.rcParams["figure.figsize"] = [15, 8]
plt.rcParams["figure.dpi"] = 100
class Indicator_Ike(object):
def __init__(self, ticker, date1=str(datetime.now())[:10], cc=0.0, ccc=0.0, graphit=True):
self.stock = ticker
self.date1 = date1
self.cc = cc
self.ccc = ccc
self.graphit = graphit
def get_data(self):
data = yf.download(self.stock, period='1y')
self.data = data.loc[:self.date1]
return self.data
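# bollinger_bands() below flags a long regime while Close sits above the 20-period
# middle band: Signal is 1.0/0.0, and Position = Signal.diff() is +1 on the upward
# crossover (plotted and reported as "Buy") and -1 on the downward crossover ("Sell").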
def bollinger_bands(self, df):
df['upper_band'], df['middle_band'], df['lower_band'] = ta.BBANDS(df['Close'], timeperiod=20)
df["Signal"] = 0.0
df["Signal"] = np.where(df['Close'] > df['middle_band'], 1.0, 0.0)
df["Position"] = df["Signal"].diff()
df_pos = df[(df["Position"] == 1) | (df["Position"] == -1)]
df_pos["Position"] = df_pos["Position"].apply(lambda x: "Buy" if x == 1 else "Sell")
if self.graphit is True:
fig, ax = plt.subplots()
plt.tick_params(axis="both", labelsize=15)
df['Close'].plot(color="k", lw=2, label='Close')
df['upper_band'].plot(color="g", lw=1, label='upper_band', linestyle='dashed')
df['middle_band'].plot(color="r", lw=1, label='middle_band')
df['lower_band'].plot(color="b", lw=1, label='lower_band', linestyle='dashed')
# plot 'buy' signals
plt.plot(
df[df["Position"] == 1].index,
df['Close'][df["Position"] == 1],
"^", markersize=15, color="g", alpha=0.7, label="buy",
)
# plot 'sell' signals
plt.plot(
df[df["Position"] == -1].index,
df['Close'][df["Position"] == -1],
"v", markersize=15, color="r", alpha=0.7, label="sell",
)
plt.ylabel("Price", fontsize=20, fontweight="bold")
plt.xlabel("Date", fontsize=20, fontweight="bold")
plt.grid(True, color="k", linestyle="-", linewidth=1, alpha=0.3)
ax.legend(loc="best", prop={"size": 16})
plt.tight_layout()
plt.show()
st.pyplot(fig)
try:
if df_pos['Position'][-1] == 'Buy':
st.metric(f"No. {self.cc} / {self.ccc} In Portfolio", f"{self.stock}", f"{df_pos['Position'][-1]}");
return
elif df_pos['Position'][-1] == 'Sell':
st.metric(f"No. {self.cc} / {self.ccc} In Portfolio", f"{self.stock}", f"- {df_pos['Position'][-1]}")
except Exception:
pass
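# macd() below applies the same crossover idea to MACD(12, 26, 9): Signal is 1.0 while
# the MACD line is above its 9-period signal line, so Position = Signal.diff() marks
# bullish (+1, "Buy") and bearish (-1, "Sell") signal-line crossovers.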
def macd(self, data):
data['macd'], data['macdsignal'], data['macdhist'] = ta.MACD(data['Close'], fastperiod=12, slowperiod=26, signalperiod=9)
stock_df = pd.DataFrame(data)
stock_df["Signal"] = 0.0
stock_df["Signal"] = np.where(stock_df['macd'] > stock_df['macdsignal'], 1.0, 0.0)
stock_df["Position"] = stock_df["Signal"].diff()
df_pos = stock_df[(stock_df["Position"] == 1) | (stock_df["Position"] == -1)]
df_pos["Position"] = df_pos["Position"].apply(lambda x: "Buy" if x == 1 else "Sell")
stock_df.dropna(inplace=True)
if self.graphit is True:
fig, ax = plt.subplots()
# plot close price, short-term and long-term moving averages
plt.tick_params(axis="both", labelsize=15)
stock_df["macdhist"].plot(color="r", lw=1.5, label="macdhist")
stock_df['macd'].plot(color="b", lw=2, label='macd')
stock_df['macdsignal'].plot(color="g", lw=2, label='macdsignal')
# plot 'buy' signals
plt.plot(
stock_df[stock_df["Position"] == 1].index,
stock_df['macd'][stock_df["Position"] == 1],
"^", markersize=15, color="g", alpha=0.7, label="buy",
)
# plot 'sell' signals
plt.plot(
stock_df[stock_df["Position"] == -1].index,
stock_df['macd'][stock_df["Position"] == -1],
"v", markersize=15, color="r", alpha=0.7, label="sell",
)
plt.ylabel("MACD", fontsize=20, fontweight="bold")
plt.xlabel("Date", fontsize=20, fontweight="bold")
plt.grid(True, color="k", linestyle="-", linewidth=1, alpha=0.3)
ax.legend(loc="best", prop={"size": 16})
plt.tight_layout()
plt.show()
st.pyplot(fig)
try:
if df_pos['Position'][-1] == 'Buy':
st.metric(f"No. {self.cc} / {self.ccc} In Portfolio", f"{self.stock}", f"{df_pos['Position'][-1]}");
return
elif df_pos['Position'][-1] == 'Sell':
st.metric(f"No. {self.cc} / {self.ccc} In Portfolio", f"{self.stock}", f"- {df_pos['Position'][-1]}")
except Exception:
pass
act_lst = []
for i in stock_df['Position']:
if i == 1.0:
act_lst.append('Buy')
elif i == -1.0:
act_lst.append('Sell')
else:
act_lst.append('')
stock_df['action'] = act_lst
del stock_df['Open']
del stock_df['High']
del stock_df['Low']
del stock_df['Adj Close']
del stock_df['Volume']
stock_df = stock_df[stock_df['action'] != ""]
def rsi(self, data):
df = | pd.DataFrame(data) | pandas.DataFrame |
"""
This module aims to standardize the training and evaluation procedure.
"""
import numpy as np
import pandas as pd
import xarray as xr
from os.path import join, exists
from os import listdir
from ninolearn.utils import print_header, small_print_header
from ninolearn.pathes import modeldir, processeddir
# evaluation decades
decades = [1963, 1972, 1982, 1992, 2002, 2012, 2018]
decades_elninolike = []
n_decades = len(decades)
# lead times for the evaluation
lead_times = [0, 3, 6, 9, 12, 15, 18, 21]
n_lead = len(lead_times)
decade_color = ['orange', 'violet', 'limegreen', 'darkgoldenrod', 'red', 'royalblue']
decade_name = ['1963-1971', '1972-1981', '1982-1991', '1992-2001', '2002-2011', '2012-2017']
def cross_training(model, pipeline, n_iter, **kwargs):
"""
Training the model on different training sets in which each time a period\
corresponding to a decade out of 1963-1971, 1972-1981, ..., 2012-last \
observed date is spared.
:param model: A model that follows the guidelines on how a model object\
should be set up.
:param pipeline: a function that takes lead time as argument and returns\
the corresponding feature, label, time and persistence.
:param save_dir: The prefix of the save directory.
:param **kwargs: Arguments that shall be passed to the .set_parameter() \
method of the provided model.
"""
for lead_time in lead_times:
X, y, timey = pipeline(lead_time, return_persistance=False)
print_header(f'Lead time: {lead_time} month')
for j in range(n_decades-1):
m = model(**kwargs)
dir_name = f"{m.hyperparameters['name']}_decade{decades[j]}_lead{lead_time}"
path = join(modeldir, dir_name)
n_files=0
if exists(path):
n_files = len(listdir(path))
if not exists(path) or n_files==0:
small_print_header(f'Test period: {decades[j]}-01-01 till {decades[j+1]-1}-12-01')
test_indeces = (timey>=f'{decades[j]}-01-01') & (timey<=f'{decades[j+1]-1}-12-01')
train_indeces = np.invert(test_indeces)
trainX, trainy, traintime = X[train_indeces,:], y[train_indeces], timey[train_indeces]
m.fit_RandomizedSearch(trainX, trainy, traintime, n_iter=n_iter)
m.save(location=modeldir, dir_name=dir_name)
else:
print(f'{dir_name} already exists')
del m
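# Example usage (illustrative sketch only; `MyModel` and `my_pipeline` are
# placeholders, not part of this module):
#
#   cross_training(MyModel, my_pipeline, n_iter=50, name='mymodel')
#
# where `my_pipeline(lead_time, return_persistance=False)` returns (X, y, timey)
# and the keyword arguments (here `name=...`) are forwarded to MyModel(**kwargs).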
def cross_hindcast(model, pipeline, model_name, **kwargs):
"""
Generate a hindcast from 1962 till today using the models which were
trained by the .cross_training() method.
:param model: The considered model.
:param pipeline: The data pipeline that was already used in \
.cross_training().
"""
first_lead_loop = True
for i in range(n_lead):
lead_time = lead_times[i]
print_header(f'Lead time: {lead_time} months')
X, y, timey, y_persistance = pipeline(lead_time, return_persistance=True)
ytrue = np.array([])
timeytrue = pd.DatetimeIndex([])
first_dec_loop = True
for j in range(n_decades-1):
small_print_header(f'Predict: {decades[j]}-01-01 till {decades[j+1]-1}-12-01')
# test indices
test_indeces = (timey>=f'{decades[j]}-01-01') & (timey<=f'{decades[j+1]-1}-12-01')
testX, testy, testtimey = X[test_indeces,:], y[test_indeces], timey[test_indeces]
m = model(**kwargs)
m.load(location=modeldir, dir_name=f'{model_name}_decade{decades[j]}_lead{lead_time}')
# allocate arrays and variables for which the model must be loaded
if first_dec_loop:
n_outputs = m.n_outputs
output_names = m.output_names
pred_full = np.zeros((n_outputs, 0))
first_dec_loop=False
# make prediction
pred = np.zeros((m.n_outputs, testX.shape[0]))
pred[:,:] = m.predict(testX)
# make the full time series
pred_full = np.append(pred_full, pred, axis=1)
ytrue = np.append(ytrue, testy)
timeytrue = timeytrue.append(testtimey)
del m
if timeytrue[0]!=pd.to_datetime('1963-01-01'):
expected_first_date = '1963-01-01'
got_first_date = timeytrue[0].isoformat()[:10]
raise Exception(f"The first predicted date for lead time {lead_time} \
is {got_first_date} but expected {expected_first_date}")
# allocate arrays and variables for which the full length of the time
# series must be known
if first_lead_loop:
n_time = len(timeytrue)
pred_save = np.zeros((n_outputs, n_time, n_lead))
first_lead_loop=False
pred_save[:,:,i] = pred_full
# Save data to a netcdf file
save_dict = {}
for i in range(n_outputs):
save_dict[output_names[i]] = (['target_season', 'lead'], pred_save[i,:,:])
ds = xr.Dataset(save_dict, coords={'target_season': timeytrue,
'lead': lead_times} )
ds.to_netcdf(join(processeddir, f'{model_name}_forecasts.nc'))
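# The hindcast file can be inspected with xarray, e.g. (a sketch; the variable
# names depend on the model's `output_names`):
#   fc = xr.open_dataset(join(processeddir, f'{model_name}_forecasts.nc'))
#   fc['mean'].sel(lead=3)   # 'mean' is an assumed output name for a DEM-style model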
def cross_hindcast_dem(model, pipeline, model_name):
"""
Generate a hindcast from 1962 till today using the models which were
trained by the .cross_training() method. ONLY works for the DEM.
This routine returns a std estimate that is based only on the correlation
skill of the DEM predicted mean.
:param model: The considered model.
:param pipeline: The data pipeline that was already used in \
.cross_training().
"""
#cross_hindcast(model, pipeline, model_name)
std_estimate = xr.open_dataarray(join(processeddir, f'{model_name}_std_estimate.nc'))
first_lead_loop = True
for i in range(n_lead):
lead_time = lead_times[i]
print_header(f'Lead time: {lead_time} months')
X, y, timey, y_persistance = pipeline(lead_time, return_persistance=True)
ytrue = np.array([])
timeytrue = pd.DatetimeIndex([])
first_dec_loop = True
for j in range(n_decades-1):
small_print_header(f'Predict: {decades[j]}-01-01 till {decades[j+1]-1}-12-01')
# test indices
test_indeces = (timey>=f'{decades[j]}-01-01') & (timey<=f'{decades[j+1]-1}-12-01')
testX, testy, testtimey = X[test_indeces,:], y[test_indeces], timey[test_indeces]
m = model()
m.load(location=modeldir, dir_name=f'{model_name}_decade{decades[j]}_lead{lead_time}')
# allocate arrays and variables for which the model must be loaded
if first_dec_loop:
n_outputs = m.n_outputs
output_names = m.output_names
pred_full = np.zeros((n_outputs+1, 0))
first_dec_loop=False
# make prediction
pred = np.zeros((m.n_outputs+1, testX.shape[0]))
pred[:2,:] = m.predict(testX)
for k in range(len(testtimey)):
month = testtimey[k].date().month
pred[-1, k] = std_estimate[i, month-1]
# make the full time series
pred_full = np.append(pred_full, pred, axis=1)
ytrue = np.append(ytrue, testy)
timeytrue = timeytrue.append(testtimey)
del m
if timeytrue[0]!= | pd.to_datetime('1963-01-01') | pandas.to_datetime |
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import itertools
from datetime import datetime
import numpy as np
import sklearn.mixture as mix
from matplotlib.dates import YearLocator, MonthLocator
import warnings
from pylab import rcParams
from matplotlib.pyplot import cm
rcParams['figure.figsize'] = [10, 5]
sns.set()
warnings.filterwarnings("ignore")
paths = ["/Users/julienraffaud/Desktop/Data/DEXJPUS.csv",
"/Users/julienraffaud/Desktop/Data/DGS10.csv",
"/Users/julienraffaud/Desktop/Data/NIKKEI225.csv"]
# Importing, collating and formatting our three time series:
# USD/JPY, 10Y constant maturity JGB yield & Nikkei index.
dataframes = []
for file in paths:
df = pd.read_csv(file)
df["DATE"] = df["DATE"].astype(datetime)
df = df[df[df.columns[1]]!= "."]
df[df.columns[1]] = df[df.columns[1]].astype(float)
df = df.set_index("DATE")
dataframes.append(df)
# Formatting the final dataframe of the time series.
time_series = pd.concat(dataframes, axis=1, sort=False).dropna()
time_series.columns = ["USD/JPY","JGB","NIKKEI"]
time_series.index = | pd.to_datetime(time_series.index) | pandas.to_datetime |
import json
import time
import uuid
import numpy as np
import pandas as pd
from great_expectations.dataset import PandasDataset
from feast import (
Client,
Entity,
Feature,
FeatureTable,
FileSource,
KafkaSource,
ValueType,
)
from feast.contrib.validation.ge import apply_validation, create_validation_udf
from feast.data_format import AvroFormat, ParquetFormat
from feast.pyspark.abc import SparkJobStatus
from feast.wait import wait_retry_backoff
from tests.e2e.utils.kafka import check_consumer_exist, ingest_and_retrieve
def generate_train_data():
df = | pd.DataFrame(columns=["key", "num", "set", "event_timestamp"]) | pandas.DataFrame |
#!/usr/bin/env python3
import sys
import argparse
import seaborn
from evalys import *
from evalys.jobset import *
from evalys.mstates import *
from evalys.pstates import *
from evalys.visu.legacy import *
import pandas as pd
import matplotlib.pyplot as plt
def main():
# Argument parsing
parser = argparse.ArgumentParser(description='Draws the states the machines are in over time')
parser.add_argument('--mstatesCSV', '-m', nargs='+',
help='The name of the CSV file which contains pstate information')
parser.add_argument('--jobsCSV', '-j', nargs='+',
help='The name of the CSV file which contains jobs information')
parser.add_argument('--pstatesCSV', '-p', nargs='+',
help='The name of the CSV file which contains pstate information')
parser.add_argument('--energyCSV', '-e', nargs='+',
help='The name of the CSV file which contains energy consumption information')
parser.add_argument('--llhCSV', '-l', nargs='+',
help='The name of the CSV file which contains LLH information')
parser.add_argument('--llh-bound',
type=float,
help='If set, draws an LLH horizontal line at this bound')
parser.add_argument('--priority-job-waiting-time-bound',
type=float,
help='If set, draws a horizontal line corresponding to this bound')
parser.add_argument('--time-window', nargs='+',
type=float,
help="If set, limits the time window of study. Example: 0 4200")
parser.add_argument('--force-right-adjust',
type=float,
help='If set, forces the right adjustment of the plot.')
parser.add_argument('--off', nargs='+',
help='The power states which correspond to OFF machine states')
parser.add_argument('--switchon', nargs='+',
help='The power states which correspond to a switching ON machine state')
parser.add_argument('--switchoff', nargs='+',
help='The power states which correspond to switching OFF machine state')
parser.add_argument('--names', nargs='+',
default=['Unnamed'],
help='When multiple instances must be plotted, their names must be given via this parameter.')
parser.add_argument('--output', '-o',
help='The output file (format depending on the given extension, pdf is RECOMMENDED). For example: figure.pdf')
parser.add_argument("--gantt", action='store_true',
help="If set, the gantt chart will be outputted. Requires jobs, pstates and probably machine values (--off, --switchon, --switchoff)")
parser.add_argument("--ru", action='store_true',
help="If set, the resource usage will be outputted. Requires machine states")
parser.add_argument("--power", action='store_true',
help="If set, the instantaneous power will be outputted. Requires energyCSV")
parser.add_argument("--energy", action='store_true',
help="If set, the cumulated energy consumption will be outputted. Requires energyCSV")
parser.add_argument('--llh', action='store_true',
help='If set, the LLH will be outputted. Requires llhCSV. Jobs are optional.')
parser.add_argument('--load-in-queue', action='store_true',
help='If set, the load in queue will be outputted. Requires llhCSV.')
parser.add_argument('--nb-jobs-in-queue', action='store_true',
help='If set, the number of jobs in queue will be outputted. Requires llhCSV.')
parser.add_argument('--priority-job-size', action='store_true',
help='If set, the size of the priority job will be outputted. Requires llhCSV.')
parser.add_argument('--priority-job-expected-waiting-time', action='store_true',
help='If set, the expected waiting time of the priority job will be outputted. Requires llhCSV.')
parser.add_argument('--priority-job-starting-expected-soon', action='store_true',
help='If set, whether the priority job is expected to start soon will be outputted. Requires llhCSV')
args = parser.parse_args()
###################
# Figure creation #
###################
nb_instances = None
nb_subplots = 0
left_adjust = 0.05
top_adjust = 0.95
bottom_adjust = 0.05
right_adjust = 0.95
if args.gantt:
assert(args.jobsCSV), "Jobs must be given to compute the gantt chart!"
nb_jobs_csv = len(args.jobsCSV)
if args.pstatesCSV:
nb_pstates_csv = len(args.pstatesCSV)
assert(nb_jobs_csv == nb_pstates_csv), "The number of jobs_csv ({}) should equal the number of pstates_csv ({})".format(nb_jobs_csv, nb_pstates_csv)
nb_gantt = nb_jobs_csv
nb_subplots += nb_gantt
nb_instances = nb_gantt
if args.ru:
assert(args.mstatesCSV), "Mstates must be given to compute the resource usage!"
right_adjust = min(right_adjust, 0.85)
nb_ru = len(args.mstatesCSV)
nb_subplots += nb_ru
if nb_instances is not None:
assert(nb_instances == nb_ru), 'Inconsistent number of instances (nb_ru={} but already got nb_instances={})'.format(nb_ru, nb_instances)
else:
nb_instances = nb_ru
if args.power:
assert(args.energyCSV), "EnergyCSV must be given to compute power!"
nb_subplots += 1
right_adjust = min(right_adjust, 0.85)
if args.energy:
assert(args.energyCSV), "EnergyCSV must be given to compute energy!"
nb_energy = 1
nb_subplots += nb_energy
right_adjust = min(right_adjust, 0.85)
if args.energyCSV:
nb_energy_csv = len(args.energyCSV)
if nb_instances is not None:
assert(nb_instances == nb_energy_csv), 'Inconsistent number of instances (nb_energy_csv={} but already got nb_instances={})'.format(nb_energy_csv, nb_instances)
else:
nb_instances = nb_energy_csv
if args.llh:
assert(args.llhCSV), "LLH_CSV must be given to compute llh!"
right_adjust = min(right_adjust, 0.85)
nb_subplots += 1
if args.load_in_queue:
assert(args.llhCSV), "LLH_CSV must be given to compute llh!"
nb_subplots += 1
if args.nb_jobs_in_queue:
assert(args.llhCSV), "LLH_CSV must be given to compute the queue!"
nb_subplots += 1
if args.priority_job_size:
assert(args.llhCSV), "LLH_CSV must be given to compute the priority job size!"
nb_subplots += 1
if args.priority_job_expected_waiting_time:
assert(args.llhCSV), "LLH_CSV must be given to compute the priority job size!"
nb_subplots += 1
if args.priority_job_starting_expected_soon:
assert(args.llhCSV), "LLH_CSV must be given to compute the priority job size!"
nb_subplots += 1
if args.llhCSV:
nb_llh_csv = len(args.llhCSV)
if nb_instances is not None:
assert(nb_instances == nb_llh_csv), 'Inconsistent number of instances (nb_llh_csv={} but already got nb_instances={})'.format(nb_llh_csv, nb_instances)
else:
nb_instances = nb_llh_csv
if nb_subplots == 0:
print('There is nothing to plot!')
sys.exit(0)
names = args.names
assert(nb_instances == len(names)), 'The number of names ({} in {}) should equal the number of instances ({})'.format(len(names), names, nb_instances)
if args.force_right_adjust:
right_adjust = args.force_right_adjust
fig, ax_list = plt.subplots(nb_subplots, sharex=True, sharey=False)
fig.subplots_adjust(bottom=bottom_adjust,
right=right_adjust,
top=top_adjust,
left=left_adjust)
#fig.tight_layout()
if nb_subplots < 2:
ax_list = [ax_list]
##########################################
# Create data structures from input args #
##########################################
time_min = None
time_max = None
if args.time_window:
time_min, time_max = [float(f) for f in args.time_window]
jobs = list()
if args.jobsCSV and (args.gantt or args.llhCSV):
for csv_filename in args.jobsCSV:
jobs.append(JobSet.from_csv(csv_filename))
pstates = list()
if args.pstatesCSV and args.gantt:
for csv_filename in args.pstatesCSV:
pstates.append(PowerStatesChanges(csv_filename))
machines = list()
if args.mstatesCSV and args.ru:
for csv_filename in args.mstatesCSV:
machines.append(MachineStatesChanges(csv_filename, time_min, time_max))
llh = list()
if args.llhCSV:
for csv_filename in args.llhCSV:
llh_data = pd.read_csv(csv_filename)
# Drop values outside the time window
if time_min is not None:
llh_data = llh_data.loc[llh_data['date'] >= time_min]
if time_max is not None:
llh_data = llh_data.loc[llh_data['date'] <= time_max]
llh.append(llh_data)
energy = list()
power = list()
if args.energyCSV:
for csv_filename in args.energyCSV:
energy_data = pd.read_csv(csv_filename)
# Drop values outside the time window
if time_min is not None:
energy_data = energy_data.loc[energy_data['time'] >= time_min]
if time_max is not None:
energy_data = energy_data.loc[energy_data['time'] <= time_max]
energy.append(energy_data)
if args.power:
df = energy_data.drop_duplicates(subset='time')
df = df.drop(['event_type', 'wattmin', 'epower'], axis=1)
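            # Instantaneous power is approximated as the finite difference of cumulative energy over time (dE/dt)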
diff = df.diff(1)
diff.rename(columns={'time':'time_diff', 'energy':'energy_diff'},
inplace=True)
joined = | pd.concat([df, diff], axis=1) | pandas.concat |
import json
import os
import csv
import socket
import pandas as pd
import numpy as np
import glob
import logging
from datetime import datetime, timedelta
from flask import flash, current_app
from flask_login import current_user
from pathlib import Path
from specter_importer import Specter
from pricing_engine.engine import (fx_rate,
price_ondate, fx_price_ondate, realtime_price,
historical_prices)
from pricing_engine.cryptocompare import multiple_price_grab
from warden_decorators import MWT, timing
from utils import load_config
from dateutil import parser
from parseNumbers import parseNumber
# Returns the current application path
def current_path():
application_path = os.path.dirname(os.path.abspath(__file__))
return (application_path)
# Returns the home path
def home_path():
home = str(Path.home())
return (home)
# ------------------------------------
# Address and Port checker - to check
# which services are running
def check_server(address, port, timeout=10):
# Create a TCP socket
s = socket.socket()
s.settimeout(timeout)
try:
s.connect((address, port))
return True
except socket.error:
return False
finally:
s.close()
# End Config Variables ------------------------------------------------
# Get all transactions of specific wallet by using alias
def get_specter_tx(wallet_alias, sort_by='time', idx=0, load=True, session=None):
df = pd.DataFrame()
wallet_list = current_app.specter.wallet_alias_list()
if wallet_alias not in wallet_list:
logging.error(f"Wallet {wallet_alias}: Wallet not in current_app")
return (df)
t = current_app.specter.refresh_txs(load=True)
df = df.append(pd.DataFrame(t))
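    # Note: DataFrame.append is deprecated in recent pandas; pd.concat([df, pd.DataFrame(t)]) is the forward-compatible equivalent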
logging.info(f"Wallet {wallet_alias} --- Finished txs")
# Sort df
if not df.empty:
df = df.sort_values(by=[sort_by], ascending=False)
return (df)
# This returns data to create the Warden Status Page
def warden_metadata():
from utils import pickle_it
meta = {}
meta['full_df'] = specter_df()
meta['wallet_list'] = current_app.specter.wallet_alias_list()
# Load pickle with previous checkpoint df
df_pkl = 'txs_pf.pkl'
data = pickle_it(action='load', filename=df_pkl)
if not isinstance(data, pd.DataFrame):
if data == 'file not found':
meta['df_old'] = None
else:
meta['df_old'] = data
# load difference / changes in addresses from file
ack_file = 'txs_diff.pkl'
data = pickle_it(action='load', filename=ack_file)
if data == 'file not found':
meta['ack_file'] = None
else:
meta['ack_file'] = data
meta['old_new_df_old'] = data['deleted']
meta['old_new_df_new'] = data['added']
return (meta)
# Transactions Engine --------------------------------------
class Trades():
def __init__(self):
self.id = None
self.user_id = "specter_user"
self.trade_inputon = None
self.trade_date = None
self.trade_currency = current_app.settings['PORTFOLIO']['base_fx']
self.trade_asset_ticker = None
self.trade_account = None
self.trade_quantity = None
self.trade_operation = None
self.trade_price = None
self.trade_fees = None
self.trade_notes = None
self.trade_reference_id = None
self.trade_blockchain_id = None
self.cash_value = None
def to_dict(self):
return (vars(self))
def specter_df(delete_files=False, sort_by='trade_date'):
from utils import pickle_it
df = pd.DataFrame()
try:
t = current_app.specter.refresh_txs(load=True)['txlist']
df = df.append(t)
except Exception as e:
print(e)
# Check if txs exists
return df
if df.empty:
return df
# Clean Date String
df['trade_date'] = pd.to_datetime(df['time'], unit='s')
# Add additional columns
if 'fee' not in df:
df['fee'] = 0
df['trade_blockchain_id'] = df['txid']
df['trade_account'] = df['wallet_alias']
df['trade_currency'] = current_app.settings['PORTFOLIO']['base_fx']
df['trade_asset_ticker'] = "BTC"
portfolio_divisor = current_app.settings['PORTFOLIO'].getfloat('divisor')
if portfolio_divisor is None:
portfolio_divisor = 1
df['amount'] = df['amount'].apply(parseNumber)
try:
df['amount'] = df['amount'] / portfolio_divisor
except TypeError:
pass
df['trade_quantity'] = df['amount']
df['trade_notes'] = 'Imported from Specter Wallet'
df['trade_reference_id'] = ""
def trade_operation(value):
        # Map Specter's transaction category ('receive'/'send') to a trade operation code ('B'/'S')
try:
if value.lower() == 'receive':
return ("B")
if value.lower() == 'send':
return ("S")
except Exception:
return ("")
df['trade_operation'] = df['category'].apply(trade_operation)
df['date_str'] = df['trade_date'].dt.strftime('%Y-%m-%d')
def btc_price(date_input):
get_date = datetime.strptime(date_input, "%Y-%m-%d")
# Create price object
try:
fx = fx_price_ondate('USD', current_app.fx['code'], get_date)
price = price_ondate("BTC", get_date)['close'] * fx
except Exception as e:
logging.error("Not Found. Error: " + str(e))
price = 0
return (price)
df['btc_price'] = df['date_str'].apply(btc_price)
df['trade_price'] = df['btc_price']
# For some reason Specter is returning fee = 1 for some transactions
# So the filter below clears all fees higher than 0.10 BTC which is
# probably too high :)
df.loc[df.fee > 0.10, 'fee'] = 0
df['fee'] = df['fee'].fillna(0)
df['trade_fees'] = df['fee'] * df['btc_price']
df['trade_multiplier'] = 0
df.loc[df.trade_operation == 'B', 'trade_multiplier'] = 1
df.loc[df.trade_operation == 'receive', 'trade_multiplier'] = 1
df.loc[df.trade_operation == 'S', 'trade_multiplier'] = -1
df.loc[df.trade_operation == 'send', 'trade_multiplier'] = -1
df['trade_quantity'] = df['trade_quantity'] * df['trade_multiplier']
df['amount'] = df['trade_quantity']
try:
df['cash_value'] = abs(df['trade_price']) * abs(df['trade_quantity']) * df[
'trade_multiplier']
except Exception:
df['cash_value'] = 0
df['loaded'] = False
# TEST LINE ------------- Make this a new transaction forced into df
tester = {
'trade_date': datetime.now(),
'trade_currency': 'USD',
'trade_fees': 0,
'trade_quantity': 1,
'trade_multiplier': 1,
'trade_price': 10000,
'trade_asset_ticker': 'BTC',
'trade_operation': 'B',
'checksum': (5 * (10**19)),
'txid': 'test',
'address': 'test_address',
'amount': 2,
'status': 'Test_line',
'trade_account': 'trezor',
'loaded': False,
'trade_blockchain_id': 'xxsxmssxkxsjsxkxsx'
}
# Comment / Uncomment code below for testing of including new transactions
# Remove last 2 transactions here
# df.drop(df.tail(2).index, inplace=True)
# add transaction above
# df = df.append(tester, ignore_index=True)
# END TEST LINE ----------------------------------------------------
# Files ----------------------------------
df_pkl = 'txs_pf.pkl'
old_df_file = 'old_df.pkl'
ack_file = 'txs_diff.pkl'
# -----------------------------------------
# Activity checkpoint will be created. Delete all old files.
if delete_files:
pickle_it(action='delete', filename=df_pkl)
pickle_it(action='delete', filename=old_df_file)
pickle_it(action='delete', filename=ack_file)
# save this latest df to a file
pickle_it(action='save', filename=df_pkl, data=df)
try:
# Loads the old df to check for activity
df_loaded = pickle_it(action='load', filename=old_df_file)
if not isinstance(df_loaded, pd.DataFrame):
if df_loaded == "file not found":
raise FileNotFoundError
df_loaded['loaded'] = True
# Find differences in old vs. new
df_check = pd.concat([df, df_loaded]).drop_duplicates(
subset='trade_blockchain_id', keep=False)
if not df_check.empty:
# Let's find which checksums are different and compile a list - save this list
# so it can be used on main page to highlight changes
df_old = df_check[df_check['loaded']]
df_new = df_check[~df_check['loaded']]
json_save = {
'changes_detected_on': datetime.now().strftime("%I:%M %p on %B %d, %Y"),
'deleted': df_old,
'added': df_new
}
# If activity is detected, don't delete the old df by saving new df over
save_files = False
else:
json_save = {
'changes_detected_on': None,
'deleted': None,
'added': None
}
save_files = True
# Save the dict above to be accessed later
pickle_it(action='save', filename=ack_file, data=json_save)
except FileNotFoundError:
# Files not found - let's save a new checkpoint
save_files = True
# Sort
df = df.sort_values(by=[sort_by], ascending=False)
if save_files:
pickle_it(action='save', filename=old_df_file, data=df)
return (df)
def find_fx(row, fx=None):
# row.name is the date being passed
# row['trade_currency'] is the base fx (the one where the trade was included)
# Create an instance of PriceData:
price = fx_price_ondate(
current_app.settings['PORTFOLIO']['base_fx'], row['trade_currency'], row.name)
return price
@ MWT(timeout=20)
def transactions_fx():
# Gets the transaction table and fills with fx information
    # Note that it uses the exchange rate on the date of each transaction
# Get all transactions from Specter and format
# SPECTER ============================================
df = specter_df()
if not df.empty:
df['trade_date'] = pd.to_datetime(df['trade_date'])
df = df.set_index('trade_date')
# Ignore times in df to merge - keep only dates
df.index = df.index.floor('d')
df.index.rename('date', inplace=True)
# SQL DATABASE ========================================
# Get all transactions from db and format
df_sql = pd.read_sql_table('trades', current_app.db.engine)
if not df_sql.empty:
df_sql = df_sql[(df_sql.user_id == current_user.username)]
# df = df[(df.trade_operation == "B") | (df.trade_operation == "S")]
df_sql['trade_date'] = pd.to_datetime(df_sql['trade_date'])
df_sql = df_sql.set_index('trade_date')
# Ignore times in df to merge - keep only dates
df_sql.index = df_sql.index.floor('d')
df_sql.index.rename('date', inplace=True)
# Merge both
df = df.append(df_sql, sort=False)
if df.empty:
logging.warning("Transactions_FX - No txs found")
return df
# The current fx needs no conversion, set to 1
df[fx_rate()['fx_rate']] = 1
# Need to get currencies into the df in order to normalize
# let's load a list of currencies needed and merge
list_of_fx = df.trade_currency.unique().tolist()
# loop through currency list
for currency in list_of_fx:
if currency == fx_rate()['fx_rate']:
continue
# Make a price request
df[currency] = df.apply(find_fx, axis=1)
# Now create a cash value in the preferred currency terms
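    # For each row, pick the fx rate from the column named after that row's trade currency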
df['fx'] = df.apply(lambda x: x[x['trade_currency']], axis=1)
df['cash_value_fx'] = df['cash_value'].astype(float) / df['fx'].astype(
float)
df['trade_fees_fx'] = df['trade_fees'].astype(float) / df['fx'].astype(
float)
df['trade_price_fx'] = df['trade_price'].astype(float) / df['fx'].astype(
float)
if 'trade_date' not in df.columns:
df['trade_date'] = pd.to_datetime(df.index)
return (df)
# UTILS -----------------------------------
# Better to use parseNumber most of the times...
# Function to clean CSV fields - leave only digits and .
def clean_float(text):
if isinstance(text, int):
return (float(text))
if isinstance(text, float):
return (text)
if text is None:
return (0)
acceptable = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "."]
str = ""
for char in text:
if char in acceptable:
str = str + char
if str == '':
return 0
str = float(str)
return (str)
def cleandate(text): # Function to clean Date fields
if text is None:
return (None)
text = str(text)
acceptable = [
"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", ".", "/", "-", ":",
" "
]
str_parse = ""
for char in text:
if char in acceptable:
char = '-' if (char == '.' or char == '/') else char
str_parse = str_parse + char
from dateutil import parser
str_parse = parser.parse(str_parse, dayfirst=True)
return (str_parse)
# PORTFOLIO UTILITIES
def positions():
# Method to create a user's position table
# Returns a df with the following information
# Ticker, name, quantity, small_pos
# THIS SHOULD CONTAIN THE STATIC FIELDS ONLY - no web requests
# It should be a light method to load quickly on the main page.
# Anything with web requests should be done on a separate function
# Get all transactions & group by ticker name and operation
df = transactions_fx()
if df.empty:
logging.warning("No Transactions Found")
return df
summary_table = df.groupby(['trade_asset_ticker', 'trade_operation'])[[
"trade_quantity", "cash_value_fx", "trade_fees_fx"
]].sum()
# Now let's create our main dataframe with information for each ticker
list_of_tickers = df['trade_asset_ticker'].unique().tolist()
main_df = pd.DataFrame({'trade_asset_ticker': list_of_tickers})
# Fill with positions, cash_values and fees
df_tmp = df.groupby(['trade_asset_ticker'])[[
"trade_quantity", "cash_value_fx", "trade_fees_fx"
]].sum()
main_df = | pd.merge(main_df, df_tmp, on='trade_asset_ticker') | pandas.merge |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.7.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # QA queries on new CDR_deid Row Suppression - ICD10/ICD9/SNOMED
#
# See [DC-852] and [DC-732] for more details.
import urllib
import pandas as pd
pd.options.display.max_rows = 120
# + tags=["parameters"]
project_id = ""
deid_cdr=""
# -
# df will hold a summary of the results at the end
df = pd.DataFrame(columns = ['query', 'result'])
# # 1 PRC_1 Verify that all ICD9 (764-779) / ICD10 (P) concept_codes used to specify other conditions originating in the perinatal period (including birth trauma) are not generated/displayed as condition_source_value in the CONDITION_OCCURRENCE table
query = f'''
WITH ICD_suppressions AS (
SELECT concept_id
FROM `{project_id}.{deid_cdr}.concept`
WHERE
(vocabulary_id='ICD9CM' AND
(concept_code LIKE '764%' OR concept_code LIKE '765%' OR concept_code LIKE '766%' OR
concept_code LIKE '767%' OR concept_code LIKE '768%' OR concept_code LIKE '769%' OR concept_code LIKE '770%' OR
concept_code LIKE '771%' OR concept_code LIKE '772%' OR concept_code LIKE '773%' OR concept_code LIKE '774%' OR
concept_code LIKE '775%' OR concept_code LIKE '776%' OR concept_code LIKE '777%' OR concept_code LIKE '778%' OR
concept_code LIKE '779%'))
OR (vocabulary_id='ICD10CM' AND
concept_code LIKE 'P%')
)
SELECT COUNT (*) AS n_row_not_pass
FROM `{project_id}.{deid_cdr}.condition_occurrence` p1
JOIN ICD_suppressions p2
ON p1.condition_source_concept_id=p2.concept_id
WHERE condition_source_value IS NOT NULL
'''
df1= | pd.read_gbq(query, dialect='standard') | pandas.read_gbq |
from .baseManager import BaseManager
from ..busSim import BusSim
from ...result.searchResult import SearchResult
import os
import pandas as pd
import geopandas as gpd
from shapely.geometry import Point
from zipfile import ZipFile
import time
from tqdm import tqdm
class LocalManager(BaseManager):
def __init__(self, gtfs_path, out_path, borders):
super().__init__(gtfs_path, borders)
self.out_path = out_path
def run_batch(self, config, perf_df=None):
busSim_params = config.get_busSim_params()
start_times = config.get_start_times()
start_points = config.get_start_points()
result_df = | pd.DataFrame(columns=["geometry", "start_time", "map_identifier"]) | pandas.DataFrame |
import networkx as nx
import numpy as np
import pandas as pd
from quetzal.analysis import analysis
from quetzal.engine import engine, nested_logit, optimal_strategy
from quetzal.engine.pathfinder import PublicPathFinder
from quetzal.engine.road_pathfinder import RoadPathFinder
from quetzal.model import preparationmodel
from syspy.assignment import raw as raw_assignment
from syspy.skims import skims
from tqdm import tqdm
class OptimalModel(preparationmodel.PreparationModel):
def get_optimal_strategy_edges(
self,
boarding_time=0,
alighting_time=0,
alpha=0.5,
target=None,
inf=1e9,
walk_on_road=False,
):
links = self.links.copy()
links['index'] = links.index
if walk_on_road:
road_links = self.road_links.copy()
road_links['time'] = road_links['walk_time']
footpaths = pd.concat([road_links, self.road_to_transit])
access = self.zone_to_road.copy()
else:
access = self.zone_to_transit.copy()
footpaths = self.footpaths.copy()
# transit edges
links['j'] = [tuple(l) for l in links[['b', 'trip_id']].values]
links['i'] = [tuple(l) for l in links[['a', 'trip_id']].values]
links['f'] = inf
links['c'] = links['time']
transit_edges = links[['i', 'j', 'f', 'c']].reset_index().values.tolist()
# boarding edges
links.index = 'boarding_' + links['index'].astype(str)
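        # Boarding-edge frequency: 1/headway scaled by alpha, i.e. the expected boarding rate used by the optimal-strategy assignment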
links['f'] = 1 / links['headway'] / alpha
if 'boarding_stochastic_utility' in links.columns:
links['f'] *= np.exp(links['boarding_stochastic_utility'])
links['c'] = boarding_time
boarding_edges = links[['a', 'i', 'f', 'c']].reset_index().values.tolist()
# alighting edges
links.index = 'alighting_' + links['index'].astype(str)
links['f'] = inf
links['c'] = alighting_time
alighting_edges = links[['j', 'b', 'f', 'c']].reset_index().values.tolist()
# access edges
if target is not None:
# we do not want to egress to a destination that is not the target
access = access.loc[(access['direction'] == 'access') | (access['b'] == target)]
access['f'] = inf
access['c'] = access['time']
access_edges = access[['a', 'b', 'f', 'c']].reset_index().values.tolist()
# footpaths
footpaths['f'] = inf
footpaths['c'] = footpaths['time']
footpaths_edges = footpaths[['a', 'b', 'f', 'c']].reset_index().values.tolist()
edges = access_edges + boarding_edges + transit_edges + alighting_edges + footpaths_edges
edges = [tuple(e) for e in edges]
return edges
def step_strategy_finder(self, *args, **kwargs):
s_dict = {}
node_df_list = []
all_edges = self.get_optimal_strategy_edges(*args, **kwargs)
for destination in tqdm(self.zones.index):
forbidden = set(self.zones.index) - {destination}
edges = [e for e in all_edges if e[2] not in forbidden]
strategy, u, f = optimal_strategy.find_optimal_strategy(edges, destination)
s_dict[destination] = strategy
node_df = pd.DataFrame({'f': pd.Series(f), 'u': pd.Series(u)})
node_df['destination'] = destination
node_df_list.append(node_df)
optimal_strategy_nodes = pd.concat(node_df_list)
edges = self.get_optimal_strategy_edges(*args, **kwargs)
optimal_strategy_sets = pd.Series(s_dict).apply(list)
optimal_strategy_edges = pd.DataFrame(
edges, columns=['ix', 'i', 'j', 'f', 'c']).set_index('ix')
assert optimal_strategy_edges.index.is_unique
self.optimal_strategy_edges = optimal_strategy_edges
self.optimal_strategy_sets = optimal_strategy_sets
self.optimal_strategy_nodes = optimal_strategy_nodes
nodes = optimal_strategy_nodes.copy()
nodes.index.name = 'origin'
nodes.set_index('destination', append=True, inplace=True)
pt_los = nodes.loc[self.zones.index]['u'].reset_index().rename(columns={'u': 'gtime'})
pt_los['pathfinder_session'] = 'optimal_strategy'
self.pt_los = pt_los
def step_strategy_assignment(self, volume_column, road=False):
dvol = self.volumes.groupby('destination')[volume_column].sum()
destinations = list(dvol.loc[dvol > 0].index)
destination_indexed_volumes = self.volumes.set_index(['destination', 'origin'])[volume_column]
destination_indexed_nodes = self.optimal_strategy_nodes.set_index(
'destination', append=True).swaplevel()
destination_indexed_strategies = self.optimal_strategy_sets
indexed_edges = self.optimal_strategy_edges[['i', 'j', 'f', 'c']]
node_volume = {}
edge_volume = {}
for destination in tqdm(destinations) if len(destinations) > 1 else destinations:
try:
sources = destination_indexed_volumes.loc[destination]
subset = destination_indexed_strategies.loc[destination]
edges = indexed_edges.loc[subset].reset_index().values.tolist()
f = destination_indexed_nodes.loc[destination]['f'].to_dict()
u = destination_indexed_nodes.loc[destination]['u'].to_dict()
except KeyError:
continue
node_v, edge_v = optimal_strategy.assign_optimal_strategy(sources, edges, u, f)
for k, v in node_v.items():
node_volume[k] = node_volume.get(k, 0) + v
for k, v in edge_v.items():
edge_volume[k] = edge_volume.get(k, 0) + v
loaded_edges = self.optimal_strategy_edges
loaded_edges.drop(volume_column, axis=1, errors='ignore', inplace=True)
loaded_edges[volume_column] = pd.Series(edge_volume)
df = loaded_edges[['i', 'j', volume_column]].dropna(subset=[volume_column])
self.links.drop(volume_column, axis=1, errors='ignore', inplace=True)
links = self.links.copy()
links['index'] = links.index
# transit edges
links['j'] = [tuple(l) for l in links[['b', 'trip_id']].values]
links['i'] = [tuple(l) for l in links[['a', 'trip_id']].values]
transit = pd.merge(links, df, on=['i', 'j'])
boardings = pd.merge(links, df, left_on=['a', 'i'], right_on=['i', 'j'])
alightings = pd.merge(links, df, left_on=['j', 'b'], right_on=['i', 'j'])
loaded_links = self.links.copy()
loaded_links[volume_column] = transit.set_index('index')[volume_column]
loaded_links['boardings'] = boardings.set_index('index')[volume_column]
loaded_links['alightings'] = alightings.set_index('index')[volume_column]
loaded_nodes = self.nodes.copy()
loaded_nodes.drop('boardings', axis=1, errors='ignore', inplace=True)
loaded_nodes.drop('alightings', axis=1, errors='ignore', inplace=True)
loaded_nodes['boardings'] = boardings.groupby('a')[volume_column].sum()
loaded_nodes['alightings'] = alightings.groupby('b')[volume_column].sum()
self.loaded_edges = loaded_edges
self.nodes = loaded_nodes
self.links = loaded_links
if road:
self.road_links[volume_column] = raw_assignment.assign(
volume_array=list(self.links[volume_column]),
paths=list(self.links['road_link_list'])
)
# todo remove 'load' from analysis module:
self.road_links['load'] = self.road_links[volume_column]
def analysis_strategy_time(self, boarding_time=0, alighting_time=0, inf=1e9, walk_on_road=True):
assert walk_on_road == True # TODO implement for ACF
zero = 1 / inf
# add a column for each type of time to the os edges
edges = self.optimal_strategy_edges
edges['rtt_time'] = self.road_to_transit['time']
edges['ztr_time'] = self.zone_to_road['time']
edges['in_vehicle_time'] = self.links['time']
edges.loc[['boarding_' in i for i in edges.index], 'boarding_time'] = boarding_time
edges.loc[['alighting_' in i for i in edges.index], 'alighting_time'] = alighting_time
if walk_on_road:
edges['road_time'] = self.road_links['walk_time']
edges.fillna(0, inplace=True)
edges['walk_time'] = edges['road_time'] + edges['rtt_time'] + edges['ztr_time']
self.optimal_strategy_edges = edges
        # sum the various types of time over the edges of each strategy
od_cost = []
columns = ['in_vehicle_time', 'boarding_time', 'walk_time']
indexed_edges = self.optimal_strategy_edges[['i', 'j', 'f', 'c']]
edges = indexed_edges.reset_index().values.tolist()
nodes = set.union(*[{i, j} for ix, i, j, f, c in edges])
edge_data = {ix: (i, j, fa, ca) for ix, i, j, fa, ca in edges}
cost_dict = {
key: self.optimal_strategy_edges[key].to_dict()
for key in columns
}
origins = destinations = list(self.zones.index)
for destination in tqdm(destinations):
u = {
key:{node:0 for node in nodes}
for key in columns
}
f = {node:0 for node in nodes} # here 0 * inf = 0 because inf = 1e9
F = {node: zero for node in nodes} # here zero * inf = 1
U = {node: inf for node in nodes}
U[destination] = 0
for ix in self.optimal_strategy_sets[destination]:
i, j, fa, _ = edge_data[ix]
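                # Frequency-weighted update: combine the expected cost already accumulated at node i with the cost of reaching j via this attractive edge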
for key in columns:
ca = cost_dict[key][ix]
u[key][i] = (f[i] * u[key][i] + fa * (u[key][j] + ca)) / (f[i] + fa)
U[i] = (F[i] * U[i] + fa * (U[j])) / (F[i] + fa)
F[i] = F[i] + fa
f[i] = f[i] + fa
u['waiting_time'] = U
time_columns = columns + ['waiting_time']
for key in time_columns :
for origin in origins:
od_cost.append([key, origin, destination, u[key][origin]])
data = pd.DataFrame(od_cost, columns=['key', 'origin', 'destination', 'cost'])
right = data.set_index(['key', 'origin', 'destination'])['cost'].unstack('key').reset_index()
self.pt_los.drop(time_columns, axis=1, inplace=True, errors='ignore')
self.pt_los = | pd.merge(self.pt_los, right, on=['origin', 'destination']) | pandas.merge |
import sys
import unittest
import numpy as np
import pandas as pd
sys.path.append("../../")
from thex_data.data_consts import TARGET_LABEL, UNDEF_CLASS
from mainmodel.helper_compute import *
from mainmodel.helper_plotting import *
from models.binary_model.binary_model import BinaryModel
from models.ind_model.ind_model import OvAModel
from models.multi_model.multi_model import MultiModel
import warnings
"""
Tests to make sure calculations of experimental performance (purity, completeness, balanced purity, aggregated balanced purity, etc.) are correct.
Test functions in MainModel through BinaryModel, MultiModel, and OvAModel, since MainModel is an abstract class
Run tests with:
python -m unittest
"""
class TestModelMetrics(unittest.TestCase):
def generate_data(self, original, num_datapoints):
data = []
for i in range(num_datapoints):
noise = np.random.normal(loc=0, scale=0.1, size=3)
data.append(original + noise)
return np.array(data)
def setUp(self):
warnings.filterwarnings('ignore')
num_type1 = 20
num_type2 = 10
c1 = self.generate_data([0.2, 0.3, .9], num_type1)
c2 = self.generate_data([0.3, 0.1, .8], num_type2)
data_X = np.concatenate((c1, c2))
fake_X = pd.DataFrame(data_X, columns=['f1', 'f2', 'f3'])
fake_Y = pd.DataFrame(["Ia"] * num_type1 + ["CC"] *
num_type2, columns=[TARGET_LABEL])
fake_data = [fake_X, fake_Y]
self.class_labels = ["TDE", "II", "Ib/c"]
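        # Each prediction row: [P(TDE), P(II), P(Ib/c), true label]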
# Rows for max prob range
preds = [[0.9, 0.1, 0, "TDE"],
[0.95, 0.05, 0, "TDE"],
[0.92, 0, 0.08, "TDE"],
[0.1, 0.9, 0, "TDE"],
[0.1, 0.9, 0, "II"],
[0, 0.9, 0.1, "II"],
[0.91, 0.09, 0.1, "Ib/c"],
[0, 0.9, 0.1, "Ib/c"],
[0, 0.05, 0.95, "Ib/c"],
[0, 0.01, 0.99, "Ib/c"],
# .8 to .9
[0.81, 0.09, 0.01, "TDE"],
# Preds for lower prob ranges
[0.65, 0.35, 0, "TDE"],
[0.6, 0.4, 0, "TDE"],
[0.35, 0.65, 0, "TDE"],
[0.35, 0, 0.65, "TDE"],
[0.35, 0, 0.65, "TDE"],
[0.1, 0.6, 0.3, "II"],
[0.1, 0.3, 0.6, "II"],
[0.1, 0.3, 0.6, "II"],
[0.6, 0.4, 0, "Ib/c"],
[0.4, 0.6, 0, "Ib/c"],
[0, 0.35, 0.65, "Ib/c"],
]
self.agg_results = preds
self.test_model = BinaryModel(data=fake_data,
class_labels=self.class_labels)
self.class_counts = self.test_model.get_class_counts(
| pd.DataFrame(preds) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Hosmer-Lemeshow test
@author: Alex (stackoverflow)
"""
import pandas as pd
import numpy as np
from scipy.stats import chi2
def hosmer_lemeshow_test(pihat,real_label):
# pihat=model.predict()
pihatcat=pd.cut(pihat, np.percentile(pihat,[0,25,50,75,100]),labels=False,include_lowest=True) #here I've chosen only 4 groups
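    # pihatcat assigns each predicted probability to one of 4 quantile (quartile) bins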
meanprobs =[0]*4
expevents =[0]*4
obsevents =[0]*4
meanprobs2=[0]*4
expevents2=[0]*4
obsevents2=[0]*4
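    # For each bin: mean predicted probability, expected event count, and observed event count (plus the complements for non-events)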
for i in range(4):
meanprobs[i]=np.mean(pihat[pihatcat==i])
expevents[i]=np.sum(pihatcat==i)*np.array(meanprobs[i])
obsevents[i]=np.sum(real_label[pihatcat==i])
meanprobs2[i]=np.mean(1-pihat[pihatcat==i])
expevents2[i]=np.sum(pihatcat==i)*np.array(meanprobs2[i])
obsevents2[i]=np.sum(1-real_label[pihatcat==i])
data1={'meanprobs':meanprobs,'meanprobs2':meanprobs2}
data2={'expevents':expevents,'expevents2':expevents2}
data3={'obsevents':obsevents,'obsevents2':obsevents2}
m=pd.DataFrame(data1)
e= | pd.DataFrame(data2) | pandas.DataFrame |
__author__ = 'thor'
# import ut
import ut.util.ulist
import ut.daf.ch
import ut.daf.get
import pandas as pd
def group_and_count(df, count_col=None, frequency=False):
if isinstance(df, pd.Series):
t = | pd.DataFrame() | pandas.DataFrame |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from __future__ import division
from __future__ import print_function
import os
import sys
import stat
import time
import pickle
import traceback
import redis_lock
import contextlib
import abc
from pathlib import Path
import numpy as np
import pandas as pd
from typing import Union, Iterable
from collections import OrderedDict
from ..config import C
from ..utils import (
hash_args,
get_redis_connection,
read_bin,
parse_field,
remove_fields_space,
normalize_cache_fields,
normalize_cache_instruments,
)
from ..log import get_module_logger
from .base import Feature
from .ops import Operators # pylint: disable=W0611 # noqa: F401
class QlibCacheException(RuntimeError):
pass
class MemCacheUnit(abc.ABC):
"""Memory Cache Unit."""
def __init__(self, *args, **kwargs):
self.size_limit = kwargs.pop("size_limit", 0)
self._size = 0
self.od = OrderedDict()
def __setitem__(self, key, value):
# TODO: thread safe?__setitem__ failure might cause inconsistent size?
# precalculate the size after od.__setitem__
self._adjust_size(key, value)
self.od.__setitem__(key, value)
# move the key to end,make it latest
self.od.move_to_end(key)
if self.limited:
# pop the oldest items beyond size limit
while self._size > self.size_limit:
self.popitem(last=False)
def __getitem__(self, key):
v = self.od.__getitem__(key)
self.od.move_to_end(key)
return v
def __contains__(self, key):
return key in self.od
def __len__(self):
return self.od.__len__()
def __repr__(self):
return f"{self.__class__.__name__}<size_limit:{self.size_limit if self.limited else 'no limit'} total_size:{self._size}>\n{self.od.__repr__()}"
def set_limit_size(self, limit):
self.size_limit = limit
@property
def limited(self):
"""whether memory cache is limited"""
return self.size_limit > 0
@property
def total_size(self):
return self._size
def clear(self):
self._size = 0
self.od.clear()
def popitem(self, last=True):
k, v = self.od.popitem(last=last)
self._size -= self._get_value_size(v)
return k, v
def pop(self, key):
v = self.od.pop(key)
self._size -= self._get_value_size(v)
return v
def _adjust_size(self, key, value):
if key in self.od:
self._size -= self._get_value_size(self.od[key])
self._size += self._get_value_size(value)
@abc.abstractmethod
def _get_value_size(self, value):
raise NotImplementedError
class MemCacheLengthUnit(MemCacheUnit):
def __init__(self, size_limit=0):
super().__init__(size_limit=size_limit)
def _get_value_size(self, value):
return 1
class MemCacheSizeofUnit(MemCacheUnit):
def __init__(self, size_limit=0):
super().__init__(size_limit=size_limit)
def _get_value_size(self, value):
return sys.getsizeof(value)
class MemCache:
"""Memory cache."""
def __init__(self, mem_cache_size_limit=None, limit_type="length"):
"""
Parameters
----------
mem_cache_size_limit: cache max size.
limit_type: length or sizeof; length(call fun: len), size(call fun: sys.getsizeof).
"""
size_limit = C.mem_cache_size_limit if mem_cache_size_limit is None else mem_cache_size_limit
limit_type = C.mem_cache_limit_type if limit_type is None else limit_type
if limit_type == "length":
klass = MemCacheLengthUnit
elif limit_type == "sizeof":
klass = MemCacheSizeofUnit
else:
raise ValueError(f"limit_type must be length or sizeof, your limit_type is {limit_type}")
self.__calendar_mem_cache = klass(size_limit)
self.__instrument_mem_cache = klass(size_limit)
self.__feature_mem_cache = klass(size_limit)
def __getitem__(self, key):
if key == "c":
return self.__calendar_mem_cache
elif key == "i":
return self.__instrument_mem_cache
elif key == "f":
return self.__feature_mem_cache
else:
raise KeyError("Unknown memcache unit")
def clear(self):
self.__calendar_mem_cache.clear()
self.__instrument_mem_cache.clear()
self.__feature_mem_cache.clear()
class MemCacheExpire:
CACHE_EXPIRE = C.mem_cache_expire
@staticmethod
def set_cache(mem_cache, key, value):
"""set cache
:param mem_cache: MemCache attribute('c'/'i'/'f').
:param key: cache key.
:param value: cache value.
"""
mem_cache[key] = value, time.time()
@staticmethod
def get_cache(mem_cache, key):
"""get mem cache
:param mem_cache: MemCache attribute('c'/'i'/'f').
:param key: cache key.
:return: cache value; if cache not exist, return None.
"""
value = None
expire = False
if key in mem_cache:
value, latest_time = mem_cache[key]
expire = (time.time() - latest_time) > MemCacheExpire.CACHE_EXPIRE
return value, expire
class CacheUtils:
LOCK_ID = "QLIB"
@staticmethod
def organize_meta_file():
pass
@staticmethod
def reset_lock():
r = get_redis_connection()
redis_lock.reset_all(r)
@staticmethod
def visit(cache_path: Union[str, Path]):
        # FIXME: Because the reader lock was dropped when reading the cache, concurrent processes may hit read/write conflicts here
try:
cache_path = Path(cache_path)
meta_path = cache_path.with_suffix(".meta")
with meta_path.open("rb") as f:
d = pickle.load(f)
with meta_path.open("wb") as f:
try:
d["meta"]["last_visit"] = str(time.time())
d["meta"]["visits"] = d["meta"]["visits"] + 1
except KeyError as key_e:
raise KeyError("Unknown meta keyword") from key_e
pickle.dump(d, f, protocol=C.dump_protocol_version)
except Exception as e:
get_module_logger("CacheUtils").warning(f"visit {cache_path} cache error: {e}")
@staticmethod
def acquire(lock, lock_name):
try:
lock.acquire()
except redis_lock.AlreadyAcquired as lock_acquired:
raise QlibCacheException(
                f"""It seems the key (lock:{repr(lock_name)[1:-1]}-wlock) of the redis lock already exists in your redis db.
You can use the following command to clear your redis keys and rerun your commands:
$ redis-cli
> select {C.redis_task_db}
> del "lock:{repr(lock_name)[1:-1]}-wlock"
> quit
If the issue is not resolved, use "keys *" to find if multiple keys exist. If so, try using "flushall" to clear all the keys.
"""
) from lock_acquired
@staticmethod
@contextlib.contextmanager
def reader_lock(redis_t, lock_name: str):
current_cache_rlock = redis_lock.Lock(redis_t, f"{lock_name}-rlock")
current_cache_wlock = redis_lock.Lock(redis_t, f"{lock_name}-wlock")
lock_reader = f"{lock_name}-reader"
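        # Readers-writer pattern: the first reader acquires the writer lock on behalf of all readers, and the last reader to leave releases it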
# make sure only one reader is entering
current_cache_rlock.acquire(timeout=60)
try:
current_cache_readers = redis_t.get(lock_reader)
if current_cache_readers is None or int(current_cache_readers) == 0:
CacheUtils.acquire(current_cache_wlock, lock_name)
redis_t.incr(lock_reader)
finally:
current_cache_rlock.release()
try:
yield
finally:
# make sure only one reader is leaving
current_cache_rlock.acquire(timeout=60)
try:
redis_t.decr(lock_reader)
if int(redis_t.get(lock_reader)) == 0:
redis_t.delete(lock_reader)
current_cache_wlock.reset()
finally:
current_cache_rlock.release()
@staticmethod
@contextlib.contextmanager
def writer_lock(redis_t, lock_name):
current_cache_wlock = redis_lock.Lock(redis_t, f"{lock_name}-wlock", id=CacheUtils.LOCK_ID)
CacheUtils.acquire(current_cache_wlock, lock_name)
try:
yield
finally:
current_cache_wlock.release()
class BaseProviderCache:
"""Provider cache base class"""
def __init__(self, provider):
self.provider = provider
self.logger = get_module_logger(self.__class__.__name__)
def __getattr__(self, attr):
return getattr(self.provider, attr)
@staticmethod
def check_cache_exists(cache_path: Union[str, Path], suffix_list: Iterable = (".index", ".meta")) -> bool:
cache_path = Path(cache_path)
for p in [cache_path] + [cache_path.with_suffix(_s) for _s in suffix_list]:
if not p.exists():
return False
return True
@staticmethod
def clear_cache(cache_path: Union[str, Path]):
for p in [
cache_path,
cache_path.with_suffix(".meta"),
cache_path.with_suffix(".index"),
]:
if p.exists():
p.unlink()
@staticmethod
def get_cache_dir(dir_name: str, freq: str = None) -> Path:
cache_dir = Path(C.dpm.get_data_uri(freq)).joinpath(dir_name)
cache_dir.mkdir(parents=True, exist_ok=True)
return cache_dir
class ExpressionCache(BaseProviderCache):
"""Expression cache mechanism base class.
This class is used to wrap expression provider with self-defined expression cache mechanism.
.. note:: Override the `_uri` and `_expression` method to create your own expression cache mechanism.
"""
def expression(self, instrument, field, start_time, end_time, freq):
"""Get expression data.
.. note:: Same interface as `expression` method in expression provider
"""
try:
return self._expression(instrument, field, start_time, end_time, freq)
except NotImplementedError:
return self.provider.expression(instrument, field, start_time, end_time, freq)
def _uri(self, instrument, field, start_time, end_time, freq):
"""Get expression cache file uri.
Override this method to define how to get expression cache file uri corresponding to users' own cache mechanism.
"""
raise NotImplementedError("Implement this function to match your own cache mechanism")
def _expression(self, instrument, field, start_time, end_time, freq):
"""Get expression data using cache.
Override this method to define how to get expression data corresponding to users' own cache mechanism.
"""
raise NotImplementedError("Implement this method if you want to use expression cache")
def update(self, cache_uri: Union[str, Path], freq: str = "day"):
"""Update expression cache to latest calendar.
Override this method to define how to update expression cache corresponding to users' own cache mechanism.
Parameters
----------
cache_uri : str or Path
the complete uri of expression cache file (include dir path).
freq : str
Returns
-------
int
0(successful update)/ 1(no need to update)/ 2(update failure).
"""
raise NotImplementedError("Implement this method if you want to make expression cache up to date")
class DatasetCache(BaseProviderCache):
"""Dataset cache mechanism base class.
This class is used to wrap dataset provider with self-defined dataset cache mechanism.
.. note:: Override the `_uri` and `_dataset` method to create your own dataset cache mechanism.
"""
HDF_KEY = "df"
def dataset(
self, instruments, fields, start_time=None, end_time=None, freq="day", disk_cache=1, inst_processors=[]
):
"""Get feature dataset.
.. note:: Same interface as `dataset` method in dataset provider
        .. note:: The server uses redis_lock to make sure
            read-write conflicts are not triggered,
            but client readers are not considered.
"""
if disk_cache == 0:
# skip cache
return self.provider.dataset(
instruments, fields, start_time, end_time, freq, inst_processors=inst_processors
)
else:
# use and replace cache
try:
return self._dataset(
instruments, fields, start_time, end_time, freq, disk_cache, inst_processors=inst_processors
)
except NotImplementedError:
return self.provider.dataset(
instruments, fields, start_time, end_time, freq, inst_processors=inst_processors
)
def _uri(self, instruments, fields, start_time, end_time, freq, **kwargs):
"""Get dataset cache file uri.
Override this method to define how to get dataset cache file uri corresponding to users' own cache mechanism.
"""
raise NotImplementedError("Implement this function to match your own cache mechanism")
def _dataset(
self, instruments, fields, start_time=None, end_time=None, freq="day", disk_cache=1, inst_processors=[]
):
"""Get feature dataset using cache.
Override this method to define how to get feature dataset corresponding to users' own cache mechanism.
"""
raise NotImplementedError("Implement this method if you want to use dataset feature cache")
def _dataset_uri(
self, instruments, fields, start_time=None, end_time=None, freq="day", disk_cache=1, inst_processors=[]
):
"""Get a uri of feature dataset using cache.
        Specifically:
            disk_cache=1 means the dataset cache is used and the uri of the cache file is returned.
            disk_cache=0 means the client knows the path of the expression cache;
            the server checks whether the cache exists (generating it if not), and the client loads the data by itself.
Override this method to define how to get feature dataset uri corresponding to users' own cache mechanism.
"""
raise NotImplementedError(
"Implement this method if you want to use dataset feature cache as a cache file for client"
)
def update(self, cache_uri: Union[str, Path], freq: str = "day"):
"""Update dataset cache to latest calendar.
Override this method to define how to update dataset cache corresponding to users' own cache mechanism.
Parameters
----------
cache_uri : str or Path
the complete uri of dataset cache file (include dir path).
freq : str
Returns
-------
int
0(successful update)/ 1(no need to update)/ 2(update failure)
"""
raise NotImplementedError("Implement this method if you want to make expression cache up to date")
@staticmethod
def cache_to_origin_data(data, fields):
"""cache data to origin data
:param data: pd.DataFrame, cache data.
:param fields: feature fields.
:return: pd.DataFrame.
"""
not_space_fields = remove_fields_space(fields)
data = data.loc[:, not_space_fields]
# set features fields
data.columns = list(fields)
return data
@staticmethod
def normalize_uri_args(instruments, fields, freq):
"""normalize uri args"""
instruments = normalize_cache_instruments(instruments)
fields = normalize_cache_fields(fields)
freq = freq.lower()
return instruments, fields, freq
class DiskExpressionCache(ExpressionCache):
"""Prepared cache mechanism for server."""
def __init__(self, provider, **kwargs):
super(DiskExpressionCache, self).__init__(provider)
self.r = get_redis_connection()
# remote==True means client is using this module, writing behaviour will not be allowed.
self.remote = kwargs.get("remote", False)
def get_cache_dir(self, freq: str = None) -> Path:
return super(DiskExpressionCache, self).get_cache_dir(C.features_cache_dir_name, freq)
def _uri(self, instrument, field, start_time, end_time, freq):
field = remove_fields_space(field)
instrument = str(instrument).lower()
return hash_args(instrument, field, freq)
def _expression(self, instrument, field, start_time=None, end_time=None, freq="day"):
_cache_uri = self._uri(instrument=instrument, field=field, start_time=None, end_time=None, freq=freq)
_instrument_dir = self.get_cache_dir(freq).joinpath(instrument.lower())
cache_path = _instrument_dir.joinpath(_cache_uri)
# get calendar
from .data import Cal # pylint: disable=C0415
_calendar = Cal.calendar(freq=freq)
_, _, start_index, end_index = Cal.locate_index(start_time, end_time, freq, future=False)
if self.check_cache_exists(cache_path, suffix_list=[".meta"]):
"""
            In most cases, we do not need a reader lock,
            because updating data happens far less often than reading it.
"""
# FIXME: Removing the reader lock may result in conflicts.
# with CacheUtils.reader_lock(self.r, 'expression-%s' % _cache_uri):
# modify expression cache meta file
try:
                # FIXME: Multiple readers may corrupt the visit counter
if not self.remote:
CacheUtils.visit(cache_path)
series = read_bin(cache_path, start_index, end_index)
return series
except Exception:
series = None
self.logger.error("reading %s file error : %s" % (cache_path, traceback.format_exc()))
return series
else:
# normalize field
field = remove_fields_space(field)
# cache unavailable, generate the cache
_instrument_dir.mkdir(parents=True, exist_ok=True)
if not isinstance(eval(parse_field(field)), Feature):
# When the expression is not a raw feature
# generate expression cache if the feature is not a Feature
# instance
series = self.provider.expression(instrument, field, _calendar[0], _calendar[-1], freq)
if not series.empty:
                    # Only generate a cache when the expression is non-empty; empty expressions are not cached
with CacheUtils.writer_lock(self.r, f"{str(C.dpm.get_data_uri(freq))}:expression-{_cache_uri}"):
self.gen_expression_cache(
expression_data=series,
cache_path=cache_path,
instrument=instrument,
field=field,
freq=freq,
last_update=str(_calendar[-1]),
)
return series.loc[start_index:end_index]
else:
return series
else:
# If the expression is a raw feature(such as $close, $open)
return self.provider.expression(instrument, field, start_time, end_time, freq)
def gen_expression_cache(self, expression_data, cache_path, instrument, field, freq, last_update):
"""use bin file to save like feature-data."""
        # Make sure the cache still behaves correctly if the directory is deleted while running
meta = {
"info": {"instrument": instrument, "field": field, "freq": freq, "last_update": last_update},
"meta": {"last_visit": time.time(), "visits": 1},
}
self.logger.debug(f"generating expression cache: {meta}")
self.clear_cache(cache_path)
meta_path = cache_path.with_suffix(".meta")
with meta_path.open("wb") as f:
pickle.dump(meta, f, protocol=C.dump_protocol_version)
meta_path.chmod(stat.S_IRWXU | stat.S_IRGRP | stat.S_IROTH)
df = expression_data.to_frame()
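        # Binary layout: one little-endian float32 holding the start calendar index, followed by the series values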
r = np.hstack([df.index[0], expression_data]).astype("<f")
r.tofile(str(cache_path))
def update(self, sid, cache_uri, freq: str = "day"):
cp_cache_uri = self.get_cache_dir(freq).joinpath(sid).joinpath(cache_uri)
meta_path = cp_cache_uri.with_suffix(".meta")
if not self.check_cache_exists(cp_cache_uri, suffix_list=[".meta"]):
self.logger.info(f"The cache {cp_cache_uri} has corrupted. It will be removed")
self.clear_cache(cp_cache_uri)
return 2
with CacheUtils.writer_lock(self.r, f"{str(C.dpm.get_data_uri())}:expression-{cache_uri}"):
with meta_path.open("rb") as f:
d = pickle.load(f)
instrument = d["info"]["instrument"]
field = d["info"]["field"]
freq = d["info"]["freq"]
last_update_time = d["info"]["last_update"]
# get newest calendar
from .data import Cal, ExpressionD # pylint: disable=C0415
whole_calendar = Cal.calendar(start_time=None, end_time=None, freq=freq)
# calendar since last updated.
new_calendar = Cal.calendar(start_time=last_update_time, end_time=None, freq=freq)
# get append data
if len(new_calendar) <= 1:
# Including last updated calendar, we only get 1 item.
# No future updating is needed.
return 1
else:
# get the data needed after the historical data are removed.
# The start index of new data
current_index = len(whole_calendar) - len(new_calendar) + 1
# The existing data length
size_bytes = os.path.getsize(cp_cache_uri)
ele_size = np.dtype("<f").itemsize
assert size_bytes % ele_size == 0
ele_n = size_bytes // ele_size - 1
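                # the `- 1` accounts for the leading start-index float written by gen_expression_cache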
expr = ExpressionD.get_expression_instance(field)
lft_etd, rght_etd = expr.get_extended_window_size()
                # The expression uses future data up to rght_etd periods ahead,
                # so the last rght_etd data points must be recomputed.
                # At most `ele_n` periods of data can be removed.
remove_n = min(rght_etd, ele_n)
assert new_calendar[1] == whole_calendar[current_index]
data = self.provider.expression(
instrument, field, whole_calendar[current_index - remove_n], new_calendar[-1], freq
)
with open(cp_cache_uri, "ab") as f:
data = np.array(data).astype("<f")
# Remove the last bits
f.truncate(size_bytes - ele_size * remove_n)
f.write(data)
# update meta file
d["info"]["last_update"] = str(new_calendar[-1])
with meta_path.open("wb") as f:
pickle.dump(d, f, protocol=C.dump_protocol_version)
return 0
class DiskDatasetCache(DatasetCache):
"""Prepared cache mechanism for server."""
def __init__(self, provider, **kwargs):
super(DiskDatasetCache, self).__init__(provider)
self.r = get_redis_connection()
self.remote = kwargs.get("remote", False)
@staticmethod
def _uri(instruments, fields, start_time, end_time, freq, disk_cache=1, inst_processors=[], **kwargs):
return hash_args(*DatasetCache.normalize_uri_args(instruments, fields, freq), disk_cache, inst_processors)
def get_cache_dir(self, freq: str = None) -> Path:
return super(DiskDatasetCache, self).get_cache_dir(C.dataset_cache_dir_name, freq)
@classmethod
    def read_data_from_cache(cls, cache_path: Union[str, Path], start_time, end_time, fields):
        """read_data_from_cache
        Read data for the given time range and fields from the on-disk dataset cache.
        :param cache_path:
        :param start_time:
        :param end_time:
        :param fields: The field order of the dataset cache is sorted, so the columns are rearranged to match the requested fields.
:return:
"""
im = DiskDatasetCache.IndexManager(cache_path)
index_data = im.get_index(start_time, end_time)
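        # The .index file maps calendar segments to row offsets in the HDF store, so only the required slice is read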
if index_data.shape[0] > 0:
start, stop = (
index_data["start"].iloc[0].item(),
index_data["end"].iloc[-1].item(),
)
else:
start = stop = 0
with pd.HDFStore(cache_path, mode="r") as store:
if "/{}".format(im.KEY) in store.keys():
df = store.select(key=im.KEY, start=start, stop=stop)
df = df.swaplevel("datetime", "instrument").sort_index()
# read cache and need to replace not-space fields to field
df = cls.cache_to_origin_data(df, fields)
else:
df = pd.DataFrame(columns=fields)
return df
def _dataset(
self, instruments, fields, start_time=None, end_time=None, freq="day", disk_cache=0, inst_processors=[]
):
if disk_cache == 0:
# In this case, data_set cache is configured but will not be used.
return self.provider.dataset(
instruments, fields, start_time, end_time, freq, inst_processors=inst_processors
)
        # FIXME: When a resampled cache is read back and truncated with end_time, the resulting date range can be incomplete
if inst_processors:
raise ValueError(
f"{self.__class__.__name__} does not support inst_processor. "
f"Please use `D.features(disk_cache=0)` or `qlib.init(dataset_cache=None)`"
)
_cache_uri = self._uri(
instruments=instruments,
fields=fields,
start_time=None,
end_time=None,
freq=freq,
disk_cache=disk_cache,
inst_processors=inst_processors,
)
cache_path = self.get_cache_dir(freq).joinpath(_cache_uri)
features = pd.DataFrame()
gen_flag = False
if self.check_cache_exists(cache_path):
if disk_cache == 1:
# use cache
with CacheUtils.reader_lock(self.r, f"{str(C.dpm.get_data_uri(freq))}:dataset-{_cache_uri}"):
CacheUtils.visit(cache_path)
features = self.read_data_from_cache(cache_path, start_time, end_time, fields)
elif disk_cache == 2:
gen_flag = True
else:
gen_flag = True
if gen_flag:
# cache unavailable, generate the cache
with CacheUtils.writer_lock(self.r, f"{str(C.dpm.get_data_uri(freq))}:dataset-{_cache_uri}"):
features = self.gen_dataset_cache(
cache_path=cache_path,
instruments=instruments,
fields=fields,
freq=freq,
inst_processors=inst_processors,
)
if not features.empty:
features = features.sort_index().loc(axis=0)[:, start_time:end_time]
return features
def _dataset_uri(
self, instruments, fields, start_time=None, end_time=None, freq="day", disk_cache=0, inst_processors=[]
):
if disk_cache == 0:
# In this case, server only checks the expression cache.
# The client will load the cache data by itself.
from .data import LocalDatasetProvider # pylint: disable=C0415
LocalDatasetProvider.multi_cache_walker(instruments, fields, start_time, end_time, freq)
return ""
        # FIXME: When a resampled cache is read back and truncated with end_time, the resulting date range can be incomplete
if inst_processors:
raise ValueError(
f"{self.__class__.__name__} does not support inst_processor. "
f"Please use `D.features(disk_cache=0)` or `qlib.init(dataset_cache=None)`"
)
_cache_uri = self._uri(
instruments=instruments,
fields=fields,
start_time=None,
end_time=None,
freq=freq,
disk_cache=disk_cache,
inst_processors=inst_processors,
)
cache_path = self.get_cache_dir(freq).joinpath(_cache_uri)
if self.check_cache_exists(cache_path):
self.logger.debug(f"The cache dataset has already existed {cache_path}. Return the uri directly")
with CacheUtils.reader_lock(self.r, f"{str(C.dpm.get_data_uri(freq))}:dataset-{_cache_uri}"):
CacheUtils.visit(cache_path)
return _cache_uri
else:
# cache unavailable, generate the cache
with CacheUtils.writer_lock(self.r, f"{str(C.dpm.get_data_uri(freq))}:dataset-{_cache_uri}"):
self.gen_dataset_cache(
cache_path=cache_path,
instruments=instruments,
fields=fields,
freq=freq,
inst_processors=inst_processors,
)
return _cache_uri
class IndexManager:
"""
        Locking is not handled inside this class; callers must manage locks themselves.
        This class is a proxy for the on-disk index data.
"""
KEY = "df"
def __init__(self, cache_path: Union[str, Path]):
self.index_path = cache_path.with_suffix(".index")
self._data = None
self.logger = get_module_logger(self.__class__.__name__)
def get_index(self, start_time=None, end_time=None):
# TODO: fast read index from the disk.
if self._data is None:
self.sync_from_disk()
return self._data.loc[start_time:end_time].copy()
def sync_to_disk(self):
if self._data is None:
raise ValueError("No data to sync to disk.")
self._data.sort_index(inplace=True)
self._data.to_hdf(self.index_path, key=self.KEY, mode="w", format="table")
# The index should be readable for all users
self.index_path.chmod(stat.S_IRWXU | stat.S_IRGRP | stat.S_IROTH)
def sync_from_disk(self):
            # read_hdf on the path would not close the file promptly, so open the store as a context manager instead
with | pd.HDFStore(self.index_path, mode="r") | pandas.HDFStore |
import blpapi
import logging
import datetime
import pandas as pd
import contextlib
from collections import defaultdict
from pandas import DataFrame
@contextlib.contextmanager
def bopen(debug=False):
con = BCon(debug=debug)
con.start()
try:
yield con
finally:
con.stop()
class BCon(object):
def __init__(self, host='localhost', port=8194, debug=False):
"""
Create an object which manages connection to the Bloomberg API session
Parameters
----------
host: str
Host name
port: int
Port to connect to
debug: Boolean {True, False}
Boolean corresponding to whether to log Bloomberg Open API request
and response messages to stdout
"""
# Fill SessionOptions
sessionOptions = blpapi.SessionOptions()
sessionOptions.setServerHost(host)
sessionOptions.setServerPort(port)
self._sessionOptions = sessionOptions
# Create a Session
self.session = blpapi.Session(sessionOptions)
# initialize logger
self.debug = debug
@property
def debug(self):
"""
When True, print all Bloomberg Open API request and response messages
to stdout
"""
return self._debug
@debug.setter
def debug(self, value):
"""
Set whether logging is True or False
"""
self._debug = value
root = logging.getLogger()
if self._debug:
# log requests and responses
root.setLevel(logging.DEBUG)
else:
# log only failed connections
root.setLevel(logging.INFO)
def start(self):
"""
start connection and init service for refData
"""
# Start a Session
if not self.session.start():
logging.info("Failed to start session.")
return
self.session.nextEvent()
# Open service to get historical data from
if not self.session.openService("//blp/refdata"):
logging.info("Failed to open //blp/refdata")
return
self.session.nextEvent()
# Obtain previously opened service
self.refDataService = self.session.getService("//blp/refdata")
self.session.nextEvent()
def restart(self):
"""
Restart the blp session
"""
# Recreate a Session
self.session = blpapi.Session(self._sessionOptions)
self.start()
def _create_req(self, rtype, tickers, flds, ovrds, setvals):
# flush event queue in case previous call errored out
while(self.session.tryNextEvent()):
pass
request = self.refDataService.createRequest(rtype)
for t in tickers:
request.getElement("securities").appendValue(t)
for f in flds:
request.getElement("fields").appendValue(f)
for name, val in setvals:
request.set(name, val)
overrides = request.getElement("overrides")
for ovrd_fld, ovrd_val in ovrds:
ovrd = overrides.appendElement()
ovrd.setElement("fieldId", ovrd_fld)
ovrd.setElement("value", ovrd_val)
return request
def bdh(self, tickers, flds, start_date, end_date, elms=[],
ovrds=[], longdata=False):
"""
        Get tickers and fields; return a pandas DataFrame with a column
        MultiIndex of tickers and fields if multiple fields are given, and a
        plain Index otherwise. If a single field is given, the DataFrame is
        ordered the same as the tickers; otherwise the MultiIndex is sorted.
Parameters
----------
tickers: {list, string}
String or list of strings corresponding to tickers
flds: {list, string}
String or list of strings corresponding to FLDS
start_date: string
String in format YYYYmmdd
end_date: string
String in format YYYYmmdd
elms: list of tuples
List of tuples where each tuple corresponds to the other elements
to be set, e.g. [("periodicityAdjustment", "ACTUAL")]
Refer to A.2.4 HistoricalDataRequest in the Developers Guide for
more info on these values
ovrds: list of tuples
List of tuples where each tuple corresponds to the override
field and value
longdata: boolean
Whether data should be returned in long data format or pivoted
"""
elms = list(elms)
data = self._bdh_list(tickers, flds, start_date, end_date,
elms, ovrds)
df = DataFrame(data)
df.columns = ["date", "ticker", "field", "value"]
df.loc[:, "date"] = pd.to_datetime(df.loc[:, "date"])
if not longdata:
cols = ['ticker', 'field']
df = df.set_index(['date'] + cols).unstack(cols)
df.columns = df.columns.droplevel(0)
return df
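# Illustrative use of bdh() (a sketch only -- it needs a live Bloomberg session,
# and the tickers/field below are placeholders, not taken from this codebase):
#
#   con = BCon(debug=False)
#   con.start()
#   px = con.bdh(['SPY US Equity', 'IWM US Equity'], 'PX_LAST',
#                start_date='20150629', end_date='20150630')
#
# With longdata=False (the default) the result is a wide frame indexed by date
# with (ticker, field) columns; with longdata=True it stays in long format.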
def _bdh_list(self, tickers, flds, start_date, end_date, elms,
ovrds):
if type(tickers) is not list:
tickers = [tickers]
if type(flds) is not list:
flds = [flds]
setvals = elms
setvals.append(("startDate", start_date))
setvals.append(("endDate", end_date))
request = self._create_req("HistoricalDataRequest", tickers, flds,
ovrds, setvals)
logging.debug("Sending Request:\n %s" % request)
# Send the request
self.session.sendRequest(request)
data = []
# Process received events
while(True):
# We provide timeout to give the chance for Ctrl+C handling:
ev = self.session.nextEvent(500)
for msg in ev:
logging.debug("Message Received:\n %s" % msg)
if msg.getElement('securityData').hasElement('securityError') or (msg.getElement('securityData').getElement("fieldExceptions").numValues() > 0): # NOQA
raise Exception(msg)
ticker = msg.getElement('securityData').getElement('security').getValue() # NOQA
fldDatas = msg.getElement('securityData').getElement('fieldData') # NOQA
for fd in fldDatas.values():
dt = fd.getElement('date').getValue()
for element in fd.elements():
fname = str(element.name())
if fname == "date":
continue
val = element.getValue()
data.append((dt, ticker, fname, val))
if ev.eventType() == blpapi.Event.RESPONSE:
# Response completely received, so we could exit
break
return data
def ref(self, tickers, flds, ovrds=[]):
"""
Make a reference data request, get tickers and fields, return long
pandas Dataframe with columns [ticker, field, value]
Parameters
----------
tickers: {list, string}
String or list of strings corresponding to tickers
flds: {list, string}
String or list of strings corresponding to FLDS
ovrds: list of tuples
List of tuples where each tuple corresponds to the override
field and value
"""
data = self._ref(tickers, flds, ovrds)
data = DataFrame(data)
data.columns = ["ticker", "field", "value"]
return data
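# Illustrative use of ref() (placeholder ticker/field/override values; requires
# a live session):
#
#   con.ref('AUD Curncy', 'SETTLE_DT')
#   con.ref('W 1 Comdty', 'FUT_CHAIN',
#           ovrds=[('INCLUDE_EXPIRED_CONTRACTS', 'Y')])
#
# Each call returns a long DataFrame with columns [ticker, field, value].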
def _ref(self, tickers, flds, ovrds):
if type(tickers) is not list:
tickers = [tickers]
if type(flds) is not list:
flds = [flds]
request = self._create_req("ReferenceDataRequest", tickers, flds,
ovrds, [])
logging.debug("Sending Request:\n %s" % request)
# Send the request
self.session.sendRequest(request)
data = []
# Process received events
while(True):
# We provide timeout to give the chance for Ctrl+C handling:
ev = self.session.nextEvent(500)
for msg in ev:
logging.debug("Message Received:\n %s" % msg)
fldData = msg.getElement('securityData')
for i in range(fldData.numValues()):
ticker = (fldData.getValue(i).getElement("security").getValue()) # NOQA
reqFldsData = (fldData.getValue(i).getElement('fieldData'))
for j in range(reqFldsData.numElements()):
fld = flds[j]
# this is for dealing with requests which return arrays
# of values for a single field
if reqFldsData.getElement(fld).isArray():
lrng = reqFldsData.getElement(fld).numValues()
for k in range(lrng):
elms = (reqFldsData.getElement(fld).getValue(k).elements()) # NOQA
# if the elements of the array have multiple
# subelements this will just append them all
# into a list
for elm in elms:
data.append([ticker, fld, elm.getValue()])
else:
val = reqFldsData.getElement(fld).getValue()
data.append([ticker, fld, val])
if ev.eventType() == blpapi.Event.RESPONSE:
# Response completely received, so we could exit
break
return data
def ref_hist(self, tickers, flds, start_date,
end_date=datetime.date.today().strftime('%Y%m%d'),
timeout=2000, longdata=False):
"""
Get tickers and fields, periodically override REFERENCE_DATE to create
a time series. Return pandas dataframe with column MultiIndex
of tickers and fields if multiple fields given, Index otherwise.
If single field is given DataFrame is ordered same as tickers,
otherwise MultiIndex is sorted
Parameters
----------
tickers: {list, string}
String or list of strings corresponding to tickers
flds: {list, string}
String or list of strings corresponding to FLDS
start_date: string
String in format YYYYmmdd
end_date: string
String in format YYYYmmdd
timeout: int
Passed into nextEvent(timeout), number of milliseconds before
timeout occurs
"""
# correlationIDs should be unique to a session so rather than
# managing unique IDs for the duration of the session just restart
# a session for each call
self.restart()
if type(tickers) is not list:
tickers = [tickers]
if type(flds) is not list:
flds = [flds]
# Create and fill the request for the historical data
request = self.refDataService.createRequest("ReferenceDataRequest")
for t in tickers:
request.getElement("securities").appendValue(t)
for f in flds:
request.getElement("fields").appendValue(f)
overrides = request.getElement("overrides")
dates = pd.date_range(start_date, end_date, freq='b')
ovrd = overrides.appendElement()
for dt in dates:
ovrd.setElement("fieldId", "REFERENCE_DATE")
ovrd.setElement("value", dt.strftime('%Y%m%d'))
# CorrelationID used to keep track of which response coincides with
# which request
cid = blpapi.CorrelationId(dt)
logging.debug("Sending Request:\n %s" % request)
self.session.sendRequest(request, correlationId=cid)
data = []
# Process received events
while(True):
ev = self.session.nextEvent(timeout)
for msg in ev:
logging.debug("Message Received:\n %s" % msg)
corrID = msg.correlationIds()[0].value()
fldData = msg.getElement('securityData')
for i in range(fldData.numValues()):
tckr = (fldData.getValue(i).getElement("security").getValue()) # NOQA
reqFldsData = (fldData.getValue(i).getElement('fieldData'))
for j in range(reqFldsData.numElements()):
fld = flds[j]
val = reqFldsData.getElement(fld).getValue()
data.append((fld, tckr, val, corrID))
if ev.eventType() == blpapi.Event.TIMEOUT:
# All events processed
if (len(data) / len(flds) / len(tickers)) == len(dates):
break
else:
raise(RuntimeError("Timeout, increase timeout parameter"))
data = pd.DataFrame(data)
data.columns = ['field', 'ticker', 'value', 'date']
data = data.sort_values(by='date')
data = data.reset_index(drop=True)
data = data.loc[:, ['date', 'field', 'ticker', 'value']]
if not longdata:
cols = ['ticker', 'field']
data = data.set_index(['date'] + cols).unstack(cols)
data.columns = data.columns.droplevel(0)
return data
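# Illustrative use of ref_hist() (placeholder ticker/field; a fresh session is
# started internally via restart()):
#
#   con.ref_hist('AUD1M Curncy', 'SETTLE_DT', '20150625', '20150629')
#
# builds a business-day series of the field by overriding REFERENCE_DATE.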
def bdib(self, ticker, start_datetime, end_datetime, event_type, interval,
elms=[]):
"""
Get Open, High, Low, Close, Volume, and numEvents for a ticker.
Return pandas dataframe
Parameters
----------
ticker: string
String corresponding to ticker
start_datetime: string
UTC datetime in format YYYY-mm-ddTHH:MM:SS
end_datetime: string
UTC datetime in format YYYY-mm-ddTHH:MM:SS
event_type: string {TRADE, BID, ASK, BID_BEST, ASK_BEST, BEST_BID,
BEST_ASK}
Requested data event type
interval: int {1... 1440}
Length of time bars
elms: list of tuples
List of tuples where each tuple corresponds to the other elements
to be set, refer to A.2.8 IntradayBarRequest in the
Developers Guide for more info on these values
"""
# flush event queue in case previous call errored out
while(self.session.tryNextEvent()):
pass
# Create and fill the request for the historical data
request = self.refDataService.createRequest("IntradayBarRequest")
request.set("security", ticker)
request.set("eventType", event_type)
request.set("interval", interval) # bar interval in minutes
request.set("startDateTime", start_datetime)
request.set("endDateTime", end_datetime)
for name, val in elms:
request.set(name, val)
logging.debug("Sending Request:\n %s" % request)
# Send the request
self.session.sendRequest(request)
# defaultdict - later convert to pandas
data = defaultdict(dict)
# Process received events
flds = ['open', 'high', 'low', 'close', 'volume', 'numEvents']
while(True):
# We provide timeout to give the chance for Ctrl+C handling:
ev = self.session.nextEvent(500)
for msg in ev:
logging.debug("Message Received:\n %s" % msg)
barTick = (msg.getElement('barData')
.getElement('barTickData'))
for i in range(barTick.numValues()):
for fld in flds:
dt = barTick.getValue(i).getElement(0).getValue()
val = (barTick.getValue(i).getElement(fld).getValue())
data[(fld)][dt] = val
if ev.eventType() == blpapi.Event.RESPONSE:
# Response completely received, so we could exit
break
data = | DataFrame(data) | pandas.DataFrame |
# PyLS-PM Library
# Author: <NAME>
# Creation: November 2016
# Description: Library based on <NAME>'s simplePLS,
# <NAME>'s plspm and <NAME>'s matrixpls made in R
import pandas as pd
import numpy as np
import scipy as sp
import scipy.stats
from .qpLRlib4 import otimiza, plotaIC
import scipy.linalg
from collections import Counter
from .pca import *
from pandas.plotting import scatter_matrix
import matplotlib.pyplot as plt  # needed by scatterMatrix() below
from .adequacy import *
class PyLSpm(object):
def PCA(self):
for i in range(self.lenlatent):
print(self.latent[i])
block = self.data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
PCAdo(block, self.latent[i])
print('KMO')
print(KMO(block))
print('BTS')
print(BTS(block))
def scatterMatrix(self):
for i in range(1, self.lenlatent):
block = self.data[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
scatter_matrix(block, diagonal='kde')
plt.savefig('imgs/scatter' + self.latent[i], bbox_inches='tight')
plt.clf()
plt.cla()
def sampleSize(self):
r = 0.3
alpha = 0.05
# power=0.9
C = 0.5 * np.log((1 + r) / (1 - r))
Za = scipy.stats.norm.ppf(1 - (0.05 / 2))
sizeArray = []
powerArray = []
power = 0.5
for i in range(50, 100, 1):
power = i / 100
powerArray.append(power)
Zb = scipy.stats.norm.ppf(1 - power)
N = abs((Za - Zb) / C)**2 + 3
sizeArray.append(N)
return [powerArray, sizeArray]
def normaliza(self, X):
correction = np.sqrt((len(X) - 1) / len(X))  # std factor correction
mean_ = np.mean(X, 0)
scale_ = np.std(X, 0)
X = X - mean_
X = X / (scale_ * correction)
return X
def gof(self):
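# Goodness-of-fit computed below: sqrt( mean R^2 over the endogenous LVs  *
# AVE averaged over all indicators, each block's AVE weighted by its block size ).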
r2mean = np.mean(self.r2.T[self.endoexo()[0]].values)
AVEmean = self.AVE().copy()
totalblock = 0
for i in range(self.lenlatent):
block = self.data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
block = len(block.columns.values)
totalblock += block
AVEmean[self.latent[i]] = AVEmean[self.latent[i]] * block
AVEmean = np.sum(AVEmean) / totalblock
return np.sqrt(AVEmean * r2mean)
def endoexo(self):
exoVar = []
endoVar = []
for i in range(self.lenlatent):
if(self.latent[i] in self.LVariables['target'].values):
endoVar.append(self.latent[i])
else:
exoVar.append(self.latent[i])
return endoVar, exoVar
def residuals(self):
exoVar = []
endoVar = []
outer_residuals = self.data.copy()
# comun_ = self.data.copy()
for i in range(self.lenlatent):
if(self.latent[i] in self.LVariables['target'].values):
endoVar.append(self.latent[i])
else:
exoVar.append(self.latent[i])
for i in range(self.lenlatent):
block = self.data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
block = block.columns.values
loadings = self.outer_loadings.ix[
block][self.latent[i]].values
outer_ = self.fscores.ix[:, i].values
outer_ = outer_.reshape(len(outer_), 1)
loadings = loadings.reshape(len(loadings), 1)
outer_ = np.dot(outer_, loadings.T)
outer_residuals.ix[:, block] = self.data_.ix[
:, block] - outer_
# comun_.ix[:, block] = outer_
inner_residuals = self.fscores[endoVar]
inner_ = pd.DataFrame.dot(self.fscores, self.path_matrix.ix[endoVar].T)
inner_residuals = self.fscores[endoVar] - inner_
residuals = pd.concat([outer_residuals, inner_residuals], axis=1)
mean_ = np.mean(self.data, 0)
# comun_ = comun_.apply(lambda row: row + mean_, axis=1)
sumOuterResid = pd.DataFrame.sum(
pd.DataFrame.sum(outer_residuals**2))
sumInnerResid = pd.DataFrame.sum(
pd.DataFrame.sum(inner_residuals**2))
divFun = sumOuterResid + sumInnerResid
return residuals, outer_residuals, inner_residuals, divFun
def srmr(self):
srmr = (self.empirical() - self.implied())
srmr = np.sqrt(((srmr.values) ** 2).mean())
return srmr
def implied(self):
corLVs = pd.DataFrame.cov(self.fscores)
implied_ = pd.DataFrame.dot(self.outer_loadings, corLVs)
implied = | pd.DataFrame.dot(implied_, self.outer_loadings.T) | pandas.DataFrame.dot |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Main entry point for data_inspection. This script reads in
tabular patient data and analyzes it for outliers. First, it inspects specified
columns for data integrity (missing values) and produces histograms if appropriate.
Then it analyzes specified 2d relationships, producing scatter plots and identifying
outliers.
Finally it runs the DBSCAN algorithm to flag any potential outliers.
Note that on my machine this uses the venv "tabular_analysis"
"""
import os
import shutil
from collections import Counter
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.ensemble import IsolationForest
from sklearn.neighbors import LocalOutlierFactor
from sklearn import preprocessing
from support import is_empty, numbery_string_to_number
data_path = r'/Users/skyjones/Documents/inspection/SCD_pt_data_labels_piped.csv'
out_folder = r'/Users/skyjones/Documents/inspection/analysis' # should not exist
# column that contains the unique deidentified patient ID
study_id_col = 'Study ID'
# columns we want to inspect for completeness and produce histograms/barplots for
# each key is a column name, and the value is True if there MUST be a value and
# False if there does not need to be a value. If there must be a value if and
# only if another column(s) is filled, then the value should be a list of those columns
single_cols = {
'Age': True,
'Race': True,
'Hemoglobin genotype': True,
'Gender': True,
'BMI': True,
'Specify total HU daily dosage (mg)': True,
'HTN': True,
'Diabetes': True,
'Coronary artery disease': True,
'High cholesterol': True,
'Hgb': True,
'Hct/PCV': True,
'MRI 1 - Pulse ox results': True, # note the extra space
'MRI 2 - Pulse ox results': True,
'MRI 3 - Pulse ox results': True,
'MCV': True,
'Receiving regular blood transfusions': True,
r'Initial hemoglobin S% (pretransfusion if applicable)': True,
r'Results': True, # this is posttransfusion HbS%, and it's amazing it's the only column with this name
r'MRI 1 - SBP': True,
r'MRI 1 - DBP': True,
r'MRI 2 - SBP': True,
r'MRI 2 - DBP': True,
r'MRI 3 - SBP': True,
r'MRI 3 - DBP': True,
}
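# Example of the conditional form described above (hypothetical entry, not used
# in this study): 'Transfusion date': ['Receiving regular blood transfusions']
# would make the date required only when the transfusion column is filled in.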
# 2d relationships we want to use to check for outliers. [independent, dependent]
# numeric data only pls
double_cols = [['Specify total HU daily dosage (mg)', 'MCV'],
['Specify total HU daily dosage (mg)', 'Initial hemoglobin S% (pretransfusion if applicable)'],
['Age', 'MRI 1 - SBP'],
['Age', 'MRI 1 - DBP'],
['Age', 'MRI 2 - SBP'],
['Age', 'MRI 2 - DBP'],
['Age', 'MRI 3 - SBP'],
['Age', 'MRI 3 - DBP']]
contam = 0.07 # estimated % of data that are outliers
text_size = 7
np.random.seed(1)
#######################################
###### setup
mono_folder = os.path.join(out_folder, 'mono')
bi_folder = os.path.join(out_folder, 'bi')
multi_folder = os.path.join(out_folder, 'multi')
custom_folder = os.path.join(out_folder, 'custom')
overview_report = os.path.join(out_folder, 'overview.txt')
missing_data_report = os.path.join(out_folder, 'missing_data.csv')
outliers_report = os.path.join(out_folder, 'outliers.csv')
try:
os.mkdir(out_folder)
except FileExistsError:
no_answer = True
while no_answer:
ans = input('The output directory exists. Overwrite? [y/n]\n')
if ans == 'y':
no_answer = False
shutil.rmtree(out_folder)
os.mkdir(out_folder)
elif ans == 'n':
raise FileExistsError('File exists. Process aborted')
else:
print('Response must be "y" or "n"')
log_file = os.path.join(out_folder, 'log.txt')
log = open(log_file, 'w')
os.mkdir(mono_folder)
os.mkdir(bi_folder)
os.mkdir(multi_folder)
os.mkdir(custom_folder)
sep = '|'
df = pd.read_csv(data_path, sep=sep, low_memory=False, dtype={study_id_col:'object'})
problem_pts_cols = [study_id_col]
problem_pts_cols.extend(single_cols.keys())
problem_pts = pd.DataFrame(columns=problem_pts_cols)
problem_pts = problem_pts.set_index('Study ID') # this data will relate pt IDs to a list of columns for which data
# is missing, iff that missing data is marked as essential (by the variable single_cols)
outlier_pts = {} # this data will relate pt IDs to a list of columns for which
# the data seems to be an outlier
###### plot and inspect the monodimensional data
problem_patients_dict = {}
for col in single_cols:
data = df[col]
pts = df[study_id_col]
plt.figure(figsize=(8,12))
plt.title(col)
print(f'Plotting: {col}. dtype is {data.dtype}')
if data.dtype == 'object':
counts = Counter(data)
if np.nan in counts:
counts['nan'] = counts[np.nan]
del counts[np.nan]
n_v = [(n,v) for n,v in counts.most_common()]
names = [n for n,v in n_v]
values = [v for n,v in n_v]
plt.ylabel('Count')
plt.bar(names, values)
else:
# plt.hist(data)
data_drop = data.dropna()
result = plt.boxplot(data_drop, notch=True)
plt.ylabel('Value')
points = result['fliers'][0].get_data()
exes = points[0]+.01
whys = points[1]
for x,y in zip(exes,whys):
matches = pts[data == y]
label = ''
for m in matches:
label += f'{m} + '
label = label[:-3]
plt.annotate(label, (x,y), fontsize=8)
# plt.xlabel('Value')
scrub_col = col.replace('/', '-') # replace slashes with dashes to protect filepath
fig_name = os.path.join(mono_folder, f'{scrub_col}.png')
plt.savefig(fig_name)
plt.close()
print('Evaluating completeness')
for i, row in df.iterrows():
# explicit comparisons of bools needed because we are exploiting the ability to mix key datatypes
if not is_empty(row[col]):
has_data = True
# print('Is not empty')
elif single_cols[col] is False:
has_data = True
# print('Does not need data')
elif single_cols[col] is True: # if data is required
has_data = False
# print('Does not have data and deffo needs it')
else: # if we get here, need to see if the companion columns are filled
# if all companion columns are filled, then data is required
companions = [row[c] for c in single_cols[col]]
has_required_companions = all([not is_empty(row[c]) for c in single_cols[col]])
has_data = not has_required_companions
if not has_data:
pt_id = row[study_id_col]
try:
problem_patients_dict[pt_id].append(col)
except KeyError:
problem_patients_dict[pt_id] = [col]
# write the missing data report
for pt, cols in problem_patients_dict.items():
insert = pd.Series({col:1 for col in cols}, name=pt)
problem_pts = problem_pts.append(insert, ignore_index=False)
problem_pts = problem_pts.sort_index()
problem_pts.to_csv(missing_data_report)
print('\n')
###### do the 2d analyses
for ind_col, dep_col in double_cols:
print(f'2d: {ind_col} and {dep_col}')
fig_name = os.path.join(bi_folder, f'{dep_col}-v-{ind_col}.png')
plt.figure()
plt.title(f'{dep_col} vs. {ind_col}')
x = df[ind_col]
y = df[dep_col]
pt_id = df[study_id_col]
try:
x = [numbery_string_to_number(i) for i in x]
y = [numbery_string_to_number(i) for i in y]
data = np.array( [np.array( [a,b] ) for a,b,c in zip(x,y,pt_id) if all([not np.isnan(a), not(np.isnan(b))]) ] )
pts = [ c for a,b,c in zip(x,y,pt_id) if all([not np.isnan(a), not(np.isnan(b))]) ]
clf = IsolationForest(max_samples='auto', random_state=1, contamination=contam)
preds = clf.fit_predict(data)
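# IsolationForest labels inliers as 1 and outliers as -1, so only the -1
# points get annotated with patient IDs below.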
x = data[:,0]
y = data[:,1]
plt.scatter(x, y, c=preds)
for pt, x, y, p in zip(pts, x, y, preds):
if p == -1:
plt.annotate(pt, (x,y))
plt.xlabel(ind_col)
plt.ylabel(dep_col)
plt.savefig(fig_name)
plt.close()
except ValueError as e:
print(f'Error analyzing -{ind_col}- against -{dep_col}-')
log.write(f'Error analyzing -{ind_col}- against -{dep_col}-:\n\t{e}\n')
plt.close()
continue
###### multivariate outlier detection
print('\nRunning multivariate outlier analysis')
multifile = os.path.join(multi_folder, 'multivariate_detection.png')
multicolsfile = os.path.join(multi_folder, 'multivariate_cols.csv')
multisubsetfile = os.path.join(multi_folder, 'multivariate_subset.csv')
dump_folder = os.path.join(multi_folder, 'bin')
os.mkdir(dump_folder)
include_thresh = 0.3 # the minimum percentage of non-nan entries a column must have to be included in the multivariate analysis
# figure out which columns are numeric
exes = df[study_id_col]
numeric_cols = [c for c in df.columns if df[c].dtype != 'object']
numeric_cols = [n for n in numeric_cols if len(df[n].unique()) > 1] # has to not just be NaN
numeric_cols = [n for n in numeric_cols if 'Accession' not in n]
numeric_cols_nonthreshed = numeric_cols.copy()
numeric_cols = [n for n in numeric_cols if sum(~df[n].isna()) / len(df) > include_thresh] # has to have more non-NaN than the threshold
multidata = df[numeric_cols]
multidata_filled = multidata.fillna(multidata.mean())
# normalize the data
x = multidata_filled.values #returns a numpy array
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
multidata_filled = | pd.DataFrame(x_scaled) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2014-2019 OpenEEmeter contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from eemeter.features import (
compute_occupancy_feature,
compute_temperature_features,
compute_temperature_bin_features,
compute_time_features,
compute_usage_per_day_feature,
estimate_hour_of_week_occupancy,
get_missing_hours_of_week_warning,
fit_temperature_bins,
merge_features,
)
from eemeter.segmentation import segment_time_series
def test_compute_temperature_features_no_freq_index(
il_electricity_cdd_hdd_billing_monthly
):
# pick a slice with both hdd and cdd
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
temperature_data.index.freq = None
with pytest.raises(ValueError):
compute_temperature_features(meter_data.index, temperature_data)
def test_compute_temperature_features_no_meter_data_tz(
il_electricity_cdd_hdd_billing_monthly
):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
meter_data.index = meter_data.index.tz_localize(None)
with pytest.raises(ValueError):
compute_temperature_features(meter_data.index, temperature_data)
def test_compute_temperature_features_no_temp_data_tz(
il_electricity_cdd_hdd_billing_monthly
):
# pick a slice with both hdd and cdd
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
temperature_data = temperature_data.tz_localize(None)
with pytest.raises(ValueError):
compute_temperature_features(meter_data.index, temperature_data)
def test_compute_temperature_features_hourly_temp_mean(il_electricity_cdd_hdd_hourly):
# pick a slice with both hdd and cdd
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]["2016-03-01":"2016-07-01"]
temperature_data = il_electricity_cdd_hdd_hourly["temperature_data"][
"2016-03-01":"2016-07-01"
]
df = compute_temperature_features(meter_data.index, temperature_data)
assert list(sorted(df.columns)) == [
"n_hours_dropped",
"n_hours_kept",
"temperature_mean",
]
assert df.shape == (2952, 3)
assert round(df.temperature_mean.mean()) == 62.0
def test_compute_temperature_features_hourly_hourly_degree_days(
il_electricity_cdd_hdd_hourly, snapshot
):
# pick a slice with both hdd and cdd
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]["2016-03-01":"2016-07-01"]
temperature_data = il_electricity_cdd_hdd_hourly["temperature_data"][
"2016-03-01":"2016-07-01"
]
df = compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[60, 61],
cooling_balance_points=[65, 66],
temperature_mean=False,
degree_day_method="hourly",
)
assert list(sorted(df.columns)) == [
"cdd_65",
"cdd_66",
"hdd_60",
"hdd_61",
"n_hours_dropped",
"n_hours_kept",
]
assert df.shape == (2952, 6)
snapshot.assert_match(
[
round(df.hdd_60.mean(), 2),
round(df.hdd_61.mean(), 2),
round(df.cdd_65.mean(), 2),
round(df.cdd_66.mean(), 2),
round(df.n_hours_kept.mean(), 2),
round(df.n_hours_dropped.mean(), 2),
],
"values",
)
def test_compute_temperature_features_hourly_hourly_degree_days_use_mean_false(
il_electricity_cdd_hdd_hourly, snapshot
):
# pick a slice with both hdd and cdd
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]["2016-03-01":"2016-07-01"]
temperature_data = il_electricity_cdd_hdd_hourly["temperature_data"][
"2016-03-01":"2016-07-01"
]
df = compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[60, 61],
cooling_balance_points=[65, 66],
temperature_mean=False,
degree_day_method="hourly",
use_mean_daily_values=False,
)
assert df.shape == (2952, 6)
assert list(sorted(df.columns)) == [
"cdd_65",
"cdd_66",
"hdd_60",
"hdd_61",
"n_hours_dropped",
"n_hours_kept",
]
snapshot.assert_match(
[
round(df.hdd_60.mean(), 2),
round(df.hdd_61.mean(), 2),
round(df.cdd_65.mean(), 2),
round(df.cdd_66.mean(), 2),
round(df.n_hours_kept.mean(), 2),
round(df.n_hours_dropped.mean(), 2),
],
"values",
)
def test_compute_temperature_features_hourly_daily_degree_days_fail(
il_electricity_cdd_hdd_hourly
):
# pick a slice with both hdd and cdd
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]["2016-03-01":"2016-07-01"]
temperature_data = il_electricity_cdd_hdd_hourly["temperature_data"][
"2016-03-01":"2016-07-01"
]
with pytest.raises(ValueError):
compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[60, 61],
cooling_balance_points=[65, 66],
degree_day_method="daily",
)
def test_compute_temperature_features_hourly_daily_missing_explicit_freq(
il_electricity_cdd_hdd_hourly
):
# pick a slice with both hdd and cdd
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]["2016-03-01":"2016-07-01"]
temperature_data = il_electricity_cdd_hdd_hourly["temperature_data"][
"2016-03-01":"2016-07-01"
]
meter_data.index.freq = None
with pytest.raises(ValueError):
compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[60, 61],
cooling_balance_points=[65, 66],
degree_day_method="daily",
)
def test_compute_temperature_features_hourly_bad_degree_days(
il_electricity_cdd_hdd_hourly
):
# pick a slice with both hdd and cdd
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]["2016-03-01":"2016-07-01"]
temperature_data = il_electricity_cdd_hdd_hourly["temperature_data"][
"2016-03-01":"2016-07-01"
]
with pytest.raises(ValueError):
compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[60, 61],
cooling_balance_points=[65, 66],
degree_day_method="UNKNOWN",
)
def test_compute_temperature_features_hourly_data_quality(
il_electricity_cdd_hdd_hourly
):
# pick a slice with both hdd and cdd
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]["2016-03-01":"2016-07-01"]
temperature_data = il_electricity_cdd_hdd_hourly["temperature_data"][
"2016-03-01":"2016-07-01"
]
df = compute_temperature_features(
meter_data.index, temperature_data, temperature_mean=False, data_quality=True
)
assert df.shape == (2952, 4)
assert list(sorted(df.columns)) == [
"n_hours_dropped",
"n_hours_kept",
"temperature_not_null",
"temperature_null",
]
assert round(df.temperature_not_null.mean(), 2) == 1.0
assert round(df.temperature_null.mean(), 2) == 0.0
def test_compute_temperature_features_daily_temp_mean(il_electricity_cdd_hdd_daily):
meter_data = il_electricity_cdd_hdd_daily["meter_data"]
temperature_data = il_electricity_cdd_hdd_daily["temperature_data"]
df = compute_temperature_features(meter_data.index, temperature_data)
assert df.shape == (810, 3)
assert list(sorted(df.columns)) == [
"n_days_dropped",
"n_days_kept",
"temperature_mean",
]
assert round(df.temperature_mean.mean()) == 55.0
def test_compute_temperature_features_daily_daily_degree_days(
il_electricity_cdd_hdd_daily, snapshot
):
meter_data = il_electricity_cdd_hdd_daily["meter_data"]
temperature_data = il_electricity_cdd_hdd_daily["temperature_data"]
df = compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[60, 61],
cooling_balance_points=[65, 66],
temperature_mean=False,
degree_day_method="daily",
)
assert df.shape == (810, 6)
assert list(sorted(df.columns)) == [
"cdd_65",
"cdd_66",
"hdd_60",
"hdd_61",
"n_days_dropped",
"n_days_kept",
]
snapshot.assert_match(
[
round(df.hdd_60.mean(), 2),
round(df.hdd_61.mean(), 2),
round(df.cdd_65.mean(), 2),
round(df.cdd_66.mean(), 2),
round(df.n_days_kept.mean(), 2),
round(df.n_days_dropped.mean(), 2),
],
"values",
)
def test_compute_temperature_features_daily_daily_degree_days_use_mean_false(
il_electricity_cdd_hdd_daily, snapshot
):
meter_data = il_electricity_cdd_hdd_daily["meter_data"]
temperature_data = il_electricity_cdd_hdd_daily["temperature_data"]
df = compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[60, 61],
cooling_balance_points=[65, 66],
temperature_mean=False,
degree_day_method="daily",
use_mean_daily_values=False,
)
assert df.shape == (810, 6)
assert list(sorted(df.columns)) == [
"cdd_65",
"cdd_66",
"hdd_60",
"hdd_61",
"n_days_dropped",
"n_days_kept",
]
snapshot.assert_match(
[
round(df.hdd_60.mean(), 2),
round(df.hdd_61.mean(), 2),
round(df.cdd_65.mean(), 2),
round(df.cdd_66.mean(), 2),
round(df.n_days_kept.mean(), 2),
round(df.n_days_dropped.mean(), 2),
],
"values",
)
def test_compute_temperature_features_daily_hourly_degree_days(
il_electricity_cdd_hdd_daily, snapshot
):
meter_data = il_electricity_cdd_hdd_daily["meter_data"]
temperature_data = il_electricity_cdd_hdd_daily["temperature_data"]
df = compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[60, 61],
cooling_balance_points=[65, 66],
temperature_mean=False,
degree_day_method="hourly",
)
assert df.shape == (810, 6)
assert list(sorted(df.columns)) == [
"cdd_65",
"cdd_66",
"hdd_60",
"hdd_61",
"n_hours_dropped",
"n_hours_kept",
]
snapshot.assert_match(
[
round(df.hdd_60.mean(), 2),
round(df.hdd_61.mean(), 2),
round(df.cdd_65.mean(), 2),
round(df.cdd_66.mean(), 2),
round(df.n_hours_kept.mean(), 2),
round(df.n_hours_dropped.mean(), 2),
],
"values",
)
def test_compute_temperature_features_daily_hourly_degree_days_use_mean_false(
il_electricity_cdd_hdd_daily, snapshot
):
meter_data = il_electricity_cdd_hdd_daily["meter_data"]
temperature_data = il_electricity_cdd_hdd_daily["temperature_data"]
df = compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[60, 61],
cooling_balance_points=[65, 66],
temperature_mean=False,
degree_day_method="hourly",
use_mean_daily_values=False,
)
assert df.shape == (810, 6)
assert list(sorted(df.columns)) == [
"cdd_65",
"cdd_66",
"hdd_60",
"hdd_61",
"n_hours_dropped",
"n_hours_kept",
]
snapshot.assert_match(
[
round(df.hdd_60.mean(), 2),
round(df.hdd_61.mean(), 2),
round(df.cdd_65.mean(), 2),
round(df.cdd_66.mean(), 2),
round(df.n_hours_kept.mean(), 2),
round(df.n_hours_dropped.mean(), 2),
],
"values",
)
def test_compute_temperature_features_daily_bad_degree_days(
il_electricity_cdd_hdd_daily
):
meter_data = il_electricity_cdd_hdd_daily["meter_data"]
temperature_data = il_electricity_cdd_hdd_daily["temperature_data"]
with pytest.raises(ValueError):
compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[60, 61],
cooling_balance_points=[65, 66],
degree_day_method="UNKNOWN",
)
def test_compute_temperature_features_daily_data_quality(il_electricity_cdd_hdd_daily):
meter_data = il_electricity_cdd_hdd_daily["meter_data"]
temperature_data = il_electricity_cdd_hdd_daily["temperature_data"]
df = compute_temperature_features(
meter_data.index, temperature_data, temperature_mean=False, data_quality=True
)
assert df.shape == (810, 4)
assert list(sorted(df.columns)) == [
"n_days_dropped",
"n_days_kept",
"temperature_not_null",
"temperature_null",
]
assert round(df.temperature_not_null.mean(), 2) == 23.99
assert round(df.temperature_null.mean(), 2) == 0.00
def test_compute_temperature_features_billing_monthly_temp_mean(
il_electricity_cdd_hdd_billing_monthly
):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
df = compute_temperature_features(meter_data.index, temperature_data)
assert df.shape == (27, 3)
assert list(sorted(df.columns)) == [
"n_days_dropped",
"n_days_kept",
"temperature_mean",
]
assert round(df.temperature_mean.mean()) == 55.0
def test_compute_temperature_features_billing_monthly_daily_degree_days(
il_electricity_cdd_hdd_billing_monthly, snapshot
):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
df = compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[60, 61],
cooling_balance_points=[65, 66],
temperature_mean=False,
degree_day_method="daily",
)
assert df.shape == (27, 6)
assert list(sorted(df.columns)) == [
"cdd_65",
"cdd_66",
"hdd_60",
"hdd_61",
"n_days_dropped",
"n_days_kept",
]
snapshot.assert_match(
[
round(df.hdd_60.mean(), 2),
round(df.hdd_61.mean(), 2),
round(df.cdd_65.mean(), 2),
round(df.cdd_66.mean(), 2),
round(df.n_days_kept.mean(), 2),
round(df.n_days_dropped.mean(), 2),
],
"values",
)
def test_compute_temperature_features_billing_monthly_daily_degree_days_use_mean_false(
il_electricity_cdd_hdd_billing_monthly, snapshot
):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
df = compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[60, 61],
cooling_balance_points=[65, 66],
temperature_mean=False,
degree_day_method="daily",
use_mean_daily_values=False,
)
assert df.shape == (27, 6)
assert list(sorted(df.columns)) == [
"cdd_65",
"cdd_66",
"hdd_60",
"hdd_61",
"n_days_dropped",
"n_days_kept",
]
snapshot.assert_match(
[
round(df.hdd_60.mean(), 2),
round(df.hdd_61.mean(), 2),
round(df.cdd_65.mean(), 2),
round(df.cdd_66.mean(), 2),
round(df.n_days_kept.mean(), 2),
round(df.n_days_dropped.mean(), 2),
],
"values",
)
def test_compute_temperature_features_billing_monthly_hourly_degree_days(
il_electricity_cdd_hdd_billing_monthly, snapshot
):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
df = compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[60, 61],
cooling_balance_points=[65, 66],
temperature_mean=False,
degree_day_method="hourly",
)
assert df.shape == (27, 6)
assert list(sorted(df.columns)) == [
"cdd_65",
"cdd_66",
"hdd_60",
"hdd_61",
"n_hours_dropped",
"n_hours_kept",
]
snapshot.assert_match(
[
round(df.hdd_60.mean(), 2),
round(df.hdd_61.mean(), 2),
round(df.cdd_65.mean(), 2),
round(df.cdd_66.mean(), 2),
round(df.n_hours_kept.mean(), 2),
round(df.n_hours_dropped.mean(), 2),
],
"values",
)
def test_compute_temperature_features_billing_monthly_hourly_degree_days_use_mean_false(
il_electricity_cdd_hdd_billing_monthly, snapshot
):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
df = compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[60, 61],
cooling_balance_points=[65, 66],
temperature_mean=False,
degree_day_method="hourly",
use_mean_daily_values=False,
)
assert df.shape == (27, 6)
assert list(sorted(df.columns)) == [
"cdd_65",
"cdd_66",
"hdd_60",
"hdd_61",
"n_hours_dropped",
"n_hours_kept",
]
snapshot.assert_match(
[
round(df.hdd_60.mean(), 2),
round(df.hdd_61.mean(), 2),
round(df.cdd_65.mean(), 2),
round(df.cdd_66.mean(), 2),
round(df.n_hours_kept.mean(), 2),
round(df.n_hours_dropped.mean(), 2),
],
"values",
)
def test_compute_temperature_features_billing_monthly_bad_degree_day_method(
il_electricity_cdd_hdd_billing_monthly
):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
with pytest.raises(ValueError):
compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[60, 61],
cooling_balance_points=[65, 66],
degree_day_method="UNKNOWN",
)
def test_compute_temperature_features_billing_monthly_data_quality(
il_electricity_cdd_hdd_billing_monthly
):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
df = compute_temperature_features(
meter_data.index, temperature_data, temperature_mean=False, data_quality=True
)
assert df.shape == (27, 4)
assert list(sorted(df.columns)) == [
"n_days_dropped",
"n_days_kept",
"temperature_not_null",
"temperature_null",
]
assert round(df.temperature_not_null.mean(), 2) == 729.23
assert round(df.temperature_null.mean(), 2) == 0.0
def test_compute_temperature_features_billing_bimonthly_temp_mean(
il_electricity_cdd_hdd_billing_bimonthly
):
meter_data = il_electricity_cdd_hdd_billing_bimonthly["meter_data"]
temperature_data = il_electricity_cdd_hdd_billing_bimonthly["temperature_data"]
df = compute_temperature_features(meter_data.index, temperature_data)
assert df.shape == (14, 3)
assert list(sorted(df.columns)) == [
"n_days_dropped",
"n_days_kept",
"temperature_mean",
]
assert round(df.temperature_mean.mean()) == 55.0
def test_compute_temperature_features_billing_bimonthly_daily_degree_days(
il_electricity_cdd_hdd_billing_bimonthly, snapshot
):
meter_data = il_electricity_cdd_hdd_billing_bimonthly["meter_data"]
temperature_data = il_electricity_cdd_hdd_billing_bimonthly["temperature_data"]
df = compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[60, 61],
cooling_balance_points=[65, 66],
temperature_mean=False,
degree_day_method="daily",
)
assert df.shape == (14, 6)
assert list(sorted(df.columns)) == [
"cdd_65",
"cdd_66",
"hdd_60",
"hdd_61",
"n_days_dropped",
"n_days_kept",
]
snapshot.assert_match(
[
round(df.hdd_60.mean(), 2),
round(df.hdd_61.mean(), 2),
round(df.cdd_65.mean(), 2),
round(df.cdd_66.mean(), 2),
round(df.n_days_kept.mean(), 2),
round(df.n_days_dropped.mean(), 2),
],
"values",
)
def test_compute_temperature_features_billing_bimonthly_hourly_degree_days(
il_electricity_cdd_hdd_billing_bimonthly, snapshot
):
meter_data = il_electricity_cdd_hdd_billing_bimonthly["meter_data"]
temperature_data = il_electricity_cdd_hdd_billing_bimonthly["temperature_data"]
df = compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[60, 61],
cooling_balance_points=[65, 66],
temperature_mean=False,
degree_day_method="hourly",
)
assert df.shape == (14, 6)
assert list(sorted(df.columns)) == [
"cdd_65",
"cdd_66",
"hdd_60",
"hdd_61",
"n_hours_dropped",
"n_hours_kept",
]
snapshot.assert_match(
[
round(df.hdd_60.mean(), 2),
round(df.hdd_61.mean(), 2),
round(df.cdd_65.mean(), 2),
round(df.cdd_66.mean(), 2),
round(df.n_hours_kept.mean(), 2),
round(df.n_hours_dropped.mean(), 2),
],
"values",
)
def test_compute_temperature_features_billing_bimonthly_bad_degree_days(
il_electricity_cdd_hdd_billing_bimonthly
):
meter_data = il_electricity_cdd_hdd_billing_bimonthly["meter_data"]
temperature_data = il_electricity_cdd_hdd_billing_bimonthly["temperature_data"]
with pytest.raises(ValueError):
compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[60, 61],
cooling_balance_points=[65, 66],
degree_day_method="UNKNOWN",
)
def test_compute_temperature_features_billing_bimonthly_data_quality(
il_electricity_cdd_hdd_billing_bimonthly
):
meter_data = il_electricity_cdd_hdd_billing_bimonthly["meter_data"]
temperature_data = il_electricity_cdd_hdd_billing_bimonthly["temperature_data"]
df = compute_temperature_features(
meter_data.index, temperature_data, temperature_mean=False, data_quality=True
)
assert df.shape == (14, 4)
assert list(sorted(df.columns)) == [
"n_days_dropped",
"n_days_kept",
"temperature_not_null",
"temperature_null",
]
assert round(df.temperature_not_null.mean(), 2) == 1478.77
assert round(df.temperature_null.mean(), 2) == 0.0
def test_compute_temperature_features_shorter_temperature_data(
il_electricity_cdd_hdd_daily
):
meter_data = il_electricity_cdd_hdd_daily["meter_data"]
temperature_data = il_electricity_cdd_hdd_daily["temperature_data"]
# drop some data
temperature_data = temperature_data[:-200]
df = compute_temperature_features(meter_data.index, temperature_data)
assert df.shape == (810, 3)
assert list(sorted(df.columns)) == [
"n_days_dropped",
"n_days_kept",
"temperature_mean",
]
assert round(df.temperature_mean.sum()) == 43958.0
def test_compute_temperature_features_shorter_meter_data(il_electricity_cdd_hdd_daily):
meter_data = il_electricity_cdd_hdd_daily["meter_data"]
temperature_data = il_electricity_cdd_hdd_daily["temperature_data"]
# drop some data
meter_data = meter_data[:-10]
df = compute_temperature_features(meter_data.index, temperature_data)
assert df.shape == (800, 3)
assert list(sorted(df.columns)) == [
"n_days_dropped",
"n_days_kept",
"temperature_mean",
]
assert round(df.temperature_mean.sum()) == 43904.0
# ensure last row is NaN'ed
assert | pd.isnull(df.iloc[-1].n_days_kept) | pandas.isnull |
import urllib.request as url
from bs4 import BeautifulSoup
import pandas as pd
import os
import re
import csv
metadata = []
datasets_to_download = []
page_no = 1
seed_url = 'https://catalog.data.gov'
files_written = 0
while len(metadata) <= 1000:
try:
page = url.urlopen(seed_url + '/dataset?page=' + str(page_no))
page_no += 1
soup = BeautifulSoup(page, 'html.parser')
save_location = os.getcwd() + "/data/"
for listItem in soup.find_all("li", {"class": "dataset-item"}):
table = dict()
title = listItem.h3.a.text.strip() # title
#print(title)
table["sr_no"] = files_written
table["title"] = title
print(page_no)
new_soup = BeautifulSoup(url.urlopen(seed_url + listItem.h3.a.get('href')), 'html.parser') # download link
try:
download_link = new_soup.find("a", {"data-format": "csv"}) # href=re.compile('.*\.csv$'))
print(download_link.get('href'))
save_file = save_location + title + ".csv"
url.urlretrieve(download_link.get('href'), save_file)
data = | pd.read_csv(save_file) | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Monday 3 December 2018
@author: <NAME>
"""
import os
import pandas as pd
import numpy as np
import feather
import time
from datetime import date
import sys
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.metrics import silhouette_score
from sklearn.preprocessing import normalize
import somoclu
from delprocess.loadprofiles import resampleProfiles
from .metrics import mean_index_adequacy, davies_bouldin_score
from ..support import cluster_dir, results_dir
def progress(n, stats):
"""Report progress information, return a string."""
s = "%s : " % (n)
s += "\nsilhouette: %(silhouette).3f " % stats
s += "\ndbi: %(dbi).3f " % stats
s += "\nmia: %(mia).3f " % stats
return print(s)
def clusterStats(cluster_stats, n, X, cluster_labels, preprocessing, transform, tic, toc):
stats = {'n_sample': 0,
'cluster_size': [],
'silhouette': 0.0,
'dbi': 0.0,
'mia': 0.0,
'all_scores': 0.0,
# 'cdi': 0.0,
't0': time.time(),
'batch_fit_time': 0.0,
'total_sample': 0}
cluster_stats[n] = stats
try:
cluster_stats[n]['total_sample'] += X.shape[0]
cluster_stats[n]['n_sample'] = X.shape[0]
cluster_stats[n]['silhouette'] = silhouette_score(X, cluster_labels, sample_size=10000)
cluster_stats[n]['dbi'] = davies_bouldin_score(X, cluster_labels)
cluster_stats[n]['mia'] = mean_index_adequacy(X, cluster_labels)
#cluster_stats[n_clusters][y]['cdi'] = cluster_dispersion_index(Xbatch, cluster_labels) DON'T RUN LOCALLY!! - need to change to chunked algorithm once released
cluster_stats[n]['cluster_size'] = np.bincount(cluster_labels)
cluster_stats[n]['batch_fit_time'] = toc - tic
cluster_stats[n]['preprocessing'] = preprocessing
cluster_stats[n]['transform'] = transform
cluster_stats[n]['all_scores'] = cluster_stats[n]['dbi']*cluster_stats[n]['mia']/cluster_stats[n]['silhouette']
s = "%s : " % (n)
s += "\nsilhouette: %(silhouette).3f " % stats
s += "\ndbi: %(dbi).3f " % stats
s += "\nmia: %(mia).3f " % stats
print(s)
except:
print('Could not compute clustering stats for n = ' + str(n))
pass
return cluster_stats
def saveResults(experiment_name, cluster_stats, cluster_centroids, som_dim, elec_bin, save=True):
"""
Saves cluster stats results and centroids for a single clustering iteration.
Called inside kmeans() and som() functions.
"""
for k, v in cluster_stats.items():
n = k
evals = pd.DataFrame(cluster_stats).T
evals['experiment_name'] = experiment_name
evals['som_dim'] = som_dim
evals['n_clust'] = n
evals['elec_bin'] = elec_bin
eval_results = evals.drop(labels='cluster_size', axis=1).reset_index(drop=True)
# eval_results.rename({'index':'k'}, axis=1, inplace=True)
eval_results[['dbi','mia','silhouette']] = eval_results[['dbi','mia','silhouette']].astype(float)
eval_results['date'] = date.today().isoformat()
# eval_results['best_clusters'] = None
centroid_results = pd.DataFrame(cluster_centroids)
centroid_results['experiment_name'] = experiment_name
centroid_results['som_dim'] = som_dim
centroid_results['n_clust'] = n
centroid_results['elec_bin'] = elec_bin
try:
centroid_results['cluster_size'] = evals['cluster_size'][n]
except:
centroid_results['cluster_size'] = np.nan
centroid_results.reset_index(inplace=True)
centroid_results.rename({'index':'k'}, axis=1, inplace=True)
centroid_results['date'] = date.today().isoformat()
#3 Save Results
if save is True:
os.makedirs(results_dir, exist_ok=True)
erpath = os.path.join(results_dir, 'cluster_results.csv')
if os.path.isfile(erpath):
eval_results.to_csv(erpath, mode='a', index=False, header=False)
else:
eval_results.to_csv(erpath, index=False)
os.makedirs(cluster_dir, exist_ok=True)
crpath = os.path.join(cluster_dir, experiment_name + '_centroids.csv')
if os.path.isfile(crpath):
centroid_results.to_csv(crpath, mode='a', index=False, header=False)
else:
centroid_results.to_csv(crpath, index=False)
print('Results saved for', experiment_name, str(som_dim), str(n))
return eval_results, centroid_results
def xBins(X, bin_type):
if bin_type == 'amd':
Xdd_A = X.sum(axis=1)
Xdd = Xdd_A*230/1000
XmonthlyPower = resampleProfiles(Xdd, interval='M', aggfunc='sum')
Xamd = resampleProfiles(XmonthlyPower, interval='A', aggfunc='mean').reset_index().groupby('ProfileID').mean()
Xamd.columns=['amd']
amd_bins = [0, 1, 50, 150, 400, 600, 1200, 2500, 4000]
bin_labels = ['{0:.0f}-{1:.0f}'.format(x,y) for x, y in zip(amd_bins[:-1], amd_bins[1:])]
Xamd['bins'] = pd.cut(Xamd.amd, amd_bins, labels=bin_labels, right=True, include_lowest=True)
Xbin_dict = dict()
for c in Xamd.bins.cat.categories:
Xbin_dict[c] = Xamd[Xamd.bins==c].index.values
del Xdd_A, Xdd, XmonthlyPower, Xamd
if bin_type == 'integral':
Xint = normalize(X).cumsum(axis=1)
Xintn = pd.DataFrame(Xint, index=X.index)
Xintn['max'] = X.max(axis=1)
clusterer = MiniBatchKMeans(n_clusters=8, random_state=10)
clusterer.fit(np.array(Xintn))
cluster_labels = clusterer.predict(np.array(Xintn))
labl = pd.DataFrame(cluster_labels, index=X.index)
Xbin_dict = dict()
for c in labl[0].unique():
Xbin_dict['bin'+str(c)] = labl[labl[0]==c].index.values
return Xbin_dict
def preprocessX(X, norm=None):
if norm == 'unit_norm': #Kwac et al 2013
Xnorm = normalize(X)
elif norm == 'zero-one': #Dent et al 2014
Xnorm = np.array(X.divide(X.max(axis=1), axis=0))
elif norm == 'demin': #Jin et al 2016
Xnorm = normalize(X.subtract(X.min(axis=1), axis=0))
elif norm == 'sa_norm': #Dekenah 2014
Xnorm = np.array(X.divide(X.mean(axis=1), axis=0))
else:
Xnorm = np.array(X)
#Xnorm.fillna(0, inplace=True)
Xnorm[np.isnan(Xnorm)] = 0
return Xnorm
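# Minimal illustrative check of the normalisation options above (not part of the
# original pipeline; the 24-column random frame is a made-up stand-in for load profiles).
def _demo_preprocessX():
    X = pd.DataFrame(np.random.rand(5, 24))
    unit = preprocessX(X, norm='unit_norm')      # rows scaled to unit L2 norm
    zero_one = preprocessX(X, norm='zero-one')   # rows divided by their own max
    sa = preprocessX(X, norm='sa_norm')          # rows divided by their own mean
    return unit, zero_one, sa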
def kmeans(X, range_n_clusters, top_lbls=10, preprocessing = None, bin_X=False, experiment_name=None):
"""
This function applies the MiniBatchKmeans algorithm from sklearn on inputs X for range_n_clusters.
If preprocessing is given (e.g. 'unit_norm'), X is normalised via preprocessX() before clustering.
Returns cluster stats, cluster centroids and cluster labels.
"""
if experiment_name is None:
save = False
else:
if preprocessing is None:
pass
else:
experiment_name = experiment_name+'_'+ preprocessing
save = True
#apply pre-binning
if bin_X != False:
Xbin = xBins(X, bin_X)
else:
Xbin = {'all':X}
for b, ids in Xbin.items():
try:
A = X.loc[ids,:]
except:
A = ids
#apply preprocessing
A = preprocessX(A, norm=preprocessing)
centroids = pd.DataFrame()
stats = pd.DataFrame()
cluster_lbls = pd.DataFrame()
dim = 0 #set dim to 0 to match SOM formating
cluster_lbls_dim = {}
stats_dim = pd.DataFrame()
for n_clust in range_n_clusters:
clusterer = MiniBatchKMeans(n_clusters=n_clust, random_state=10)
#train clustering algorithm
tic = time.time()
clusterer.fit(A)
cluster_labels = clusterer.predict(A)
toc = time.time()
## Calculate scores
cluster_stats = clusterStats({}, n_clust, A, cluster_labels,
preprocessing = preprocessing, transform = None,
tic = tic, toc = toc)
cluster_centroids = clusterer.cluster_centers_
eval_results, centroid_results = saveResults(experiment_name, cluster_stats,
cluster_centroids, dim, b, save)
stats_dim = stats_dim.append(eval_results)
centroids = centroids.append(centroid_results)
cluster_lbls_dim[n_clust] = cluster_labels
#outside n_clust loop
best_clusters, best_stats = bestClusters(cluster_lbls_dim, stats_dim, top_lbls)
cluster_lbls = pd.concat([cluster_lbls, best_clusters], axis=1)
stats = pd.concat([stats, best_stats], axis=0)
stats.reset_index(drop=True, inplace=True)
if save is True:
saveLabels(cluster_lbls, stats)
return stats, centroids, cluster_lbls
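# Illustrative call of kmeans() (sketch only; X is expected to be a profile
# DataFrame indexed by ProfileID and the experiment name is made up):
#
#   stats, centroids, labels = kmeans(X, range_n_clusters=range(2, 11),
#                                     preprocessing='unit_norm', bin_X='amd',
#                                     experiment_name='exp1')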
def som(X, range_n_dim, top_lbls=10, preprocessing = None, bin_X=False, transform=None, experiment_name=None, **kwargs):
"""
This function applies the self organising maps algorithm from somoclu on inputs X over square maps of range_n_dim.
If preprocessing is given (e.g. 'unit_norm'), X is normalised via preprocessX() before training.
If transform = 'kmeans', the KMeans algorithm from sklearn is applied to the trained SOM nodes and cluster labels are returned
kwargs can be n_clusters = range(start, end, interval) OR list()
Returns cluster stats, cluster centroids and cluster labels.
"""
for dim in range_n_dim:
limit = int(np.sqrt(len(X)/20))
if dim > limit: #verify that number of nodes are sensible for size of input data
return print('Input size too small for map. Largest n should be ' + str(limit))
else:
pass
if experiment_name is None:
save = False
else:
if preprocessing is None:
pass
else:
experiment_name = experiment_name+'_'+ preprocessing
save = True
#apply pre-binning
if bin_X != False:
Xbin = xBins(X, bin_X)
else:
Xbin = {'0-4000':X}
for b, ids in Xbin.items():
try:
A = X.loc[ids,:]
except:
A = ids
#apply preprocessing
A = preprocessX(A, norm=preprocessing)
centroids = pd.DataFrame()
stats = pd.DataFrame()
cluster_lbls = pd.DataFrame()
for dim in range_n_dim:
cluster_lbls_dim = {}
stats_dim = pd.DataFrame()
nrow = ncol = dim
tic = time.time()
#train clustering algorithm
som = somoclu.Somoclu(nrow, ncol, compactsupport=False, maptype='planar')
som.train(A)
toc = time.time()
if transform == None:
n_clust = [0]
elif transform == 'kmeans':
if kwargs is None:
n_clust = [10]
else:
for key, value in kwargs.items(): #create list with number of clusters for kmeans
if key == 'n_clusters':
n_clust = value
else:
return('Cannot process this transform algorithm')
for n in n_clust:
if n == 0:
#create empty matrix the size of the SOM
m = np.arange(0, nrow*ncol, 1).reshape(nrow, ncol)
else:
clusterer = KMeans(n_clusters=n, random_state=10)
som.cluster(algorithm=clusterer)
m = som.clusters
#get cluster of SOM node and assign to input vecors based on bmus
k = [m[som.bmus[i][1],som.bmus[i][0]] for i in range(0, len(som.bmus))]
c = | pd.DataFrame(A) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
class Sink:
def write(self, key, obj):
raise Exception('Virtual function is not overriden')
def flush(self):
raise Exception('Virtual function is not overriden')
class CompositeSink(Sink):
def __init__(self, sinks):
self._sinks = sinks
def write(self, key, obj):
for sink in self._sinks:
sink.write(key, obj)
def flush(self):
for sink in self._sinks:
sink.flush()
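# Typical composition (illustrative; any second concrete Sink, e.g. a CSV or
# console sink, is hypothetical here):
#
#   sink = CompositeSink([HDFSink(ctx, 'out.h5')])
#   sink.write('trades', trades_df)
#   sink.flush()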
class HDFSink(Sink):
def __init__(self, ctx, file_path):
self._file_path = file_path
self._data = {}
self._logger = ctx.logger
def write(self, name, df, reset_index=True):
self._data[name] = df.reset_index() if reset_index else df
def flush(self):
store = | pd.HDFStore(self._file_path, complib='blosc', complevel=9) | pandas.HDFStore |
import numpy as np
import pandas as pd
from pandas import DataFrame
import matplotlib.pyplot as plt
import seaborn as sns
from os.path import exists
from pathlib import Path
def load_consistency_result(filename):
data = pd.read_csv(filename, header=None)
data = data.iloc[:, 0:2].copy()
#print(data)
data.columns = ["Ontology", "IsConsitency"]
return data
def load_evaluation_csv(file_name):
data = pd.read_csv(file_name, header=None)
isNumeric = data.iloc[:, 1:11].copy()
isNumeric = isNumeric.apply(lambda s: pd.to_numeric(s, errors='coerce').notnull().all(), axis=1)
#print(isNumeric)
isNumeric = isNumeric.index[isNumeric].tolist()
timeout = data.applymap(lambda x: 'timeout' in str(x).lower() ).any(axis=1)
timeout = data[timeout].copy()
time_count = timeout.shape[0]
new_data = data.iloc[isNumeric].copy()
#print(isNumeric)
return new_data
if __name__ == '__main__':
ore2015_statistics_file:str = "C:\\Users\\anl\\SINTEF\\Skytrack@SINTEF - Documents\\General\\Task T1.3\\EvaluationResult\\ORE2015_Statistics.csv"
input_folder: str = "C:\\Users\\anl\\SINTEF\\Skytrack@SINTEF - Documents\\General\\Task T1.3\\EvaluationResult\\ORE2015\\"
output_folder = "./output/"
#ore2015_statistics_file: str = "C:\\Users\\anl\\SINTEF\\Skytrack@SINTEF - Documents\\General\\Task T1.3\\EvaluationResult\\BioOntology_Statistics.csv"
#input_folder: str = "C:\\Users\\anl\\SINTEF\\Skytrack@SINTEF - Documents\\General\\Task T1.3\\EvaluationResult\\Bio\\"
#output_folder = "./output/Bio/"
Path(output_folder).mkdir(parents=True, exist_ok=True)
columns = ["Ontology", "Run1", "Run2", "Run3", "Run4", "Run5", "Run6" ,"Run7" ,"Run8" ,"Run9" ,"Run10", "Mean", "Median"]
reasoner = "KoncludeCLI"
task_name = ["Consistency", "Classification", "Realization"]
#task_name = ["Classification", "Realization"]
filelist = {"Classification": "KoncludeCLI_classification.list", "Realization":"KoncludeCLI_realization.list"}
constency_dict = {}
for task in task_name:
file_name = input_folder + reasoner + "_" + task + "_old.csv"
full_data = pd.read_csv(file_name, header=None)
full_data.columns = columns
#print(full_data)
success_data = load_evaluation_csv(file_name)
success_data.columns = columns
#print(success_data)
unsuccess_data = full_data.merge(success_data, on="Ontology", how="left", indicator=True).query('_merge == "left_only"').drop(columns='_merge').iloc[:, 0:13].copy()
unsuccess_data.columns = columns
#print(unsuccess_data)
if task=="Consistency":
consistency_result_file = input_folder + reasoner + "_ConsistencyResult.csv"
consitency_result = load_consistency_result(consistency_result_file)
#print(consitency_result)
constency_dict = pd.Series(consitency_result.IsConsitency.values, index=consitency_result.Ontology).to_dict()
#print(constency_dict)
new_success_data = success_data.merge(consitency_result, on="Ontology", how="inner").iloc[:, 0:13].copy()
new_success_data.columns = columns
malicious_data = success_data.merge(new_success_data, on="Ontology", how="left", indicator=True).query('_merge == "left_only"').drop(columns='_merge').iloc[:, 0:13].copy()
malicious_data.columns = columns
firstCol=[]
secondCol=[]
for index, row in malicious_data.iterrows():
firstCol.append(row['Ontology'])
secondCol.append("Unexpected Error")
new_mal_data = {"Ontology": firstCol, "Run1": secondCol}
malicious_data = | pd.DataFrame(new_mal_data) | pandas.DataFrame |
#!/usr/bin/env python3
import random, os, sys, logging, re
import pandas as pd
from Bio import SeqIO
try:
from Bio.Alphabet import generic_dna, IUPAC
Bio_Alphabet = True
except ImportError:
Bio_Alphabet = None
# usages of generic_dna, IUPAC are not supported in Biopython 1.78 (September 2020).
print(f"The installed BioPython is a new version that has removed the Alphabet module.",file=sys.stderr)
import numpy as np
from itertools import combinations, product
from scipy.spatial.distance import squareform
from scipy.cluster.hierarchy import fcluster, linkage
from scipy.stats import sem
def writeOutputSeq(col_ID, col_seq, output_file, db):
"""
Generate output fasta file using db dataframe
"""
output_handle = open(output_file, 'w')
for key, row in db.iterrows():
output_handle.write('>%s\n%s\n' % (row[col_ID], row[col_seq]))
output_handle.close()
def collapse_fasta(prefix):
print(prefix)
# parse joined reads
seq_dict_joined = getInputSeq(prefix + '_join.raw.fa')
seq_id_dict = {}
for id in seq_dict_joined:
seq = seq_dict_joined[id]
if seq not in seq_id_dict:
seq_id_dict[seq] = [id]
else:
seq_id_dict[seq].append(id)
fjoined = open((prefix + '_join.fa'), 'w')
for seq in seq_id_dict:
fjoined.write('>%s\n%s\n' % ('_'.join(seq_id_dict[seq]), seq))
fjoined.close()
# parse unjoined
seq_dict_R1 = getInputSeq(prefix + '_unjoinR1.raw.fa')
seq_dict_R2 = getInputSeq(prefix + '_unjoinR2.raw.fa')
seq_id_dict_R1R2 = {} # concated seq: [seq IDs]
for id in seq_dict_R1:
concat_seq = seq_dict_R1[id] + seq_dict_R2[id]
if concat_seq not in seq_id_dict_R1R2:
seq_id_dict_R1R2[concat_seq] = [id]
else:
seq_id_dict_R1R2[concat_seq].append(id)
fR1 = open(prefix + '_unjoinR1.fa', 'w')
fR2 = open(prefix + '_unjoinR2.fa', 'w')
for seq in seq_id_dict_R1R2:
fR1.write('>%s\n%s\n' % ('_'.join(seq_id_dict_R1R2[seq]), seq_dict_R1[seq_id_dict_R1R2[seq][0]]))
fR2.write('>%s\n%s\n' % ('_'.join(seq_id_dict_R1R2[seq]), seq_dict_R2[seq_id_dict_R1R2[seq][0]]))
fR1.close()
fR2.close()
def getInputSeq(seq_file):
"""
Arguments:
seq_file = a fasta file of sequences input
Returns:
a dictionary of {ID:Seq}
"""
### add print message to warn for empty dict
if not os.path.exists(seq_file):
print("[getInputSeq] %s FAILED TO LOAD. EMPTY DICT IS RETURNED. THIS MAY INFLUENCE YOUR RESULTS" % seq_file, file=sys.stderr, flush=True)
return {}
if seq_file.endswith('.gz'):
os.system('gunzip %s' % seq_file)
seq_file_unzip = seq_file.rstrip('.gz')
else:
seq_file_unzip = seq_file
if Bio_Alphabet:
seq_dict = SeqIO.index(seq_file_unzip, "fasta", IUPAC.ambiguous_dna)
else:
seq_dict = SeqIO.index(seq_file_unzip, "fasta")
# Create a seq_dict ID translation using IDs truncate up to space or 50 chars
seqs = {}
for seq in seq_dict.values():
seqs.update({seq.description: str(seq.seq).upper()})
### .fa files may have a header prefix ("lcl|") preceding each gene name. This chunk makes sure that prefix is removed
### keep the bracket-style key access below; changing it causes a KeyError
keys = list(seqs.keys())
# obtain a list of keys stripped of the header
for i in range(len(keys)):
keys[i] = keys[i].replace("lcl|", "", 1)
seqs = dict(zip(keys, list(seqs.values())))
if seq_file.endswith('.gz'):
os.system('gzip %s' % seq_file_unzip)
return seqs
def getCDR(cdrfile):
V_CDR = {}
if not os.path.exists(cdrfile):
logging.warning('Cannot find CDR boundary file %s' % os.path.basename(cdrfile))
return None
else:
for line in open(cdrfile):
l = line.strip().split()
V_CDR[l[0]] = [int(b) for b in l[1:]]
return V_CDR
def getCSV(csvfile):
# Load CSV file by reading by chunks
tmplist = []
for chunk in pd.read_csv(csvfile, sep='\t', chunksize=20000):
tmplist.append(chunk)
m = pd.concat(tmplist, axis=0)
del tmplist
return m
def load_Valign(fname):
# Load V gene genome alignment position
V_align = {}
for line in open(fname):
l = line.strip().split()
start_end = '%s_%s' % (l[2], l[3])
if l[0] not in V_align:
V_align[l[0]] = {l[1]: [start_end]}
else:
if l[1] in V_align[l[0]]:
V_align[l[0]][l[1]].append(start_end)
else:
V_align[l[0]][l[1]] = [start_end]
return V_align
def CheckAlignOverlap(topinfo, reads_align, Valign, genomealign, hitcheck):
flag = 'noneed'
if genomealign == 'T':
flag = 'unmatch'
if topinfo[0] not in reads_align:
flag = 'nohit'
else:
for loc in reads_align[topinfo[0]]:
chrom = loc[0]
pos = int(loc[1])
if topinfo[1] not in Valign:
flag = 'noVhit'
continue
if chrom in Valign[topinfo[1]]:
for start_end in Valign[topinfo[1]][chrom]:
start = int(start_end.split('_')[0])
end = int(start_end.split('_')[1])
# extend 10bp at 5' because V-D or V-J junctions might have matches
if (start - 10) <= pos <= end:
flag = 'match'
if flag == 'nohit':
return 'No_hit_from_genome_alignment'
elif flag == 'noVhit':
return 'topVgene_has_no_alignment'
elif flag == 'unmatch':
return 'genome_alignment_unmatch_Vgene'
else:
return hitcheck
def loggingRun(cmdline):
logging.info(cmdline)
os.system(cmdline)
def line_count(fname):
i = -1
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
def fasta_count(fname):
i = 0
fin = open(fname)
for line in fin:
if line.startswith('>'):
i += 1
fin.close()
return i
def reads_stat(args):
for sample in sample_info:
eachdir = '%s/%s' % (args.outdir, sample)
fstat = open('%s/%s.stat.txt' % (eachdir, sample), 'w')
# reads count
total_num = line_count("%s/%s_R1.fq" % (eachdir, sample)) / 4
join_num = line_count("%s/%s_join.fq" % (eachdir, sample)) / 4
unjoin_num = total_num - join_num
fstat.write('Total reads\t%d\nJoined reads\t%d\nUnjoined reads\t%d\n' % (total_num, join_num, unjoin_num))
# alignment stat
join_uniq = line_count('%s/%s_join.uniq.xls' % (eachdir, sample))
R1_uniq = line_count('%s/%s_unjoinR1.uniq.xls' % (eachdir, sample))
join_NOuniq = line_count('%s/%s_join.NOuniq.xls' % (eachdir, sample))
R1_NOuniq = line_count('%s/%s_unjoinR1.NOuniq.xls' % (eachdir, sample))
mergeNum = line_count('%s/%s.IgBlast_merge.xls' % (eachdir, sample))
fstat.write('# of uniquely/NON-uniquely joined hits\t%d\t%d\n' % (join_uniq, join_NOuniq))
fstat.write('# of uniquely/NON-uniquely unjoined-R1 hits\t%d\t%d\n' % (R1_uniq, R1_NOuniq))
fstat.write('# of merged hits\t%d\n' % mergeNum)
fstat.close()
def random_seq(length):
''' Generate random sequence with input length '''
seq = ''
if length == 0:
return seq
else:
seq = ''.join([random.choice('ATCG') for i in range(0, length)])
return seq
def mutate_seq(orig_string, mutation_rate=0.005):
''' Mutate input sequence with point mutations '''
bases = "ACGT"
result = []
mutations = []
n = 0
for base in orig_string:
n += 1
if random.random() < mutation_rate and base in bases:
new_base = bases[bases.index(base) - random.randint(1, 3)] # negatives are OK
result.append(new_base)
mutations.append('%s%d%s' % (base, n, new_base))
else:
result.append(base)
return "".join(result), '|'.join(mutations)
def reverse_complement(seq):
complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
seq_rc = "".join(complement.get(base, base) for base in reversed(seq))
return seq_rc
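# Illustrative example (not part of the original pipeline):
#   reverse_complement('ATCG') -> 'CGAT'
# Bases outside A/C/G/T (e.g. 'N') are passed through unchanged by the .get(base, base) fallback.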
def fastq_stats(fastqfile):
# execute fastq-stats
os.system('fastq-stats %s > %s.fastqstat' % (fastqfile, fastqfile))
# parse results
fqstat = {}
for line in open('%s.fastqstat' % fastqfile):
l = line.strip().split('\t')
fqstat[l[0]] = l[1]
os.system('rm -rf %s.fastqstat' % fastqfile)
return fqstat
def parsefa_long(file, length):
id_seq = {}
id = ''
for line in open(file):
if line.startswith('>'):
id = line.strip()
id_seq[id] = ''
else:
id_seq[id] += line.strip()
fout = open(file.replace('.fa', '.long.fa'), 'w')
for id in id_seq:
if len(id_seq[id]) >= length:
fout.write('%s\n%s\n' % (id, id_seq[id]))
fout.close()
def smooth(self, nucMutnum, nucCovnum, genetotalseq, statfilelist):
# print(nucMutnum['A'], nucMutnum['G'], nucMutnum['C'], nucMutnum['T'])
# print(nucCovnum['A'], nucCovnum['G'], nucCovnum['C'], nucCovnum['T'])
nucMucratio = {}
smoothpower = self.args.additivesmooth
for nuc in 'AGCT':
nucMucratio[nuc] = float(nucMutnum[nuc]) / nucCovnum[nuc]
avecover = sum([nucCovnum[a] for a in 'AGCT']) / len(genetotalseq)
for gene in statfilelist:
statfile = statfilelist[gene]
statnew = statfile.replace('.txt', '.sm%s.txt' % str(smoothpower))
fnew = open(statnew, 'w')
for line in open(statfile):
if line.startswith('Pos'):
fnew.write(line)
else:
l = line.strip().split('\t')
total_smooth = int(l[2]) + avecover * smoothpower
mut_smooth = int(l[1]) + nucMucratio[l[3]] * avecover * smoothpower  # l[3] is this position's reference base
if total_smooth == 0:
l[4] = 0
else:
l[4] = mut_smooth / total_smooth
l[4] = str(l[4])
fnew.write('%s\n' % '\t'.join(l))
fnew.close()
pdffile = statnew.replace('nucfile', 'profiles').replace('txt', 'pdf')
### 09152020: changed showsequence from false to ture
loggingRun(
'Rscript scripts/SHMPlot2.R %s %s plotrows=1 figureheight=2 showsequence=TRUE ymax=0.2 cdr1_start=%d cdr1_end=%d cdr2_start=%d cdr2_end=%d cdr3_start=%d cdr3_end=%d' % \
(statnew, pdffile, self.V_CDR[gene]['CDR1_start'], self.V_CDR[gene]['CDR1_end'], \
self.V_CDR[gene]['CDR2_start'], self.V_CDR[gene]['CDR2_end'], \
self.V_CDR[gene]['CDR3_start'], self.V_CDR[gene]['CDR3_end']))
######### this section is for tree construction & file parsing
def mergeSampleCount(shortlist):
samplelist = [s.split(':')[0] for s in shortlist[0].split('|')]
sample_count = {}
for s in samplelist:
sample_count[s] = 0
for shortcount in shortlist:
for oneshort in shortcount.split('|'):
(a, b) = oneshort.split(':')
sample_count[a] = sample_count[a] + int(b)
o = '|'.join(["%s:%d" % (a, sample_count[a]) for a in samplelist])
return o
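# Illustrative example with hypothetical sample names:
#   mergeSampleCount(['S1:2|S2:0', 'S1:1|S2:3']) -> 'S1:3|S2:3'
# Per-sample read counts are summed across the collapsed records, keeping the sample
# order of the first entry.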
def treeCollapseParse(fin, fout):
db = pd.read_csv(fin, sep="\t", low_memory=False)
if len(db) < 2: sys.exit('Find no passed read in tmp_db-pass.tab')
grouped = db.groupby('CLONE')
idlist = []
sclist = []
readlist = []
fullseqlist = []
for key, group in grouped:
seqlist = []
group = pd.DataFrame(group)
germseq = list(group['GERMLINE_IMGT_D_MASK'])[0]
for si in group['SEQUENCE_IMGT']:
s = []
for n in range(0, len(si)):
if si[n] in ['N', '.'] and germseq[n] != 'N':
s.append(germseq[n])
else:
s.append(si[n])
seqlist.append(''.join(s))
group["FULLSEQ"] = seqlist
grouped2 = group.groupby("FULLSEQ")
for subkey, subgroup in grouped2:
subgroup = pd.DataFrame(subgroup)
subgroup["trimlen"] = [len(s.replace('.', '').replace('N', '')) for s in subgroup['SEQUENCE_IMGT']]
subgroup = subgroup.sort_values("trimlen", ascending=False)
idlist.append(list(subgroup['SEQUENCE_ID'])[0])
fullseqlist.append(list(subgroup['FULLSEQ'])[0])
readlist.append('|'.join(list(subgroup['SEQUENCE_ID'])))
sclist.append(mergeSampleCount(list(subgroup['SHORTCOUNT'])))
treeCollapse = pd.DataFrame(db.loc[db['SEQUENCE_ID'].isin(idlist),])
treeCollapse["SHORTCOUNT"] = sclist
# treeCollapse["SEQUENCE_IMGT"] = fullseqlist
# treeCollapse["READGROUP"] = readlist
treeCollapse.to_csv(fout, sep="\t", index=False)
def files_process(args, worktype):
# IgBlast clean up
if worktype == 'igblast_clean':
for sample in args.metadict:
eachdir = '%s/%s' % (args.outdir, sample)
dirlist = ['reads_fasta', 'reads_fastq', 'igblast_raw',
'igblast_db'] # , 'bowtie_sam']
for d in dirlist:
if not os.path.exists('%s/%s' % (eachdir, d)):
os.system('mkdir %s/%s' % (eachdir, d))
os.system('mv {0}/*fa {0}/reads_fasta'.format(eachdir))
os.system('mv {0}/*.fq {0}/*list {0}/reads_fastq'.format(eachdir))
os.system('mv {0}/*IgBlast {0}/igblast_raw'.format(eachdir))
os.system('mv {0}/*IgBlast.db {0}/igblast_db'.format(eachdir))
# if args.genomealign == 'T':
# os.system('mv %s/*.sam %s/bowtie_sam' % (eachdir, eachdir))
# JH 05042021
# os.system('gzip %s/reads_fast*/*' % (eachdir))
os.system('gzip -f %s/reads_fasta/*.fa' % (eachdir))
os.system('gzip -f %s/reads_fastq/*.fq' % (eachdir))
os.system('gzip -f %s/reads_fastq/*.list' % (eachdir))
# os.system('gzip %s/igblast/*' % eachdir)
os.system('gzip -f %s/igblast_db/*.IgBlast.db' % eachdir)
os.system('gzip -f %s/igblast_raw/*.IgBlast' % eachdir)
if os.path.exists('%s/unmatched/' % args.outdir):
os.system('gzip -q %s/unmatched/*' % args.outdir)
def getNmers(sequences, n):
"""
Breaks input sequences down into n-mers
Arguments:
sequences : List of sequences to be broken into n-mers
n : Length of n-mers to return (this pipeline always calls it with n == 1)
Returns:
dict : Dictionary mapping sequence to a list of n-mers
"""
# Add Ns so first nucleotide is center of first n-mer
sequences_n = ['N' * ((n - 1) // 2) + seq + 'N' * ((n - 1) // 2) for seq in sequences]
nmers = {}
for seq, seqn in zip(sequences, sequences_n):
nmers[seq] = [seqn[i:i + n] for i in range(len(seqn) - n + 1)]
# nmers = {(seq, [seqn[i:i+n] for i in range(len(seqn)-n+1)]) for seq,seqn in izip(sequences,sequences_n)}
return nmers
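# Illustrative examples (this pipeline calls getNmers with n == 1):
#   getNmers(['ACGT'], 1) -> {'ACGT': ['A', 'C', 'G', 'T']}
#   getNmers(['ACGT'], 3) -> {'ACGT': ['NAC', 'ACG', 'CGT', 'GTN']}  # centred 3-mers, N-padded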
def scoreDNA(a, b, mask_score=None, gap_score=None):
"""
Returns the score for a pair of IUPAC Ambiguous Nucleotide characters
Arguments:
a : First characters
b : Second character
mask_score : Tuple of length two defining scores for all matches against an N
character for (a, b), with the score for character (a) taking precedence;
if None score symmetrically according to IUPAC character identity
gap_score : Tuple of length two defining score for all matches against a gap (-, .)
character for (a, b), with the score for character (a) taking precedence;
if None score symmetrically according to IUPAC character identity
Returns:
int : Score for the character pair
"""
# Define ambiguous character translations
IUPAC_trans = {'AGWSKMBDHV': 'R', 'CTSWKMBDHV': 'Y', 'CGKMBDHV': 'S', 'ATKMBDHV': 'W', 'GTBDHV': 'K',
'ACBDHV': 'M', 'CGTDHV': 'B', 'AGTHV': 'D', 'ACTV': 'H', 'ACG': 'V', 'ABCDGHKMRSTVWY': 'N',
'-.': '.'}
# Create list of tuples of synonymous character pairs
IUPAC_matches = [p for k, v in IUPAC_trans.items() for p in list(product(k, v))]
# Check gap and N-value conditions, prioritizing score for first character
if gap_score is not None and a in '-.':
return gap_score[0]
elif mask_score is not None and a in 'nN':
return mask_score[0]
elif gap_score is not None and b in '-.':
return gap_score[1]
elif mask_score is not None and b in 'nN':
return mask_score[1]
# Return symmetric and reflexive score for IUPAC match conditions
if a == b:
return 1
elif (a, b) in IUPAC_matches:
return 1
elif (b, a) in IUPAC_matches:
return 1
else:
return 0
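# Illustrative examples (not part of the original module):
#   scoreDNA('A', 'A') -> 1   # identical bases
#   scoreDNA('A', 'R') -> 1   # A matches the purine ambiguity code R
#   scoreDNA('A', 'C') -> 0   # mismatch
#   scoreDNA('A', 'N', mask_score=(0, 0)) -> 0   # N scored as a mask via the per-call override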
def getDNADistMatrix(mat=None, mask_dist=0, gap_dist=0):
"""
Generates a DNA distance matrix
Specifies a matrix of distance scores eg A==A=> similarity score=1 / distance =0
Arguments:
mat : Input distance matrix to extend to full alphabet;
if unspecified, creates Hamming distance matrix that incorporates
IUPAC equivalencies
mask_dist : Distance for all matches against an N character
gap_dist : Distance for all matches against a gap (-, .) character
Returns:
DataFrame : pandas.DataFrame of distances
"""
IUPAC_chars = list('-.ACGTRYSWKMBDHVN')
mask_char = 'N'
# Default matrix to inf
dist_mat = pd.DataFrame(float('inf'), index=IUPAC_chars, columns=IUPAC_chars,
dtype=float)
# Set gap distance
for c in '-.':
dist_mat.loc[c] = dist_mat.loc[:, c] = gap_dist
# Set mask distance
dist_mat.loc[mask_char] = dist_mat.loc[:, mask_char] = mask_dist
# Fill in provided distances from input matrix
if mat is not None:
for i, j in product(mat.index, mat.columns):
dist_mat.at[i, j] = mat.at[i, j]
# If no input matrix, create IUPAC-defined Hamming distance
else:
for i, j in product(dist_mat.index, dist_mat.columns):
dist_mat.at[i, j] = 1 - scoreDNA(i, j,
mask_score=(1 - mask_dist, 1 - mask_dist),
gap_score=(1 - gap_dist, 1 - gap_dist))
return dist_mat
def calcDistances(sequences, n, dist_mat, norm, sym):
"""
Calculate pairwise distances between input sequences
Arguments:
sequences : List of sequences for which to calculate pairwise distances
n : Length of n-mers to be used in calculating distance
dist_mat : pandas.DataFrame of mutation distances
norm : Normalization method
sym : Symmetry method
Returns:
ndarray : numpy matrix of pairwise distances between input sequences
"""
# Initialize output distance matrix
dists = np.zeros((len(sequences), len(sequences)))
# Generate dictionary of n-mers from input sequences
nmers = getNmers(sequences, n)
# Iterate over combinations of input sequences
for j, k in combinations(list(range(len(sequences))), 2):
# Only consider characters and n-mers with mutations
# nmer==seq == [list of bases in seqs_uniq]
# mutated==where seq1 != seq2
# in our case no need for dist_mat;add the number of diff aa and norm by aa len=>distance
mutated = [i for i, (c1, c2) in enumerate(zip(sequences[j], sequences[k])) if c1 != c2]
seq1 = [sequences[j][i] for i in mutated]
seq2 = [sequences[k][i] for i in mutated]
nmer1 = [nmers[sequences[j]][i] for i in mutated]
nmer2 = [nmers[sequences[k]][i] for i in mutated]
# Determine normalizing factor
if norm == 'len':
norm_by = len(sequences[0])
elif norm == 'mut':
norm_by = len(mutated)
else:
norm_by = 1
# Determine symmetry function
if sym == 'avg':
sym_fun = np.mean
elif sym == 'min':
sym_fun = min
else:
sym_fun = sum
# Calculate distances
try:
dists[j, k] = dists[k, j] = \
sum([sym_fun([dist_mat.at[c1, n2], dist_mat.at[c2, n1]]) \
for c1, c2, n1, n2 in zip(seq1, seq2, nmer1, nmer2)]) / \
(norm_by)
except (KeyError):
raise KeyError('Unrecognized character in sequence.')
return dists
def formClusters(dists, link, distance):
"""
Form clusters based on hierarchical clustering of input distance matrix with
linkage type and cutoff distance
Arguments:
dists : numpy matrix of distances
link : Linkage type for hierarchical clustering
distance : Distance at which to cut into clusters
Returns:
list : List of cluster assignments
"""
# Make distance matrix square
# squareform turns square matrix to vector, or vector to square matrix
dists = squareform(dists)
# Compute linkage
links = linkage(dists, link)
# Break into clusters based on cutoff
clusters = fcluster(links, distance, criterion='distance')
return clusters
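# Illustrative example with hypothetical distances:
#   dists = np.array([[0.0, 0.05, 0.9], [0.05, 0.0, 0.9], [0.9, 0.9, 0.0]])
#   formClusters(dists, 'single', 0.1)
# groups the first two sequences into one cluster and leaves the third on its own,
# because only the 0.05 pair falls below the 0.1 cutoff.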
def hier_clust(group, distance):
"""
distance = 0.1 in Yuxiang/Huan
Form clusters based on hierarchical clustering of input distance matrix with
linkage type and cutoff distance
"""
# This line was never used --> commented out JH 06032021
# dict_seqID = group.set_index('CDR3_MASK').to_dict()['SEQUENCE_ID']
seqs = group['CDR3_MASK'].tolist()
IDs = group['SEQUENCE_ID'].tolist()
seqs_uniq = list(set(seqs))
seq_map = {}
for key, row in group.iterrows():
seq = row['CDR3_MASK']
ID = row['SEQUENCE_ID']
seq_map.setdefault(seq, []).append(ID)
if len(seqs_uniq) == 1:
clone_tmp = [IDs[0] for i in range(len(IDs))]
else:
# dist_mat is a scoring matrix that specifies the distance bewteen pairs of chars
dist_mat = getDNADistMatrix(mask_dist=0, gap_dist=0)
dists = calcDistances(seqs_uniq, 1, dist_mat, 'len', 'avg')
# Perform hierarchical clustering
lineage = 'single' # the shorted distance
clusters = formClusters(dists, lineage, distance)
# Turn clusters into clone dictionary
clone_dict = {}
for i, c in enumerate(clusters):
cdr3seq = seqs_uniq[i]
for seq_id in seq_map[cdr3seq]:
clone_dict[seq_id] = c
# clone_dict.setdefault(c, []).extend(seq_map[seqs_uniq[i]])
clone_tmp = ['%s_%d' % (IDs[0], clone_dict[seq_id]) for seq_id in IDs]
return clone_tmp
def hier_clust_CDR3_PEPTIDE(group, distance):
"""
distance = 0.1 in Yuxiang/Huan
Form clusters based on hierarchical clustering of input distance matrix with
linkage type and cutoff distance, based on CDR3 aa sequence
"""
def calcDistances_AA(sequences, norm):
"""
Calculate pairwise distances between input peptide sequences
Arguments:
sequences : List of sequences for which to calculate pairwise distances
n : Length of n-mers to be used in calculating distance
dist_mat : pandas.DataFrame of mutation distances
norm : Normalization method
sym : Symmetry method
Returns:
ndarray : numpy matrix of pairwise distances between input sequences
"""
# Initialize output distance matrix
dists = np.zeros((len(sequences), len(sequences)))
# Iterate over combinations of input sequences
for j, k in combinations(list(range(len(sequences))), 2):
# Find locations with mutations
mutated = [i for i, (c1, c2) in enumerate(zip(sequences[j], sequences[k])) if c1 != c2]
seq1 = [sequences[j][i] for i in mutated]
seq2 = [sequences[k][i] for i in mutated]
# Determine normalizing factor
if norm == 'len':
norm_by = len(sequences[0])
else:
norm_by = 1
# Calculate distances
try:
dists[j, k] = dists[k, j] = \
sum([1 if c1 != c2 else 0 for c1, c2 in zip(seq1, seq2)]) / \
(norm_by)
except (KeyError):
raise KeyError('Unrecognized character in sequence.')
return dists
seqs = group['CDR3_PEPTIDE'].tolist()
IDs = group['SEQUENCE_ID'].tolist()
seqs_uniq = list(set(seqs))
seq_map = {}
for key, row in group.iterrows():
seq = row['CDR3_PEPTIDE']
ID = row['SEQUENCE_ID']
seq_map.setdefault(seq, []).append(ID)
if len(seqs_uniq) == 1:
clone_tmp = [IDs[0] for i in range(len(IDs))]
else:
dists = calcDistances_AA(seqs_uniq, 'len')
# Perform hierarchical clustering
lineage = 'single' # the shorted distance
clusters = formClusters(dists, lineage, distance)
# Turn clusters into clone dictionary
clone_dict = {}
for i, c in enumerate(clusters):
cdr3seq = seqs_uniq[i]
for seq_id in seq_map[cdr3seq]:
clone_dict[seq_id] = c
clone_tmp = ['%s_%d' % (IDs[0], clone_dict[seq_id]) for seq_id in IDs]
return clone_tmp
def getGermdict(args):
''' Read VDJ IgBlast database and obtain un-gapped germline sequences
2020/09 JH: Add in a check condition to ensure databases are read properly
'''
germ_dict = {}
try:
Vdb = getInputSeq(args.params_dict['Vdb'])
except AttributeError:
Vdb = getInputSeq(args.Vdb)
try:
Ddb = getInputSeq(args.params_dict['Ddb'])
except AttributeError:
Ddb = getInputSeq(args.Ddb)
try:
Jdb = getInputSeq(args.params_dict['Jdb'])
except AttributeError:
Jdb = getInputSeq(args.Jdb)
if not bool(Vdb):
print('[getGermdict] Vdb is empty... FAILED TO LOAD %s' % args.params_dict['Vdb'], file = sys.stderr, flush=True)
else:
germ_dict.update(Vdb)
if not bool(Ddb):
print('[getGermdict] Ddb is empty... FAILED TO LOAD %s' % args.params_dict['Ddb'], file = sys.stderr, flush=True)
else:
germ_dict.update(Ddb)
if not bool(Jdb):
print('[getGermdict] Jdb is empty... FAILED TO LOAD %s' % args.params_dict['Jdb'], file = sys.stderr, flush=True)
else:
germ_dict.update(Jdb)
return germ_dict
def collapse_db(records, collapse_type, N_Diff):
'''
Collapse reads Db file
Input:
Records: read dataframe
collaspe_type:
'identical' -- collpase input sequences are identical
'partial' -- collapse shorter sequences to longer ones
'V1' -- collapse sequences by multiple columns
N_Diff:
'T' consider N as difference
'F' consider N as not difference
Output:
'''
def _writeOutputSeq(filter_check, col_ID, col_seq, output_file, db):
"""
Generate output fasta file using db dataframe
"""
output_handle = open(output_file, 'w')
for key, row in db.iterrows():
output_handle.write('>%s\n%s\n' % (row[col_ID], row[col_seq]))
output_handle.close()
def _collapse_identical_NasDiff(records):
def __parse_group(group):
index_dupreads = ','.join(group['SEQUENCE_ID'])
# print("index_dupreads",index_dupreads, file = sys.stderr)
# print("nested function can print to console?", file=sys.stderr)
### 20200916 Lawrence: updated .ix to .loc
top_series = group.loc[group.index[0]]
top_series['DUPREAD'] = index_dupreads
return top_series
#return index_dupreads
#print("keys in records during collapse:", records.keys(), file=sys.stderr)
# YES
#print("records.shape during collapse:", records.shape, file=sys.stderr)
#print("SEQUENCE_INPUT.size in records:", records['SEQUENCE_INPUT'].size, file=sys.stderr)
grouped = records.groupby('SEQUENCE_INPUT')
#print("grouped.ngroups:", grouped.ngroups, file=sys.stderr)
#print("grouped.ndim", grouped.ndim, file=sys.stderr)
colnames = list(records) + ['DUPREAD']
records_collapse = pd.DataFrame(columns=colnames, index=range(0, len(grouped)))
### __parse_group does it work outside of grouped.apply
records_collapse = grouped.apply(__parse_group)
# records_collapse.size = 0
# print("records_collapse after apply:", records_collapse, file=sys.stderr)
# EMPTY DATAFREAM
#print("records_collapse.size:?", records_collapse.size, file=sys.stderr)
#print("records_collapse.keys():", records_collapse.keys(), file=sys.stderr)
return records_collapse
# grouped = records.groupby('SEQUENCE_INPUT')
# index_dupreads = {}
# indexList = []
# for key, group in grouped:
# idx = group.index[0]
# indexList.append(idx)
# index_dupreads[idx] = ','.join(group['SEQUENCE_ID'])
# records_collapse = records.loc[indexList]
# for idx in index_dupreads:
# records_collapse.ix[idx, 'DUPREAD'] = index_dupreads[idx]
# return records_collapse
# def _parse_read(row, records_collect):
# # Keep read with 'N'
# if 'N' in row['SEQUENCE_INPUT']:
# records_collect = records_collect.append(row)
# return records_collect
# else:
# records_cdr3 = records_collect[records_collect['CDR3_SEQ']==row['CDR3_SEQ']]
# for key,collect in records_cdr3.iterrows():
# if row['SEQUENCE_INPUT'] in collect['SEQUENCE_INPUT']:
# records_collect.ix[key, 'DUPREAD'] += ',%s' % row['DUPREAD']
# return records_collect
# records_collect = records_collect.append(row)
# return records_collect
#
# def _collapse_partial_NasDiff(records):
# colnames = list(records) #+ ['N']
# records_collect = pd.DataFrame(columns=colnames)
# for key,row in records.iterrows():
# records_collect = _parse_read(row, records_collect)
# return records_collect
def _collapse_partial_NasDiff(records):
''' Collapse shorter reads to longer ones
Process: check read one by one to see if its input seq is a substring of stored reads
Need a new method to speed up this
'''
records_collect = pd.DataFrame(columns=list(records))
### 20200916 Lawrence: updated .ix to .loc
records_collect.loc[0,] = records.loc[records.index[0]]
for key, row in records.iterrows():
if key != records.index[0]:
inputseq = row['SEQUENCE_INPUT']
j = pd.Series(records_collect['SEQUENCE_INPUT']).str.contains(inputseq)
if len(j[j == True]) >= 1:
i = j[j == True].index[0]
records_collect.loc[i, 'DUPREAD'] += ',%s' % row['DUPREAD']
elif len(j[j == True]) == 0:
records_collect.loc[len(records_collect) + 1,] = row
return records_collect
def _parse_SAM(read_readlen, sam_file, collapse_type):
inputR_refR = {}
for line in open(sam_file):
l = line.strip().split()
if l[5] == '%dM' % read_readlen[l[0]]:
if collapse_type == 'identical':
if l[5] == '%dM' % read_readlen[l[2]]:
inputR_refR[l[0]] = l[2]
else:
inputR_refR[l[0]] = l[2]
return inputR_refR
def _collapse_V1(records):
''' Collapse reads based on various result columns
'''
records_new = pd.DataFrame(columns=list(records))
grouplist = ['V_ALLELE', 'D_ALLELE', 'J_ALLELE', 'STOP', 'IN_FRAME', \
'V_END', 'V_D_JUNCTION', 'D_REGION', 'D_J_JUNCTION', \
'J_START', 'V_J_JUNCTION']
for key, group in records.groupby(grouplist):
dup = ','.join(group['DUPREAD'].values.tolist())
groupcontent = group.iloc[0]
groupcontent['DUPREAD'] = dup
records_new.loc[len(records_new) + 1,] = groupcontent
return records_new
def _collapse_NasNoDiff(records, collapse_type):
''' Required Bowtie2 software
'''
randname = str(random.randint(1, 1000000))
# Write with/wo 'N' two fa files as input/ref files in bowtie2 searching
records_woN = records[~records['SEQUENCE_INPUT'].str.contains("N")]
records_wN = records[records['SEQUENCE_INPUT'].str.contains("N")]
if len(records_woN) == 0 or len(records_wN) == 0:
return records
ref_file = '%s.ref' % randname
input_file = '%s.input' % randname
_writeOutputSeq('woN', 'SEQUENCE_ID', 'SEQUENCE_INPUT', ref_file, records_woN)
_writeOutputSeq('wN', 'SEQUENCE_ID', 'SEQUENCE_INPUT', input_file, records_wN)
sam_file = '%s.sam' % randname
os.system('bowtie2-build %s %s -q' % (ref_file, ref_file))
os.system('bowtie2 -x ./%s -f -U %s --local -S %s --no-head --np 0 --mp 1000 --rdg 1000,1000 --rfg 1000,1000' % \
(ref_file, input_file, sam_file))
read_readlen = records.set_index('SEQUENCE_ID').to_dict()['INPUT_LEN']
inputR_refR = _parse_SAM(read_readlen, sam_file, collapse_type)
records_collapsed = records[~records.SEQUENCE_ID.isin(inputR_refR.keys())].copy()
records_waitToCollapse = records[records.SEQUENCE_ID.isin(inputR_refR.keys())]
for inputR in inputR_refR:
refR = inputR_refR[inputR]
dup = records_waitToCollapse.loc[records_waitToCollapse['SEQUENCE_ID'] == inputR, 'DUPREAD'].values[0]
records_collapsed.loc[records_collapsed['SEQUENCE_ID'] == refR, 'DUPREAD'] += ',%s' % dup
os.system('rm -rf %s %s %s %s*bt2' % (ref_file, input_file, sam_file, ref_file))
return records_collapsed
# Main part in this func
# Collapse identical reads anyway
#print("records.size before collapse?", records.size, file=sys.stderr)
records = _collapse_identical_NasDiff(records)
#print("have all columns after collapse?", records, file=sys.stderr)
#print("shape of records after collapse:", records.shape, file=sys.stderr)
#print("columns of records after collapse:", records.columns, file=sys.stderr)
# [0,0]
records['INPUT_LEN'] = records["SEQUENCE_INPUT"].map(len)
records.sort_values('INPUT_LEN', ascending=False, inplace=True)
# Collapse identical reads with N as no difference
if collapse_type == 'identical' and N_Diff == 'F':
records = _collapse_NasNoDiff(records, 'identical')
elif collapse_type == 'partial':
# Collapse shorter reads to longer ones with N as difference
records = _collapse_partial_NasDiff(records)
if N_Diff == 'F':
# Collapse shorter reads to longer ones with N as no difference
records = _collapse_NasNoDiff(records, 'partial')
records = records.drop('INPUT_LEN', axis=1)
elif collapse_type == 'V1':
# V1 means same way as Version One pipeline
records = _collapse_NasNoDiff(records, 'identical')
records = _collapse_V1(records)
return records
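# Illustrative usage on a hypothetical IgBlast hit table with the columns referenced above
# (SEQUENCE_ID, SEQUENCE_INPUT, ...):
#   records = collapse_db(records, collapse_type='identical', N_Diff='T')
# 'partial' additionally folds shorter reads into longer ones, and N_Diff='F' requires
# bowtie2 on the PATH so that N-containing reads can be matched against N-free ones.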
def profile_DNAmut(group, nuc_stat, nuc_PDF, nuc_profile, args):
''' Prep DNA mutation profile, text and PDF file
2021/06 JH: Add in debugging lines to ensure databases are read properly
'''
def _parse_V_ALLELE_NUC(row):
return pd.Series([row["SEQUENCE_ID"]] + [s for s in row['V_ALLELE_NUC']])
allele = group["V_ALLELE"].unique()[0]
# print(f"allele = {allele}")
germ_dict = getGermdict(args)
if allele == "VH5-4(VH7183.a4.6":
allele_seq = germ_dict["VH5-4(VH7183.a4.6)"]
else:
try:
allele_seq = germ_dict[allele]
except KeyError:
print(f'[profile_DNAmut]: cannot find allele {allele} in Germdict when running nuc_profile for {nuc_profile}')
try:
print(f'[profile_DNAmut]: current Vdb is {args.params_dict["Vdb"]}', file = sys.stderr, flush=True)
except AttributeError:
print(f'[profile_DNAmut]: current Vdb is {args.Vdb}', file=sys.stderr, flush=True)
raise
allele_len = len(allele_seq)
colnames = ['ID'] + [l for l in allele_seq]
allele_mut = pd.DataFrame(columns=colnames, index=range(0, len(group)))
allele_mut = group.apply(_parse_V_ALLELE_NUC, axis=1)
statnames = ['Pos', 'Mut', 'Total', 'Base', 'Y', 'A', 'T', 'C', 'G']
allele_stat = pd.DataFrame(columns=statnames, index=range(1, allele_len + 1))
allele_stat['Pos'] = range(1, allele_len + 1)
allele_stat['Base'] = [l for l in allele_seq]
allele_stat[['Mut', 'Total', 'Y', 'A', 'T', 'C', 'G']] = 0
for i in range(1, allele_len + 1):
if len(allele_mut) == 1:
counts = {}
counts[allele_mut[[i]].squeeze()] = 1
else:
counts = allele_mut[[i]].squeeze().value_counts()
countA = counts.get('A', 0)
countT = counts.get('T', 0)
countC = counts.get('C', 0)
countG = counts.get('G', 0)
countMut = countA + countT + countC + countG
countTotal = countMut + counts.get('.', 0)
allele_stat.loc[i, 'Mut'] = countMut
allele_stat.loc[i, 'Total'] = countTotal
allele_stat.loc[i, 'Y'] = float(countMut / countTotal) if countTotal > 0 else 0
allele_stat.loc[i, 'A'] = countA
allele_stat.loc[i, 'T'] = countT
allele_stat.loc[i, 'C'] = countC
allele_stat.loc[i, 'G'] = countG
allele_mut.to_csv(nuc_profile, sep="\t", index=False)
allele_stat.to_csv(nuc_stat, sep="\t", index=False)
# run R scripts
if allele in args.__dict__['V_CDR']:
cdr = args.__dict__['V_CDR'][allele]
cdrstring = 'cdr1_start=%s cdr1_end=%s cdr2_start=%s cdr2_end=%s ' \
'cdr3_start=%s cdr3_end=%s' % (cdr[0], cdr[1], cdr[2],
cdr[3], cdr[4], cdr[5])
else:
cdrstring = ''
# Filter group with read number
if len(group) >= args.min_profileread:
sample = group["SAMPLE"].unique()[0]
anno = '_'.join([sample, allele])
### 20200915 Lawrence: changed showsequence from false to true
### 20200916 Lawrence: changed frpm 'Rscript %s/HTGTSrep/R/SHMPlot2.R %s %s plotrows=1 figureheight=2 showsequence=TRUE ymax=%f %s annotation=%s '
### to 'Rscript %s/HTGTSrep/R/SHMPlot2.R \"%s\" \"%s\" plotrows=1 figureheight=2 showsequence=TRUE ymax=%f %s annotation=\"%s\" '
### this allows special characters to be in V_allel names
os.system('Rscript %s/HTGTSrep/R/SHMPlot2.R \"%s\" \"%s\" plotrows=1 figureheight=2 '
'showsequence=TRUE ymax=%f %s annotation=\"%s\" ' % (args.scriptdir, nuc_stat,
nuc_PDF, args.ymax_DNA, cdrstring, anno))
def getInferSeq(treefile, group):
''' Read tree file and get inferred 1 sequence,
Using germline sequence of V and J to ensure no surprise
'''
n = 0
print("in getInferSeq, treefile:", treefile, file = sys.stderr)
with open(treefile) as f:
for line in f:
n += 1
l = line.strip().split()
if n == 2: inferseq = l[-1]
# get germline parts
cdr3 = group.iloc[0]['CDR3_MASK']
sequence_imgt = group.iloc[0]['SEQUENCE_IMGT']
Vpos = sequence_imgt.find(cdr3)
germline_imgt_seq = group.iloc[0]['GERMLINE_IMGT_D_MASK']
seq_V = germline_imgt_seq[:Vpos]
seq_Junc = inferseq[Vpos:Vpos + len(cdr3)]
if len(germline_imgt_seq) >= len(inferseq):
seq_J = germline_imgt_seq[Vpos + len(cdr3): len(inferseq)]
else:
seq_J = inferseq[Vpos + len(cdr3): len(inferseq)]
# Use V and J parts from germline as reference to avoid mismatch at these regions in mut profiling
newinfer = (seq_V + seq_Junc + seq_J).replace('.', 'N')
# print(group['CLONE'].tolist()[0], inferseq, newinfer)
return newinfer
def profile_DNAmut_clonal(inferseq, group, nuc_stat, nuc_PDF, nuc_profile, args):
''' Prep DNA mutation profile, text and PDF file
'''
allele = group["V_CALL"].unique()[0]
# Get position list of inferred seq which are not 'N'
poslist = [i for i in range(0, len(inferseq)) if inferseq[i] != 'N']
allele_seq = ''.join([inferseq[i] for i in poslist])
allele_len = len(poslist)
colnames = ['ID'] + [inferseq[i] for i in poslist]
allele_mut = | pd.DataFrame(columns=colnames) | pandas.DataFrame |
# encoding: utf-8
import logging
import re
from io import BytesIO
from zipfile import ZipFile
from collections import OrderedDict
import pandas as pd
from urllib.request import urlopen
import os.path
from .helpers import pitch_count, progress, game_state
from .version import __version__
from .event import event
class parse_row(object):
""" Parse one single row
- A row can return only one type of data (id, version, start, play, sub, com, data)
"""
def __init__(self):
self.log = logging.getLogger(__name__) #initialize logging
self.row_str =''
self.row_values = []
self.row_results = {}
self.row_data = []
self.row_id = []
def _clean_row(self):
self.row_str = self.row_str.decode("utf-8")
self.row_values = self.row_str.rstrip('\n').split(',')
self.row_values = [x.replace('\r','').replace('"','') for x in self.row_values]
def read_row(self):
self._clean_row()
self.row_id = self.row_values[0] #string
self.row_data = self.row_values[1:] #list
class parse_game(parse_row):
""""Object for each baseball game, subclass.
- Data is expected to be sequentially passed (Retrosheet format)
- When this class is initialized, it restarts all stats for the game
"""
def __init__(self, id=''):
self.log = logging.getLogger(__name__) #initialize logging
parse_row.__init__(self)
self.location = 0
self.has_started = False
self.has_finished = False
self.current_inning = '1' #starting of game
self.current_team = '0' #starting of game
self.score = {'1':0,'0':0}
self.current_pitcher = {'1':'','0':''} #1 for home, 0 for away
self.pitch_count = {'1':0,'0':0} #1 for home, 0 for away
self.game = {
'meta': {'filename': '', '__version__': __version__, 'events':''},
'id': id,
'version': '',
'starting_lineup':{'1': {}, '0': {}}, #static for each game
'playing_lineup':{'1': {}, '0': {}}, #dynamic, based on subs
'info': [], #'start': [],
'play_data': [],
'play_player': [], #pitching_id | batter_id | player_id | event | value |
#'sub': [],
'com': [],
'data': [],
'stats': {'pitching':[], 'batting':[], 'fielding': [], 'running':[]}
}
self.event = event()
self.event.base = {'B': None,'1': None,'2': None,'3': None,'H': []}
self.event.advances = {'B': 1,'1': 0,'2': 0,'3': 0,'H': 0, 'out': 3, 'run': 0}
def parse_start(self, start_sub = 'start'):
""" This will happen before the game starts"""
fielding_position = self.row_values[5]
player_id = self.row_values[1]
home_away = self.row_values[-3][-1] #some entires are '01'
try:
self.current_pitcher[home_away] = player_id if fielding_position == '1' else self.current_pitcher[home_away]
self.pitch_count[home_away] = 0 if fielding_position == '1' else self.pitch_count[home_away]
except:
self.log.debug('Something wrong with {0} home_away pitcher in {1}, {2}'.format(self.game['id'], start_sub, self.row_values))
self.game['playing_lineup'][home_away][fielding_position] = player_id
if start_sub == 'start':
self.game['starting_lineup'][home_away][fielding_position] = player_id
def parse_play(self):
"""
-----------------------------------------------------------------------------------------
field format: "play | inning | home_away | player_id | count on batter | pitches | play "|
index counts: 0 1 2 3 4 5 6 |
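example row (hypothetical): play | 4 | 0 | smithj001 | 12 | BCFX | S8/G.1-2 |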
------------------------------------------------------------------------------------------
"""
self.event.str = self.row_values[6] #pass string to parse values
if self.current_team != self.row_values[2]:
self.score[self.current_team] += self.event.advances['run']
self.event.base = {'B': None,'1': None,'2': None,'3': None, 'H': []} #players on base
if self.event.advances['out'] != 3: #catching errors
self.log.warning('INNING NO 3RD OUT:\tGame: {0}\tteam: {1}\tinning{2}\tout: {3}'.format(self.game['id'], self.current_team, self.current_inning, self.event.advances['out']))
self.event.advances={'B': 1,'1': 0,'2': 0,'3': 0,'H': 0, 'out': 0,'run': 0} if self.event.advances['out'] >= 3 else self.event.advances
self.event.base['B'] = self.row_values[3] #current at bat
base_before_play = self.event.base.copy()
pre_event = self.event.advances.copy()
self.event.decipher()
post_event = self.event.advances.copy()
this_play_runs = post_event['run'] - pre_event['run']
this_play_outs = post_event['out'] - pre_event['out']
pre_state, post_state = game_state(pre_event, post_event)
if post_state == 25:
states = [25,26,27,28]
post_state = states[this_play_runs]
pitcher_home_away = '1' if self.row_values[2] == '0' else '0' #remember picher is defense
pitch_string = self.row_values[3]
self.pitch_count[pitcher_home_away] = pitch_count(self.row_values[5], self.pitch_count[pitcher_home_away])
self.current_inning = self.row_values[1]
self.current_team = self.row_values[2] if self.row_values[2] in ['0','1'] else self.current_team
if self.event.str != 'NP': #only append if plays happened (skip subs(NP) on play file)
self.game['play_data'].append({
'game_id': self.game['id'],
'order': self.location,
'pitcher': self.current_pitcher[pitcher_home_away],
'pitch_count': self.pitch_count[pitcher_home_away],
'inning': self.current_inning,
'team': self.current_team,
'player_id': self.row_values[3],
'count_on_batter': self.row_values[4],
'pitch_str': self.row_values[5],
'play_str': self.row_values[6],
'B': self.event.advances['B'],
'1': self.event.advances['1'],
'2': self.event.advances['2'],
'3': self.event.advances['3'],
'H': self.event.advances['H'],
'run': self.event.advances['run'],
'out': self.event.advances['out'],
'on-B': self.event.base['B'],
'on-1': self.event.base['1'],
'on-2': self.event.base['2'],
'on-3': self.event.base['3'],
'on-H': self.event.base['H'],
'hometeam_score': self.score['1'],
'awayteam_score': self.score['0'],
'trajectory': self.event.modifiers['trajectory'],
'passes': self.event.modifiers['passes'],
'location': self.event.modifiers['location'],
'pre_state': pre_state,
'post_state': post_state,
'play_runs': this_play_runs,
'play_outs': this_play_outs
})
#import stats for the play
#batting
for bat_stat in self.event.stats['batting']:
bat_stat[1] = self.row_values[3]
self.game['stats']['batting'].append([self.game['id'], self.location] + bat_stat)
#pitching
for pit_stat in self.event.stats['pitching']:
pit_stat[1] = self.current_pitcher[pitcher_home_away]
self.game['stats']['pitching'].append([self.game['id'], self.location] + pit_stat)
#running -- > need to track player together with base
for run_stat in self.event.stats['running']:
run_stat.append(base_before_play[run_stat[1]])#bfrom
self.game['stats']['running'].append([self.game['id'], self.location] + run_stat)
#fielding --> use current positions
fld_home_away = '1' if self.current_team == '0' else '0' #defense is the opposite team
for fld_stat in self.event.stats['fielding']:
try:
fld_stat[1] = self.game['playing_lineup'][fld_home_away][fld_stat[1]]
except:
self.log.debug(fld_stat)
self.game['stats']['fielding'].append([self.game['id'], self.location] + fld_stat)
self.location += 1
def parse_com(self):
self.game['com'].append([self.game['id'], self.location] + self.row_data)
def parse_event(self, row_str):
self.row_str = row_str
self.read_row()
if self.row_id == 'id' or self.row_id == 'version':
self.game[self.row_id] = self.row_data[0]
self.has_started = True
elif self.row_id == 'info':
self.game[self.row_id].append([self.game['id'],self.row_values[1], self.row_values[2]])
elif self.row_id == 'data':
self.has_finished=True
self.game['meta']['events'] = self.location + 1 #0 index
if not self.game['data']:
self.game['info'].append([self.game['id'], 'hometeam_score', self.score['1']])
self.game['info'].append([self.game['id'], 'awayteam_score', self.score['0']])
self.game[self.row_id].append([self.game['id'], self.game['meta']['events']]+self.row_data)
else:
self.parse_start(self.row_id) if self.row_id in ['start','sub'] else None
self.parse_play() if self.row_id == 'play' else None
self.parse_com() if self.row_id == 'com' else None
class parse_games(object):
"""
"""
def __init__(self):
self.log = logging.getLogger(__name__) #initialize logging
self.file = None
self.game_list = []
self.zipfile = None
def get_games(self):
game = parse_game() #files in 1991 start with something other than id
for loop, row in enumerate(self.zipfile.open(self.file).readlines()):
if row.decode("utf-8").rstrip('\n').split(',')[0] == 'id':
game_id = row.decode("utf-8").rstrip('\n').split(',')[1].rstrip('\r')
#start new game
self.game_list.append(game.game) if loop > 0 else None
game = parse_game(game_id)
else:
game.parse_event(row)
def debug_game(self, game_id):
diamond = '''Play: {2}, Inning: {0}, Team: {1} \n|---------[ {5} ]-----------|\n|-------------------------|\n|----[ {6} ]------[ {4} ]-----|\n|-------------------------|\n|------[ {7} ]--[ {3} ]-------|\n|-------------------------|\nRuns: {8}\tOuts: {9}\n'''
for game in self.game_list:
if game['id'] == game_id:
for play in game['play_data']:
print (diamond.format(
play['inning'], play['team'], play['play_str'],
play['B'],
play['1'],
play['2'],
play['3'],
play['H'],
play['run'],
play['out']
))
class parse_files(parse_games):
endpoint = 'https://www.retrosheet.org/events/'
extension = '.zip'
def __init__(self):
parse_games.__init__(self)
self.log = logging.getLogger(__name__)
self.teams_list = []
self.rosters_list = []
def read_files(self):
try: #the files locally:
zipfile = ZipFile(self.filename)
#self.log.debug("Found locally")
except: #take from the web
resp = urlopen(self.endpoint + self.filename)
zipfile = ZipFile(BytesIO(resp.read()))
#self.log.debug("Donwloading from the web")
self.zipfile = zipfile
teams = []
rosters = []
for file in self.zipfile.namelist():
if file[-3:] in ['EVA','EVN']:
self.file = file
self.get_games()
elif file[:4] == 'TEAM':
year = file[4:8]
for row in zipfile.open(file).readlines():
row = row.decode("utf-8")
team_piece = []
for i in range(4): team_piece.append(row.rstrip('\n').split(',')[i].replace('\r',''))
self.teams_list.append([year]+team_piece)
elif file[-3:] == 'ROS': #roster file
year = file[3:7]
for row in zipfile.open(file, 'r').readlines():
row = row.decode("utf-8")
roster_piece = []
for i in range(7): roster_piece.append(row.rstrip('\n').split(',')[i].replace('\r',''))
self.rosters_list.append([year]+roster_piece)
def get_data(self, yearFrom = None, yearTo = None):
"""
"""
yearTo = yearTo if yearTo else '2017'
yearFrom = yearFrom if yearFrom else yearTo
for loop, year in enumerate(range(yearFrom, yearTo+1, 1)):
progress(loop, (yearTo - yearFrom+1), status='Year: {0}'.format(year))
self.log.debug('Getting data for {0}...'.format(year))
self.filename = '{0}eve{1}'.format(year, self.extension)
self.read_files()
progress(1,1,'Completed {0}-{1}'.format(yearFrom, yearTo))
return True
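# Illustrative usage (downloads the Retrosheet zip unless a local copy exists):
#   parser = parse_files()
#   parser.get_data(2016, 2016)   # parse every event file for the 2016 season
#   parser.to_df()                # build the plays/info/lineup/stats DataFrames
#   parser.debug_game(parser.game_list[0]['id'])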
def to_df(self):
"""
"""
plays = []
infos = []
datas = []
lineups = []
battings = []
fieldings = []
pitchings = []
runnings = []
for loop, game in enumerate(self.game_list):
plays += game['play_data']
infos += game['info']
datas += game['data']
battings += game['stats']['batting']
fieldings += game['stats']['fielding']
pitchings += game['stats']['pitching']
runnings += game['stats']['running']
game['starting_lineup']['1']['game_id'] = game['id']
game['starting_lineup']['1']['home_away'] = 'home'
game['starting_lineup']['0']['game_id'] = game['id']
game['starting_lineup']['0']['home_away'] = 'away'
lineups.append(game['starting_lineup']['1'])
lineups.append(game['starting_lineup']['0'])
self.plays = pd.DataFrame(plays)
self.info = pd.DataFrame(infos, columns = ['game_id', 'var', 'value'])
#self.info = self.info[~self.info.duplicated(subset=['game_id','var'], keep='last')].pivot('game_id','var','value').reset_index()
self.lineup = pd.DataFrame(lineups)
self.fielding = pd.DataFrame(fieldings, columns = ['game_id','order','stat','player_id'])
data_df = pd.DataFrame(datas, columns = ['game_id','order','stat','player_id','value'])
self.pitching = | pd.DataFrame(pitchings, columns = ['game_id','order','stat','player_id']) | pandas.DataFrame |
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.tseries.period as period
from pandas import period_range, PeriodIndex, Index, date_range
def _permute(obj):
return obj.take(np.random.permutation(len(obj)))
class TestPeriodIndex(tm.TestCase):
def setUp(self):
pass
def test_joins(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
for kind in ['inner', 'outer', 'left', 'right']:
joined = index.join(index[:-5], how=kind)
tm.assertIsInstance(joined, PeriodIndex)
self.assertEqual(joined.freq, index.freq)
def test_join_self(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
for kind in ['inner', 'outer', 'left', 'right']:
res = index.join(index, how=kind)
self.assertIs(index, res)
def test_join_does_not_recur(self):
df = tm.makeCustomDataframe(
3, 2, data_gen_f=lambda *args: np.random.randint(2),
c_idx_type='p', r_idx_type='dt')
s = df.iloc[:2, 0]
res = s.index.join(df.columns, how='outer')
expected = Index([s.index[0], s.index[1],
df.columns[0], df.columns[1]], object)
tm.assert_index_equal(res, expected)
def test_union(self):
# union
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=10)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=8)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = pd.PeriodIndex(['2000-01-01 09:00', '2000-01-01 10:00',
'2000-01-01 11:00', '2000-01-01 12:00',
'2000-01-01 13:00', '2000-01-02 09:00',
'2000-01-02 10:00', '2000-01-02 11:00',
'2000-01-02 12:00', '2000-01-02 13:00'],
freq='H')
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
other5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:05',
'2000-01-01 09:08'],
freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05', '2000-01-01 09:08'],
freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=10)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('1998-01-01', freq='A', periods=10)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3), (rng4, other4,
expected4),
(rng5, other5, expected5), (rng6, other6,
expected6),
(rng7, other7, expected7)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_union_misc(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
result = index[:-5].union(index[10:])
tm.assert_index_equal(result, index)
# not in order
result = _permute(index[:-5]).union(_permute(index[10:]))
tm.assert_index_equal(result, index)
# raise if different frequencies
index = period_range('1/1/2000', '1/20/2000', freq='D')
index2 = period_range('1/1/2000', '1/20/2000', freq='W-WED')
with tm.assertRaises(period.IncompatibleFrequency):
index.union(index2)
msg = 'can only call with other PeriodIndex-ed objects'
with tm.assertRaisesRegexp(ValueError, msg):
index.join(index.to_timestamp())
index3 = | period_range('1/1/2000', '1/20/2000', freq='2D') | pandas.period_range |
# -*- coding: utf-8 -*-
import pandas as pd
d = {'one' : pd.Series([1., 2., 3.], index=['a', 'b', 'c']),
'two' : pd.Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd'])}
df = | pd.DataFrame(d) | pandas.DataFrame |
import requests
import pandas as pd
from typing import Dict, List, Union, Tuple
PATH_LEGISLATIVAS_2019 = "https://raw.githubusercontent.com/Politica-Para-Todos/ppt-archive/master/legislativas/legislativas-2019/data.json"
# mapping between party and manifesto inside PPT repo
PARTY_TO_MANIFESTO_LEGISLATIVAS_2019 = {
'A': 'alianca_020919.md',
'BE': 'be_120919.md',
'CDS-PP': 'cdspp.md',
'CH': 'CHEGA_201909.md',
'IL': 'Iniciativa Liberal.md',
'L': 'livre.md',
'MAS': 'mas.md',
'NC': 'NOS_CIDADAOS_Set2019.md',
'PCTP/MRPP': 'PCTP.md',
'PCP-PEV': ['PCP.md', 'pev_31082019.md'],
'MPT': 'mpt27092019.md',
'PDR': 'PDR_22092019.md',
'PNR': 'pnr.md',
'PPD/PSD': 'psd.md',
'PS': 'PS_01092019.md',
'PURP': 'PURP.md',
'PAN': 'pan_31082019.md',
'RIR': 'RIR.md'
}
def get_data(path: str) -> Dict:
""" Load the most recent data provided by PPT """
try:
payload = requests.get(path)
assert payload.status_code == 200
return payload.json()
except Exception as e:
print(path)
raise e
def extract_legislativas_2019() -> Tuple[pd.DataFrame, pd.DataFrame]:
"""
Extract Portuguese Legislativas 2019 information from PPT community
Will return info regarding parties and regarding candidates
"""
# load data
raw_legislativas_2019 = get_data(PATH_LEGISLATIVAS_2019)
# we do not use this information
raw_legislativas_2019.pop("manifestos")
def _get_manifesto(party) -> Union[str, List]:
manifesto = PARTY_TO_MANIFESTO_LEGISLATIVAS_2019.get(party, "")
# deal with alliances
if isinstance(manifesto, list):
return [
f"https://raw.githubusercontent.com/Politica-Para-Todos/manifestos/master/legislativas/20191006_legislativas/{m}"
for m in manifesto
]
return f"https://raw.githubusercontent.com/Politica-Para-Todos/manifestos/master/legislativas/20191006_legislativas/{manifesto}" if manifesto else ""
parties = []
candidates = []
for party, values in raw_legislativas_2019['parties'].items():
tmp_party = {
"acronym": party.strip(),
"name": values.get("name", ""),
"description": values.get("description", ""),
"description_source": values.get("description_source", ""),
"email": values.get("email", ""),
"facebook": values.get("facebook", ""),
"instagram": values.get("instagram", ""),
"logo": f"https://raw.githubusercontent.com/Politica-Para-Todos/ppt-archive/master/legislativas/legislativas-2019/partidos_logos/{values['logo']}" if "logo" in values else "",
"twitter": values.get("twitter", ""),
"website": values.get("website"),
"manifesto": _get_manifesto(party)
}
# store party info
parties.append(tmp_party)
for district, main_secundary_candidates in values.get("candidates", {}).items():
for c in main_secundary_candidates.get("main", []) + main_secundary_candidates.get("secundary", []):
tmp_candidates = {
"party": party.strip(),
"district": district.strip(),
"name": c.get("name", ""),
"position": c.get("position", ""),
"type": c.get("type", "")
}
if c.get("is_lead_candidate", False):
tmp_candidates.update({
"biography": c.get("biography", ""),
"biography_source": c.get("biography_source", ""),
"link_parlamento": c.get("link_parlamento", ""),
"photo": f"https://raw.githubusercontent.com/Politica-Para-Todos/ppt-archive/master/legislativas/legislativas-2019/cabeca_de_lista_fotos/{c['photo']}" if "photo" in c else "",
"photo_source": c.get("photo_source", ""),
})
# store all candidates
candidates.append(tmp_candidates)
return pd.DataFrame(parties).set_index("acronym"), | pd.DataFrame(candidates) | pandas.DataFrame |
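# Illustrative usage (assumes network access to the raw.githubusercontent.com URLs above):
#   parties_df, candidates_df = extract_legislativas_2019()
#   parties_df.loc['PS', 'website']                 # party-level metadata indexed by acronym
#   candidates_df.query("district == 'Lisboa' and type == 'main'")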
"""Tests for gate.py"""
import numpy as np
import pandas as pd
import xarray as xr
from timeflux.helpers.testing import DummyData, DummyXArray
from timeflux.nodes.gate import Gate
xarray_data = DummyXArray()
pandas_data = DummyData()
node = Gate(event_opens='foo_begins', event_closes='foo_ends', truncate=True)
def test_gate_silent():
pandas_data.reset()
# Send data but no event
node.i.data = pandas_data.next(20)
node.update()
assert node._status == 'closed'
assert node.o.data == None
def test_send_opening_closing_event_in_separate_chunks():
pandas_data.reset()
# Send an opening event
node.clear()
node.i.data = pandas_data.next(5)
time_open = pd.Timestamp('2018-01-01 00:00:00.104507143') # Sync event to second sample
event = pd.DataFrame([['foo_begins']], [time_open], columns=['label']) # Generate a trigger event
node.i_events.data = event
node.update()
expected_data = pd.DataFrame(
[
[0.658783, 0.692277, 0.849196, 0.249668, 0.489425],
[0.221209, 0.987668, 0.944059, 0.039427, 0.705575],
[0.925248, 0.180575, 0.567945, 0.915488, 0.033946],
[0.69742, 0.297349, 0.924396, 0.971058, 0.944266],
],
[
pd.Timestamp('2018-01-01 00:00:00.104507143'),
pd.Timestamp('2018-01-01 00:00:00.202319939'),
| pd.Timestamp('2018-01-01 00:00:00.300986584') | pandas.Timestamp |
# coding=utf-8
import os
import os.path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from loganalysis.const import *
class Log(object):
''' Interface class for scheduler-module log analysis.
It mainly provides three kinds of functionality:
a) information presentation
b) problem detection
c) problem localization
All files are required to follow the EI naming convention: subsystem_time.csv
'''
def __init__(self, directory, time_interval=None, product_type='Micro'):
'''Initialize a Log instance and group all logs by type.
Args:
directory: directory that contains the logs
time_interval: time range [start, end] in yyyymmddhhmmss format
product_type: product type, one of ['Macro', 'Micro']; defaults to Micro
'''
self._directory = directory
self._product_type = product_type
self._logfiles={}
self._time_interval = time_interval
@property
def product_type(self):
return self._product_type
@property
def directory(self):
return self._directory
def _filenames_of_type(self, filetype):
'''Get all file names of the specified log type.
Args:
filetype: log file type
time_interval: time range [start, end] in yyyymmddhhmmss format
Returns:
list of file names
'''
names_of_filetype = []
for name in np.sort(os.listdir(self._directory)):
if not name.endswith(r'.csv'):
continue
if -1 == name.find(filetype):
continue
if self._time_interval:
time = np.uint64(name.rsplit(r'.')[0].rsplit(r'_')[-1])
if time < self._time_interval[0] or time > self._time_interval[1]:
continue
names_of_filetype.append(name)
return names_of_filetype
def describle(self):
'''Overall description of the related log files in the current directory, with each log type merged into one entry.
Reports file size, number of files, line count, PC-time range, airtime range, etc., one row per log type.
'''
df = pd.DataFrame()
for type, logfile in self._logfiles.items():
df.at[type, 'size'] = logfile.size
df.at[type, 'num_of_files'] = len(logfile.files)
df.at[type, 'num_of_lines'] = logfile.lines
df.at[type, 'pctime_start'] = logfile.pctimes[0]
df.at[type, 'pctime_end'] = logfile.pctimes[1]
df.at[type, 'airtime_start'] = logfile.airtimes[0]
df.at[type, 'airtime_end'] = logfile.airtimes[1]
df.index.name = 'filename'
return df
class LogFile(object):
'''Log文件接口类'''
def __init__(self, type, directory, files, id_filter=None):
'''初始化Log实例,把所有Log按照类型分类
Args:
file: 文件名
type: log类型
'''
self._files = files
self._type = type
self._directory = directory
self._id_filter = id_filter
self._time_filter = None
self._size = sum([os.path.getsize(os.path.join(directory, file)) for file in files])
self._pctimes = [-1, -1]
self._airtimes = [-1, -1]
self._lines = 0
cols = ['LocalTime', 'AirTime']
for data in self.gen_of_cols(cols):
if len(data.index) == 0:
self._lines = 0
return
self._lines = self._lines + data.index.max()
if self._pctimes[0] == -1:
self._pctimes[0] = data.iat[0, 0]
self._pctimes[1] = data.iat[-1, 0]
if self._airtimes[0] == -1:
self._airtimes[0] = data.iat[0, 1]
self._airtimes[1] = data.iat[-1, 1]
@property
def type(self):
return self._type
@property
def files(self):
return self._files
@property
def size(self):
return self._size
@property
def id_filter(self):
return self._id_filter
@property
def lines(self):
'''Total number of data lines across the files'''
return self._lines
@property
def pctimes(self):
'''PC-time range (start, end)'''
return tuple(self._pctimes)
@property
def airtimes(self):
'''AirTime range (start, end)'''
return tuple(self._airtimes)
@staticmethod
def addtime(time1, time2):
time1 = np.uint32(time1)
time2 = np.uint32(time2)
frm = time1 // 16 + time2 // 16
subfrm = time1 % 16 + time2 % 16
if subfrm >= 10:
subfrm -= 10
frm += 1
return frm % 0x10000000 * 16 + subfrm
@staticmethod
def difftime(time1, time2):
time1 = np.uint32(time1)
time2 = np.uint32(time2)
subfrm1 = time1 % 16
subfrm2 = time2 % 16
frm = time1 // 16 + 0x10000000 - time2 // 16
if subfrm1 >= subfrm2:
subfrm = subfrm1 - subfrm2
else:
subfrm = subfrm1 + 10 - subfrm2
frm = frm - 1
frm = frm % 0x10000000
return frm * 16 + subfrm
@staticmethod
def dectime(hextime):
hextime = np.uint32(hextime)
return hextime // 16 * 10 + hextime % 16
@staticmethod
def hextime(dectime):
dectime = np.uint32(dectime)
return dectime // 10 * 16 + dectime % 10
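    # Airtime values pack the frame number into the high hex digits and the decimal
    # subframe (0-9) into the lowest hex digit, i.e. value = frame * 16 + subframe,
    # with the frame wrapping at 0x10000000. For example, hextime(23) == 0x23 == 35,
    # dectime(0x23) == 23, and addtime(0x23, 0x12) == 0x35 (frame 2 subframe 3 plus
    # frame 1 subframe 2 gives frame 3 subframe 5).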
def gen_of_cols(self, cols=None, val_filter=None):
        '''Yield the specified columns, one DataFrame per log file
        Args:
            cols: list of column names; None means all columns
            val_filter: filter conditions, dict of {'colname': [val1, ...]}
        Yields:
            a DataFrame per file, restricted to the requested columns
        '''
filters = {}
if val_filter:
filters.update(val_filter)
if self._id_filter:
filters.update(self._id_filter)
        aircol = 'AirTime'
        totcols = None  # None lets read_csv load every column
        if cols is not None:
            totcols = list(set.union(set(filters), set(cols)))
            if self._time_filter and aircol not in totcols:
                totcols.append(aircol)
for file in self._files:
filename = os.path.join(self._directory, file)
            data = pd.read_csv(filename, na_values='-', usecols=totcols)
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Series, concat
from pandas.core.base import DataError
from pandas.util import testing as tm
def test_rank_apply():
lev1 = tm.rands_array(10, 100)
lev2 = tm.rands_array(10, 130)
lab1 = np.random.randint(0, 100, size=500)
lab2 = np.random.randint(0, 130, size=500)
df = DataFrame(
{
"value": np.random.randn(500),
"key1": lev1.take(lab1),
"key2": lev2.take(lab2),
}
)
result = df.groupby(["key1", "key2"]).value.rank()
expected = [piece.value.rank() for key, piece in df.groupby(["key1", "key2"])]
expected = concat(expected, axis=0)
expected = expected.reindex(result.index)
tm.assert_series_equal(result, expected)
result = df.groupby(["key1", "key2"]).value.rank(pct=True)
expected = [
piece.value.rank(pct=True) for key, piece in df.groupby(["key1", "key2"])
]
expected = concat(expected, axis=0)
expected = expected.reindex(result.index)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("grps", [["qux"], ["qux", "quux"]])
@pytest.mark.parametrize(
"vals",
[
[2, 2, 8, 2, 6],
[
pd.Timestamp("2018-01-02"),
pd.Timestamp("2018-01-02"),
pd.Timestamp("2018-01-08"),
pd.Timestamp("2018-01-02"),
| pd.Timestamp("2018-01-06") | pandas.Timestamp |
# ref: alt-ed-covid-2...analysis_1_vars_and_regression.py
# ref: alt-ed-matching-effects-2...analysis_1_vars_and_regression.py
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
import statsmodels.api as sm
from statsmodels.iolib.summary2 import summary_col
def fsImproveProviderNames(sColName):
sMassagedName = sColName
sMassagedName = sMassagedName.replace('provider_hirability_1', 'provider_hirability_b_nacc_nself_yother')
sMassagedName = sMassagedName.replace('provider_impressed_1', 'provider_impressed_b_nacc_nself_yother')
sMassagedName = sMassagedName.replace('provider_hirability_2', 'provider_hirability_c_nacc_yself_nother')
sMassagedName = sMassagedName.replace('provider_impressed_2', 'provider_impressed_c_nacc_yself_nother')
sMassagedName = sMassagedName.replace('provider_hirability_3', 'provider_hirability_d_nacc_yself_yother')
sMassagedName = sMassagedName.replace('provider_impressed_3', 'provider_impressed_d_nacc_yself_yother')
sMassagedName = sMassagedName.replace('provider_hirability_4', 'provider_hirability_e_yacc_nself_nother')
sMassagedName = sMassagedName.replace('provider_impressed_4', 'provider_impressed_e_yacc_nself_nother')
sMassagedName = sMassagedName.replace('provider_hirability_5', 'provider_hirability_f_yacc_nself_yother')
sMassagedName = sMassagedName.replace('provider_impressed_5', 'provider_impressed_f_yacc_nself_yother')
sMassagedName = sMassagedName.replace('provider_hirability_6', 'provider_hirability_g_yacc_yself_nother')
sMassagedName = sMassagedName.replace('provider_impressed_6', 'provider_impressed_g_yacc_yself_nother')
sMassagedName = sMassagedName.replace('provider_hirability_7', 'provider_hirability_h_yacc_yself_yother')
sMassagedName = sMassagedName.replace('provider_impressed_7', 'provider_impressed_h_yacc_yself_yother')
return sMassagedName
def fsReformatColumnNames(sColName):
sMassagedName = sColName.replace(',', '').replace(
' ', '_').replace('-', '_').replace('>', '').replace('+', '').lower()
sMassagedName = sMassagedName.replace('?', '')
sMassagedName = sMassagedName.replace('(', '')
sMassagedName = sMassagedName.replace(')', '')
sMassagedName = sMassagedName.replace('.', '_')
sMassagedName = sMassagedName.replace('_/_', '_')
sMassagedName = sMassagedName.replace('__', '_')
sMassagedName = sMassagedName.replace('how_impressed_would_you_be_if_you_heard_that_someone_studied_at_this_school', 'provider_impressed')
sMassagedName = sMassagedName.replace('how_impressed_would_you_be_if_you_heard_that_someone_studied_at_', 'provider_impressed_')
sMassagedName = sMassagedName.replace('for_many_professions_learning_at_this_school_can_qualify_a_person_for_an_entry_level_position', 'provider_hirability')
return sMassagedName
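# For example (illustrative inputs): fsReformatColumnNames('Household Income?') returns
# 'household_income', and fsReformatColumnNames('Do you contribute to hiring and firing decisions at your company?')
# returns 'do_you_contribute_to_hiring_and_firing_decisions_at_your_company', matching the short names used below.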
def getData(dropFirstDummy=True):
df = pd.read_csv('prestige-hidden.csv')
# ref: https://stackoverflow.com/a/51428632/3931488
print(df.columns)
# df.replace(to_replace="Not employed at present", value="a", inplace=True)
# df.replace(to_replace="I usually spend more time with customers and external business partners than with coworkers.", value="b", inplace=True)
# df.replace(to_replace="I usually spend at least an hour each day with customers and external business partners.", value="c", inplace=True)
# df.replace(to_replace="I usually spend less than an hour each day in direct contact with customers and external business partners.", value="d", inplace=True)
# df = df.replace("Not employed at present", "a")
# df = df.replace("I usually spend more time with customers and external business partners than with coworkers.", "b")
# df = df.replace("I usually spend at least an hour each day with customers and external business partners.", "c")
# df = df.replace("I usually spend less than an hour each day in direct contact with customers and external business partners.", "d")
df.rename(columns={
"Do you contribute to hiring and firing decisions at your company?": "manager_effects",
"For many professions, alternative credentials can qualify a person for an entry-level position.": "baseline_hirability", # aka favorability
"It will soon become fairly conventional for high school graduates to obtain alternative credentials instead of going to college.": "conventional_alt_creds",
"It will soon become common for high school graduates to obtain alternative credentials instead of going to college.": "conventional_alt_creds",
"When you add up the pros and cons for online education, it's probably a good thing for society overall.": "favor_online_ed",
"Which of these industries most closely matches your profession?": "industry",
"Gender?": "gender",
"Household Income?": "income",
"Age?": "age",
"What is the highest level of education you have completed?": "education",
"Which race/ethnicity best describes you? (Please choose only one.) ": "ethnicity",
"What state do you reside in?": "state",
"What is the name of a reputable certification or non-college credential in your profession? Use “n/a” if nothing comes to mind.": "named_credential",
"I prefer to hire or work with a person that has a college degree rather a person that holds a reputable certification or non-college credential.": "cat_prefer_degree",
"Do you tend to work more closely with coworkers at your company or customers and external business partners?": "work_with_external_partners",
}, inplace=True)
# get dummies ref: https://stackoverflow.com/questions/55738056/using-categorical-variables-in-statsmodels-ols-class
df = pd.get_dummies(df, columns=['manager_effects'])
df = pd.get_dummies(df, columns=['industry']).rename(
fsReformatColumnNames, axis='columns')
df = pd.get_dummies(df, columns=['income'])
df = pd.get_dummies(df, columns=['age'])
    df = pd.get_dummies(df, columns=['education'])
#!/usr/bin/env python
'''
This script generates the training dataset for DeepAnchor.
Please include the following data within a work_dir and arrange it as shown below:
work_dir
----raw
----loop.bedpe # ChIA-PET or other types of loop files in bedpe format
----CTCF_peak.bed.gz # The ChIP-seq peak files of CTCF
----CTCF_motif.tsv # position of all CTCF binding sites
----dna_feature.npz # one-hot representation of DNA sequence for CTCF binding sites
----cadd_feature.npz # cadd features of DNA sequence for CTCF binding sites
You should prepare loop.bedpe and CTCF_peak.bed.gz for the specific sample, while CTCF_motif.tsv,
dna_feature.npz and cadd_feature.npz are not cell-type-specific and can be downloaded from the tutorial.
usage: python DeepAnchor_input.py work_dir
'''
import os
import sys
import pandas as pd
import numpy as np
import tensorflow as tf
bedpe_columns = ['chrom1','start1','end1','chrom2','start2','end2','name','score','strand1','strand2']
peak_columns = ['chrom', 'start', 'end', 'name', 'score', 'strand', 'signalValue', 'pValue', 'qValue', 'summit']
motif_columns = ['chrom','start','end','strand','score']
def generate_bed_mark_motif(file_motif, file_bed):
'''
1. file_bed should contain no overlaps
2. We mark file_motif with file_bed and generate three marks:
        in_bed: 1 if the motif is contained in any interval of file_bed, else 0
        exclusively_in_bed: 1 if the motif is the only motif contained in some interval of file_bed, else 0
        maximum_in_bed: 1 if the motif has the highest score among the motifs in some interval of file_bed, else 0
'''
df_motif = pd.read_csv(file_motif, sep='\t', names=motif_columns)
os.system('bedtools intersect -a {} -b {} -loj > temp.motif'.format(file_motif, file_bed))
bed_columns = ['bed_chrom','bed_start','bed_end']
df_matrix = pd.read_csv('temp.motif', sep='\t', names=motif_columns + bed_columns)
df_matrix = df_matrix.sort_values(['score'], ascending=False)
list1, list2, list3 = [], [], []
for bed, sub_motif in df_matrix.groupby(['bed_chrom','bed_start','bed_end']):
if bed[0] == '.':
pass
elif sub_motif.shape[0] > 1:
list1 += sub_motif.index.tolist()
list3.append(sub_motif.index.tolist()[0])
else:
list1 += sub_motif.index.tolist()
list2 += sub_motif.index.tolist()
list3 += sub_motif.index.tolist()
df_motif['in_bed'] = 0
df_motif['exclusively_in_bed'] = 0
df_motif['maximum_in_bed'] = 0
df_motif.loc[list1, 'in_bed'] = 1
df_motif.loc[list2, 'exclusively_in_bed'] = 1
df_motif.loc[list3, 'maximum_in_bed'] = 1
return df_motif
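# A small worked illustration of the three marks (hypothetical values, for clarity only):
# if one peak interval contains two motifs with scores 9.0 and 5.0, both get in_bed=1,
# neither gets exclusively_in_bed=1, and only the score-9.0 motif gets maximum_in_bed=1;
# a motif that is the sole hit inside a peak gets all three marks set to 1, and a motif
# overlapping no peak keeps all three marks at 0.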
def main(argv=sys.argv):
work_dir = sys.argv[1]
os.chdir(os.path.expanduser(work_dir))
if not os.path.exists('DeepAnchor'):
os.mkdir('DeepAnchor')
file_bedpe = './raw/loop.bedpe'
file_peak = './raw/CTCF_peak.bed.gz'
file_motif = './raw/CTCF_motif.tsv'
file_dna_onehot = './raw/dna_feature.npz'
file_cadd_features = './raw/cadd_feature.npz'
print('>>> generate total anchors')
if not os.path.exists('./DeepAnchor/total_anchors.bed'):
        df_loop = pd.read_csv(file_bedpe, sep='\t', names=bedpe_columns)
# -*- coding: utf-8 -*-
from datetime import datetime
from pandas.compat import range, lrange
import operator
import pytest
from warnings import catch_warnings
import numpy as np
from pandas import Series, Index, isna, notna
from pandas.core.dtypes.common import is_float_dtype
from pandas.core.dtypes.missing import remove_na_arraylike
from pandas.core.panel import Panel
from pandas.core.panel4d import Panel4D
from pandas.tseries.offsets import BDay
from pandas.util.testing import (assert_frame_equal, assert_series_equal,
assert_almost_equal)
import pandas.util.testing as tm
def add_nans(panel4d):
for l, label in enumerate(panel4d.labels):
panel = panel4d[label]
tm.add_nans(panel)
class SafeForLongAndSparse(object):
def test_repr(self):
repr(self.panel4d)
def test_iter(self):
tm.equalContents(list(self.panel4d), self.panel4d.labels)
def test_count(self):
f = lambda s: notna(s).sum()
self._check_stat_op('count', f, obj=self.panel4d, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_median(self):
def wrapper(x):
if isna(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
def test_min(self):
self._check_stat_op('min', np.min)
def test_max(self):
self._check_stat_op('max', np.max)
def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
pytest.skip("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
# def test_mad(self):
# f = lambda x: np.abs(x - x.mean()).mean()
# self._check_stat_op('mad', f)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
# def test_skew(self):
# from scipy.stats import skew
# def alt(x):
# if len(x) < 3:
# return np.nan
# return skew(x, bias=False)
# self._check_stat_op('skew', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
if obj is None:
obj = self.panel4d
# # set some NAs
# obj.loc[5:10] = np.nan
# obj.loc[15:20, -2:] = np.nan
f = getattr(obj, name)
if has_skipna:
def skipna_wrapper(x):
nona = remove_na_arraylike(x)
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(np.asarray(x))
with catch_warnings(record=True):
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
expected = obj.apply(wrapper, axis=i)
tm.assert_panel_equal(result, expected)
else:
skipna_wrapper = alternative
wrapper = alternative
with catch_warnings(record=True):
for i in range(obj.ndim):
result = f(axis=i)
if not tm._incompat_bottleneck_version(name):
expected = obj.apply(skipna_wrapper, axis=i)
tm.assert_panel_equal(result, expected)
pytest.raises(Exception, f, axis=obj.ndim)
class SafeForSparse(object):
def test_get_axis(self):
assert self.panel4d._get_axis(0) is self.panel4d.labels
assert self.panel4d._get_axis(1) is self.panel4d.items
assert self.panel4d._get_axis(2) is self.panel4d.major_axis
assert self.panel4d._get_axis(3) is self.panel4d.minor_axis
def test_set_axis(self):
with catch_warnings(record=True):
new_labels = Index(np.arange(len(self.panel4d.labels)))
# TODO: unused?
# new_items = Index(np.arange(len(self.panel4d.items)))
new_major = Index(np.arange(len(self.panel4d.major_axis)))
new_minor = Index(np.arange(len(self.panel4d.minor_axis)))
# ensure propagate to potentially prior-cached items too
# TODO: unused?
# label = self.panel4d['l1']
self.panel4d.labels = new_labels
if hasattr(self.panel4d, '_item_cache'):
assert 'l1' not in self.panel4d._item_cache
assert self.panel4d.labels is new_labels
self.panel4d.major_axis = new_major
assert self.panel4d[0].major_axis is new_major
assert self.panel4d.major_axis is new_major
self.panel4d.minor_axis = new_minor
assert self.panel4d[0].minor_axis is new_minor
assert self.panel4d.minor_axis is new_minor
def test_get_axis_number(self):
assert self.panel4d._get_axis_number('labels') == 0
assert self.panel4d._get_axis_number('items') == 1
assert self.panel4d._get_axis_number('major') == 2
assert self.panel4d._get_axis_number('minor') == 3
def test_get_axis_name(self):
assert self.panel4d._get_axis_name(0) == 'labels'
assert self.panel4d._get_axis_name(1) == 'items'
assert self.panel4d._get_axis_name(2) == 'major_axis'
assert self.panel4d._get_axis_name(3) == 'minor_axis'
def test_arith(self):
with catch_warnings(record=True):
self._test_op(self.panel4d, operator.add)
self._test_op(self.panel4d, operator.sub)
self._test_op(self.panel4d, operator.mul)
self._test_op(self.panel4d, operator.truediv)
self._test_op(self.panel4d, operator.floordiv)
self._test_op(self.panel4d, operator.pow)
self._test_op(self.panel4d, lambda x, y: y + x)
self._test_op(self.panel4d, lambda x, y: y - x)
self._test_op(self.panel4d, lambda x, y: y * x)
self._test_op(self.panel4d, lambda x, y: y / x)
self._test_op(self.panel4d, lambda x, y: y ** x)
pytest.raises(Exception, self.panel4d.__add__,
self.panel4d['l1'])
@staticmethod
def _test_op(panel4d, op):
result = op(panel4d, 1)
tm.assert_panel_equal(result['l1'], op(panel4d['l1'], 1))
def test_keys(self):
tm.equalContents(list(self.panel4d.keys()), self.panel4d.labels)
def test_iteritems(self):
"""Test panel4d.iteritems()"""
assert (len(list(self.panel4d.iteritems())) ==
len(self.panel4d.labels))
def test_combinePanel4d(self):
with catch_warnings(record=True):
result = self.panel4d.add(self.panel4d)
tm.assert_panel4d_equal(result, self.panel4d * 2)
def test_neg(self):
with catch_warnings(record=True):
tm.assert_panel4d_equal(-self.panel4d, self.panel4d * -1)
def test_select(self):
with catch_warnings(record=True):
p = self.panel4d
# select labels
result = p.select(lambda x: x in ('l1', 'l3'), axis='labels')
expected = p.reindex(labels=['l1', 'l3'])
tm.assert_panel4d_equal(result, expected)
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
tm.assert_panel4d_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15),
axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
tm.assert_panel4d_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=3)
expected = p.reindex(minor=['A', 'D'])
tm.assert_panel4d_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo',), axis='items')
tm.assert_panel4d_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
def test_abs(self):
with catch_warnings(record=True):
result = self.panel4d.abs()
expected = np.abs(self.panel4d)
tm.assert_panel4d_equal(result, expected)
p = self.panel4d['l1']
result = p.abs()
expected = np.abs(p)
tm.assert_panel_equal(result, expected)
df = p['ItemA']
result = df.abs()
expected = np.abs(df)
assert_frame_equal(result, expected)
class CheckIndexing(object):
def test_getitem(self):
pytest.raises(Exception, self.panel4d.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
with catch_warnings(record=True):
expected = self.panel4d['l2']
result = self.panel4d.pop('l2')
tm.assert_panel_equal(expected, result)
assert 'l2' not in self.panel4d.labels
del self.panel4d['l3']
assert 'l3' not in self.panel4d.labels
pytest.raises(Exception, self.panel4d.__delitem__, 'l3')
values = np.empty((4, 4, 4, 4))
values[0] = 0
values[1] = 1
values[2] = 2
values[3] = 3
panel4d = Panel4D(values, lrange(4), lrange(4),
lrange(4), lrange(4))
# did we delete the right row?
panel4dc = panel4d.copy()
del panel4dc[0]
            tm.assert_panel_equal(panel4dc[1], panel4d[1])
import os
from glob import glob
import time
import json
from PIL import Image
import pandas as pd
import numpy as np
import torchvision as tv
from rsp.data import bilinear_upsample, BANDS
from tifffile import imread as tiffread
from d3m.container import DataFrame as d3m_DataFrame
from d3m.metadata import base as metadata_base
from kf_d3m_primitives.remote_sensing.featurizer.remote_sensing_pretrained import (
RemoteSensingPretrainedPrimitive,
Hyperparams as rs_hp
)
from kf_d3m_primitives.remote_sensing.image_retrieval.image_retrieval import (
ImageRetrievalPrimitive,
Hyperparams as ir_hp
)
from kf_d3m_primitives.remote_sensing.image_retrieval.image_retrieval_pipeline import ImageRetrievalPipeline
amdim_path = '/static_volumes/8946fea864c29ed785e00a9cbaa9a50295eb5a334b014f27ba20927104b07f46'
moco_path = '/static_volumes/fcc8a5a05fa7dbad8fc55584a77fc5d2c407e03a88610267860b45208e152f1f'
def load_nwpu(data_dir: str = '/NWPU-RESISC45', n_imgs = 200):
paths = sorted(glob(os.path.join(data_dir, '*/*')))
paths = [os.path.abspath(p) for p in paths]
imgs = [Image.open(p) for p in paths[:n_imgs]]
labels = [os.path.basename(os.path.dirname(p)) for p in paths[:n_imgs]]
transform = tv.transforms.Compose([
tv.transforms.ToTensor(),
tv.transforms.Normalize(
mean = (0.3680, 0.3810, 0.3436),
std = (0.2034, 0.1854, 0.1848),
)
])
imgs = [transform(img) for img in imgs]
imgs = d3m_DataFrame(pd.DataFrame({'imgs': imgs}))
labels = np.array(labels)
return imgs, labels
def load_patch(imname):
patch = [
tiffread(f'{imname}_{band}.tif')
for band in BANDS
]
patch = np.stack([bilinear_upsample(xx) for xx in patch])
return patch
def load_big_earthnet():
fnames = sorted(glob('/test_data/bigearth-100-single/*/*.tif'))
imnames = sorted(list(set(['_'.join(f.split('_')[:-1]) for f in fnames])))
imgs = [
load_patch(img_path).astype(np.float32)
for img_path in imnames
]
imgs_df = pd.DataFrame({'image_col': imgs, 'index': range(len(imgs))})
imgs_df = d3m_DataFrame(imgs_df)
imgs_df.metadata = imgs_df.metadata.add_semantic_type(
(metadata_base.ALL_ELEMENTS, 1),
'https://metadata.datadrivendiscovery.org/types/PrimaryKey'
)
y = [i.split('/')[3] for i in imnames]
return imgs_df, np.array(y)
def iterative_labeling(features, labels, seed_idx = 2, n_rounds = 5):
# initial query image
y = (labels == labels[seed_idx]).astype(np.int)
annotations = np.zeros(features.shape[0]) - 1
annotations[seed_idx] = 1
n_pos, n_neg = 1, 0
for i in range(n_rounds):
print(f'round {i}')
# generate ranking by similarity
sampler = ImageRetrievalPrimitive(
hyperparams=ir_hp(
ir_hp.defaults(),
reduce_dimension=256
)
)
sampler.set_training_data(
inputs = features,
            outputs = d3m_DataFrame(pd.DataFrame({'annotations': annotations}))
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Index,
Interval,
IntervalIndex,
Timedelta,
Timestamp,
date_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray
@pytest.fixture(
params=[
(Index([0, 2, 4]), Index([1, 3, 5])),
(Index([0.0, 1.0, 2.0]), Index([1.0, 2.0, 3.0])),
(timedelta_range("0 days", periods=3), timedelta_range("1 day", periods=3)),
(date_range("20170101", periods=3), date_range("20170102", periods=3)),
(
date_range("20170101", periods=3, tz="US/Eastern"),
date_range("20170102", periods=3, tz="US/Eastern"),
),
],
ids=lambda x: str(x[0].dtype),
)
def left_right_dtypes(request):
"""
Fixture for building an IntervalArray from various dtypes
"""
return request.param
class TestAttributes:
@pytest.mark.parametrize(
"left, right",
[
(0, 1),
(Timedelta("0 days"), Timedelta("1 day")),
(Timestamp("2018-01-01"), Timestamp("2018-01-02")),
(
Timestamp("2018-01-01", tz="US/Eastern"),
Timestamp("2018-01-02", tz="US/Eastern"),
),
],
)
@pytest.mark.parametrize("constructor", [IntervalArray, IntervalIndex])
def test_is_empty(self, constructor, left, right, closed):
# GH27219
tuples = [(left, left), (left, right), np.nan]
expected = np.array([closed != "both", False, False])
result = constructor.from_tuples(tuples, closed=closed).is_empty
tm.assert_numpy_array_equal(result, expected)
class TestMethods:
@pytest.mark.parametrize("new_closed", ["left", "right", "both", "neither"])
def test_set_closed(self, closed, new_closed):
# GH 21670
array = IntervalArray.from_breaks(range(10), closed=closed)
result = array.set_closed(new_closed)
expected = IntervalArray.from_breaks(range(10), closed=new_closed)
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
Interval(0, 1, closed="right"),
IntervalArray.from_breaks([1, 2, 3, 4], closed="right"),
],
)
def test_where_raises(self, other):
ser = pd.Series(IntervalArray.from_breaks([1, 2, 3, 4], closed="left"))
match = "'value.closed' is 'right', expected 'left'."
with pytest.raises(ValueError, match=match):
ser.where([True, False, True], other=other)
def test_shift(self):
# https://github.com/pandas-dev/pandas/issues/31495
a = IntervalArray.from_breaks([1, 2, 3])
result = a.shift()
# int -> float
expected = IntervalArray.from_tuples([(np.nan, np.nan), (1.0, 2.0)])
tm.assert_interval_array_equal(result, expected)
def test_shift_datetime(self):
a = IntervalArray.from_breaks(date_range("2000", periods=4))
result = a.shift(2)
expected = a.take([-1, -1, 0], allow_fill=True)
tm.assert_interval_array_equal(result, expected)
result = a.shift(-1)
expected = a.take([1, 2, -1], allow_fill=True)
tm.assert_interval_array_equal(result, expected)
class TestSetitem:
def test_set_na(self, left_right_dtypes):
left, right = left_right_dtypes
result = IntervalArray.from_arrays(left, right)
if result.dtype.subtype.kind not in ["m", "M"]:
msg = "'value' should be an interval type, got <.*NaTType'> instead."
with pytest.raises(TypeError, match=msg):
result[0] = pd.NaT
if result.dtype.subtype.kind in ["i", "u"]:
msg = "Cannot set float NaN to integer-backed IntervalArray"
with pytest.raises(ValueError, match=msg):
result[0] = np.NaN
return
result[0] = np.nan
expected_left = Index([left._na_value] + list(left[1:]))
expected_right = Index([right._na_value] + list(right[1:]))
        expected = IntervalArray.from_arrays(expected_left, expected_right)
from bs4 import BeautifulSoup
from bs4.element import Comment
import pandas as pd
import requests
# Process the main sitemap to find the internal sitemaps (organized by day)
sitemap_url = 'https://towardsdatascience.com/sitemap/sitemap.xml'
xml = requests.get(sitemap_url).content
soup = BeautifulSoup(xml, "xml")
sitemaps = soup.find_all('sitemap')
# Create the lists that will become the dataframe columns
c_titulo = list()
c_url = list()
c_data = list()
c_ano = list()
c_mes = list()
c_dia = list()
c_dia_semana = list()
c_ultima_atualizacao = list()
c_frequencia_atualizacao = list()
c_prioridade = list()
for sitemap_inter in sitemaps:
    # Process the features of one internal sitemap (url, year, date)
loc = sitemap_inter.find('loc').text
xml_data = requests.get(loc).content
soup = BeautifulSoup(xml_data, "xml")
    # Extract the date and the sitemap type (posts or tags)
sitemap_url_split = loc.split('/')
sitemap_url_end = sitemap_url_split[-1]
if not sitemap_url_end.startswith('posts'):
continue
    # Get the sitemap date from the url
date = sitemap_url_end.replace('posts-', '').replace('.xml', '')
sitemap_date_split = date.split('-')
year = sitemap_date_split[0]
month = sitemap_date_split[1]
day = sitemap_date_split[2]
day_of_week = pd.Timestamp(date).day_name()
    print('Processing sitemap: ' + sitemap_url_end)
    # Process the urls of one internal sitemap, which contain the article headers
urlset = soup.find_all('url')
for item in urlset:
loc = item.find('loc').text
        print('...Extracting article data: ' + loc)
        # Extract the title from the url
loc_end = loc.split('/')[-1]
loc_split = loc_end.split('-')
title = ''
for i in range(len(loc_split) - 1):
title += ' ' + loc_split[i]
title = title.strip()
c_titulo.append(title)
c_url.append(loc)
c_data.append(date)
c_ano.append(year)
c_mes.append(month)
c_dia.append(day)
c_dia_semana.append(day_of_week)
c_ultima_atualizacao.append(item.find('lastmod').text)
c_frequencia_atualizacao.append(item.find('changefreq').text)
c_prioridade.append(item.find('priority').text)
# Build the dataframe and save it as .csv
data = {
'titulo': c_titulo,
'url': c_url,
'data': c_data,
'ano': c_ano,
'mes': c_mes,
'dia': c_dia,
'diaSemana': c_dia_semana,
'ultimaAtualizacao': c_ultima_atualizacao,
'frequenciaAtualizacao': c_frequencia_atualizacao,
'prioridade': c_prioridade}
df = pd.DataFrame(data)
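# Persist the scraped article index; the output file name below is an assumption.
df.to_csv('towards_data_science_articles.csv', index=False)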
import pandas as pd
from genomics_data_index.storage.io.mutation.NucleotideSampleData import NucleotideSampleData
def test_combine_vcf_mask():
num_annotations = 9
data_vcf = [
['SampleA', 'ref', 10, 'A', 'T', 'SNP', 'file', 'ref:10:A:T'] + [pd.NA] * num_annotations,
]
data_mask = [
['SampleA', 'ref', 1, 1, '?', 'UNKNOWN_MISSING', 'file', 'ref:1:1:?']
]
columns = ['SAMPLE', 'CHROM', 'POS', 'REF', 'ALT', 'TYPE', 'FILE', 'VARIANT_ID',
'ANN.Allele', 'ANN.Annotation', 'ANN.Annotation_Impact', 'ANN.Gene_Name', 'ANN.Gene_ID',
'ANN.Feature_Type', 'ANN.Transcript_BioType', 'ANN.HGVS.c', 'ANN.HGVS.p']
sample_data = NucleotideSampleData('SampleA', vcf_file=None, vcf_file_index=None,
mask_bed_file=None, preprocessed=True)
vcf_df = pd.DataFrame(data_vcf, columns=columns)
mask_df = pd.DataFrame(data_mask, columns=['SAMPLE', 'CHROM', 'POS', 'REF', 'ALT', 'TYPE', 'FILE', 'VARIANT_ID'])
combined_df = sample_data.combine_vcf_mask(vcf_frame=vcf_df, mask_frame=mask_df)
combined_df = combined_df.sort_values('POS').fillna('NA')
assert combined_df.columns.tolist() == columns
assert 2 == len(combined_df)
assert ['ref', 'ref'] == combined_df['CHROM'].tolist()
assert [1, 10] == combined_df['POS'].tolist()
assert [1, 'A'] == combined_df['REF'].tolist()
assert ['?', 'T'] == combined_df['ALT'].tolist()
assert ['UNKNOWN_MISSING', 'SNP'] == combined_df['TYPE'].tolist()
assert ['ref:1:1:?', 'ref:10:A:T'] == combined_df['VARIANT_ID'].tolist()
assert ['NA', 'NA'] == combined_df['ANN.Annotation'].tolist()
def test_combine_vcf_mask_overlap_feature():
num_annotations = 9
data_vcf = [
['SampleA', 'ref', 10, 'A', 'T', 'SNP', 'file', 'ref:10:A:T'] + [pd.NA] * num_annotations,
]
data_mask = [
['SampleA', 'ref', 10, 1, '?', 'UNKNOWN_MISSING', 'file', 'ref:10:1:?']
]
columns = ['SAMPLE', 'CHROM', 'POS', 'REF', 'ALT', 'TYPE', 'FILE', 'VARIANT_ID',
'ANN.Allele', 'ANN.Annotation', 'ANN.Annotation_Impact', 'ANN.Gene_Name', 'ANN.Gene_ID',
'ANN.Feature_Type', 'ANN.Transcript_BioType', 'ANN.HGVS.c', 'ANN.HGVS.p']
sample_data = NucleotideSampleData('SampleA', vcf_file=None, vcf_file_index=None,
mask_bed_file=None, preprocessed=True)
vcf_df = pd.DataFrame(data_vcf, columns=columns)
mask_df = pd.DataFrame(data_mask, columns=['SAMPLE', 'CHROM', 'POS', 'REF', 'ALT', 'TYPE', 'FILE', 'VARIANT_ID'])
combined_df = sample_data.combine_vcf_mask(vcf_frame=vcf_df, mask_frame=mask_df)
combined_df = combined_df.sort_values('POS').fillna('NA')
assert combined_df.columns.tolist() == columns
assert 1 == len(combined_df)
assert ['ref'] == combined_df['CHROM'].tolist()
assert [10] == combined_df['POS'].tolist()
assert [1] == combined_df['REF'].tolist()
assert ['?'] == combined_df['ALT'].tolist()
assert ['UNKNOWN_MISSING'] == combined_df['TYPE'].tolist()
assert ['ref:10:1:?'] == combined_df['VARIANT_ID'].tolist()
assert ['NA'] == combined_df['ANN.Annotation'].tolist()
def test_combine_vcf_mask_no_mask_features():
num_annotations = 9
data_vcf = [
['SampleA', 'ref', 10, 'A', 'T', 'SNP', 'file', 'ref:10:A:T'] + [pd.NA] * num_annotations,
]
data_mask = [
]
columns = ['SAMPLE', 'CHROM', 'POS', 'REF', 'ALT', 'TYPE', 'FILE', 'VARIANT_ID',
'ANN.Allele', 'ANN.Annotation', 'ANN.Annotation_Impact', 'ANN.Gene_Name', 'ANN.Gene_ID',
'ANN.Feature_Type', 'ANN.Transcript_BioType', 'ANN.HGVS.c', 'ANN.HGVS.p']
sample_data = NucleotideSampleData('SampleA', vcf_file=None, vcf_file_index=None,
mask_bed_file=None, preprocessed=True)
vcf_df = pd.DataFrame(data_vcf, columns=columns)
mask_df = pd.DataFrame(data_mask, columns=['SAMPLE', 'CHROM', 'POS', 'REF', 'ALT', 'TYPE', 'FILE', 'VARIANT_ID'])
combined_df = sample_data.combine_vcf_mask(vcf_frame=vcf_df, mask_frame=mask_df)
combined_df = combined_df.sort_values('POS').fillna('NA')
assert combined_df.columns.tolist() == columns
assert 1 == len(combined_df)
assert ['ref'] == combined_df['CHROM'].tolist()
assert [10] == combined_df['POS'].tolist()
assert ['A'] == combined_df['REF'].tolist()
assert ['T'] == combined_df['ALT'].tolist()
assert ['SNP'] == combined_df['TYPE'].tolist()
assert ['ref:10:A:T'] == combined_df['VARIANT_ID'].tolist()
assert ['NA'] == combined_df['ANN.Annotation'].tolist()
def test_combine_vcf_mask_no_vcf_feature():
data_vcf = [
]
data_mask = [
['SampleA', 'ref', 10, 1, '?', 'UNKNOWN_MISSING', 'file', 'ref:10:1:?']
]
columns = ['SAMPLE', 'CHROM', 'POS', 'REF', 'ALT', 'TYPE', 'FILE', 'VARIANT_ID',
'ANN.Allele', 'ANN.Annotation', 'ANN.Annotation_Impact', 'ANN.Gene_Name', 'ANN.Gene_ID',
'ANN.Feature_Type', 'ANN.Transcript_BioType', 'ANN.HGVS.c', 'ANN.HGVS.p']
sample_data = NucleotideSampleData('SampleA', vcf_file=None, vcf_file_index=None,
mask_bed_file=None, preprocessed=True)
vcf_df = pd.DataFrame(data_vcf, columns=columns)
mask_df = pd.DataFrame(data_mask, columns=['SAMPLE', 'CHROM', 'POS', 'REF', 'ALT', 'TYPE', 'FILE', 'VARIANT_ID'])
combined_df = sample_data.combine_vcf_mask(vcf_frame=vcf_df, mask_frame=mask_df)
combined_df = combined_df.sort_values('POS').fillna('NA')
assert combined_df.columns.tolist() == columns
assert 1 == len(combined_df)
assert ['ref'] == combined_df['CHROM'].tolist()
assert [10] == combined_df['POS'].tolist()
assert [1] == combined_df['REF'].tolist()
assert ['?'] == combined_df['ALT'].tolist()
assert ['UNKNOWN_MISSING'] == combined_df['TYPE'].tolist()
assert ['ref:10:1:?'] == combined_df['VARIANT_ID'].tolist()
assert ['NA'] == combined_df['ANN.Annotation'].tolist()
def test_combine_vcf_mask_same_position_different_sequence():
num_annotations = 9
data_vcf = [
['SampleA', 'ref', 10, 'A', 'T', 'SNP', 'file', 'ref:10:A:T'] + [pd.NA] * num_annotations,
]
data_mask = [
['SampleA', 'ref2', 10, 1, '?', 'UNKNOWN_MISSING', 'file', 'ref2:10:1:?']
]
columns = ['SAMPLE', 'CHROM', 'POS', 'REF', 'ALT', 'TYPE', 'FILE', 'VARIANT_ID',
'ANN.Allele', 'ANN.Annotation', 'ANN.Annotation_Impact', 'ANN.Gene_Name', 'ANN.Gene_ID',
'ANN.Feature_Type', 'ANN.Transcript_BioType', 'ANN.HGVS.c', 'ANN.HGVS.p']
sample_data = NucleotideSampleData('SampleA', vcf_file=None, vcf_file_index=None,
mask_bed_file=None, preprocessed=True)
vcf_df = pd.DataFrame(data_vcf, columns=columns)
    mask_df = pd.DataFrame(data_mask, columns=['SAMPLE', 'CHROM', 'POS', 'REF', 'ALT', 'TYPE', 'FILE', 'VARIANT_ID'])
# USAGE
# python test_network.py --model santa_not_santa.model --image images/examples/santa_01.png
# import the necessary packages
from keras.preprocessing.image import img_to_array
from keras.models import load_model
import numpy as np
import argparse
import imutils
import cv2
from PIL import Image
import glob
import pandas as pd
from pandas import ExcelWriter
from pandas import ExcelFile
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model", required=True,
help="path to trained model model")
ap.add_argument("-i", "--image", required=True,
help="path to input image")
args = vars(ap.parse_args())
x =[]
li = []
for filename in glob.glob(args["image"]+"/*.png"):
x.append(filename[11:])
new_set = [int(s.replace('.png', '')) for s in x]
new_set.sort()
new_n_set = ["parse_imgs/"+ str(s) + ".png" for s in new_set]
#print(new_n_set)
for filename in new_n_set:
# load the image
image = cv2.imread(filename)
orig = image.copy()
# pre-process the image for classification
image = cv2.resize(image, (30, 30))
image = image.astype("float") / 255.0
image = img_to_array(image)
image = np.expand_dims(image, axis=0)
# load the trained convolutional neural network
print("[INFO] loading network...")
model = load_model(args["model"])
a = model.predict(image)[0]
#print(a)
pred = str(np.argmax(a))
li.append(pred)
print(str(np.argmax(a))+"\t" +filename)
df = pd.DataFrame({'pred':li})
writer = ExcelWriter('abc.xlsx')
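# Write the predictions to the workbook; the sheet name below is an assumption.
df.to_excel(writer, sheet_name='predictions', index=False)
writer.save()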
from context import dero
import pandas as pd
from pandas.util.testing import assert_frame_equal
from pandas import Timestamp
from numpy import nan
import numpy
class DataFrameTest:
df = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01),
(10516, 'a', '1/2/2000', 1.02),
(10516, 'a', '1/3/2000', 1.03),
(10516, 'a', '1/4/2000', 1.04),
(10516, 'b', '1/1/2000', 1.05),
(10516, 'b', '1/2/2000', 1.06),
(10516, 'b', '1/3/2000', 1.07),
(10516, 'b', '1/4/2000', 1.08),
(10517, 'a', '1/1/2000', 1.09),
(10517, 'a', '1/2/2000', 1.10),
(10517, 'a', '1/3/2000', 1.11),
(10517, 'a', '1/4/2000', 1.12),
], columns = ['PERMNO','byvar','Date', 'RET'])
df_duplicate_row = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01),
(10516, 'a', '1/2/2000', 1.02),
(10516, 'a', '1/3/2000', 1.03),
(10516, 'a', '1/3/2000', 1.03), #this is a duplicated row
(10516, 'a', '1/4/2000', 1.04),
(10516, 'b', '1/1/2000', 1.05),
(10516, 'b', '1/2/2000', 1.06),
(10516, 'b', '1/3/2000', 1.07),
(10516, 'b', '1/4/2000', 1.08),
(10517, 'a', '1/1/2000', 1.09),
(10517, 'a', '1/2/2000', 1.10),
(10517, 'a', '1/3/2000', 1.11),
(10517, 'a', '1/4/2000', 1.12),
], columns = ['PERMNO','byvar','Date', 'RET'])
df_weight = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 0),
(10516, 'a', '1/2/2000', 1.02, 1),
(10516, 'a', '1/3/2000', 1.03, 1),
(10516, 'a', '1/4/2000', 1.04, 0),
(10516, 'b', '1/1/2000', 1.05, 1),
(10516, 'b', '1/2/2000', 1.06, 1),
(10516, 'b', '1/3/2000', 1.07, 1),
(10516, 'b', '1/4/2000', 1.08, 1),
(10517, 'a', '1/1/2000', 1.09, 0),
(10517, 'a', '1/2/2000', 1.1, 0),
(10517, 'a', '1/3/2000', 1.11, 0),
(10517, 'a', '1/4/2000', 1.12, 1),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'weight'])
df_nan_byvar = pd.DataFrame(data = [
('a', 1),
(nan, 2),
('b', 3),
('b', 4),
], columns = ['byvar', 'val'])
df_nan_byvar_and_val = pd.DataFrame(data = [
('a', 1),
(nan, 2),
('b', nan),
('b', 4),
], columns = ['byvar', 'val'])
single_ticker_df = pd.DataFrame(data = [
('a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
], columns = ['byvar', 'Date', 'TICKER'])
df_datetime = df.copy()
df_datetime['Date'] = pd.to_datetime(df_datetime['Date'])
df_datetime_no_ret = df_datetime.copy()
df_datetime_no_ret.drop('RET', axis=1, inplace=True)
df_gvkey_str = pd.DataFrame([
('001076','3/1/1995'),
('001076','4/1/1995'),
('001722','1/1/2012'),
('001722','7/1/2012'),
('001722', nan),
(nan ,'1/1/2012')
], columns=['GVKEY','Date'])
df_gvkey_str['Date'] = pd.to_datetime(df_gvkey_str['Date'])
df_gvkey_num = df_gvkey_str.copy()
df_gvkey_num['GVKEY'] = df_gvkey_num['GVKEY'].astype('float64')
df_gvkey_str2 = pd.DataFrame([
('001076','2/1/1995'),
('001076','3/2/1995'),
('001722','11/1/2011'),
('001722','10/1/2011'),
('001722', nan),
(nan ,'1/1/2012')
], columns=['GVKEY','Date'])
df_gvkey_str2['Date'] = pd.to_datetime(df_gvkey_str2['Date'])
df_fill_data = pd.DataFrame(
data=[
(4, 'c', nan, 'a'),
(1, 'd', 3, 'a'),
(10, 'e', 100, 'a'),
(2, nan, 6, 'b'),
(5, 'f', 8, 'b'),
(11, 'g', 150, 'b'),
],
columns=['y', 'x1', 'x2', 'group']
)
class TestCumulate(DataFrameTest):
expect_between_1_3 = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 1.01),
(10516, 'a', '1/2/2000', 1.02, 1.02),
(10516, 'a', '1/3/2000', 1.03, 1.0506),
(10516, 'a', '1/4/2000', 1.04, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.05),
(10516, 'b', '1/2/2000', 1.06, 1.06),
(10516, 'b', '1/3/2000', 1.07, 1.1342),
(10516, 'b', '1/4/2000', 1.08, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.09),
(10517, 'a', '1/2/2000', 1.1, 1.1),
(10517, 'a', '1/3/2000', 1.11, 1.2210000000000003),
(10517, 'a', '1/4/2000', 1.12, 1.12),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'cum_RET'])
expect_first = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01, 1.01),
(10516, 'a', '1/2/2000', 1.02, 1.02),
(10516, 'a', '1/3/2000', 1.03, 1.0506),
(10516, 'a', '1/4/2000', 1.04, 1.092624),
(10516, 'b', '1/1/2000', 1.05, 1.05),
(10516, 'b', '1/2/2000', 1.06, 1.06),
(10516, 'b', '1/3/2000', 1.07, 1.1342),
(10516, 'b', '1/4/2000', 1.08, 1.224936),
(10517, 'a', '1/1/2000', 1.09, 1.09),
(10517, 'a', '1/2/2000', 1.10, 1.10),
(10517, 'a', '1/3/2000', 1.11, 1.221),
(10517, 'a', '1/4/2000', 1.12, 1.36752),
], columns = ['PERMNO','byvar','Date', 'RET', 'cum_RET'])
def test_method_between_1_3(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[1,3])
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_method_between_m2_0(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[-2,0])
#Actually same result as [1,3]
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_shifted_index(self):
df = self.df.copy()
df.index = df.index + 10
cum_df = dero.pandas.cumulate(df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[-2,0])
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_method_first(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'first', periodvar='Date',
byvars=['PERMNO','byvar'])
assert_frame_equal(self.expect_first, cum_df, check_dtype=False)
def test_grossify(self):
df = self.df.copy() #don't overwrite original
df['RET'] -= 1 #ungrossify
expect_first_grossify = self.expect_first.copy()
expect_first_grossify['cum_RET'] -= 1
expect_first_grossify['RET'] -= 1
cum_df = dero.pandas.cumulate(df, 'RET', 'first', periodvar='Date',
byvars=['PERMNO','byvar'], grossify=True)
assert_frame_equal(expect_first_grossify, cum_df, check_dtype=False)
class TestGroupbyMerge(DataFrameTest):
def test_subset_max(self):
byvars = ['PERMNO','byvar']
out = dero.pandas.groupby_merge(self.df, byvars, 'max', subset='RET')
expect_df = pd.DataFrame(
[(10516, 'a', '1/1/2000', 1.01, 1.04),
(10516, 'a', '1/2/2000', 1.02, 1.04),
(10516, 'a', '1/3/2000', 1.03, 1.04),
(10516, 'a', '1/4/2000', 1.04, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.08),
(10516, 'b', '1/2/2000', 1.06, 1.08),
(10516, 'b', '1/3/2000', 1.07, 1.08),
(10516, 'b', '1/4/2000', 1.08, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.12),
(10517, 'a', '1/2/2000', 1.10, 1.12),
(10517, 'a', '1/3/2000', 1.11, 1.12),
(10517, 'a', '1/4/2000', 1.12, 1.12)],
columns = ['PERMNO','byvar','Date', 'RET', 'RET_max'])
assert_frame_equal(expect_df, out)
def test_subset_std(self):
byvars = ['PERMNO','byvar']
out = dero.pandas.groupby_merge(self.df, byvars, 'std', subset='RET')
expect_df = pd.DataFrame(
[(10516, 'a', '1/1/2000', 1.01, 0.012909944487358068),
(10516, 'a', '1/2/2000', 1.02, 0.012909944487358068),
(10516, 'a', '1/3/2000', 1.03, 0.012909944487358068),
(10516, 'a', '1/4/2000', 1.04, 0.012909944487358068),
(10516, 'b', '1/1/2000', 1.05, 0.012909944487358068),
(10516, 'b', '1/2/2000', 1.06, 0.012909944487358068),
(10516, 'b', '1/3/2000', 1.07, 0.012909944487358068),
(10516, 'b', '1/4/2000', 1.08, 0.012909944487358068),
(10517, 'a', '1/1/2000', 1.09, 0.012909944487358068),
(10517, 'a', '1/2/2000', 1.10, 0.012909944487358068),
(10517, 'a', '1/3/2000', 1.11, 0.012909944487358068),
(10517, 'a', '1/4/2000', 1.12, 0.012909944487358068)],
columns = ['PERMNO','byvar','Date', 'RET', 'RET_std'])
assert_frame_equal(expect_df, out)
def test_nan_byvar_transform(self):
expect_df = self.df_nan_byvar.copy()
expect_df['val_transform'] = expect_df['val']
out = dero.pandas.groupby_merge(self.df_nan_byvar, 'byvar', 'transform', (lambda x: x))
assert_frame_equal(expect_df, out)
def test_nan_byvar_and_nan_val_transform_numeric(self):
non_standard_index = self.df_nan_byvar_and_val.copy()
non_standard_index.index = [5,6,7,8]
expect_df = self.df_nan_byvar_and_val.copy()
expect_df['val_transform'] = expect_df['val'] + 1
expect_df.index = [5,6,7,8]
out = dero.pandas.groupby_merge(non_standard_index, 'byvar', 'transform', (lambda x: x + 1))
assert_frame_equal(expect_df, out)
def test_nan_byvar_and_nan_val_and_nonstandard_index_transform_numeric(self):
        expect_df = self.df_nan_byvar_and_val.copy()
        expect_df['val_transform'] = expect_df['val'] + 1
        out = dero.pandas.groupby_merge(self.df_nan_byvar_and_val, 'byvar', 'transform', (lambda x: x + 1))
        assert_frame_equal(expect_df, out)
def test_nan_byvar_sum(self):
expect_df = pd.DataFrame(data = [
('a', 1, 1.0),
(nan, 2, nan),
('b', 3, 7.0),
('b', 4, 7.0),
], columns = ['byvar', 'val', 'val_sum'])
out = dero.pandas.groupby_merge(self.df_nan_byvar, 'byvar', 'sum')
assert_frame_equal(expect_df, out)
class TestLongToWide:
expect_df_with_colindex = pd.DataFrame(data = [
(10516, 'a', 1.01, 1.02, 1.03, 1.04),
(10516, 'b', 1.05, 1.06, 1.07, 1.08),
(10517, 'a', 1.09, 1.1, 1.11, 1.12),
], columns = ['PERMNO', 'byvar',
'RET1/1/2000', 'RET1/2/2000',
'RET1/3/2000', 'RET1/4/2000'])
expect_df_no_colindex = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/2/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/3/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/4/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/2/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/3/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/4/2000', 1.05, 1.06, 1.07, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/2/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/3/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/4/2000', 1.09, 1.1, 1.11, 1.12),
], columns = ['PERMNO', 'byvar', 'Date', 'RET0',
'RET1', 'RET2', 'RET3'])
input_data = DataFrameTest()
ltw_no_dup_colindex = dero.pandas.long_to_wide(input_data.df,
['PERMNO', 'byvar'], 'RET', colindex='Date')
ltw_dup_colindex = dero.pandas.long_to_wide(input_data.df_duplicate_row,
['PERMNO', 'byvar'], 'RET', colindex='Date')
ltw_no_dup_no_colindex = dero.pandas.long_to_wide(input_data.df,
['PERMNO', 'byvar'], 'RET')
ltw_dup_no_colindex = dero.pandas.long_to_wide(input_data.df_duplicate_row,
['PERMNO', 'byvar'], 'RET')
df_list = [ltw_no_dup_colindex, ltw_dup_colindex,
ltw_no_dup_no_colindex, ltw_dup_no_colindex]
def test_no_duplicates_with_colindex(self):
assert_frame_equal(self.expect_df_with_colindex, self.ltw_no_dup_colindex)
def test_duplicates_with_colindex(self):
assert_frame_equal(self.expect_df_with_colindex, self.ltw_dup_colindex)
def test_no_duplicates_no_colindex(self):
assert_frame_equal(self.expect_df_no_colindex, self.ltw_no_dup_no_colindex)
def test_duplicates_no_colindex(self):
assert_frame_equal(self.expect_df_no_colindex, self.ltw_dup_no_colindex)
def test_no_extra_vars(self):
for df in self.df_list:
assert ('__idx__','__key__') not in df.columns
class TestPortfolioAverages:
input_data = DataFrameTest()
expect_avgs_no_wt = pd.DataFrame(data = [
(1, 'a', 1.0250000000000001),
(1, 'b', 1.0550000000000002),
(2, 'a', 1.1050000000000002),
(2, 'b', 1.0750000000000002),
], columns = ['portfolio', 'byvar', 'RET'])
expect_avgs_wt = pd.DataFrame(data = [
(1, 'a', 1.0250000000000001, 1.025),
(1, 'b', 1.0550000000000002, 1.0550000000000002),
(2, 'a', 1.1050000000000002, 1.12),
(2, 'b', 1.0750000000000002, 1.0750000000000002),
], columns = ['portfolio', 'byvar', 'RET', 'RET_wavg'])
expect_ports = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 0, 1),
(10516, 'a', '1/2/2000', 1.02, 1, 1),
(10516, 'a', '1/3/2000', 1.03, 1, 1),
(10516, 'a', '1/4/2000', 1.04, 0, 1),
(10516, 'b', '1/1/2000', 1.05, 1, 1),
(10516, 'b', '1/2/2000', 1.06, 1, 1),
(10516, 'b', '1/3/2000', 1.07, 1, 2),
(10516, 'b', '1/4/2000', 1.08, 1, 2),
(10517, 'a', '1/1/2000', 1.09, 0, 2),
(10517, 'a', '1/2/2000', 1.1, 0, 2),
(10517, 'a', '1/3/2000', 1.11, 0, 2),
(10517, 'a', '1/4/2000', 1.12, 1, 2),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'weight', 'portfolio'])
avgs, ports = dero.pandas.portfolio_averages(input_data.df_weight, 'RET', 'RET', ngroups=2,
byvars='byvar')
w_avgs, w_ports = dero.pandas.portfolio_averages(input_data.df_weight, 'RET', 'RET', ngroups=2,
byvars='byvar', wtvar='weight')
def test_simple_averages(self):
assert_frame_equal(self.expect_avgs_no_wt, self.avgs, check_dtype=False)
def test_weighted_averages(self):
assert_frame_equal(self.expect_avgs_wt, self.w_avgs, check_dtype=False)
def test_portfolio_construction(self):
print(self.ports)
assert_frame_equal(self.expect_ports, self.ports, check_dtype=False)
assert_frame_equal(self.expect_ports, self.w_ports, check_dtype=False)
class TestWinsorize(DataFrameTest):
def test_winsor_40_subset_byvars(self):
expect_df = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.022624),
(10516, 'a', '1/2/2000', 1.022624),
(10516, 'a', '1/3/2000', 1.02672),
(10516, 'a', '1/4/2000', 1.02672),
(10516, 'b', '1/1/2000', 1.062624),
(10516, 'b', '1/2/2000', 1.062624),
(10516, 'b', '1/3/2000', 1.06672),
(10516, 'b', '1/4/2000', 1.06672),
(10517, 'a', '1/1/2000', 1.102624),
(10517, 'a', '1/2/2000', 1.102624),
(10517, 'a', '1/3/2000', 1.10672),
(10517, 'a', '1/4/2000', 1.10672),
], columns = ['PERMNO', 'byvar', 'Date', 'RET'])
wins = dero.pandas.winsorize(self.df, .4, subset='RET', byvars=['PERMNO','byvar'])
assert_frame_equal(expect_df, wins, check_less_precise=True)
class TestRegBy(DataFrameTest):
def create_indf(self):
indf = self.df_weight.copy()
indf['key'] = indf['PERMNO'].astype(str) + '_' + indf['byvar']
return indf
def test_regby_nocons(self):
indf = self.create_indf()
expect_df = pd.DataFrame(data = [
(0.48774684748988806, '10516_a'),
(0.9388636664168903, '10516_b'),
(0.22929206076239614, '10517_a'),
], columns = ['coef_RET', 'key'])
rb = dero.pandas.reg_by(indf, 'weight', 'RET', 'key', cons=False)
print('Reg by: ', rb)
assert_frame_equal(expect_df, rb)
def test_regby_cons(self):
indf = self.create_indf()
expect_df = pd.DataFrame(data = [
(0.49999999999999645, 5.329070518200751e-15, '10516_a'),
(0.9999999999999893, 1.0658141036401503e-14, '10516_b'),
(-32.89999999999997, 29.999999999999982, '10517_a'),
], columns = ['const', 'coef_RET', 'key'])
rb = dero.pandas.reg_by(indf, 'weight', 'RET', 'key')
print('Reg by: ', rb)
assert_frame_equal(expect_df, rb)
def test_regby_cons_low_obs(self):
indf = self.create_indf().loc[:8,:] #makes it so that one byvar only has one obs
expect_df = pd.DataFrame(data = [
(0.49999999999999645, 5.329070518200751e-15, '10516_a'),
(0.9999999999999893, 1.0658141036401503e-14, '10516_b'),
(nan, nan, '10517_a'),
], columns = ['const', 'coef_RET', 'key'])
rb = dero.pandas.reg_by(indf, 'weight', 'RET', 'key')
print('Reg by: ', rb)
assert_frame_equal(expect_df, rb)
class TestExpandMonths(DataFrameTest):
def test_expand_months_tradedays(self):
expect_df = pd.DataFrame(data = [
(Timestamp('2000-01-03 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-04 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-05 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-06 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-07 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-10 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-11 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-12 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-13 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-14 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-18 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-19 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-20 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-21 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-24 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-25 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-26 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-27 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-28 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-31 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
], columns = ['Daily Date', 'byvar', 'Date', 'TICKER'])
em = dero.pandas.expand_months(self.single_ticker_df)
assert_frame_equal(expect_df.sort_index(axis=1), em.sort_index(axis=1))
def test_expand_months_calendardays(self):
expect_df = pd.DataFrame(data = [
(Timestamp('2000-01-01 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-02 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-03 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-04 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-05 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-06 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-07 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-08 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-09 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-10 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-11 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-12 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-13 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-14 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-15 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-16 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-17 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-18 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
            (Timestamp('2000-01-19 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/2/24 15:02
Desc: Eastmoney (东方财富网) - Data Center - New Stock Data - IPO subscription yield
http://data.eastmoney.com/xg/xg/dxsyl.html
Eastmoney - Data Center - New Stock Data - new stock subscription and allotment lookup
http://data.eastmoney.com/xg/xg/default_2.html
"""
import pandas as pd
import requests
from tqdm import tqdm
from akshare.utils import demjson
def _get_page_num_dxsyl() -> int:
"""
    Eastmoney - Data Center - New Stock Data - IPO subscription yield - total number of pages
    http://data.eastmoney.com/xg/xg/dxsyl.html
    :return: total number of pages
:rtype: int
"""
url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"st": "16",
"sr": "-1",
"ps": "500",
"p": '1',
"type": "NS",
"sty": "NSDXSYL",
"js": "({data:[(x)],pages:(pc)})",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
total_page = data_json["pages"]
return total_page
def stock_dxsyl_em() -> pd.DataFrame:
"""
    Eastmoney - Data Center - New Stock Data - IPO subscription yield
    http://data.eastmoney.com/xg/xg/dxsyl.html
    :return: IPO subscription yield data for the specified market
:rtype: pandas.DataFrame
"""
url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
page_num = _get_page_num_dxsyl()
big_df = pd.DataFrame()
for page in tqdm(range(1, page_num + 1), leave=False):
params = {
"st": "16",
"sr": "-1",
"ps": "500",
"p": str(page),
"type": "NS",
"sty": "NSDXSYL",
"js": "({data:[(x)],pages:(pc)})",
}
res = requests.get(url, params=params)
data_text = res.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(',') for item in data_json["data"]])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.reset_index(inplace=True)
big_df['index'] = big_df.index + 1
big_df.columns = [
"序号",
"股票代码",
"股票简称",
"发行价",
"最新价",
"网上-发行中签率",
"网上-有效申购股数",
"网上-有效申购户数",
"网上-超额认购倍数",
"网下-配售中签率",
"网下-有效申购股数",
"网下-有效申购户数",
"网下-配售认购倍数",
"总发行数量",
"开盘溢价",
"首日涨幅",
"打新收益",
"上市日期",
"-",
]
big_df = big_df[[
"序号",
"股票代码",
"股票简称",
"发行价",
"最新价",
"网上-发行中签率",
"网上-有效申购股数",
"网上-有效申购户数",
"网上-超额认购倍数",
"网下-配售中签率",
"网下-有效申购股数",
"网下-有效申购户数",
"网下-配售认购倍数",
"总发行数量",
"开盘溢价",
"首日涨幅",
"打新收益",
"上市日期",
]]
big_df["发行价"] = pd.to_numeric(big_df["发行价"], errors='coerce')
big_df["最新价"] = pd.to_numeric(big_df["最新价"])
big_df["网上-发行中签率"] = pd.to_numeric(big_df["网上-发行中签率"])
big_df["网上-有效申购股数"] = pd.to_numeric(big_df["网上-有效申购股数"])
big_df["网上-有效申购户数"] = pd.to_numeric(big_df["网上-有效申购户数"])
big_df["网上-超额认购倍数"] = pd.to_numeric(big_df["网上-超额认购倍数"])
big_df["网下-配售中签率"] = pd.to_numeric(big_df["网下-配售中签率"])
big_df["网下-有效申购股数"] = pd.to_numeric(big_df["网下-有效申购股数"])
big_df["网下-有效申购户数"] = pd.to_numeric(big_df["网下-有效申购户数"])
big_df["网下-配售认购倍数"] = pd.to_numeric(big_df["网下-配售认购倍数"])
big_df["总发行数量"] = pd.to_numeric(big_df["总发行数量"])
big_df["开盘溢价"] = pd.to_numeric(big_df["开盘溢价"])
big_df["首日涨幅"] = pd.to_numeric(big_df["首日涨幅"])
big_df["打新收益"] = pd.to_numeric(big_df["打新收益"])
return big_df
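# Usage sketch (live network access to the Eastmoney endpoint is assumed):
#     stock_dxsyl_em_df = stock_dxsyl_em()
#     print(stock_dxsyl_em_df)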
def stock_xgsglb_em(symbol: str = "京市A股") -> pd.DataFrame:
"""
    New stock subscription and allotment lookup
    http://data.eastmoney.com/xg/xg/default_2.html
    :param symbol: choice of {"全部股票", "沪市A股", "科创板", "深市A股", "创业板", "京市A股"}
    :type symbol: str
    :return: new stock subscription and allotment data
:rtype: pandas.DataFrame
"""
market_map = {
"全部股票": """(APPLY_DATE>'2010-01-01')""",
"沪市A股": """(APPLY_DATE>'2010-01-01')(SECURITY_TYPE_CODE in ("058001001","058001008"))(TRADE_MARKET_CODE in ("069001001001","069001001003","069001001006"))""",
"科创板": """(APPLY_DATE>'2010-01-01')(SECURITY_TYPE_CODE in ("058001001","058001008"))(TRADE_MARKET_CODE="069001001006")""",
"深市A股": """(APPLY_DATE>'2010-01-01')(SECURITY_TYPE_CODE="058001001")(TRADE_MARKET_CODE in ("069001002001","069001002002","069001002003","069001002005"))""",
"创业板": """(APPLY_DATE>'2010-01-01')(SECURITY_TYPE_CODE="058001001")(TRADE_MARKET_CODE="069001002002")""",
}
url = "http://datacenter-web.eastmoney.com/api/data/v1/get"
if symbol == "京市A股":
params = {
'sortColumns': 'APPLY_DATE',
'sortTypes': '-1',
'pageSize': '500',
'pageNumber': '1',
'columns': 'ALL',
'reportName': 'RPT_NEEQ_ISSUEINFO_LIST',
'quoteColumns': 'f14~01~SECURITY_CODE~SECURITY_NAME_ABBR',
'source': 'NEEQSELECT',
'client': 'WEB',
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = data_json['result']['pages']
big_df = pd.DataFrame()
for page in tqdm(range(1, 1+int(total_page)), leave=False):
params.update({
'pageNumber': page
})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['result']['data'])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.reset_index(inplace=True)
big_df['index'] = big_df.index + 1
big_df.columns = [
'序号',
'-',
'代码',
'-',
'简称',
'申购代码',
'发行总数',
'-',
'发行价格',
'发行市盈率',
'申购日',
'发行结果公告日',
'上市日',
'网上发行数量',
'顶格申购所需资金',
'申购上限',
'网上申购缴款日',
'网上申购退款日',
'-',
'网上获配比例',
'最新价',
'首日收盘价',
'网下有效申购倍数',
'每百股获利',
'-',
'-',
'-',
'-',
'-',
'-',
]
big_df = big_df[[
'序号',
'代码',
'简称',
'申购代码',
'发行总数',
'网上发行数量',
'顶格申购所需资金',
'申购上限',
'发行价格',
'最新价',
'首日收盘价',
'申购日',
'网上申购缴款日',
'网上申购退款日',
'上市日',
'发行结果公告日',
'发行市盈率',
'网上获配比例',
'网下有效申购倍数',
'每百股获利',
]]
big_df['发行总数'] = pd.to_numeric(big_df['发行总数'])
big_df['网上发行数量'] = pd.to_numeric(big_df['网上发行数量'])
big_df['顶格申购所需资金'] = pd.to_numeric(big_df['顶格申购所需资金'])
big_df['申购上限'] = pd.to_numeric(big_df['申购上限'])
big_df['发行价格'] = pd.to_numeric(big_df['发行价格'])
big_df['最新价'] = pd.to_numeric(big_df['最新价'])
big_df['首日收盘价'] = pd.to_numeric(big_df['首日收盘价'])
big_df['发行市盈率'] = pd.to_numeric(big_df['发行市盈率'])
big_df['网上获配比例'] = pd.to_numeric(big_df['网上获配比例'])
big_df['网下有效申购倍数'] = pd.to_numeric(big_df['网下有效申购倍数'])
big_df['每百股获利'] = pd.to_numeric(big_df['每百股获利'])
big_df['申购日'] = pd.to_datetime(big_df['申购日']).dt.date
big_df['网上申购缴款日'] = pd.to_datetime(big_df['网上申购缴款日']).dt.date
big_df['网上申购退款日'] = pd.to_datetime(big_df['网上申购退款日']).dt.date
big_df['上市日'] = pd.to_datetime(big_df['上市日']).dt.date
big_df['发行结果公告日'] = pd.to_datetime(big_df['发行结果公告日']).dt.date
return big_df
else:
params = {
'sortColumns': 'APPLY_DATE,SECURITY_CODE',
'sortTypes': '-1,-1',
'pageSize': '5000',
'pageNumber': '1',
'reportName': 'RPTA_APP_IPOAPPLY',
'columns': 'SECURITY_CODE,SECURITY_NAME,TRADE_MARKET_CODE,APPLY_CODE,TRADE_MARKET,MARKET_TYPE,ORG_TYPE,ISSUE_NUM,ONLINE_ISSUE_NUM,OFFLINE_PLACING_NUM,TOP_APPLY_MARKETCAP,PREDICT_ONFUND_UPPER,ONLINE_APPLY_UPPER,PREDICT_ONAPPLY_UPPER,ISSUE_PRICE,LATELY_PRICE,CLOSE_PRICE,APPLY_DATE,BALLOT_NUM_DATE,BALLOT_PAY_DATE,LISTING_DATE,AFTER_ISSUE_PE,ONLINE_ISSUE_LWR,INITIAL_MULTIPLE,INDUSTRY_PE_NEW,OFFLINE_EP_OBJECT,CONTINUOUS_1WORD_NUM,TOTAL_CHANGE,PROFIT,LIMIT_UP_PRICE,INFO_CODE,OPEN_PRICE,LD_OPEN_PREMIUM,LD_CLOSE_CHANGE,TURNOVERRATE,LD_HIGH_CHANG,LD_AVERAGE_PRICE,OPEN_DATE,OPEN_AVERAGE_PRICE,PREDICT_PE,PREDICT_ISSUE_PRICE2,PREDICT_ISSUE_PRICE,PREDICT_ISSUE_PRICE1,PREDICT_ISSUE_PE,PREDICT_PE_THREE,ONLINE_APPLY_PRICE,MAIN_BUSINESS',
'filter': market_map[symbol],
'source': 'WEB',
'client': 'WEB',
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = data_json['result']['pages']
big_df = pd.DataFrame()
for page in tqdm(range(1, total_page+1), leave=False):
params.update({"pageNumber": page})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['result']['data'])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.columns = [
"股票代码",
"股票简称",
"_",
"申购代码",
"_",
"_",
"_",
"发行总数",
"网上发行",
"_",
"顶格申购需配市值",
"_",
"申购上限",
"_",
"发行价格",
"最新价",
"首日收盘价",
"申购日期",
"中签号公布日",
"中签缴款日期",
"上市日期",
"发行市盈率",
"中签率",
"询价累计报价倍数",
"_",
"配售对象报价家数",
"连续一字板数量",
"涨幅",
"每中一签获利",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"行业市盈率",
"_",
"_",
"_",
]
big_df = big_df[
[
"股票代码",
"股票简称",
"申购代码",
"发行总数",
"网上发行",
"顶格申购需配市值",
"申购上限",
"发行价格",
"最新价",
"首日收盘价",
"申购日期",
"中签号公布日",
"中签缴款日期",
"上市日期",
"发行市盈率",
"行业市盈率",
"中签率",
"询价累计报价倍数",
"配售对象报价家数",
"连续一字板数量",
"涨幅",
"每中一签获利",
]
]
big_df['申购日期'] = pd.to_datetime(big_df['申购日期']).dt.date
big_df['中签号公布日'] = pd.to_datetime(big_df['中签号公布日']).dt.date
big_df['中签缴款日期'] = pd.to_datetime(big_df['中签缴款日期']).dt.date
big_df['发行总数'] = pd.to_numeric(big_df['发行总数'])
big_df['网上发行'] = pd.to_numeric(big_df['网上发行'])
big_df['顶格申购需配市值'] = pd.to_numeric(big_df['顶格申购需配市值'])
big_df['申购上限'] = pd.to_numeric(big_df['申购上限'])
big_df['发行价格'] = pd.to_numeric(big_df['发行价格'])
big_df['最新价'] = pd.to_numeric(big_df['最新价'])
big_df['首日收盘价'] = pd.to_numeric(big_df['首日收盘价'])
big_df['发行市盈率'] = pd.to_numeric(big_df['发行市盈率'])
        big_df['行业市盈率'] = pd.to_numeric(big_df['行业市盈率'])
        return big_df
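

if __name__ == "__main__":
    # Usage sketch (assumption: live network access to the Eastmoney endpoints;
    # the returned tables change daily, so the printed output is illustrative only).
    stock_dxsyl_em_df = stock_dxsyl_em()
    print(stock_dxsyl_em_df)

    stock_xgsglb_em_df = stock_xgsglb_em(symbol="全部股票")
    print(stock_xgsglb_em_df)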
"""MovieLens dataset"""
import numpy as np
import os
import re
import pandas as pd
import scipy.sparse as sp
import torch as th
import dgl
from dgl.data.utils import download, extract_archive, get_download_dir
_urls = {
'ml-100k' : 'http://files.grouplens.org/datasets/movielens/ml-100k.zip',
'ml-1m' : 'http://files.grouplens.org/datasets/movielens/ml-1m.zip',
'ml-10m' : 'http://files.grouplens.org/datasets/movielens/ml-10m.zip',
}
READ_DATASET_PATH = get_download_dir()
GENRES_ML_100K =\
['unknown', 'Action', 'Adventure', 'Animation',
'Children', 'Comedy', 'Crime', 'Documentary', 'Drama', 'Fantasy',
'Film-Noir', 'Horror', 'Musical', 'Mystery', 'Romance', 'Sci-Fi',
'Thriller', 'War', 'Western']
GENRES_ML_1M = GENRES_ML_100K[1:]
GENRES_ML_10M = GENRES_ML_100K + ['IMAX']
class MovieLens(object):
"""MovieLens dataset used by GCMC model
TODO(minjie): make this dataset more general
The dataset stores MovieLens ratings in two types of graphs. The encoder graph
contains rating value information in the form of edge types. The decoder graph
stores plain user-movie pairs in the form of a bipartite graph with no rating
information. All graphs have two types of nodes: "user" and "movie".
The training, validation and test set can be summarized as follows:
training_enc_graph : training user-movie pairs + rating info
training_dec_graph : training user-movie pairs
valid_enc_graph : training user-movie pairs + rating info
valid_dec_graph : validation user-movie pairs
test_enc_graph : training user-movie pairs + validation user-movie pairs + rating info
test_dec_graph : test user-movie pairs
Attributes
----------
train_enc_graph : dgl.DGLHeteroGraph
Encoder graph for training.
train_dec_graph : dgl.DGLHeteroGraph
Decoder graph for training.
train_labels : torch.Tensor
The categorical label of each user-movie pair
train_truths : torch.Tensor
The actual rating values of each user-movie pair
valid_enc_graph : dgl.DGLHeteroGraph
Encoder graph for validation.
valid_dec_graph : dgl.DGLHeteroGraph
Decoder graph for validation.
valid_labels : torch.Tensor
The categorical label of each user-movie pair
valid_truths : torch.Tensor
The actual rating values of each user-movie pair
test_enc_graph : dgl.DGLHeteroGraph
Encoder graph for test.
test_dec_graph : dgl.DGLHeteroGraph
Decoder graph for test.
test_labels : torch.Tensor
The categorical label of each user-movie pair
test_truths : torch.Tensor
The actual rating values of each user-movie pair
user_feature : torch.Tensor
User feature tensor. If None, representing an identity matrix.
movie_feature : torch.Tensor
Movie feature tensor. If None, representing an identity matrix.
possible_rating_values : np.ndarray
Available rating values in the dataset
Parameters
----------
name : str
Dataset name. Could be "ml-100k", "ml-1m", "ml-10m"
device : torch.device
Device context
    mix_cpu_gpu : bool, optional
        If true, the ``user_feature`` attribute is stored on the CPU
    use_one_hot_fea : bool, optional
        If true, the ``user_feature`` attribute is None, representing a one-hot identity
        matrix. (Default: False)
    symm : bool, optional
        If true, use the symmetric normalization constant. Otherwise, use the left
        normalization constant. (Default: True)
test_ratio : float, optional
Ratio of test data
valid_ratio : float, optional
Ratio of validation data
"""
def __init__(self, name, device, mix_cpu_gpu=False,
use_one_hot_fea=False, symm=True,
test_ratio=0.1, valid_ratio=0.1):
self._name = name
self._device = device
self._symm = symm
self._test_ratio = test_ratio
self._valid_ratio = valid_ratio
# download and extract
download_dir = get_download_dir()
zip_file_path = '{}/{}.zip'.format(download_dir, name)
download(_urls[name], path=zip_file_path)
extract_archive(zip_file_path, '{}/{}'.format(download_dir, name))
if name == 'ml-10m':
root_folder = 'ml-10M100K'
else:
root_folder = name
self._dir = os.path.join(download_dir, name, root_folder)
print("Starting processing {} ...".format(self._name))
self._load_raw_user_info()
self._load_raw_movie_info()
print('......')
if self._name == 'ml-100k':
self.all_train_rating_info = self._load_raw_rates(os.path.join(self._dir, 'u1.base'), '\t')
self.test_rating_info = self._load_raw_rates(os.path.join(self._dir, 'u1.test'), '\t')
            self.all_rating_info = pd.concat([self.all_train_rating_info, self.test_rating_info])
from collections import OrderedDict
from datetime import datetime, timedelta
import numpy as np
import numpy.ma as ma
import pytest
from pandas._libs import iNaT, lib
from pandas.core.dtypes.common import is_categorical_dtype, is_datetime64tz_dtype
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
Series,
Timestamp,
date_range,
isna,
period_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray, period_array
class TestSeriesConstructors:
@pytest.mark.parametrize(
"constructor,check_index_type",
[
# NOTE: some overlap with test_constructor_empty but that test does not
# test for None or an empty generator.
# test_constructor_pass_none tests None but only with the index also
# passed.
(lambda: Series(), True),
(lambda: Series(None), True),
(lambda: Series({}), True),
(lambda: Series(()), False), # creates a RangeIndex
(lambda: Series([]), False), # creates a RangeIndex
(lambda: Series((_ for _ in [])), False), # creates a RangeIndex
(lambda: Series(data=None), True),
(lambda: Series(data={}), True),
(lambda: Series(data=()), False), # creates a RangeIndex
(lambda: Series(data=[]), False), # creates a RangeIndex
(lambda: Series(data=(_ for _ in [])), False), # creates a RangeIndex
],
)
def test_empty_constructor(self, constructor, check_index_type):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
expected = Series()
result = constructor()
assert len(result.index) == 0
tm.assert_series_equal(result, expected, check_index_type=check_index_type)
def test_invalid_dtype(self):
# GH15520
msg = "not understood"
invalid_list = [pd.Timestamp, "pd.Timestamp", list]
for dtype in invalid_list:
with pytest.raises(TypeError, match=msg):
Series([], name="time", dtype=dtype)
def test_invalid_compound_dtype(self):
# GH#13296
c_dtype = np.dtype([("a", "i8"), ("b", "f4")])
cdt_arr = np.array([(1, 0.4), (256, -13)], dtype=c_dtype)
with pytest.raises(ValueError, match="Use DataFrame instead"):
Series(cdt_arr, index=["A", "B"])
def test_scalar_conversion(self):
# Pass in scalar is disabled
scalar = Series(0.5)
assert not isinstance(scalar, float)
# Coercion
assert float(Series([1.0])) == 1.0
assert int(Series([1.0])) == 1
def test_constructor(self, datetime_series):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty_series = Series()
assert datetime_series.index.is_all_dates
# Pass in Series
derived = Series(datetime_series)
assert derived.index.is_all_dates
assert tm.equalContents(derived.index, datetime_series.index)
# Ensure new index is not created
assert id(datetime_series.index) == id(derived.index)
# Mixed type Series
mixed = Series(["hello", np.NaN], index=[0, 1])
assert mixed.dtype == np.object_
assert mixed[1] is np.NaN
assert not empty_series.index.is_all_dates
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
assert not Series().index.is_all_dates
# exception raised is of type Exception
with pytest.raises(Exception, match="Data must be 1-dimensional"):
Series(np.random.randn(3, 3), index=np.arange(3))
mixed.name = "Series"
rs = Series(mixed).name
xp = "Series"
assert rs == xp
# raise on MultiIndex GH4187
m = MultiIndex.from_arrays([[1, 2], [3, 4]])
msg = "initializing a Series from a MultiIndex is not supported"
with pytest.raises(NotImplementedError, match=msg):
Series(m)
@pytest.mark.parametrize("input_class", [list, dict, OrderedDict])
def test_constructor_empty(self, input_class):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series()
empty2 = Series(input_class())
# these are Index() and RangeIndex() which don't compare type equal
# but are just .equals
tm.assert_series_equal(empty, empty2, check_index_type=False)
# With explicit dtype:
empty = Series(dtype="float64")
empty2 = Series(input_class(), dtype="float64")
tm.assert_series_equal(empty, empty2, check_index_type=False)
# GH 18515 : with dtype=category:
empty = Series(dtype="category")
empty2 = Series(input_class(), dtype="category")
tm.assert_series_equal(empty, empty2, check_index_type=False)
if input_class is not list:
# With index:
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series(index=range(10))
empty2 = Series(input_class(), index=range(10))
tm.assert_series_equal(empty, empty2)
# With index and dtype float64:
empty = Series(np.nan, index=range(10))
empty2 = Series(input_class(), index=range(10), dtype="float64")
tm.assert_series_equal(empty, empty2)
# GH 19853 : with empty string, index and dtype str
empty = Series("", dtype=str, index=range(3))
empty2 = Series("", index=range(3))
tm.assert_series_equal(empty, empty2)
@pytest.mark.parametrize("input_arg", [np.nan, float("nan")])
def test_constructor_nan(self, input_arg):
empty = Series(dtype="float64", index=range(10))
empty2 = Series(input_arg, index=range(10))
tm.assert_series_equal(empty, empty2, check_index_type=False)
@pytest.mark.parametrize(
"dtype",
["f8", "i8", "M8[ns]", "m8[ns]", "category", "object", "datetime64[ns, UTC]"],
)
@pytest.mark.parametrize("index", [None, pd.Index([])])
def test_constructor_dtype_only(self, dtype, index):
# GH-20865
result = pd.Series(dtype=dtype, index=index)
assert result.dtype == dtype
assert len(result) == 0
def test_constructor_no_data_index_order(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
result = pd.Series(index=["b", "a", "c"])
assert result.index.tolist() == ["b", "a", "c"]
def test_constructor_no_data_string_type(self):
# GH 22477
result = pd.Series(index=[1], dtype=str)
assert np.isnan(result.iloc[0])
@pytest.mark.parametrize("item", ["entry", "ѐ", 13])
def test_constructor_string_element_string_type(self, item):
# GH 22477
result = pd.Series(item, index=[1], dtype=str)
assert result.iloc[0] == str(item)
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
ser = Series(["x", None], dtype=string_dtype)
result = ser.isna()
expected = Series([False, True])
tm.assert_series_equal(result, expected)
assert ser.iloc[1] is None
ser = Series(["x", np.nan], dtype=string_dtype)
assert np.isnan(ser.iloc[1])
def test_constructor_series(self):
index1 = ["d", "b", "a", "c"]
index2 = sorted(index1)
s1 = Series([4, 7, -5, 3], index=index1)
s2 = Series(s1, index=index2)
tm.assert_series_equal(s2, s1.sort_index())
def test_constructor_iterable(self):
# GH 21987
class Iter:
def __iter__(self):
for i in range(10):
yield i
expected = Series(list(range(10)), dtype="int64")
result = Series(Iter(), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_sequence(self):
# GH 21987
expected = Series(list(range(10)), dtype="int64")
result = Series(range(10), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_single_str(self):
# GH 21987
expected = Series(["abc"])
result = Series("abc")
tm.assert_series_equal(result, expected)
def test_constructor_list_like(self):
# make sure that we are coercing different
# list-likes to standard dtypes and not
# platform specific
expected = Series([1, 2, 3], dtype="int64")
for obj in [[1, 2, 3], (1, 2, 3), np.array([1, 2, 3], dtype="int64")]:
result = Series(obj, index=[0, 1, 2])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", ["bool", "int32", "int64", "float64"])
def test_constructor_index_dtype(self, dtype):
# GH 17088
s = Series(Index([0, 2, 4]), dtype=dtype)
assert s.dtype == dtype
@pytest.mark.parametrize(
"input_vals",
[
([1, 2]),
(["1", "2"]),
(list(pd.date_range("1/1/2011", periods=2, freq="H"))),
(list(pd.date_range("1/1/2011", periods=2, freq="H", tz="US/Eastern"))),
([pd.Interval(left=0, right=5)]),
],
)
def test_constructor_list_str(self, input_vals, string_dtype):
# GH 16605
# Ensure that data elements from a list are converted to strings
# when dtype is str, 'str', or 'U'
result = Series(input_vals, dtype=string_dtype)
expected = Series(input_vals).astype(string_dtype)
tm.assert_series_equal(result, expected)
def test_constructor_list_str_na(self, string_dtype):
result = Series([1.0, 2.0, np.nan], dtype=string_dtype)
expected = Series(["1.0", "2.0", np.nan], dtype=object)
tm.assert_series_equal(result, expected)
assert np.isnan(result[2])
def test_constructor_generator(self):
gen = (i for i in range(10))
result = Series(gen)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
gen = (i for i in range(10))
result = Series(gen, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_map(self):
# GH8909
m = map(lambda x: x, range(10))
result = Series(m)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
m = map(lambda x: x, range(10))
result = Series(m, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_categorical(self):
cat = pd.Categorical([0, 1, 2, 0, 1, 2], ["a", "b", "c"], fastpath=True)
res = Series(cat)
tm.assert_categorical_equal(res.values, cat)
# can cast to a new dtype
result = Series(pd.Categorical([1, 2, 3]), dtype="int64")
expected = pd.Series([1, 2, 3], dtype="int64")
tm.assert_series_equal(result, expected)
# GH12574
cat = Series(pd.Categorical([1, 2, 3]), dtype="category")
assert is_categorical_dtype(cat)
assert is_categorical_dtype(cat.dtype)
s = Series([1, 2, 3], dtype="category")
assert is_categorical_dtype(s)
assert is_categorical_dtype(s.dtype)
def test_constructor_categorical_with_coercion(self):
factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"])
# test basic creation / coercion of categoricals
s = Series(factor, name="A")
assert s.dtype == "category"
assert len(s) == len(factor)
str(s.values)
str(s)
# in a frame
df = DataFrame({"A": factor})
result = df["A"]
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
df = DataFrame({"A": s})
result = df["A"]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
# multiples
df = DataFrame({"A": s, "B": s, "C": 1})
result1 = df["A"]
result2 = df["B"]
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
assert result2.name == "B"
assert len(df) == len(factor)
str(df.values)
str(df)
# GH8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name) # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
assert result == expected
result = x.person_name[0]
assert result == expected
result = x.person_name.loc[0]
assert result == expected
def test_constructor_categorical_dtype(self):
result = pd.Series(
["a", "b"], dtype=CategoricalDtype(["a", "b", "c"], ordered=True)
)
assert is_categorical_dtype(result.dtype) is True
tm.assert_index_equal(result.cat.categories, pd.Index(["a", "b", "c"]))
assert result.cat.ordered
result = pd.Series(["a", "b"], dtype=CategoricalDtype(["b", "a"]))
assert is_categorical_dtype(result.dtype)
tm.assert_index_equal(result.cat.categories, pd.Index(["b", "a"]))
assert result.cat.ordered is False
# GH 19565 - Check broadcasting of scalar with Categorical dtype
result = Series(
"a", index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
expected = Series(
["a", "a"], index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
tm.assert_series_equal(result, expected)
def test_constructor_categorical_string(self):
# GH 26336: the string 'category' maintains existing CategoricalDtype
cdt = CategoricalDtype(categories=list("dabc"), ordered=True)
expected = Series(list("abcabc"), dtype=cdt)
# Series(Categorical, dtype='category') keeps existing dtype
cat = Categorical(list("abcabc"), dtype=cdt)
result = Series(cat, dtype="category")
tm.assert_series_equal(result, expected)
# Series(Series[Categorical], dtype='category') keeps existing dtype
result = Series(result, dtype="category")
tm.assert_series_equal(result, expected)
def test_categorical_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat, copy=True)
assert s.cat is not cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat)
assert s.values is cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_unordered_compare_equal(self):
left = pd.Series(["a", "b", "c"], dtype=CategoricalDtype(["a", "b"]))
right = pd.Series(pd.Categorical(["a", "b", np.nan], categories=["a", "b"]))
tm.assert_series_equal(left, right)
def test_constructor_maskedarray(self):
data = ma.masked_all((3,), dtype=float)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
data[0] = 0.0
data[2] = 2.0
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([0.0, np.nan, 2.0], index=index)
tm.assert_series_equal(result, expected)
data[1] = 1.0
result = Series(data, index=index)
expected = Series([0.0, 1.0, 2.0], index=index)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=int)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan], dtype=float)
tm.assert_series_equal(result, expected)
data[0] = 0
data[2] = 2
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([0, np.nan, 2], index=index, dtype=float)
tm.assert_series_equal(result, expected)
data[1] = 1
result = Series(data, index=index)
expected = Series([0, 1, 2], index=index, dtype=int)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=bool)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan], dtype=object)
tm.assert_series_equal(result, expected)
data[0] = True
data[2] = False
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([True, np.nan, False], index=index, dtype=object)
tm.assert_series_equal(result, expected)
data[1] = True
result = Series(data, index=index)
expected = Series([True, True, False], index=index, dtype=bool)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype="M8[ns]")
result = Series(data)
expected = Series([iNaT, iNaT, iNaT], dtype="M8[ns]")
tm.assert_series_equal(result, expected)
data[0] = datetime(2001, 1, 1)
data[2] = datetime(2001, 1, 3)
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series(
[datetime(2001, 1, 1), iNaT, datetime(2001, 1, 3)],
index=index,
dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
data[1] = datetime(2001, 1, 2)
result = Series(data, index=index)
expected = Series(
[datetime(2001, 1, 1), datetime(2001, 1, 2), datetime(2001, 1, 3)],
index=index,
dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
def test_constructor_maskedarray_hardened(self):
# Check numpy masked arrays with hard masks -- from GH24574
data = ma.masked_all((3,), dtype=float).harden_mask()
result = pd.Series(data)
expected = pd.Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range("20090415", "20090519", freq="B")
data = {k: 1 for k in rng}
result = Series(data, index=rng)
assert result.index is rng
def test_constructor_default_index(self):
s = Series([0, 1, 2])
tm.assert_index_equal(s.index, pd.Index(np.arange(3)))
@pytest.mark.parametrize(
"input",
[
[1, 2, 3],
(1, 2, 3),
list(range(3)),
pd.Categorical(["a", "b", "a"]),
(i for i in range(3)),
map(lambda x: x, range(3)),
],
)
def test_constructor_index_mismatch(self, input):
# GH 19342
# test that construction of a Series with an index of different length
# raises an error
msg = "Length of passed values is 3, index implies 4"
with pytest.raises(ValueError, match=msg):
Series(input, index=np.arange(4))
def test_constructor_numpy_scalar(self):
# GH 19342
# construction with a numpy scalar
# should not raise
result = Series(np.array(100), index=np.arange(4), dtype="int64")
expected = Series(100, index=np.arange(4), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_broadcast_list(self):
# GH 19342
# construction with single-element container and index
# should raise
msg = "Length of passed values is 1, index implies 3"
with pytest.raises(ValueError, match=msg):
Series(["foo"], index=["a", "b", "c"])
def test_constructor_corner(self):
df = tm.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
assert isinstance(s, Series)
def test_constructor_sanitize(self):
s = Series(np.array([1.0, 1.0, 8.0]), dtype="i8")
assert s.dtype == np.dtype("i8")
s = Series(np.array([1.0, 1.0, np.nan]), copy=True, dtype="i8")
assert s.dtype == np.dtype("f8")
def test_constructor_copy(self):
# GH15125
# test dtype parameter has no side effects on copy=True
for data in [[1.0], np.array([1.0])]:
x = Series(data)
y = pd.Series(x, copy=True, dtype=float)
# copy=True maintains original data in Series
tm.assert_series_equal(x, y)
# changes to origin of copy does not affect the copy
x[0] = 2.0
assert not x.equals(y)
assert x[0] == 2.0
assert y[0] == 1.0
@pytest.mark.parametrize(
"index",
[
pd.date_range("20170101", periods=3, tz="US/Eastern"),
pd.date_range("20170101", periods=3),
pd.timedelta_range("1 day", periods=3),
pd.period_range("2012Q1", periods=3, freq="Q"),
pd.Index(list("abc")),
pd.Int64Index([1, 2, 3]),
pd.RangeIndex(0, 3),
],
ids=lambda x: type(x).__name__,
)
def test_constructor_limit_copies(self, index):
# GH 17449
# limit copies of input
s = pd.Series(index)
# we make 1 copy; this is just a smoke test here
assert s._mgr.blocks[0].values is not index
def test_constructor_pass_none(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
s = Series(None, index=range(5))
assert s.dtype == np.float64
s = Series(None, index=range(5), dtype=object)
assert s.dtype == np.object_
# GH 7431
# inference on the index
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
s = Series(index=np.array([None]))
expected = Series(index=Index([None]))
tm.assert_series_equal(s, expected)
def test_constructor_pass_nan_nat(self):
# GH 13467
exp = Series([np.nan, np.nan], dtype=np.float64)
assert exp.dtype == np.float64
tm.assert_series_equal(Series([np.nan, np.nan]), exp)
tm.assert_series_equal(Series(np.array([np.nan, np.nan])), exp)
exp = Series([pd.NaT, pd.NaT])
assert exp.dtype == "datetime64[ns]"
tm.assert_series_equal(Series([pd.NaT, pd.NaT]), exp)
tm.assert_series_equal(Series(np.array([pd.NaT, pd.NaT])), exp)
tm.assert_series_equal(Series([pd.NaT, np.nan]), exp)
tm.assert_series_equal(Series(np.array([pd.NaT, np.nan])), exp)
tm.assert_series_equal(Series([np.nan, pd.NaT]), exp)
tm.assert_series_equal(Series(np.array([np.nan, pd.NaT])), exp)
def test_constructor_cast(self):
msg = "could not convert string to float"
with pytest.raises(ValueError, match=msg):
Series(["a", "b", "c"], dtype=float)
def test_constructor_unsigned_dtype_overflow(self, uint_dtype):
# see gh-15832
msg = "Trying to coerce negative values to unsigned integers"
with pytest.raises(OverflowError, match=msg):
Series([-1], dtype=uint_dtype)
def test_constructor_coerce_float_fail(self, any_int_dtype):
# see gh-15832
msg = "Trying to coerce float values to integers"
with pytest.raises(ValueError, match=msg):
Series([1, 2, 3.5], dtype=any_int_dtype)
def test_constructor_coerce_float_valid(self, float_dtype):
s = Series([1, 2, 3.5], dtype=float_dtype)
expected = Series([1, 2, 3.5]).astype(float_dtype)
tm.assert_series_equal(s, expected)
def test_constructor_dtype_no_cast(self):
# see gh-1572
s = Series([1, 2, 3])
s2 = Series(s, dtype=np.int64)
s2[1] = 5
assert s[1] == 5
def test_constructor_datelike_coercion(self):
# GH 9477
        # incorrectly inferring on datetimelike-looking data when object dtype is
        # specified
s = Series([Timestamp("20130101"), "NOV"], dtype=object)
assert s.iloc[0] == Timestamp("20130101")
assert s.iloc[1] == "NOV"
assert s.dtype == object
        # the dtype was being reset on the slicing and re-inferred to datetime
        # even though the blocks are mixed
belly = "216 3T19".split()
wing1 = "2T15 4H19".split()
wing2 = "416 4T20".split()
mat = pd.to_datetime("2016-01-22 2019-09-07".split())
df = pd.DataFrame({"wing1": wing1, "wing2": wing2, "mat": mat}, index=belly)
result = df.loc["3T19"]
assert result.dtype == object
result = df.loc["216"]
assert result.dtype == object
def test_constructor_datetimes_with_nulls(self):
# gh-15869
for arr in [
np.array([None, None, None, None, datetime.now(), None]),
np.array([None, None, datetime.now(), None]),
]:
result = Series(arr)
assert result.dtype == "M8[ns]"
def test_constructor_dtype_datetime64(self):
s = Series(iNaT, dtype="M8[ns]", index=range(5))
assert isna(s).all()
        # in theory this should be all nulls, but since
        # we are not specifying a dtype, it is ambiguous
s = Series(iNaT, index=range(5))
assert not isna(s).all()
s = Series(np.nan, dtype="M8[ns]", index=range(5))
assert isna(s).all()
s = Series([datetime(2001, 1, 2, 0, 0), iNaT], dtype="M8[ns]")
assert isna(s[1])
assert s.dtype == "M8[ns]"
s = Series([datetime(2001, 1, 2, 0, 0), np.nan], dtype="M8[ns]")
assert isna(s[1])
assert s.dtype == "M8[ns]"
# GH3416
dates = [
np.datetime64(datetime(2013, 1, 1)),
np.datetime64(datetime(2013, 1, 2)),
np.datetime64(datetime(2013, 1, 3)),
]
s = Series(dates)
assert s.dtype == "M8[ns]"
s.iloc[0] = np.nan
assert s.dtype == "M8[ns]"
# GH3414 related
expected = Series(
[datetime(2013, 1, 1), datetime(2013, 1, 2), datetime(2013, 1, 3)],
dtype="datetime64[ns]",
)
result = Series(Series(dates).astype(np.int64) / 1000000, dtype="M8[ms]")
tm.assert_series_equal(result, expected)
result = Series(dates, dtype="datetime64[ns]")
tm.assert_series_equal(result, expected)
expected = Series(
[pd.NaT, datetime(2013, 1, 2), datetime(2013, 1, 3)], dtype="datetime64[ns]"
)
result = Series([np.nan] + dates[1:], dtype="datetime64[ns]")
tm.assert_series_equal(result, expected)
dts = Series(dates, dtype="datetime64[ns]")
# valid astype
dts.astype("int64")
# invalid casting
msg = r"cannot astype a datetimelike from \[datetime64\[ns\]\] to \[int32\]"
with pytest.raises(TypeError, match=msg):
dts.astype("int32")
# ints are ok
# we test with np.int64 to get similar results on
# windows / 32-bit platforms
result = Series(dts, dtype=np.int64)
expected = Series(dts.astype(np.int64))
tm.assert_series_equal(result, expected)
        # invalid dates can be held as object
result = Series([datetime(2, 1, 1)])
assert result[0] == datetime(2, 1, 1, 0, 0)
result = Series([datetime(3000, 1, 1)])
assert result[0] == datetime(3000, 1, 1, 0, 0)
# don't mix types
result = Series([Timestamp("20130101"), 1], index=["a", "b"])
assert result["a"] == Timestamp("20130101")
assert result["b"] == 1
# GH6529
# coerce datetime64 non-ns properly
dates = date_range("01-Jan-2015", "01-Dec-2015", freq="M")
values2 = dates.view(np.ndarray).astype("datetime64[ns]")
expected = Series(values2, index=dates)
for dtype in ["s", "D", "ms", "us", "ns"]:
values1 = dates.view(np.ndarray).astype(f"M8[{dtype}]")
result = Series(values1, dates)
tm.assert_series_equal(result, expected)
# GH 13876
        # coerce non-ns to object properly
expected = Series(values2, index=dates, dtype=object)
for dtype in ["s", "D", "ms", "us", "ns"]:
values1 = dates.view(np.ndarray).astype(f"M8[{dtype}]")
result = Series(values1, index=dates, dtype=object)
tm.assert_series_equal(result, expected)
# leave datetime.date alone
dates2 = np.array([d.date() for d in dates.to_pydatetime()], dtype=object)
series1 = Series(dates2, dates)
tm.assert_numpy_array_equal(series1.values, dates2)
assert series1.dtype == object
# these will correctly infer a datetime
s = Series([None, pd.NaT, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
s = Series([np.nan, pd.NaT, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
s = Series([pd.NaT, None, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
s = Series([pd.NaT, np.nan, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
# tz-aware (UTC and other tz's)
# GH 8411
dr = date_range("20130101", periods=3)
assert Series(dr).iloc[0].tz is None
dr = date_range("20130101", periods=3, tz="UTC")
assert str(Series(dr).iloc[0].tz) == "UTC"
dr = date_range("20130101", periods=3, tz="US/Eastern")
assert str(Series(dr).iloc[0].tz) == "US/Eastern"
# non-convertible
s = Series([1479596223000, -1479590, pd.NaT])
assert s.dtype == "object"
assert s[2] is pd.NaT
assert "NaT" in str(s)
# if we passed a NaT it remains
s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), pd.NaT])
assert s.dtype == "object"
assert s[2] is pd.NaT
assert "NaT" in str(s)
# if we passed a nan it remains
s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), np.nan])
assert s.dtype == "object"
assert s[2] is np.nan
assert "NaN" in str(s)
def test_constructor_with_datetime_tz(self):
# 8260
# support datetime64 with tz
dr = date_range("20130101", periods=3, tz="US/Eastern")
s = Series(dr)
assert s.dtype.name == "datetime64[ns, US/Eastern]"
assert s.dtype == "datetime64[ns, US/Eastern]"
assert is_datetime64tz_dtype(s.dtype)
assert "datetime64[ns, US/Eastern]" in str(s)
# export
result = s.values
assert isinstance(result, np.ndarray)
assert result.dtype == "datetime64[ns]"
exp = pd.DatetimeIndex(result)
exp = exp.tz_localize("UTC").tz_convert(tz=s.dt.tz)
tm.assert_index_equal(dr, exp)
# indexing
result = s.iloc[0]
assert result == Timestamp(
"2013-01-01 00:00:00-0500", tz="US/Eastern", freq="D"
)
result = s[0]
assert result == Timestamp(
"2013-01-01 00:00:00-0500", tz="US/Eastern", freq="D"
)
result = s[Series([True, True, False], index=s.index)]
tm.assert_series_equal(result, s[0:2])
result = s.iloc[0:1]
tm.assert_series_equal(result, Series(dr[0:1]))
# concat
result = pd.concat([s.iloc[0:1], s.iloc[1:]])
tm.assert_series_equal(result, s)
# short str
assert "datetime64[ns, US/Eastern]" in str(s)
# formatting with NaT
result = s.shift()
assert "datetime64[ns, US/Eastern]" in str(result)
assert "NaT" in str(result)
# long str
t = Series(date_range("20130101", periods=1000, tz="US/Eastern"))
assert "datetime64[ns, US/Eastern]" in str(t)
result = pd.DatetimeIndex(s, freq="infer")
tm.assert_index_equal(result, dr)
# inference
s = Series(
[
pd.Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
pd.Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"),
]
)
assert s.dtype == "datetime64[ns, US/Pacific]"
assert lib.infer_dtype(s, skipna=True) == "datetime64"
s = Series(
[
pd.Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
pd.Timestamp("2013-01-02 14:00:00-0800", tz="US/Eastern"),
]
)
assert s.dtype == "object"
assert lib.infer_dtype(s, skipna=True) == "datetime"
# with all NaT
s = Series(pd.NaT, index=[0, 1], dtype="datetime64[ns, US/Eastern]")
expected = Series(pd.DatetimeIndex(["NaT", "NaT"], tz="US/Eastern"))
tm.assert_series_equal(s, expected)
@pytest.mark.parametrize("arr_dtype", [np.int64, np.float64])
@pytest.mark.parametrize("dtype", ["M8", "m8"])
@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"])
def test_construction_to_datetimelike_unit(self, arr_dtype, dtype, unit):
# tests all units
# gh-19223
dtype = f"{dtype}[{unit}]"
arr = np.array([1, 2, 3], dtype=arr_dtype)
s = Series(arr)
result = s.astype(dtype)
expected = Series(arr.astype(dtype))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("arg", ["2013-01-01 00:00:00", pd.NaT, np.nan, None])
def test_constructor_with_naive_string_and_datetimetz_dtype(self, arg):
# GH 17415: With naive string
result = Series([arg], dtype="datetime64[ns, CET]")
expected = Series(pd.Timestamp(arg)).dt.tz_localize("CET")
tm.assert_series_equal(result, expected)
def test_constructor_datetime64_bigendian(self):
# GH#30976
ms = np.datetime64(1, "ms")
arr = np.array([np.datetime64(1, "ms")], dtype=">M8[ms]")
result = Series(arr)
expected = Series([Timestamp(ms)])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("interval_constructor", [IntervalIndex, IntervalArray])
def test_construction_interval(self, interval_constructor):
# construction from interval & array of intervals
intervals = interval_constructor.from_breaks(np.arange(3), closed="right")
result = Series(intervals)
assert result.dtype == "interval[int64]"
tm.assert_index_equal(Index(result.values), Index(intervals))
@pytest.mark.parametrize(
"data_constructor", [list, np.array], ids=["list", "ndarray[object]"]
)
def test_constructor_infer_interval(self, data_constructor):
# GH 23563: consistent closed results in interval dtype
data = [pd.Interval(0, 1), pd.Interval(0, 2), None]
result = pd.Series(data_constructor(data))
expected = pd.Series(IntervalArray(data))
assert result.dtype == "interval[float64]"
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data_constructor", [list, np.array], ids=["list", "ndarray[object]"]
)
def test_constructor_interval_mixed_closed(self, data_constructor):
# GH 23563: mixed closed results in object dtype (not interval dtype)
data = [pd.Interval(0, 1, closed="both"), pd.Interval(0, 2, closed="neither")]
result = Series(data_constructor(data))
assert result.dtype == object
assert result.tolist() == data
def test_construction_consistency(self):
# make sure that we are not re-localizing upon construction
# GH 14928
s = Series(pd.date_range("20130101", periods=3, tz="US/Eastern"))
result = Series(s, dtype=s.dtype)
tm.assert_series_equal(result, s)
result = Series(s.dt.tz_convert("UTC"), dtype=s.dtype)
tm.assert_series_equal(result, s)
result = Series(s.values, dtype=s.dtype)
tm.assert_series_equal(result, s)
@pytest.mark.parametrize(
"data_constructor", [list, np.array], ids=["list", "ndarray[object]"]
)
def test_constructor_infer_period(self, data_constructor):
data = [pd.Period("2000", "D"), pd.Period("2001", "D"), None]
result = pd.Series(data_constructor(data))
expected = pd.Series(period_array(data))
tm.assert_series_equal(result, expected)
assert result.dtype == "Period[D]"
def test_constructor_period_incompatible_frequency(self):
data = [pd.Period("2000", "D"), pd.Period("2001", "A")]
result = pd.Series(data)
assert result.dtype == object
assert result.tolist() == data
def test_constructor_periodindex(self):
# GH7932
# converting a PeriodIndex when put in a Series
pi = period_range("20130101", periods=5, freq="D")
s = Series(pi)
assert s.dtype == "Period[D]"
expected = Series(pi.astype(object))
tm.assert_series_equal(s, expected)
def test_constructor_dict(self):
d = {"a": 0.0, "b": 1.0, "c": 2.0}
result = Series(d, index=["b", "c", "d", "a"])
expected = Series([1, 2, np.nan, 0], index=["b", "c", "d", "a"])
tm.assert_series_equal(result, expected)
pidx = tm.makePeriodIndex(100)
d = {pidx[0]: 0, pidx[1]: 1}
result = Series(d, index=pidx)
expected = Series(np.nan, pidx, dtype=np.float64)
expected.iloc[0] = 0
expected.iloc[1] = 1
tm.assert_series_equal(result, expected)
def test_constructor_dict_list_value_explicit_dtype(self):
# GH 18625
d = {"a": [[2], [3], [4]]}
result = Series(d, index=["a"], dtype="object")
expected = Series(d, index=["a"])
tm.assert_series_equal(result, expected)
def test_constructor_dict_order(self):
# GH19018
        # initialization ordering: by insertion order if python >= 3.6, else
        # order by value
d = {"b": 1, "a": 0, "c": 2}
result = Series(d)
expected = Series([1, 0, 2], index=list("bac"))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data,dtype",
[
(Period("2020-01"), PeriodDtype("M")),
(Interval(left=0, right=5), IntervalDtype("int64")),
(
Timestamp("2011-01-01", tz="US/Eastern"),
DatetimeTZDtype(tz="US/Eastern"),
),
],
)
def test_constructor_dict_extension(self, data, dtype):
d = {"a": data}
result = Series(d, index=["a"])
expected = Series(data, index=["a"], dtype=dtype)
assert result.dtype == dtype
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float("nan")])
def test_constructor_dict_nan_key(self, value):
# GH 18480
d = {1: "a", value: "b", float("nan"): "c", 4: "d"}
result = Series(d).sort_values()
expected = Series(["a", "b", "c", "d"], index=[1, value, np.nan, 4])
tm.assert_series_equal(result, expected)
# MultiIndex:
d = {(1, 1): "a", (2, np.nan): "b", (3, value): "c"}
result = Series(d).sort_values()
expected = Series(
["a", "b", "c"], index=Index([(1, 1), (2, np.nan), (3, value)])
)
tm.assert_series_equal(result, expected)
def test_constructor_dict_datetime64_index(self):
# GH 9456
dates_as_str = ["1984-02-19", "1988-11-06", "1989-12-03", "1990-03-15"]
values = [42544017.198965244, 1234565, 40512335.181958228, -1]
def create_data(constructor):
return dict(zip((constructor(x) for x in dates_as_str), values))
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, "%Y-%m-%d"))
data_Timestamp = create_data(Timestamp)
expected = Series(values, (Timestamp(x) for x in dates_as_str))
result_datetime64 = Series(data_datetime64)
result_datetime = Series(data_datetime)
result_Timestamp = Series(data_Timestamp)
tm.assert_series_equal(result_datetime64, expected)
tm.assert_series_equal(result_datetime, expected)
tm.assert_series_equal(result_Timestamp, expected)
def test_constructor_dict_tuple_indexer(self):
# GH 12948
data = {(1, 1, None): -1.0}
result = Series(data)
expected = Series(
-1.0, index=MultiIndex(levels=[[1], [1], [np.nan]], codes=[[0], [0], [-1]])
)
tm.assert_series_equal(result, expected)
def test_constructor_mapping(self, non_dict_mapping_subclass):
# GH 29788
ndm = non_dict_mapping_subclass({3: "three"})
result = Series(ndm)
expected = Series(["three"], index=[3])
tm.assert_series_equal(result, expected)
def test_constructor_list_of_tuples(self):
data = [(1, 1), (2, 2), (2, 3)]
s = Series(data)
assert list(s) == data
def test_constructor_tuple_of_tuples(self):
data = ((1, 1), (2, 2), (2, 3))
s = Series(data)
assert tuple(s) == data
def test_constructor_dict_of_tuples(self):
data = {(1, 2): 3, (None, 5): 6}
result = Series(data).sort_values()
expected = Series([3, 6], index=MultiIndex.from_tuples([(1, 2), (None, 5)]))
tm.assert_series_equal(result, expected)
def test_constructor_set(self):
values = {1, 2, 3, 4, 5}
with pytest.raises(TypeError, match="'set' type is unordered"):
Series(values)
values = frozenset(values)
with pytest.raises(TypeError, match="'frozenset' type is unordered"):
Series(values)
# https://github.com/pandas-dev/pandas/issues/22698
@pytest.mark.filterwarnings("ignore:elementwise comparison:FutureWarning")
def test_fromDict(self):
data = {"a": 0, "b": 1, "c": 2, "d": 3}
series = Series(data)
tm.assert_is_sorted(series.index)
data = {"a": 0, "b": "1", "c": "2", "d": datetime.now()}
series = Series(data)
assert series.dtype == np.object_
data = {"a": 0, "b": "1", "c": "2", "d": "3"}
series = Series(data)
assert series.dtype == np.object_
data = {"a": "0", "b": "1"}
series = Series(data, dtype=float)
assert series.dtype == np.float64
def test_fromValue(self, datetime_series):
nans = Series(np.NaN, index=datetime_series.index, dtype=np.float64)
assert nans.dtype == np.float_
assert len(nans) == len(datetime_series)
strings = Series("foo", index=datetime_series.index)
assert strings.dtype == np.object_
assert len(strings) == len(datetime_series)
d = datetime.now()
dates = Series(d, index=datetime_series.index)
assert dates.dtype == "M8[ns]"
assert len(dates) == len(datetime_series)
# GH12336
# Test construction of categorical series from value
categorical = Series(0, index=datetime_series.index, dtype="category")
expected = Series(0, index=datetime_series.index).astype("category")
assert categorical.dtype == "category"
assert len(categorical) == len(datetime_series)
tm.assert_series_equal(categorical, expected)
def test_constructor_dtype_timedelta64(self):
# basic
td = Series([timedelta(days=i) for i in range(3)])
assert td.dtype == "timedelta64[ns]"
td = Series([timedelta(days=1)])
assert td.dtype == "timedelta64[ns]"
td = Series([timedelta(days=1), timedelta(days=2), np.timedelta64(1, "s")])
assert td.dtype == "timedelta64[ns]"
# mixed with NaT
td = Series([timedelta(days=1), NaT], dtype="m8[ns]")
assert td.dtype == "timedelta64[ns]"
td = Series([timedelta(days=1), np.nan], dtype="m8[ns]")
assert td.dtype == "timedelta64[ns]"
td = Series([np.timedelta64(300000000), pd.NaT], dtype="m8[ns]")
assert td.dtype == "timedelta64[ns]"
# improved inference
# GH5689
td = Series([np.timedelta64(300000000), NaT])
assert td.dtype == "timedelta64[ns]"
# because iNaT is int, not coerced to timedelta
td = Series([np.timedelta64(300000000), iNaT])
assert td.dtype == "object"
td = Series([np.timedelta64(300000000), np.nan])
assert td.dtype == "timedelta64[ns]"
td = Series([pd.NaT, np.timedelta64(300000000)])
assert td.dtype == "timedelta64[ns]"
td = Series([np.timedelta64(1, "s")])
assert td.dtype == "timedelta64[ns]"
# these are frequency conversion astypes
# for t in ['s', 'D', 'us', 'ms']:
# with pytest.raises(TypeError):
# td.astype('m8[%s]' % t)
# valid astype
td.astype("int64")
# invalid casting
msg = r"cannot astype a timedelta from \[timedelta64\[ns\]\] to \[int32\]"
with pytest.raises(TypeError, match=msg):
td.astype("int32")
# this is an invalid casting
msg = "Could not convert object to NumPy timedelta"
with pytest.raises(ValueError, match=msg):
Series([timedelta(days=1), "foo"], dtype="m8[ns]")
# leave as object here
td = Series([timedelta(days=i) for i in range(3)] + ["foo"])
assert td.dtype == "object"
# these will correctly infer a timedelta
s = Series([None, pd.NaT, "1 Day"])
assert s.dtype == "timedelta64[ns]"
s = Series([np.nan, pd.NaT, "1 Day"])
assert s.dtype == "timedelta64[ns]"
s = Series([pd.NaT, None, "1 Day"])
assert s.dtype == "timedelta64[ns]"
s = Series([pd.NaT, np.nan, "1 Day"])
assert s.dtype == "timedelta64[ns]"
# GH 16406
def test_constructor_mixed_tz(self):
s = Series([Timestamp("20130101"), Timestamp("20130101", tz="US/Eastern")])
expected = Series(
[Timestamp("20130101"), Timestamp("20130101", tz="US/Eastern")],
dtype="object",
)
tm.assert_series_equal(s, expected)
def test_NaT_scalar(self):
series = Series([0, 1000, 2000, iNaT], dtype="M8[ns]")
val = series[3]
assert isna(val)
series[2] = val
assert isna(series[2])
def test_NaT_cast(self):
# GH10747
result = Series([np.nan]).astype("M8[ns]")
expected = Series([NaT])
tm.assert_series_equal(result, expected)
def test_constructor_name_hashable(self):
for n in [777, 777.0, "name", datetime(2001, 11, 11), (1,), "\u05D0"]:
for data in [[1, 2, 3], np.ones(3), {"a": 0, "b": 1}]:
                s = Series(data, name=n)
                assert s.name == n
'''
Copyright <NAME> and <NAME>
2015, 2016, 2017, 2018
'''
from __future__ import print_function # Python 2.7 and 3 compatibility
import os
import sys
import time
import shutil
#import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Standard imports
from numpy import pi
from numpy.linalg import inv
from stat import S_ISREG, ST_CTIME, ST_MODE
from pandas import HDFStore, Series, DataFrame
from collections import OrderedDict
from pathlib import Path
# pyEPR custom imports
from . import hfss
from . import logger
from . import config
from . import AttrDict
from .hfss import ureg, CalcObject, ConstantVecCalcObject, set_property
from .toolbox import print_NoNewLine, print_color, deprecated, fact, epsilon_0, hbar, Planck, fluxQ, nck, \
divide_diagonal_by_2, print_matrix, DataFrame_col_diff, get_instance_vars,\
sort_df_col, sort_Series_idx
from .toolbox_circuits import Calcs_basic
from .toolbox_plotting import cmap_discrete, legend_translucent
from .numeric_diag import bbq_hmt, make_dispersive
import matplotlib as mpl
from .toolbox_report import plot_convergence_f_vspass, plot_convergence_max_df, plot_convergence_solved_elem, plot_convergence_maxdf_vs_sol
class Project_Info(object):
"""
Class containing options and information about the manipulation and analysis in HFSS.
Junction info:
-----------------------
self.junctions : OrderedDict()
A Josephson tunnel junction has to have its parameters specified here for the analysis.
Each junction is given a name and is specified by a dictionary.
It has the following properties:
1. `Lj_variable` : Name of HFSS variable that specifies junction inductance Lj defined on the boundary condition in HFSS. DO NOT USE Global names that start with $.
2. `rect` : Name of HFSS rectangle on which lumped boundary condition is specified.
        3. `line` : Name of HFSS polyline which spans the length of the rectangle. Used to define the voltage across the junction, the current orientation for each junction, and the sign of the ZPF.
4. `length` : Length in HFSS of the junction rectangle and line (specified in meters).
Example definition:
..code-block python
# Define a single junction
pinfo = Project_Info('')
pinfo.junctions['j1'] = {'Lj_variable' : 'Lj1',
'rect' : 'JJrect1',
'line' : 'JJline1',
'length' : parse_units('50um')} # Length is in meters
# Specify multiple junctions in HFSS model
n_junctions = 5
for i in range(1, 1+n_junctions):
pinfo.junctions[f'j{i}'] = {'Lj_variable' : f'Lj{i}',
'rect' : f'JJrect{i}',
'line' : f'JJline{i}',
'length' : parse_units('50um')}
HFSS app connection settings
-----------------------
project_path : str
Directory path to the hfss project file. Should be the directory, not the file.
default = None: Assumes the project is open, and thus gets the project based on `project_name`
project_name : str, None
Name of the project within the project_path. "None" will get the current active one.
design_name : str, None
Name of the design within the project. "None" will get the current active one.
setup_name : str, None
Name of the setup within the design. "None" will get the current active one.
Additional init setting:
-----------------------
do_connect : True by default. Connect to HFSS
    HFSS design settings
-----------------------
describe junction parameters
junc_rects = None
Name of junction rectangles in HFSS
junc_lines = None
Name of lines in HFSS used to define the current orientation for each junction
junc_LJ_names = None
Name of junction inductance variables in HFSS.
Note, DO NOT USE Global names that start with $.
junc_lens = None
        Junction rect. length, measured in meters.
"""
class _Dissipative:
#TODO: remove and turn to dict
def __init__(self):
self.dielectrics_bulk = None
self.dielectric_surfaces = None
self.resistive_surfaces = None
self.seams = None
def __init__(self, project_path=None, project_name=None, design_name=None,
do_connect = True):
self.project_path = str(Path(project_path)) if not (project_path is None) else None # Path: format path correctly to system convention
self.project_name = project_name
self.design_name = design_name
self.setup_name = None
        ## HFSS design: describe junction parameters
# TODO: introduce modal labels
self.junctions = OrderedDict() # See above for help
self.ports = OrderedDict()
## Dissipative HFSS volumes and surfaces
self.dissipative = self._Dissipative()
self.options = config.options_hfss
        # Variables connected to HFSS
self.app = None
self.desktop = None
self.project = None
self.design = None
self.setup = None
if do_connect:
self.connect()
_Forbidden = ['app', 'design', 'desktop', 'project',
'dissipative', 'setup', '_Forbidden', 'junctions']
def save(self, hdf):
'''
hdf : pd.HDFStore
'''
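        # Usage sketch (the file name below is an assumption):
        #   with pd.HDFStore('pyEPR_results.hdf5') as hdf:
        #       pinfo.save(hdf)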
hdf['project_info'] = pd.Series(get_instance_vars(self, self._Forbidden))
hdf['project_info_dissip'] = pd.Series(get_instance_vars(self.dissipative))
hdf['project_info_options'] = pd.Series(get_instance_vars(self.options))
hdf['project_info_junctions'] = pd.DataFrame(self.junctions)
hdf['project_info_ports'] = pd.DataFrame(self.ports)
@deprecated
def connect_to_project(self):
return self.connect()
def connect(self):
'''
Connect to HFSS design.
'''
#logger.info('Connecting to HFSS ...')
self.app, self.desktop, self.project = hfss.load_ansys_project(
self.project_name, self.project_path)
self.project_name = self.project.name
self.project_path = self.project.get_path()
# Design
if self.design_name is None:
self.design = self.project.get_active_design()
self.design_name = self.design.name
logger.info(f'\tOpened active design\n\tDesign: {self.design_name} [Solution type: {self.design.solution_type}]')
else:
try:
self.design = self.project.get_design(self.design_name)
except Exception as e:
tb = sys.exc_info()[2]
logger.error(f"Original error: {e}\n")
raise(Exception(' Did you provide the correct design name? Failed to pull up design.').with_traceback(tb))
        #if not ('Eigenmode' == self.design.solution_type):
        #    logger.warning('\tWarning: The design type is not Eigenmode. Are you sure you do not want eigenmode?')
# Setup
try:
n_setups = len(self.design.get_setup_names())
if n_setups == 0:
logger.warning('\tNo design setup detected.')
if self.design.solution_type == 'Eigenmode':
                    logger.warning('\tCreating default eigenmode setup.')
self.design.create_em_setup()
self.setup_name = 'Setup'
self.setup = self.design.get_setup(name=self.setup_name)
self.setup_name = self.setup.name
logger.info(f'\tOpened setup: {self.setup_name} [{type(self.setup)}]')
except Exception as e:
tb = sys.exc_info()[2]
logger.error(f"Original error: {e}\n")
raise(Exception(' Did you provide the correct setup name? Failed to pull up setup.').with_traceback(tb))
# Finalize
self.project_name = self.project.name
self.design_name = self.design.name
logger.info('\tConnected successfully.\t :)\t :)\t :)\t\n')
return self
def check_connected(self):
"""Checks if fully connected including setup
"""
return\
(self.setup is not None) and\
(self.design is not None) and\
(self.project is not None) and\
(self.desktop is not None) and\
(self.app is not None)
def disconnect(self):
'''
Disconnect from existing HFSS design.
'''
assert self.check_connected(
) is True, "it does not appear that you have connected to HFSS yet. use connect()"
self.project.release()
self.desktop.release()
self.app.release()
hfss.release()
### UTILITY FUNCTIONS
def get_dm(self):
'''
Get the design and modeler
.. code-block:: python
            oDesign, oModeler = project.get_dm()
'''
oDesign = self.design
oModeler = oDesign.modeler
return oDesign, oModeler
def get_all_variables_names(self):
"""Returns array of all project and local design names."""
return self.project.get_variable_names() + self.design.get_variable_names()
def get_all_object_names(self):
"""Returns array of strings"""
oObjects = []
for s in ["Non Model", "Solids", "Unclassified", "Sheets", "Lines"]:
oObjects += self.design.modeler.get_objects_in_group(s)
return oObjects
def validate_junction_info(self):
""" Validate that the user has put in the junction info correctly.
            Do not forget to also check the length of the rectangles/lines of
            the junction if you change it.
"""
all_variables_names = self.get_all_variables_names()
all_object_names = self.get_all_object_names()
for jjnm, jj in self.junctions.items():
assert jj['Lj_variable'] in all_variables_names, "pyEPR project_info user error found: Seems like for junction `%s` you specified a design or project variable for `Lj_variable` that does not exist in HFSS by the name: `%s` " % (
jjnm, jj['Lj_variable'])
for name in ['rect', 'line']:
assert jj[name] in all_object_names, "pyEPR project_info user error found: Seems like for junction `%s` you specified a %s that does not exist in HFSS by the name: `%s` " % (
jjnm, name, jj[name])
        #TODO: Check the length of the rectangle
#==============================================================================
#%% Main compuation class & interface with HFSS
#==============================================================================
class pyEPR_HFSS(object):
"""
This class defines a pyEPR_HFSS object which calculates and saves
Hamiltonian parameters from an HFSS simulation.
    Further, it allows one to calculate dissipation, etc.
"""
def __init__(self, *args, **kwargs):
'''
Parameters:
-------------------
project_info : Project_Info
                Supply the project info or the parameters to create pinfo
Example use:
-------------------
'''
if (len(args) == 1) and (args[0].__class__.__name__ == 'Project_Info'): #isinstance(args[0], Project_Info): # fails on module repload with changes
project_info = args[0]
else:
            assert len(args) == 0, 'Since you did not pass a Project_Info object as an argument, we now assume you are trying to create a Project_Info object here by passing its arguments. See Project_Info. It does not take any arguments, only kwargs.'
project_info = Project_Info(*args, **kwargs)
# Input
self.pinfo = project_info
if self.pinfo.check_connected() is False:
self.pinfo.connect()
self.verbose = True #TODO: change verbose to logger. remove verbose flags
self.append_analysis = False #TODO
# hfss connect module
self.fields = self.setup.get_fields()
self.solutions = self.setup.get_solutions()
# Variations - the following get updated in update_variation_information
self.nmodes = int(1)
self.listvariations = ("",)
self.nominalvariation = '0'
self.nvariations = 0
self.update_variation_information()
        self.hfss_variables = OrderedDict()  # container for eBBQ list of variables
if self.verbose:
print('Design \"%s\" info:'%self.design.name)
print('\t%-15s %d\n\t%-15s %d' %('# eigenmodes', self.nmodes, '# variations', self.nvariations))
# Setup data saving
self.setup_data()
self.latest_h5_path = None # #self.get_latest_h5()
''' #TODO: to be implemented to use old files
if self.latest_h5_path is not None and self.append_analysis:
latest_bbq_analysis = pyEPR_Analysis(self.latest_h5_path)
if self.verbose:
print( 'Varied variables and values : ', latest_bbq_analysis.get_swept_variables(), \
'Variations : ', latest_bbq_analysis.variations)
'''
@property
def setup(self):
return self.pinfo.setup
@property
def design(self):
return self.pinfo.design
@property
def project(self):
return self.pinfo.project
@property
def desktop(self):
return self.pinfo.desktop
@property
def app(self):
return self.pinfo.app
@property
def junctions(self):
return self.pinfo.junctions
@property
def ports(self):
return self.pinfo.ports
@property
def options(self):
return self.pinfo.options
def get_latest_h5(self):
'''
No longer used. Could be added back in.
'''
dirpath = self.data_dir
entries1 = (os.path.join(dirpath, fn) for fn in os.listdir(dirpath)) # get all entries in the directory w/ stats
entries2 = ((os.stat(path), path) for path in entries1)
entries3 = ((stat[ST_CTIME], path) # leave only regular files, insert creation date
for stat, path in entries2 if S_ISREG(stat[ST_MODE]) and path[-4:]=='hdf5')
#NOTE: on Windows `ST_CTIME` is a creation date but on Unix it could be something else
#NOTE: use `ST_MTIME` to sort by a modification date
paths_sorted = []
for cdate, path in sorted(entries3):
paths_sorted.append(path)
#print time.ctime(cdate), os.path.basename(path)
if len(paths_sorted) > 0:
self.latest_h5_path = paths_sorted[-1]
if self.verbose:
                print('This simulation has been analyzed, latest data in ' + self.latest_h5_path)
else:
self.latest_h5_path = None
if self.verbose:
print('This simulation has never been analyzed')
def setup_data(self):
'''
Set up folder paths for saving data to.
'''
data_dir = Path(config.root_dir) / \
Path(self.project.name)/Path(self.design.name)
#if self.verbose:
# print("\nResults will be saved to:\n" +'- '*20+'\n\t'+ str(data_dir)+'\n'+'- '*20+'\n')
if len(self.design.name) > 50:
            print_color('WARNING! DESIGN FILENAME MAY BE TOO LONG! ')
if not data_dir.is_dir():
data_dir.mkdir(parents=True, exist_ok=True)
self.data_dir = str(data_dir)
self.data_filename = str(
data_dir / (time.strftime('%Y-%m-%d %H-%M-%S', time.localtime()) + '.hdf5'))
"""
@deprecated
def calc_p_j(self, modes=None, variation=None):
'''
Calculates the p_j for all the modes.
Requires a calculator expression called P_J.
'''
lv = self.get_lv(variation)
if modes is None:
modes = range(self.nmodes)
pjs = OrderedDict()
for ii, m in enumerate(modes):
print('Calculating p_j for mode ' + str(m) + ' (' + str(ii) + '/' + str(np.size(modes)-1) + ')')
self.solutions.set_mode(m+1, 0)
self.fields = self.setup.get_fields()
P_J = self.fields.P_J
pjs['pj_'+str(m)] = P_J.evaluate(lv=lv)
self.pjs = pjs
if self.verbose:
print(pjs)
return pjs
"""
def calc_p_junction_single(self, mode):
'''
This function is used in the case of a single junction only.
For multiple junctions, see `calc_p_junction`.
Assumes no lumped capacitive elements.
'''
pj = OrderedDict()
pj_val = (self.U_E-self.U_H)/self.U_E
pj['pj_'+str(mode)] = np.abs(pj_val)
print(' p_j_' + str(mode) + ' = ' + str(pj_val))
return pj
    #TODO: replace this method with the one below, here because some funcs still use it
def get_freqs_bare(self, variation):
#str(self.get_lv(variation))
freqs_bare_vals = []
freqs_bare_dict = OrderedDict()
freqs, kappa_over_2pis = self.solutions.eigenmodes(
self.get_lv_EM(variation))
for m in range(self.nmodes):
freqs_bare_dict['freq_bare_'+str(m)] = 1e9*freqs[m]
freqs_bare_vals.append(1e9*freqs[m])
if kappa_over_2pis is not None:
freqs_bare_dict['Q_'+str(m)] = freqs[m]/kappa_over_2pis[m]
else:
freqs_bare_dict['Q_'+str(m)] = 0
self.freqs_bare = freqs_bare_dict
self.freqs_bare_vals = freqs_bare_vals
return freqs_bare_dict, freqs_bare_vals
def get_freqs_bare_pd(self, variation):
'''
        Return pd.Series of modal frequencies and Qs for the given variation
'''
freqs, kappa_over_2pis = self.solutions.eigenmodes(
self.get_lv_EM(variation))
if kappa_over_2pis is None:
kappa_over_2pis = np.zeros(len(freqs))
freqs = pd.Series(freqs, index=range(len(freqs))) # GHz
Qs = freqs / pd.Series(kappa_over_2pis, index=range(len(freqs)))
return freqs, Qs
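    # Hedged usage sketch (not from the original source): once connected, the modal
    # frequencies and quality factors for a given variation can be pulled like this.
    # The variation string '0' and the instance name `epr_hfss` are assumed examples.
    #
    #     epr_hfss = pyEPR_HFSS(pinfo)
    #     freqs_GHz, Qs = epr_hfss.get_freqs_bare_pd('0')
    #     print(freqs_GHz)   # pd.Series indexed by mode number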
def get_lv(self, variation=None):
'''
List of variation variables.
Returns list of var names and var values.
Such as ['Lj1:=','13nH', 'QubitGap:=','100um']
Parameters
-----------
variation : string number such as '0' or '1' or ...
'''
if variation is None:
lv = self.nominalvariation
lv = self.parse_listvariations(lv)
else:
lv = self.listvariations[ureg(variation)]
lv = self.parse_listvariations(lv)
return lv
def get_lv_EM(self, variation):
if variation is None:
lv = self.nominalvariation
#lv = self.parse_listvariations_EM(lv)
else:
lv = self.listvariations[ureg(variation)]
#lv = self.parse_listvariations_EM(lv)
return str(lv)
def parse_listvariations_EM(self, lv):
lv = str(lv)
lv = lv.replace("=", ":=,")
lv = lv.replace(' ', ',')
lv = lv.replace("'", "")
lv = lv.split(",")
return lv
def parse_listvariations(self, lv):
lv = str(lv)
lv = lv.replace("=", ":=,")
lv = lv.replace(' ', ',')
lv = lv.replace("'", "")
lv = lv.split(",")
return lv
def get_variables(self, variation=None):
lv = self.get_lv(variation)
variables = OrderedDict()
for ii in range(int(len(lv)/2)):
variables['_'+lv[2*ii][:-2]] = lv[2*ii+1]
self.variables = variables
return variables
def calc_energy_electric(self,
variation=None,
volume='AllObjects',
smooth=False):
r'''
Calculates two times the peak electric energy, or 4 times the RMS, :math:`4*\mathcal{E}_{\mathrm{elec}}`
(since we do not divide by 2 and use the peak phasors).
.. math::
\mathcal{E}_{\mathrm{elec}}=\frac{1}{4}\mathrm{Re}\int_{V}\mathrm{d}v\vec{E}_{\text{max}}^{*}\overleftrightarrow{\epsilon}\vec{E}_{\text{max}}
volume : string | 'AllObjects'
smooth : bool | False
Smooth the electric field or not when performing calculation
        Example use to calculate the energy participation of a substrate
        .. code-block:: python
ℰ_total = epr_hfss.calc_energy_electric(volume='AllObjects')
ℰ_substr = epr_hfss.calc_energy_electric(volume='Box1')
print(f'Energy in substrate = {100*ℰ_substr/ℰ_total:.1f}%')
'''
calcobject = CalcObject([], self.setup)
vecE = calcobject.getQty("E")
if smooth:
vecE = vecE.smooth()
A = vecE.times_eps()
B = vecE.conj()
A = A.dot(B)
A = A.real()
A = A.integrate_vol(name=volume)
lv = self.get_lv(variation)
return A.evaluate(lv=lv)
def calc_energy_magnetic(self,
variation=None,
volume='AllObjects',
smooth=True):
'''
See calc_energy_electric
'''
calcobject = CalcObject([], self.setup)
vecH = calcobject.getQty("H")
if smooth:
vecH = vecH.smooth()
A = vecH.times_mu()
B = vecH.conj()
A = A.dot(B)
A = A.real()
A = A.integrate_vol(name=volume)
lv = self.get_lv(variation)
return A.evaluate(lv=lv)
def calc_p_electric_volume(self,
name_dielectric3D,
relative_to='AllObjects',
E_total=None
):
r'''
        Calculate the dielectric energy-participation ratio
of a 3D object (one that has volume) relative to the dielectric energy of
a list of object objects.
This is as a function relative to another object or all objects.
When all objects are specified, this does not include any energy
that might be stored in any lumped elements or lumped capacitors.
Returns:
---------
            ℰ_object/ℰ_total, (ℰ_object, ℰ_total)
'''
if E_total is None:
logger.debug('Calculating ℰ_total')
ℰ_total = self.calc_energy_electric(volume=relative_to)
else:
ℰ_total = E_total
logger.debug('Calculating ℰ_object')
ℰ_object = self.calc_energy_electric(volume=name_dielectric3D)
return ℰ_object/ℰ_total, (ℰ_object, ℰ_total)
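    # Hedged usage sketch (the object name 'substrate' below is an assumed example,
    # not taken from the original source):
    #
    #     p_substrate, (E_sub, E_tot) = epr_hfss.calc_p_electric_volume('substrate')
    #     print(f'substrate participation = {100 * p_substrate:.1f}%')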
def calc_current(self, fields, line):
'''
Function to calculate Current based on line. Not in use
line : integration line between plates - name
'''
self.design.Clear_Field_Clac_Stack()
comp = fields.Vector_H
exp = comp.integrate_line_tangent(line)
I = exp.evaluate(phase = 90)
self.design.Clear_Field_Clac_Stack()
return I
def calc_avg_current_J_surf_mag(self, variation, junc_rect, junc_line):
        ''' Peak current I_max for mode J in junction J
The avg. is over the surface of the junction. I.e., spatial. '''
lv = self.get_lv(variation)
jl, uj = self.get_junc_len_dir(variation, junc_line)
uj = ConstantVecCalcObject(uj, self.setup)
calc = CalcObject([], self.setup)
#calc = calc.getQty("Jsurf").mag().integrate_surf(name = junc_rect)
calc = (((calc.getQty("Jsurf")).dot(uj)).imag()
).integrate_surf(name=junc_rect)
I = calc.evaluate(lv=lv) / jl # phase = 90
#self.design.Clear_Field_Clac_Stack()
return I
def calc_current_line_voltage(self, variation, junc_line_name, junc_L_Henries):
'''
Peak current I_max for prespecified mode calculating line voltage across junction.
Parameters:
------------------------------------------------
variation: variation number
junc_line_name: name of the HFSS line spanning the junction
junc_L_Henries: junction inductance in henries
TODO: Smooth?
'''
lv = self.get_lv(variation)
v_calc_real = CalcObject([], self.setup).getQty(
"E").real().integrate_line_tangent(name=junc_line_name)
v_calc_imag = CalcObject([], self.setup).getQty(
"E").imag().integrate_line_tangent(name=junc_line_name)
V = np.sqrt(v_calc_real.evaluate(lv=lv)**2 +
v_calc_imag.evaluate(lv=lv)**2)
freq = CalcObject(
[('EnterOutputVar', ('Freq', "Complex"))], self.setup).real().evaluate()
        return V/(2*np.pi*freq*junc_L_Henries)  # I = V/(w*L)
def calc_line_current(self, variation, junc_line_name):
lv = self.get_lv(variation)
calc = CalcObject([], self.setup)
calc = calc.getQty("H").imag().integrate_line_tangent(
name=junc_line_name)
#self.design.Clear_Field_Clac_Stack()
return calc.evaluate(lv=lv)
def get_junc_len_dir(self, variation, junc_line):
'''
Return the length and direction of a junction defined by a line
Inputs: variation: simulation variation
junc_line: polyline object
Outputs: jl (float) junction length
uj (list of 3 floats) x,y,z coordinates of the unit vector
tangent to the junction line
'''
#
lv = self.get_lv(variation)
u = []
for coor in ['X', 'Y', 'Z']:
calc = CalcObject([], self.setup)
calc = calc.line_tangent_coor(junc_line, coor)
u.append(calc.evaluate(lv=lv))
jl = float(np.sqrt(u[0]**2+u[1]**2+u[2]**2))
uj = [float(u[0]/jl), float(u[1]/jl), float(u[2]/jl)]
return jl, uj
def get_Qseam(self, seam, mode, variation):
r'''
        Calculate the contribution to Q of a seam, by integrating the current in
        the seam with finite conductance: set in the config file
ref: http://arxiv.org/pdf/1509.01119.pdf
'''
lv = self.get_lv(variation)
Qseam = OrderedDict()
print('Calculating Qseam_' + seam + ' for mode ' + str(mode) +
' (' + str(mode) + '/' + str(self.nmodes-1) + ')')
# overestimating the loss by taking norm2 of j, rather than jperp**2
j_2_norm = self.fields.Vector_Jsurf.norm_2()
int_j_2 = j_2_norm.integrate_line(seam)
int_j_2_val = int_j_2.evaluate(lv=lv, phase=90)
yseam = int_j_2_val/self.U_H/self.omega
Qseam['Qseam_'+seam+'_' +
str(mode)] = config.Dissipation_params.gseam/yseam
        print('Qseam_' + seam + '_' + str(mode) + ' = ' +
              str(config.Dissipation_params.gseam/yseam))
return Series(Qseam)
def get_Qseam_sweep(self, seam, mode, variation, variable, values, unit, pltresult=True):
# values = ['5mm','6mm','7mm']
# ref: http://arxiv.org/pdf/1509.01119.pdf
self.solutions.set_mode(mode+1, 0)
self.fields = self.setup.get_fields()
freqs_bare_dict, freqs_bare_vals = self.get_freqs_bare(variation)
self.omega = 2*np.pi*freqs_bare_vals[mode]
print(variation)
print(type(variation))
print(ureg(variation))
self.U_H = self.calc_energy_magnetic(variation)
lv = self.get_lv(variation)
Qseamsweep = []
print('Calculating Qseam_' + seam + ' for mode ' + str(mode) +
' (' + str(mode) + '/' + str(self.nmodes-1) + ')')
for value in values:
self.design.set_variable(variable, str(value)+unit)
# overestimating the loss by taking norm2 of j, rather than jperp**2
j_2_norm = self.fields.Vector_Jsurf.norm_2()
int_j_2 = j_2_norm.integrate_line(seam)
int_j_2_val = int_j_2.evaluate(lv=lv, phase=90)
yseam = int_j_2_val/self.U_H/self.omega
Qseamsweep.append(config.Dissipation_params.gseam/yseam)
# Qseamsweep['Qseam_sweep_'+seam+'_'+str(mode)] = gseam/yseam
#Cprint 'Qseam_' + seam + '_' + str(mode) + str(' = ') + str(gseam/yseam)
if pltresult:
_, ax = plt.subplots()
ax.plot(values, Qseamsweep)
ax.set_yscale('log')
ax.set_xlabel(variable+' ('+unit+')')
ax.set_ylabel('Q'+'_'+seam)
return Qseamsweep
def get_Qdielectric(self, dielectric, mode, variation):
Qdielectric = OrderedDict()
print('Calculating Qdielectric_' + dielectric + ' for mode ' +
str(mode) + ' (' + str(mode) + '/' + str(self.nmodes-1) + ')')
U_dielectric = self.calc_energy_electric(variation, volume=dielectric)
p_dielectric = U_dielectric/self.U_E
#TODO: Update make p saved sep. and get Q for diff materials, indep. specify in pinfo
Qdielectric['Qdielectric_'+dielectric+'_' +
str(mode)] = 1/(p_dielectric*config.Dissipation_params.tan_delta_sapp)
print('p_dielectric'+'_'+dielectric+'_' +
str(mode)+' = ' + str(p_dielectric))
return Series(Qdielectric)
def get_Qsurface_all(self, mode, variation):
'''
        Calculate the contribution to Q of a dielectric layer of dirt on all surfaces.
        Set the dirt thickness and loss tangent in the config file.
ref: http://arxiv.org/pdf/1509.01854.pdf
'''
lv = self.get_lv(variation)
Qsurf = OrderedDict()
print('Calculating Qsurface for mode ' + str(mode) +
' (' + str(mode) + '/' + str(self.nmodes-1) + ')')
# A = self.fields.Mag_E**2
# A = A.integrate_vol(name='AllObjects')
# U_surf = A.evaluate(lv=lv)
calcobject = CalcObject([], self.setup)
vecE = calcobject.getQty("E")
A = vecE
B = vecE.conj()
A = A.dot(B)
A = A.real()
A = A.integrate_surf(name='AllObjects')
U_surf = A.evaluate(lv=lv)
U_surf *= config.Dissipation_params.th*epsilon_0*config.Dissipation_params.eps_r
p_surf = U_surf/self.U_E
Qsurf['Qsurf_'+str(mode)] = 1 / \
(p_surf*config.Dissipation_params.tan_delta_surf)
print('p_surf'+'_'+str(mode)+' = ' + str(p_surf))
return Series(Qsurf)
def calc_Q_external(self, variation, freq_GHz, U_E):
'''
Calculate the coupling Q of mode m with each port p
Expected that you have specified the mode before calling this
'''
Qp = pd.Series({})
freq = freq_GHz * 1e9 # freq in Hz
for port_nm, port in self.pinfo.ports.items():
I_peak = self.calc_avg_current_J_surf_mag(variation, port['rect'],
port['line'])
U_dissip = 0.5 * port['R'] * I_peak**2 * 1 / freq
p = U_dissip / (U_E/2) # U_E is 2x the peak electrical energy
kappa = p * freq
Q = 2 * np.pi * freq / kappa
Qp['Q_' + port_nm] = Q
return Qp
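    # Sanity check of the coupling-Q algebra used above (illustrative note, not from
    # the original source): with U_dissip = 0.5*R*I_peak**2/f the energy dissipated per
    # cycle in a port of resistance R, p = U_dissip/(U_E/2) is the fraction of stored
    # energy lost per cycle, so kappa = p*f and Q = 2*pi*f/kappa = 2*pi/p.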
def calc_p_junction(self, variation, U_H, U_E, Ljs):
'''
Expected that you have specified the mode before calling this, `self.set_mode(num)`
        Expected to precalculate U_H and U_E for the mode; will return a pandas Series object
junc_rect = ['junc_rect1', 'junc_rect2'] name of junc rectangles to integrate H over
junc_len = [0.0001] specify in SI units; i.e., meters
LJs = [8e-09, 8e-09] SI units
calc_sign = ['junc_line1', 'junc_line2']
This function assumes there are no lumped capacitors in model.
        Potential errors: If you don't have a line or rect by the right name you will probably get an error of the type:
com_error: (-2147352567, 'Exception occurred.', (0, None, None, None, 0, -2147024365), None)
'''
Pj = pd.Series({})
Sj = pd.Series({})
for junc_nm, junc in self.pinfo.junctions.items():
logger.debug(f'Calculating participation for {(junc_nm, junc)}')
# Get peak current though junction I_peak
            if self.pinfo.options.method_calc_P_mj == 'J_surf_mag':
I_peak = self.calc_avg_current_J_surf_mag(
variation, junc['rect'], junc['line'])
            elif self.pinfo.options.method_calc_P_mj == 'line_voltage':
I_peak = self.calc_current_line_voltage(
variation, junc['line'], Ljs[junc_nm])
else:
raise NotImplementedError(
'Other calculation methods (self.pinfo.options.method_calc_P_mj) are possible but not implemented here. ')
Pj['p_' + junc_nm] = Ljs[junc_nm] * \
I_peak**2 / U_E
            # divide by U_E: participation normed to be between 0 and 1 by the total capacitive energy
# which should be the total inductive energy
# Sign bit
Sj['s_' + junc_nm] = + \
1 if (self.calc_line_current(
variation, junc['line'])) > 0 else -1
if self.verbose:
print('\t{:<15} {:>8.6g} {:>5s}'.format(
junc_nm,
Pj['p_' + junc_nm],
'+' if Sj['s_' + junc_nm] > 0 else '-'))
return Pj, Sj
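    # Hedged note (not from the original source): the participation computed above is
    # essentially p_mj ~ Lj * I_peak**2 / U_E, i.e. the inductive energy stored in
    # junction j divided by (twice) the peak electric energy of the mode, with the sign
    # taken from the direction of the line current. A typical call might look like:
    #
    #     Pj, Sj = epr_hfss.calc_p_junction(variation, U_H, U_E, Ljs)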
def do_EPR_analysis(self,
variations=None,
modes=None):
"""
Main analysis routine
Load results with pyEPR_Analysis
        .. code-block:: python
            pyEPR_Analysis(self.data_filename, variations=variations)
Optional Parameters:
------------------------
variations : list | None
Example list of variations is ['0', '1']
A variation is a combination of project/design variables in an optimetric sweep
modes : list | None
Modes to analyze
for example modes = [0, 2, 3]
HFSS Notes:
------------------------
Assumptions:
Low dissipation (high-Q).
            Right now, we assume that there are no lumped capacitors, to simplify calculations. Not required.
We assume that there are only lumped inductors, so that U_tot = U_E+U_H+U_L and U_C =0, so that U_tot = 2*U_E;
"""
self._run_time = time.strftime('%Y%m%d_%H%M%S', time.localtime())
self.update_variation_information()
if modes is None:
modes = range(self.nmodes)
if variations is None:
variations = self.variations
# Setup save and save pinfo
        #TODO: The pd.HDFStore is used to store the pandas series and dataframes, but is otherwise cumbersome.
# We should move to a better saving paradigm
if self.latest_h5_path is not None and self.append_analysis:
shutil.copyfile(self.latest_h5_path, self.data_filename)
hdf = pd.HDFStore(self.data_filename)
self.pinfo.save(hdf) # This will save only 1 globalinstance
### Main loop - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
for ii, variation in enumerate(variations):
            #TODO: Move this into a function called self.analyze_variation
print(f'\nVariation {variation} [{ii+1}/{len(variations)}]')
# Previously analyzed?
if (variation+'/hfss_variables') in hdf.keys() and self.append_analysis:
print_NoNewLine(' previously analyzed ...\n')
continue
self.lv = self.get_lv(variation)
time.sleep(0.4)
            if self.has_fields() == False:
                logger.error(f" Error: HFSS does not have a field solution for variation={variation}."
                             " Skipping this variation in the analysis")
continue
freqs_bare_GHz, Qs_bare = self.get_freqs_bare_pd(variation)
self.hfss_variables[variation] = pd.Series(
self.get_variables(variation=variation))
Ljs = | pd.Series({}) | pandas.Series |
import sys
# Do not show error traceback
sys.tracebacklimit=0
# Check if all packages installed
try:
from pandas.core.frame import DataFrame
import pandas as pd
except ImportError as e:
print("Package <pandas> needed to be installed before getting data ! ")
raise e
try:
import requests
except ImportError as e:
print("Package <requests> needed to be installed before getting data ! ")
raise e
try:
import xlwt
except ImportError as e:
print("Package <xlwt> needed to be installed before getting data ! ")
raise e
try:
import csv
except ImportError as e:
print("Package <csv> needed to be installed before getting data ! ")
raise e
class StockTimeSeries(object):
def __init__(self, apikey : str):
self.apikey = apikey
# Get Stock Information
# daily stock price
def GetDailyStockPrice(self, stock_id : str) -> DataFrame:
"""return DataFrame type daily stock price
The results will show daily stock price
Parameters
----------
stock_id : str
Choose the stock you want to get daily data
"""
base_url = 'https://www.alphavantage.co/query?'
df = pd.DataFrame()
df_new = pd.DataFrame()
params = {"function": "TIME_SERIES_DAILY", "symbol": stock_id, "outputsize": "full","apikey": self.apikey}
response = requests.get(base_url, params=params)
data = response.json() # dict
daily_data = data["Time Series (Daily)"]
        for date_key in daily_data:
            small_dict = daily_data[date_key]
            small_dict["Datetime"] = date_key
            df = df.append(pd.DataFrame([small_dict]))
df_new = df[["Datetime", "1. open", "2. high", "3. low", "4. close", "5. volume"]]
df_new = df_new.rename(columns = {"1. open" : "Open", "2. high": "High", "3. low" : "Low", "4. close" : "Close", "5. volume" : "Volume"})
col_name = df_new.columns.tolist()
col_name.insert(0,"Symbol")
df_new = df_new.reindex(columns=col_name)
df_new["Symbol"] = stock_id
return df_new
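    # Hedged usage sketch (the API key and ticker symbol below are placeholder
    # assumptions, not values from the original source):
    #
    #     ts = StockTimeSeries(apikey="YOUR_ALPHAVANTAGE_KEY")
    #     daily = ts.GetDailyStockPrice("IBM")
    #     print(daily.head())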
# weekly stock price
def GetWeeklyStockPrice(self, stock_id : str) -> DataFrame:
"""return DataFrame type weekly stock price
The results will show weekly stock price
Parameters
----------
stock_id : str
Choose the stock you want to get weekly data
"""
# https://www.alphavantage.co/query?function=TIME_SERIES_WEEKLY_ADJUSTED&symbol=IBM&apikey=demo
base_url = 'https://www.alphavantage.co/query?'
df = pd.DataFrame()
df_new = pd.DataFrame()
params = {"function": "TIME_SERIES_WEEKLY_ADJUSTED", "symbol": stock_id,"apikey": self.apikey}
response = requests.get(base_url, params=params)
data = response.json() # dict
daily_data = data["Weekly Adjusted Time Series"]
        for date_key in daily_data:
            small_dict = daily_data[date_key]
            small_dict["Datetime"] = date_key
            df = df.append(pd.DataFrame([small_dict]))
df_new = df[["Datetime", "1. open", "2. high", "3. low", "4. close", "5. adjusted close", "6. volume", "7. dividend amount"]]
df_new = df_new.rename(columns = {"1. open" : "Open", "2. high": "High", "3. low" : "Low", "4. close" : "Close", "5. adjusted close" : "Adjusted Close", "6. volume" : "Volume", "7. dividend amount" : "Dividend Amount"})
col_name = df_new.columns.tolist()
col_name.insert(0,"Symbol")
df_new = df_new.reindex(columns=col_name)
df_new["Symbol"] = stock_id
return df_new
# monthly stock price
def GetMonthlyStockPrice(self, stock_id : str) -> DataFrame:
"""return DataFrame type monthly stock price
The results will show monthly stock price
Parameters
----------
stock_id : str
Choose the stock you want to get monthly data
"""
# https://www.alphavantage.co/query?function=TIME_SERIES_MONTHLY_ADJUSTED&symbol=IBM&apikey=demo
base_url = 'https://www.alphavantage.co/query?'
df = pd.DataFrame()
df_new = pd.DataFrame()
params = {"function": "TIME_SERIES_MONTHLY_ADJUSTED", "symbol": stock_id,"apikey": self.apikey}
response = requests.get(base_url, params=params)
data = response.json() # dict
daily_data = data["Monthly Adjusted Time Series"]
        for date_key in daily_data:
            small_dict = daily_data[date_key]
            small_dict["Datetime"] = date_key
            df = df.append(pd.DataFrame([small_dict]))
df_new = df[["Datetime", "1. open", "2. high", "3. low", "4. close", "5. adjusted close", "6. volume", "7. dividend amount"]]
df_new = df_new.rename(columns = {"1. open" : "Open", "2. high": "High", "3. low" : "Low", "4. close" : "Close", "5. adjusted close" : "Adjusted Close", "6. volume" : "Volume", "7. dividend amount" : "Dividend Amount"})
col_name = df_new.columns.tolist()
col_name.insert(0,"Symbol")
df_new = df_new.reindex(columns=col_name)
df_new["Symbol"] = stock_id
return df_new
# intraday stock price - most recent 1 to 2 months data
def GetIntradayStockPrice(self, stock_id : str, interval : str) -> DataFrame:
"""return DataFrame type intraday stock price
The results will show intraday stock price at certain
interval you choose
Parameters
----------
stock_id : str
Choose the stock you want to get intraday data
interval : str
Choose "1min" or "5min" or "15min" or "30min" or "60min" at time interval for intraday data
"""
# https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol=IBM&interval=5min&outputsize=full&apikey=demo
base_url = 'https://www.alphavantage.co/query?'
df = pd.DataFrame()
df_new = | pd.DataFrame() | pandas.DataFrame |
import boto3
import json,io
from fbprophet.serialize import model_to_json, model_from_json
import pandas as pd
from fbprophet import Prophet
import datetime
import matplotlib.pyplot as plt
from fastapi import FastAPI
app = FastAPI()
def gen_datetime(datetime_str):
"""
input: datetime string : format example -> '2010-11-02 21:00:00'
returns dataframe containing date points till the end of the month
"""
numdays = 30
try:
base = datetime.datetime.strptime(datetime_str,'%Y-%m-%d %H:%M:%S')
except Exception as e:
print("The datetime format is not correct",e)
return pd.DataFrame()
dates = [str((base + datetime.timedelta(days=x))) for x in range(numdays)
if (base + datetime.timedelta(days=x)).month == base.month]
return | pd.DataFrame(dates, columns=["ds"]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from datetime import timedelta
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import (Timedelta,
period_range, Period, PeriodIndex,
_np_version_under1p10)
import pandas.core.indexes.period as period
class TestPeriodIndexArithmetic(object):
def test_pi_add_offset_array(self):
# GH#18849
pi = pd.PeriodIndex([pd.Period('2015Q1'), pd.Period('2016Q2')])
offs = np.array([pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12)])
res = pi + offs
expected = pd.PeriodIndex([pd.Period('2015Q2'), pd.Period('2015Q4')])
tm.assert_index_equal(res, expected)
unanchored = np.array([pd.offsets.Hour(n=1),
pd.offsets.Minute(n=-2)])
with pytest.raises(period.IncompatibleFrequency):
pi + unanchored
with pytest.raises(TypeError):
unanchored + pi
@pytest.mark.xfail(reason='GH#18824 radd doesnt implement this case')
def test_pi_radd_offset_array(self):
# GH#18849
pi = pd.PeriodIndex([pd.Period('2015Q1'), pd.Period('2016Q2')])
offs = np.array([pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12)])
res = offs + pi
expected = pd.PeriodIndex([pd.Period('2015Q2'), pd.Period('2015Q4')])
tm.assert_index_equal(res, expected)
def test_add_iadd(self):
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
# previously performed setop union, now raises TypeError (GH14164)
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
rng += other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range('2019', '2029', freq='A')
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng + pd.offsets.MonthEnd(5)
expected = pd.period_range('2014-06', '2017-05', freq='M')
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h'),
Timedelta('72:00:00')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng + delta
expected = pd.period_range('2014-05-04', '2014-05-18', freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23), Timedelta('23:00:00')]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm'),
Timedelta(minutes=120)]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng + delta
expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00',
freq='H')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's'), Timedelta(seconds=30)]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + delta
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng += delta
def test_pi_add_int(self, one):
# Variants of `one` for #19012
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng + one
expected = pd.period_range('2000-01-01 10:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += one
tm.assert_index_equal(rng, expected)
@pytest.mark.parametrize('five', [5, np.array(5, dtype=np.int64)])
def test_sub(self, five):
rng = period_range('2007-01', periods=50)
result = rng - five
exp = rng + (-five)
tm.assert_index_equal(result, exp)
def test_sub_isub(self):
# previously performed setop, now raises TypeError (GH14164)
# TODO needs to wait on #13077 for decision on result type
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
with pytest.raises(TypeError):
rng - other
with pytest.raises(TypeError):
rng -= other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range('2009', '2019', freq='A')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014', '2024', freq='A')
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng - o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range('2013-08', '2016-07', freq='M')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng - o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng - delta
expected = pd.period_range('2014-04-28', '2014-05-12', freq='D')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23)]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng - o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm')]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng - delta
expected = pd.period_range('2014-01-01 08:00', '2014-01-05 08:00',
freq='H')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's')]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + delta
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng - 1
expected = pd.period_range('2000-01-01 08:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
class TestPeriodIndexSeriesMethods(object):
""" Test PeriodIndex and Period Series Ops consistency """
def _check(self, values, func, expected):
idx = pd.PeriodIndex(values)
result = func(idx)
if isinstance(expected, pd.Index):
tm.assert_index_equal(result, expected)
else:
# comp op results in bool
tm.assert_numpy_array_equal(result, expected)
s = pd.Series(values)
result = func(s)
exp = pd.Series(expected, name=values.name)
tm.assert_series_equal(result, exp)
def test_pi_ops(self):
idx = PeriodIndex(['2011-01', '2011-02', '2011-03',
'2011-04'], freq='M', name='idx')
expected = PeriodIndex(['2011-03', '2011-04',
'2011-05', '2011-06'], freq='M', name='idx')
self._check(idx, lambda x: x + 2, expected)
self._check(idx, lambda x: 2 + x, expected)
self._check(idx + 2, lambda x: x - 2, idx)
result = idx - Period('2011-01', freq='M')
exp = pd.Index([0, 1, 2, 3], name='idx')
tm.assert_index_equal(result, exp)
result = Period('2011-01', freq='M') - idx
exp = pd.Index([0, -1, -2, -3], name='idx')
tm.assert_index_equal(result, exp)
def test_pi_ops_errors(self):
idx = PeriodIndex(['2011-01', '2011-02', '2011-03',
'2011-04'], freq='M', name='idx')
s = pd.Series(idx)
msg = r"unsupported operand type\(s\)"
for obj in [idx, s]:
for ng in ["str", 1.5]:
with tm.assert_raises_regex(TypeError, msg):
obj + ng
with pytest.raises(TypeError):
# error message differs between PY2 and 3
ng + obj
with tm.assert_raises_regex(TypeError, msg):
obj - ng
with pytest.raises(TypeError):
np.add(obj, ng)
if _np_version_under1p10:
assert np.add(ng, obj) is NotImplemented
else:
with pytest.raises(TypeError):
np.add(ng, obj)
with pytest.raises(TypeError):
np.subtract(obj, ng)
if _np_version_under1p10:
assert np.subtract(ng, obj) is NotImplemented
else:
with pytest.raises(TypeError):
np.subtract(ng, obj)
def test_pi_ops_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='M', name='idx')
expected = PeriodIndex(['2011-03', '2011-04',
'NaT', '2011-06'], freq='M', name='idx')
self._check(idx, lambda x: x + 2, expected)
self._check(idx, lambda x: 2 + x, expected)
self._check(idx, lambda x: np.add(x, 2), expected)
self._check(idx + 2, lambda x: x - 2, idx)
self._check(idx + 2, lambda x: np.subtract(x, 2), idx)
# freq with mult
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='2M', name='idx')
expected = PeriodIndex(['2011-07', '2011-08',
'NaT', '2011-10'], freq='2M', name='idx')
self._check(idx, lambda x: x + 3, expected)
self._check(idx, lambda x: 3 + x, expected)
self._check(idx, lambda x: np.add(x, 3), expected)
self._check(idx + 3, lambda x: x - 3, idx)
self._check(idx + 3, lambda x: np.subtract(x, 3), idx)
def test_pi_ops_array_int(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='M', name='idx')
f = lambda x: x + np.array([1, 2, 3, 4])
exp = PeriodIndex(['2011-02', '2011-04', 'NaT',
'2011-08'], freq='M', name='idx')
self._check(idx, f, exp)
f = lambda x: np.add(x, np.array([4, -1, 1, 2]))
exp = PeriodIndex(['2011-05', '2011-01', 'NaT',
'2011-06'], freq='M', name='idx')
self._check(idx, f, exp)
f = lambda x: x - np.array([1, 2, 3, 4])
exp = PeriodIndex(['2010-12', '2010-12', 'NaT',
'2010-12'], freq='M', name='idx')
self._check(idx, f, exp)
f = lambda x: np.subtract(x, np.array([3, 2, 3, -2]))
exp = PeriodIndex(['2010-10', '2010-12', 'NaT',
'2011-06'], freq='M', name='idx')
self._check(idx, f, exp)
def test_pi_ops_offset(self):
idx = PeriodIndex(['2011-01-01', '2011-02-01', '2011-03-01',
'2011-04-01'], freq='D', name='idx')
f = lambda x: x + pd.offsets.Day()
exp = PeriodIndex(['2011-01-02', '2011-02-02', '2011-03-02',
'2011-04-02'], freq='D', name='idx')
self._check(idx, f, exp)
f = lambda x: x + pd.offsets.Day(2)
exp = PeriodIndex(['2011-01-03', '2011-02-03', '2011-03-03',
'2011-04-03'], freq='D', name='idx')
self._check(idx, f, exp)
f = lambda x: x - pd.offsets.Day(2)
exp = PeriodIndex(['2010-12-30', '2011-01-30', '2011-02-27',
'2011-03-30'], freq='D', name='idx')
self._check(idx, f, exp)
def test_pi_offset_errors(self):
idx = PeriodIndex(['2011-01-01', '2011-02-01', '2011-03-01',
'2011-04-01'], freq='D', name='idx')
s = pd.Series(idx)
# Series op is applied per Period instance, thus error is raised
# from Period
msg_idx = r"Input has different freq from PeriodIndex\(freq=D\)"
msg_s = r"Input cannot be converted to Period\(freq=D\)"
for obj, msg in [(idx, msg_idx), (s, msg_s)]:
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
obj + pd.offsets.Hour(2)
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
pd.offsets.Hour(2) + obj
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
obj - pd.offsets.Hour(2)
def test_pi_sub_period(self):
# GH 13071
idx = PeriodIndex(['2011-01', '2011-02', '2011-03',
'2011-04'], freq='M', name='idx')
result = idx - pd.Period('2012-01', freq='M')
exp = pd.Index([-12, -11, -10, -9], name='idx')
tm.assert_index_equal(result, exp)
result = np.subtract(idx, pd.Period('2012-01', freq='M'))
tm.assert_index_equal(result, exp)
result = pd.Period('2012-01', freq='M') - idx
exp = pd.Index([12, 11, 10, 9], name='idx')
tm.assert_index_equal(result, exp)
result = np.subtract(pd.Period('2012-01', freq='M'), idx)
if _np_version_under1p10:
assert result is NotImplemented
else:
tm.assert_index_equal(result, exp)
exp = pd.TimedeltaIndex([np.nan, np.nan, np.nan, np.nan], name='idx')
tm.assert_index_equal(idx - pd.Period('NaT', freq='M'), exp)
tm.assert_index_equal(pd.Period('NaT', freq='M') - idx, exp)
def test_pi_sub_pdnat(self):
# GH 13071
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='M', name='idx')
exp = pd.TimedeltaIndex([pd.NaT] * 4, name='idx')
tm.assert_index_equal(pd.NaT - idx, exp)
tm.assert_index_equal(idx - pd.NaT, exp)
def test_pi_sub_period_nat(self):
# GH 13071
idx = PeriodIndex(['2011-01', 'NaT', '2011-03',
'2011-04'], freq='M', name='idx')
result = idx - pd.Period('2012-01', freq='M')
exp = pd.Index([-12, np.nan, -10, -9], name='idx')
tm.assert_index_equal(result, exp)
result = pd.Period('2012-01', freq='M') - idx
exp = | pd.Index([12, np.nan, 10, 9], name='idx') | pandas.Index |
#!/usr/bin/python
# Imports
import pandas as pd
import numpy as np
from collections import Counter
import tqdm
import math, os
from sklearn.metrics import mean_squared_error
from scipy.sparse.csgraph import minimum_spanning_tree as mst_nsim
from scipy.sparse.linalg import svds
from scipy.sparse import csr_matrix
from scipy import sparse
import implicit
from .data import preprocess_binary
# Methods
def compute_rmse(preds, ground_truth):
grouped = pd.DataFrame({'count' : ground_truth.groupby(['user_nickname','town'])['town'].apply(len)}).reset_index()
pred_values = []
real_values = []
for index, row in tqdm.tqdm(grouped.iterrows(), total=grouped.shape[0], position=0):
user_index = preds.index.tolist().index(row['user_nickname'])
town_index = preds.columns.tolist().index(row['town'])
#pred_values.append(predictions[(predictions.index==row['user_nickname'])][row['town']][0])
pred_values.append(preds.iloc[user_index,town_index])
real_values.append(float(row['count']))
rms = math.sqrt(mean_squared_error(real_values, pred_values))
return rms
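# Hedged usage sketch (shapes and column names follow the assumptions made above;
# they are not taken from the original source):
#
#   preds : DataFrame indexed by user_nickname with one column per town (predicted counts)
#   valid : long-format DataFrame with 'user_nickname' and 'town' columns (one row per visit)
#
#     rmse = compute_rmse(preds, valid)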
def compute_precision_recall_N(PR, valid, N):
grouped_val = pd.DataFrame({'count' : valid.groupby(['user_nickname','town'])['town'].apply(len)}).reset_index()
concat_preds = pd.DataFrame()
for interval in range(1000,PR.shape[0]+1000,1000):
flat_preds = pd.melt(PR.iloc[interval-1000:interval],
id_vars='user_nickname',
value_vars=PR.iloc[interval-1000:interval].columns, # list of days of the week
var_name='town',
value_name='predicted_count')
flat_preds['user_nickname'] = PR.iloc[interval-1000:interval].index.tolist() * len(PR.columns)
flat_preds = flat_preds[flat_preds.predicted_count >= 0.]
flat_preds = flat_preds.groupby('user_nickname')[['user_nickname','town','predicted_count']].apply(lambda grp: grp.nlargest(N,'predicted_count'))
concat_preds = | pd.concat([concat_preds, flat_preds], axis=0) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 7 15:14:59 2017
@author: 028375
"""
from __future__ import unicode_literals, division
import pandas as pd
import os.path
import numpy as np
def get_Cost(Outputs,DataFrame0,DateType0,Date0,CostType0,flag0):
if flag0==1:
Cost0=DataFrame0[DataFrame0[DateType0]<=Date0][['ContractID','Upfront结算货币']]
elif flag0==0:
Cost0=DataFrame0[DataFrame0[DateType0]>Date0][['ContractID','Upfront结算货币']]
Cost0['Upfront结算货币']=-Cost0['Upfront结算货币']
Outputs=pd.merge(Outputs,Cost0,how='left',on='ContractID')
Outputs=Outputs.rename(columns={'Upfront结算货币':CostType0})
Outputs[CostType0]=Outputs[CostType0].replace(np.nan,0.)
return Outputs
def get_Cost_NPV(Outputs,DataFrame0,DateType0,Date0,CostType1,CostType2,CostType3,NPVType):
Outputs=get_Cost(Outputs,DataFrame0,DateType0,Date0,CostType1,1)
Outputs=get_Cost(Outputs,DataFrame0,DateType0,Date0,CostType2,0)
Outputs[CostType3]=Outputs[CostType1]+Outputs[CostType2]
NPV0=DataFrame0[['ContractID','RMB合约估值']]
Outputs=pd.merge(Outputs,NPV0,how='left',on='ContractID')
Outputs=Outputs.rename(columns={'RMB合约估值':NPVType})
Outputs[NPVType]=Outputs[NPVType].replace(np.nan,0.)
return Outputs
def Collateral_Separate(Outputs,collateral,CollateralType):
tmp=collateral[collateral['现金流类型']==CollateralType][['ContractID','确认金额(结算货币)']]
tmp=tmp.groupby(by=['ContractID'],as_index=False)['确认金额(结算货币)'].sum()
Outputs=pd.merge(Outputs,tmp,how='left',on='ContractID')
Outputs=Outputs.rename(columns={'确认金额(结算货币)':CollateralType})
Outputs[CollateralType]=Outputs[CollateralType].replace(np.nan,0.)
return Outputs
def Check2(lastmonth,thismonth,collateral,lastdate,spotdate):
    # Cost & valuation
ContractID=thismonth['ContractID'].append(lastmonth['ContractID']).drop_duplicates()
Outputs=pd.DataFrame(ContractID).reset_index(drop=True)
Cost00='成本_期初表_到期'
Cost01='成本_期初表_存续'
Cost0='成本_期初表'
NPV0='估值_期初表'
Cost10='成本_期末表_存续'
Cost11='成本_期末表_新起'
Cost1='成本_期末表'
NPV1='估值_期末表'
Outputs=get_Cost_NPV(Outputs,lastmonth,'MATURITYDATEREAL',spotdate,Cost00,Cost01,Cost0,NPV0)
Outputs=get_Cost_NPV(Outputs,thismonth,'EffectDate',lastdate,Cost10,Cost11,Cost1,NPV1)
    # Cash flows
collateral=collateral.rename(columns={'交易编号':'ContractID'})
Outputs=Collateral_Separate(Outputs,collateral,'前端期权费')
Outputs=Collateral_Separate(Outputs,collateral,'前端支付')
Outputs=Collateral_Separate(Outputs,collateral,'部分赎回')
Outputs=Collateral_Separate(Outputs,collateral,'期间结算')
Outputs=Collateral_Separate(Outputs,collateral,'定结期间结算')
Outputs=Collateral_Separate(Outputs,collateral,'其他')
Outputs=Collateral_Separate(Outputs,collateral,'到期结算')
Outputs=Collateral_Separate(Outputs,collateral,'全部赎回')
    # Check results
PnL0=Outputs[Cost0]+Outputs[NPV0]
PnL1=Outputs[Cost1]+Outputs[NPV1]
Upfront0=Outputs['前端期权费']+Outputs['前端支付']
Redemption0=Outputs['部分赎回']
Redemption1=Outputs['全部赎回']
PayOnExpiry=Outputs['到期结算']
Status1='两期同时存续'
Status2='上月合约自然到期'
Status3='上月合约非自然到期'
Status4='本月新起且存续'
Status5='本月新起且到期'
flag1=(PnL0.round(decimals=1)==0)
flag2=(PnL1.round(decimals=1)==0)
flag3=((Outputs[Cost0]-Outputs[Cost1]-Upfront0-Redemption0).round(decimals=1)==0)
flag4=((Redemption0+Redemption1+PayOnExpiry).round(decimals=1)==0)
flag5=((Upfront0+Redemption0+Outputs[Cost1]).round(decimals=1)==0)
Outputs[Status1]=0
Outputs.loc[(~flag1)&(~flag2)&flag3,[Status1]]=1
Outputs.loc[(~flag1)&(~flag2)&(~flag3),[Status1]]=100
Outputs[Status2]=0
Outputs.loc[(Outputs[Cost00]!=0)&flag2,[Status2]]=1
Outputs[Status3]=0
Outputs.loc[(Outputs[NPV0]!=0)&(Outputs[Status1]+Outputs[Status2]==0)&(~flag4),[Status3]]=1
Outputs.loc[(Outputs[NPV0]!=0)&(Outputs[Status1]+Outputs[Status2]==0)&flag4,[Status3]]=100
Outputs[Status4]=0
Outputs.loc[flag1&(~flag2)&flag5,[Status4]]=1
Outputs.loc[flag1&(~flag2)&(~flag5),[Status4]]=100
Outputs[Status5]=0
Outputs.loc[flag1&flag2&((Upfront0!=0)|(PayOnExpiry!=0)|(Redemption1!=0)),[Status5]]=1
Outputs.loc[flag1&flag2&((Upfront0==0)&(PayOnExpiry==0)&(Redemption1==0)),[Status5]]=100
Outputs['Status']='异常'
Outputs.loc[Outputs[Status1]==1,['Status']]=Status1
Outputs.loc[Outputs[Status2]==1,['Status']]=Status2
Outputs.loc[Outputs[Status3]==1,['Status']]=Status3
Outputs.loc[Outputs[Status4]==1,['Status']]=Status4
Outputs.loc[Outputs[Status5]==1,['Status']]=Status5
return Outputs
def Check1(lastmonth,thismonth,collateral,FXRate):
thismonth['合约估值']=pd.to_numeric(thismonth['合约估值'],errors='coerce')
lastmonth['合约估值']=pd.to_numeric(lastmonth['合约估值'],errors='coerce')
thismonth['Upfront结算货币']=pd.to_numeric(thismonth['Upfront结算货币'],errors='coerce')
lastmonth['Upfront结算货币']=pd.to_numeric(lastmonth['Upfront结算货币'],errors='coerce')
thismonth['Upfront结算货币']=thismonth['Upfront结算货币'].replace(np.nan,0.)
lastmonth['Upfront结算货币']=lastmonth['Upfront结算货币'].replace(np.nan,0.)
FXRate=FXRate[['FROMCCY','FX']]
thismonth=pd.merge(thismonth,FXRate,how='left',left_on='币种',right_on='FROMCCY')
lastmonth=pd.merge(lastmonth,FXRate,how='left',left_on='币种',right_on='FROMCCY')
del thismonth['FROMCCY'],lastmonth['FROMCCY']
thismonth['RMB合约估值']=thismonth['合约估值']*thismonth['FX']
lastmonth['RMB合约估值']=lastmonth['合约估值']*lastmonth['FX']
lastmonth['MATURITYDATEREAL']=pd.to_datetime(lastmonth['MATURITYDATEREAL'])
thismonth=thismonth.rename(columns={'起始日':'EffectDate'})
thismonth['EffectDate']=pd.to_datetime(thismonth['EffectDate'])
thismonth=thismonth.rename(columns={'合约编号':'ContractID'})
lastmonth=lastmonth.rename(columns={'合约编号':'ContractID'})
collateral['确认金额(结算货币)']=pd.to_numeric(collateral['确认金额(结算货币)'],errors='coerce')
collateral['确认金额(结算货币)']=collateral['确认金额(结算货币)'].replace(np.nan,0.)
return lastmonth,thismonth,collateral
def Check0(lastmonth,thismonth,collateral):
lastmonth_dupl=lastmonth[lastmonth.duplicated(subset='合约编号')]
thismonth_dupl=thismonth[thismonth.duplicated(subset='合约编号')]
collateral_dupl=collateral[collateral.duplicated()]
lastmonth=lastmonth.drop_duplicates(subset='合约编号')
thismonth=thismonth.drop_duplicates(subset='合约编号')
collateral=collateral.drop_duplicates()
return lastmonth,thismonth,collateral,lastmonth_dupl,thismonth_dupl,collateral_dupl
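# Hedged usage sketch (not from the original source): one plausible way to chain the
# three checks after the Excel extracts are loaded, following their signatures above.
#
#     lastmonth, thismonth, collateral, lm_dup, tm_dup, col_dup = Check0(lastmonth, thismonth, collateral)
#     lastmonth, thismonth, collateral = Check1(lastmonth, thismonth, collateral, FXRate)
#     report = Check2(lastmonth, thismonth, collateral, lastdate, spotdate)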
if __name__=="__main__":
path0=os.path.dirname(os.path.realpath(__file__))+'//'
path1='Opt_DM\TheBegin.xlsx'
path2='Opt_DM\TheEnd.xlsx'
path3='Opt_DM\Collateral.xlsx'
path4='Opt_DM\FX.xlsx'
path5='Opt_DM\Report.xlsx'
spotdate= | pd.to_datetime('2017-11-30') | pandas.to_datetime |
# notebook: 00-oh-preprocess_data.ipynb
# %% [markdown]
# # Data Cleanup and Pre-processing
#
# Before we can analyze the data we need to clean the raw data and bring it to a format suited for the analyses.
# %%
# Basic imports and setup.
import sys
import logging
from pathlib import Path
import pandas as pd
from neuropsymodelcomparison.dataprocessing import analysis
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
# %% [markdown]
# Read in raw data from files.
# %%
data_path = Path.cwd() / 'data' # Run from project root.
raw_data_path = data_path / 'raw'
users = pd.read_csv(raw_data_path / 'users.csv', dtype={'gaming_exp': pd.Int8Dtype()})
blocks = pd.read_csv(raw_data_path / 'blocks.csv', index_col='id', parse_dates=['time_iso'],
dtype={'rating': | pd.Int8Dtype() | pandas.Int8Dtype |
"""
DOCSTRING
"""
import matplotlib.pyplot
import numpy
import os
import pandas
import PIL
import seaborn
import skimage
import time
class EDA:
"""
DOCSTRING
"""
def __init__(self):
        self.dict_labels = {
            0: "No DR",
            1: "Mild",
            2: "Moderate",
            3: "Severe",
            4: "Proliferative DR"}
def __call__(self):
labels = pandas.read_csv("labels/trainLabels.csv")
plot_classification_frequency(labels, "level", "Retinopathy_vs_Frequency_All")
plot_classification_frequency(labels, "level", "Retinopathy_vs_Frequency_Binary", True)
def change_labels(df, category):
"""
Changes the labels for a binary classification.
Either the person has a degree of retinopathy, or they don't.
INPUT
df: Pandas DataFrame of the image name and labels
category: column of the labels
OUTPUT
Column containing a binary classification of 0 or 1
"""
return [1 if l > 0 else 0 for l in df[category]]
def plot_classification_frequency(df, category, file_name, convert_labels=False):
"""
Plots the frequency at which labels occur.
INPUT
df: Pandas DataFrame of the image name and labels
category: category of labels, from 0 to 4
file_name: file name of the image
convert_labels: argument specified for converting to binary classification
RETURN
None
"""
    if convert_labels == True:
        df['level'] = change_labels(df, 'level')
    seaborn.set(style="whitegrid", color_codes=True)
    seaborn.countplot(x=category, data=df)
    matplotlib.pyplot.title('Retinopathy vs Frequency')
    matplotlib.pyplot.savefig(file_name)
return
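# Hedged usage sketch (not from the original source): calling the EDA object reads
# labels/trainLabels.csv (the path assumed in __call__ above) and saves the two
# label-frequency plots.
#
#     eda = EDA()
#     eda()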
class ImageToArray:
"""
DOCSTRING
"""
def __call__(self):
start_time = time.time()
labels = | pandas.read_csv("../labels/trainLabels_master_256_v2.csv") | pandas.read_csv |
# Copyright 2021 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
""" zeroshot probing pipeline """
from typing import List, Optional, Tuple
from absl import app
from absl import flags
from absl import logging
from ml_collections import FrozenConfigDict
import pandas as pd
from pandas import DataFrame
from labtools import catch_exp_failures
from labtools import maybe_rlocation
from probing._src.configurable import configurable
from probing.configs import get_configs
from probing.dataset.dataset import create_dataset
from probing.zeroshot.run_probing import run_zeroshot
from probing.zeroshot.run_scoring import zeroshot_scoring
def _zeroshot_pipeline(
configs: List[Tuple[str, FrozenConfigDict]],
ngram_gbc_path: str,
max_examples: Optional[int] = None,
) -> DataFrame:
""" zeroshot partial pipeline """
result_csvs = []
_, meta = create_dataset()
ngram_gbc_path = maybe_rlocation(ngram_gbc_path)
ngrams = | pd.read_csv(ngram_gbc_path) | pandas.read_csv |
import streamlit as st
import pandas as pd
import numpy as np
import altair as alt
import pydeck as pdk
import matplotlib.pyplot as plt
from nltk.tokenize import TweetTokenizer
from nltk.corpus import stopwords
import string
from wordcloud import WordCloud
import json
import business_weekday_plot
# [TODO] put a divider in between each viz
st.set_option('deprecation.showPyplotGlobalUse', False)
st.title("What are some opportunities to present useful information for users researching restaurants on Yelp?")
st.markdown("The following data visualizations help a Yelp product team explore different factors that influence \
a diner's restaurant research journey: restaurant characteristics (such as location, peak hours), and reviews \
from the Yelp community.")
st.markdown("Analysis done by: <NAME>(seungmyl) and <NAME>")
@st.cache
# add caching so we load the data only once
def load_data(json_file):
return pd.read_json(json_file, lines=True)
# Loading data, use partially selected portions for review and user.
business_df = load_data(json_file = "../yelp_dataset/yelp_academic_dataset_business.json")
review_df = pd.read_csv("../yelp_dataset/review_pit_restaurant.csv").drop("Unnamed: 0", axis=1)
checkin = load_data(json_file = "../yelp_dataset/yelp_academic_dataset_checkin.json")
# user_df = pd.read_csv("../yelp_dataset/user_top_500k.csv").drop("Unnamed: 0", axis=1)
# Visualization 1
# st.markdown("Visualization 1: When do diners checkin to restaurants across different cities?")
# cities = list(business_df.groupby(["city"]).count().sort_values(by="categories", ascending=False).head(10).index)
# viz1_cities = st.multiselect("Choose cities you wish to compare", cities[:5], cities[:5])
# business_masked_df = business_df[lambda x: x["city"].isin(viz1_cities)]
# business_ids = pd.DataFrame(business_masked_df[['business_id',"city"]].set_index("business_id").to_dict())
# st.write(business_ids)
# checkin = checkin.join(business_ids, on="business_id", how="inner")
# st.write(checkin.head(50))
# dfs = list()
# def parser(row):
# df_parsed = pd.DataFrame(row.date.split(", "), columns=["date"])
# df_parsed["date"] = pd.to_datetime(df_parsed["date"])
# df_parsed["city"] = row["city"]
# df_parsed["weekday"] = df_parsed["date"].dt.day_name()
# df_parsed["hour"] = df_parsed["date"].dt.hour
# dfs.append(df_parsed)
# checkin.apply(lambda x: parser(x), axis=1)
# viz1_df = pd.concat(dfs, axis=1)
# viz1_df.to_csv("visualization1.csv")
# st.write(viz1_df.head())
# viz1 = business_masked_df.groupby(["city", "weekday", "hour"]).sum()
# st.bar_chart(viz1)
# Select the city you want to explore.
# Visualization 1
st.markdown("Visualization 1: When do diners checkin to restaurants across different cities?")
cities = list(business_df.groupby(["city"]).count().sort_values(by="categories", ascending=False).head(10).index)
city = st.selectbox("Choose your favorite city", cities)
city_masked = business_df[lambda x: x["city"]==city][lambda x: x["categories"].str.contains("Restaurant", na=False)]
# Select type of cuisine
cuisine = st.selectbox(
'Select your favorite food',
('Anything', 'Mexican', 'Korean', 'Chinese', 'Pizza', 'American', 'Dessert', 'Salad', 'Burgers', 'Indian'))
attribute_list = ("delivery", "take-out", "parking", "vegetarian", "vegan", "WiFi")
city_cuisine_masked = city_masked
if cuisine != "Anything":
city_cuisine_masked = city_masked[city_masked["categories"].str.contains(cuisine, na=True)]
# Select Business attributes
# options = list(range(len(attribute_list)))
# attributes = st.multiselect("attributes", options, format_func=lambda x: attribute_list[x])
# def attributeFinder(value_dict, attribute):
# try:
# if value_dict.get(attribute,False) not in ["No", "no", False]:
# return True
# except:
# return False
# return False
# city_cuisine_attribute_masked = city_cuisine_masked
# for value in attributes:
# city_cuisine_attribute_masked = city_cuisine_attribute_masked[city_masked["attributes"].apply(lambda x: attributeFinder(x, attribute_list[value]))]
# Plot the map
# Adding code so we can have map default to the center of the data
# midpoint = (np.average(city_masked['latitude']), np.average(city_masked['longitude']))
# layer = pdk.Layer(
# 'ScatterplotLayer',
# data=city_cuisine_masked,
# get_position='[longitude, latitude]',
# get_color='[200, 30, 0, 160]',
# get_radius=100,
# picakble=True,
# wireframe=True,
# )
# view_state = pdk.ViewState(
# latitude=midpoint[0],
# longitude=midpoint[1],
# zoom=10,
# )
# st.pydeck_chart(pdk.Deck(
# map_style='mapbox://styles/mapbox/light-v9',
# initial_view_state=view_state,
# layers=[ layer
# ],
# tooltip={
# "html": "<b>address:</b> {address}"
# "<br/> <b>name:</b> {name}"
# " <br/> <b>stars:</b> {stars} ",
# "style": {"color": "white"},
# },
# ))
st.markdown("Visualization 2: What type of cuisines are we likely to find in this city?")
## Visualization 2: What type of cuisines are we likely to find in this city?
unique_categories = dict()
stop_words = ["Food", "Grocery", "Restaurants", "Nightlife"]
def findUniqueCategory(row):
values = row.split(", ")
for i in values:
if i not in stop_words:
unique_categories[i] = unique_categories.get(i, 0)+1
city_masked["categories"].apply(lambda x: findUniqueCategory(x))
#Only return top 10 categories
unique_categories = pd.Series(unique_categories).sort_values(ascending=False).head(10)
fig1, ax1 = plt.subplots()
ax1.pie(unique_categories, labels=unique_categories.index, autopct='%1.1f%%', startangle=90)
ax1.axis('equal')
st.pyplot(fig1)
# Select the restaurant
restaurant = st.selectbox(
'Select your restaurant',
city_cuisine_masked["name"].to_list())
business_id = city_cuisine_masked[city_cuisine_masked["name"]==restaurant]["business_id"].values[0]
checkin_parsed = business_weekday_plot.dateParser(checkin, business_id)
# [TODO] Add weekday selection
weekday = "Monday"
checkin_df = business_weekday_plot.getCheckinByHour(checkin_parsed, weekday, business_id)
st.markdown("Checkin counts by hour for day: "+weekday)
st.bar_chart(checkin_df)
st.markdown("##Looking at Reviews from Yelp Community")
st.markdown("Visualization 3: What word(s) are most frequently used words to describe different cuisine types?")
# Visualization 3: What word(s) are most frequently used to describe different cuisine types?
# Select type of cuisine
unique_categories = dict()
pit_business_df = business_df[lambda x: x["city"]=="Pittsburgh"][lambda x: x["categories"].str.contains("Restaurant", na=False)]
tokens = dict()
tknzr = TweetTokenizer()
common_keywords = ["good", "place", "food", "restaurant", "service", "like", "also", "one", "menu", "get",
"would", "...", "order", "ordered", "time", "really", "us", "go", "i've", "i'm", "before", "well", "back"
"try", "great", "little", "got", "nice", "even", "could", "came", "much"]
def tokenizer_wrapper(row):
stop = stopwords.words('english') + list(string.punctuation) + common_keywords
tmp = [i for i in tknzr.tokenize(row.lower()) if i not in stop]
for word in tmp:
tokens[word] = tokens.get(word, 0) + 1
categories = ['Mexican', 'Korean', 'Chinese', 'Pizza', 'American', 'Dessert', 'Salad', 'Burgers', 'Indian']
review_cuisine = st.selectbox(
'Select your favorite food for reviews analysis',
["Choose One"] + categories)
if review_cuisine != 'Choose One':
pit_business_df = pit_business_df[pit_business_df["categories"].str.contains(review_cuisine, na=True)]
if st.checkbox("Show Review Wordcloud of cuisine type: " + review_cuisine):
# Tokenize the review text, Will only work on Pittsburgh
# For the sake of computation time, I only took the reviews with at least 10 vote counts
# COMMENT: Would be better to use TF-IDF to actually extract category-specific keywords, but for simplicity, we manually created common words to remove
pit_cuisine_business_df = pit_business_df[pit_business_df["categories"].str.contains(review_cuisine, na=True)]
selected_businesses = pit_cuisine_business_df["business_id"].unique()
review_business_masked = review_df[lambda x: x["business_id"].isin(selected_businesses)]
review_business_masked["vote_total"] = review_business_masked["useful"] + review_business_masked["funny"] + review_business_masked["cool"]
review_business_masked = review_business_masked[lambda x: x["vote_total"]>10]
review_business_masked["text"].apply(lambda x: tokenizer_wrapper(x))
tokens = pd.Series(tokens).sort_values(ascending=False).head(50)
wc = WordCloud().fit_words(tokens)
st.image(wc.to_array())
st.markdown("Visualization 4: How has user appreciation for “cool”, “useful”, and “funny” reviews changed over the years?")
# Visualization 4: How engaging are top reviewers for diners looking to research restaurants?
# I could not get time data for users => changed to reviews
# st.write(user_df.head())
review_df["date"] = | pd.to_datetime(review_df["date"]) | pandas.to_datetime |
import datetime
import numpy as np
import streamlit as st
import pandas as pd
from sqlalchemy import create_engine
import visualization
import joblib
import random
import SessionState
VALUES = [
0,
1,
5,
10,
25,
50,
75,
100,
200,
300,
400,
500,
750,
1_000,
5_000,
10_000,
25_000,
50_000,
75_000,
100_000,
200_000,
# 250_000,
300_000,
400_000,
500_000,
750_000,
1_000_000,
# 2_000_000,
# 3_000_000,
# 6_000_000,
]
BIG_VALUES = [val for val in VALUES if val >= 100_000]
L_SUM = sum([val for val in VALUES[:len(VALUES) // 2]])
R_SUM = sum([val for val in VALUES[len(VALUES) // 2:]])
engine = create_engine('postgresql://{user}:{pw}@{host}:{port}/{dbname}'.
format(user=st.secrets['username'], pw=st.secrets['password'], host=st.secrets['host'],
port=5432,
dbname=st.secrets['dbname']))
session = SessionState.get(run_id=0)
def generate(random_chars=12, alphabet="0123456789abcdef"):
r = random.SystemRandom()
return ''.join([r.choice(alphabet) for i in range(random_chars)])
query_params = st.experimental_get_query_params()
if not hasattr(st, 'game_id') or not query_params:
st.game_id = generate(8)
session.game_id = st.game_id
st.experimental_set_query_params(round_number=1, game_id=st.game_id, prev_offer=0)
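
# NOTE (added): `expected_value`, `populate_round` and `new_game` are referenced
# in main() below but are not part of this excerpt. The two sketches here are
# assumed, minimal implementations and may differ from the originals: the board's
# expected value is taken as the mean of the unopened case values, and
# populate_round simply appends the round dataframe to the given Postgres table
# through the engine created above. `new_game` is left out.
def expected_value(remaining_values):
    """Mean of the values still on the board (assumed implementation)."""
    return float(np.mean(remaining_values))


def populate_round(df, table_name):
    """Append one round's data to the game-log table (assumed implementation)."""
    df.to_sql(table_name, engine, if_exists='append', index=False)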
def main():
st.set_page_config(
page_title="DEAL OR NO DEAL",
page_icon="🤑",
initial_sidebar_state="expanded")
st.sidebar.title('DEAL OR NO DEAL')
selected_banker = st.sidebar.selectbox('Pick your Banker', ['Random Forest', 'LightGBM', 'XGBoost'], 0)
if st.sidebar.button('New Game'):
new_game()
st.sidebar.markdown("""
This is a simulation of the Deal or No Deal Banker's offers. The code for this project can be found at [my Github](https://github.com/jstock29/dealnodeal) and the data that I painstakingly collected from over 100 episodes of the show is on [my Kaggle](https://www.kaggle.com/jaredstock/deal-or-no-deal-game-data).
You can see what the RoboBanker will offer by simulating a board at various rounds. Each round you should pick the correct number of values from the board:
1. Pick 6 - 6 Total
2. Pick 5 - 11 Total
3. Pick 4 - 15 Total
4. Pick 3 - 18 Total
5. Pick 2 - 20 Total
6. Pick 1 - 21 Total
7. Pick 1 - 22 Total
8. Pick 1 - 23 Total
9. Pick 1 - 24 Total
10. Pick 1 -25 Total
After each round you can see what my RoboBanker is offering you and decide if that's a deal you want to take or not. I will not give you that money though.
FYI: Anonymous game data is sent to my database so I can maybe do stuff with it later. I don't know why that would sketch you out, this is all fake, but there you go.
""")
st.sidebar.caption('<NAME> | NYC | 2021')
app_state = st.experimental_get_query_params()
game_id = app_state['game_id'][0]
round_number = int(app_state['round_number'][0])
prev_offer = float(app_state['prev_offer'][0])
offer = 0.
# st.write(app_state)
st.header('Board')
st.write('')
col1, col2, col3 = st.beta_columns(3)
l_cols = VALUES[:len(VALUES) // 2]
r_cols = VALUES[len(VALUES) // 2:]
model = joblib.load(f'bankers/{selected_banker}.pkl')
with col1:
values_1 = [st.checkbox(str(val), key=session.run_id) for val in VALUES[:len(VALUES) // 2]]
left_sum = sum([val for i, val in enumerate(l_cols) if not values_1[i]])
with col2:
values_2 = [st.checkbox(str(val), key=session.run_id) for val in VALUES[len(VALUES) // 2:]]
right_sum = sum([val for i, val in enumerate(r_cols) if not values_2[i]])
values = values_1 + values_2
choices = [val for i, val in enumerate(VALUES) if values[i]]
remaining = [val for i, val in enumerate(VALUES) if not values[i]]
remaining_bigs = [_ for _ in remaining if _ in BIG_VALUES]
average = np.average(remaining)
_max = max(remaining)
if right_sum == 0:
balance = (left_sum / L_SUM)
elif left_sum == 0:
balance = (right_sum / R_SUM)
else:
balance = (right_sum / R_SUM) / (left_sum / L_SUM)
ev = expected_value(remaining)
with col3:
st.subheader('Info')
st.write(f'Round: {round_number}')
st.write(f'Picked: {len(choices)}')
st.write(f'Previous Offer: {prev_offer}')
st.write(f'Expected Value: {round(ev, 0)}')
st.write(f'Probability of having a big value: {round(len(remaining_bigs) / len(remaining) * 100, 1)}%')
st.subheader('Banker Offer')
if len(choices) > 5:
X = pd.DataFrame({'Round': [round_number], 'Board Average': [ev], 'Previous Offer': [prev_offer]})
p = model.predict(X)
offer = float(p[0])
st.write(f'Offer: ${round(float(offer), 2)}')
if offer / ev <= 1:
st.progress(offer / ev)
else:
st.progress(1)
st.caption(f'Offer % of Expected Value: {round((offer / ev) * 100, 2)}%')
else:
st.info('Pick values to see offers')
col14, col15 = st.beta_columns(2)
if len(choices) == 6 or len(choices) == 11 or len(choices) == 15 or len(choices) == 18 or len(choices) >= 20:
with col14:
if st.button('Deal!'):
round_data = {
"Game ID": game_id,
"Round": round_number,
"Remaining Values": str(remaining),
"Board Value": sum(remaining),
"Board Average": round(average, 0),
"Board Balance": round(balance, 3),
"Probability of Big Value": round(len(remaining_bigs) / len(remaining), 3),
"Previous Offer": prev_offer,
"Offer": round(offer, 0),
"Offer Percent of Average": round(offer / average, 4),
"model":selected_banker,
"datetime":datetime.datetime.now(),
"Deal": True
}
df = pd.DataFrame(round_data, index=[0])
populate_round(df, 'player_games')
with col15:
if st.button('No Deal!'):
round_data = {
"Game ID": game_id,
"Round": round_number,
"Remaining Values": str(remaining),
"Board Value": sum(remaining),
"Board Average": round(average, 0),
"Board Balance": round(balance, 3),
"Probability of Big Value": round(len(remaining_bigs) / len(remaining), 3),
"Previous Offer": prev_offer,
"Offer": round(offer, 0),
"Offer Percent of Average": round(offer / average, 4),
"model":selected_banker,
"datetime":datetime.datetime.now(),
"Deal": False
}
round_number += 1
st.experimental_set_query_params(round_number=round_number, game_id=game_id, prev_offer=round(offer, 0))
                df = pd.DataFrame(round_data, index=[0])
import sys
import os
import json
import pandas as pd
from sklearn.utils import class_weight
import numpy as np
from keras import optimizers, callbacks
import tensorflow as tf
from sklearn.metrics import accuracy_score
from utils.ml_utils import data_to_pkl
from arg_parser import UserArgs, ArgParser
import matplotlib
font = {'size': 10}
matplotlib.rc('font', **font)
class DragonTrainer(object):
def __init__(self, model_name, ext):
self.model_name = model_name
base_train_dir = UserArgs.base_train_dir
self.training_dir_wo_ext = os.path.join(
base_train_dir,
model_name)
self.training_dir = os.path.join(
base_train_dir,
model_name + ext)
if UserArgs.test_mode:
self.training_dir = os.path.join(self.training_dir, "test")
self.training_dir_wo_ext = os.path.join(self.training_dir_wo_ext, "test")
def create_training_dir(self):
# check if directory already exists
if os.path.exists(self.training_dir):
print(f"Training dir {self.training_dir} already exists..")
if os.path.exists(os.path.join(self.training_dir, "best-checkpoint")):
print("Found pretrained model")
return False
else:
raise Exception(f"Training dir {self.training_dir} already exists.. "
f"No pretrained model found...")
print(f"Current training directory for this run: {self.training_dir}")
os.makedirs(self.training_dir)
# save current hyper params to training dir
ArgParser.save_to_file(UserArgs, self.training_dir, self.model_name)
return True
@staticmethod
def _init_optimizer(optimizer, lr):
opt_name = optimizer.lower()
if opt_name == 'adam':
optimizer = optimizers.Adam(lr=lr)
elif opt_name == 'rmsprop':
optimizer = optimizers.RMSprop(lr=lr)
elif opt_name == 'sgd':
optimizer = optimizers.SGD(lr=lr, momentum=0.9, nesterov=True)
else:
raise ValueError('unknown optimizer %s' % opt_name)
return optimizer
@staticmethod
def subset_accuracy(y_gt, y_prediction, subset_indices):
y_prediction = tf.transpose(tf.gather(tf.transpose(y_prediction), subset_indices))
arg_p = tf.gather(subset_indices, tf.arg_max(y_prediction, 1))
y_gt = tf.transpose(tf.gather(tf.transpose(y_gt), subset_indices))
arg_y = tf.gather(subset_indices, tf.arg_max(y_gt, 1))
return tf.reduce_mean(tf.to_float(tf.equal(arg_y, arg_p)))
@staticmethod
def calc_dragon_wgt(Y_true, Y_pred, train_distribution):
classes_idx, n_samples = train_distribution
acc_per_class = []
weights_per_class = []
for i, (c,n) in enumerate(zip(classes_idx,n_samples)):
idx = np.where(Y_true == c)[0]
if len(idx) != 0:
acc_per_class = acc_per_class + [sum(Y_true[idx] == Y_pred[idx])/len(idx)]
weights_per_class = weights_per_class + [n]
weights_per_class = (np.array(weights_per_class) / sum(weights_per_class))
return sum(acc_per_class*weights_per_class)
@staticmethod
def calc_per_class_acc(Y_true, Y_pred):
counts_per_class = pd.Series(Y_true).value_counts().to_dict()
accuracy = ((Y_pred == Y_true) / np.array(
[counts_per_class[y] for y in Y_true])).sum() / len(counts_per_class)
return accuracy
@staticmethod
def balance_data_with_sample_weights(Y_labels, add_dummy_class=True):
class_weights = class_weight.compute_class_weight('balanced',
np.unique(Y_labels),
Y_labels)
if add_dummy_class:
class_weights = np.insert(class_weights, 0, 0) # add 1 zero so 200 -> 201
sample_weights = np.array([class_weights[y] for y in Y_labels])
return sample_weights
@staticmethod
def harmonic_acc(ms_acc, fs_acc):
return (2 * (ms_acc * fs_acc)) / (ms_acc + fs_acc)
@staticmethod
def training_evaluation(model_instance, X_data, Y_data, classes_subsets, eval_sp_params):
        # extract classes subsets
        all_classes, ms_classes, fs_classes = classes_subsets
        # Estimate accuracies: regular accuracy, per class accuracy and dragon wgt accuracy
X, X_many, X_few = X_data
Y, Y_many, Y_few = Y_data
# all classes accuracy (generalized accuracy)
_, _, reg_acc, pc_acc, wgt_acc = \
DragonTrainer.__evaluate(model_instance, X, Y, all_classes, eval_sp_params)
# ms classes accuracy (generalized many-shot accuracy)
_, _, ms_reg_acc, ms_pc_acc, ms_wgt_acc = \
DragonTrainer.__evaluate(model_instance, X_many, Y_many, all_classes, eval_sp_params)
# fs classes accuracy (generalized few-shot accuracy)
_, _, fs_reg_acc, fs_pc_acc, fs_wgt_acc = \
DragonTrainer.__evaluate(model_instance, X_few, Y_few, all_classes, eval_sp_params)
        reg_harmonic_acc = DragonTrainer.harmonic_acc(ms_reg_acc, fs_reg_acc)
        pc_harmonic_acc = DragonTrainer.harmonic_acc(ms_pc_acc, fs_pc_acc)
        wgt_harmonic_acc = DragonTrainer.harmonic_acc(ms_wgt_acc, fs_wgt_acc)
# many among many accuracy
_, _, ms_ms_reg_acc, ms_ms_pc_acc, ms_ms_wgt_acc = \
DragonTrainer.__evaluate(model_instance, X_many, Y_many, ms_classes, eval_sp_params)
# few among few accuracy
_, _, fs_fs_reg_acc, fs_fs_pc_acc, fs_fs_wgt_acc = \
DragonTrainer.__evaluate(model_instance, X_few, Y_few, fs_classes, eval_sp_params)
res_df = pd.DataFrame(columns=['reg_acc', 'per_class_acc', 'wgt_acc'])
res_df.loc["All"] = [reg_acc, pc_acc, wgt_acc]
#res_df.loc["MS"] = [ms_reg_acc, ms_pc_acc, ms_wgt_acc]
#res_df.loc["FS"] = [fs_reg_acc, fs_pc_acc, fs_wgt_acc]
#res_df.loc["Harmonic"] = [reg_harmonic_acc, pc_harmonic_acc, wgt_harmonic_acc]
#res_df.loc["MS/MS"] = [ms_ms_reg_acc, ms_ms_pc_acc, ms_ms_wgt_acc]
#res_df.loc["FS/FS"] = [fs_fs_reg_acc, fs_fs_pc_acc, fs_fs_wgt_acc]
print(res_df)
res = {}
res['val_wgtAcc'] = wgt_acc
res['val_perClassAcc'] = pc_acc
#res['val_ms_pc_acc'] = ms_pc_acc
#res['val_fs_pc_acc'] = fs_pc_acc
#res['val_har_acc'] = pc_harmonic_acc
return res
def prepare_callbacks_for_training(self, model_instance, eval_params, use_custom_eval=True):
"""
Prepare Keras Callbacks for model training
Returns a list of keras callbacks
"""
training_CB = []
if eval_params is None:
monitor, mon_mode = 'val_acc', 'max'
else:
X_val, Y_val, val_classes, train_distribution, \
ms_classes, fs_classes, X_val_many, Y_val_many, X_val_few, Y_val_few = eval_params
evaluate_specific_params = (train_distribution, ms_classes, fs_classes)
# Set the monitor (metric) for validation.
# This is used for early-stopping during development.
monitor, mon_mode = None, None
if use_custom_eval:
if UserArgs.train_dist == "dragon":
monitor, mon_mode = 'val_wgtAcc', 'max'
else:
monitor, mon_mode = 'val_perClassAcc', 'max'
training_CB += [callbacks.LambdaCallback(
on_epoch_end=lambda epoch, logs: logs.update(
DragonTrainer.training_evaluation(model_instance, (X_val, X_val_many, X_val_few),
(Y_val, Y_val_many, Y_val_few),
(val_classes, ms_classes, fs_classes),
evaluate_specific_params))
)]
else:
monitor, mon_mode = 'val_har_acc', 'max'
training_CB += [callbacks.LambdaCallback(
on_epoch_end=lambda epoch, logs: logs.update(
DragonTrainer.training_evaluation(model_instance, (X_val, X_val_many, X_val_few),
(Y_val, Y_val_many, Y_val_few),
(val_classes, ms_classes, fs_classes),
evaluate_specific_params))
)]
print(f'monitoring = {monitor}')
# Save a model checkpoint only when monitor indicates that the best performance so far
training_CB += [
callbacks.ModelCheckpoint(monitor=monitor, mode=mon_mode,
save_best_only=True,
filepath=os.path.join(self.training_dir, 'best-checkpoint'),
verbose=UserArgs.verbose)]
# Set an early stopping callback
training_CB += [callbacks.EarlyStopping(monitor=monitor, mode=mon_mode,
patience=UserArgs.patience,
verbose=UserArgs.verbose,
min_delta=UserArgs.min_delta)]
# Log training history to CSV
training_CB += [callbacks.CSVLogger(os.path.join(self.training_dir, 'training_log.csv'),
separator='|', append=True)]
# Flush stdout buffer on every epoch
training_CB += [callbacks.LambdaCallback(on_epoch_end=lambda epoch, logs: sys.stdout.flush())]
return training_CB
@staticmethod
def __evaluate(model_instance, X, Y, classes_subset, eval_sp_params):
# Inner function to avoid code duplication
# returns: regular accuracy score, per class accuracy score, dragon wgt score
train_distribution, ms_classes, fs_classes = eval_sp_params
predictions = model_instance.predict_val_layer(X)
subset_preds = classes_subset[(predictions[:, classes_subset]).argmax(axis=1)]
# evaluate performance using regular accuracy function
reg_acc = float(accuracy_score(Y, subset_preds))
# evaluate performance using per class accuracy
pc_acc = DragonTrainer.calc_per_class_acc(Y, subset_preds)
# evaluate performance using average accuracy score function (dragon evaluation)
wgt_acc = DragonTrainer.calc_dragon_wgt(Y, subset_preds, train_distribution)
return predictions, subset_preds, reg_acc, pc_acc, wgt_acc
def evaluate_and_save_metrics(self, model_instance,
train_data, val_data, test_data, test_eval_params,
plot_thresh=True,
should_save_predictions=True,
should_save_metrics=True):
X_train, Y_train, Attributes_train, train_classes = train_data
X_val, Y_val, Attributes_val, val_classes = val_data
X_test, Y_test, Attributes_test, test_classes = test_data
_, _, _, train_distribution, \
ms_classes, fs_classes, X_test_many, Y_test_many, X_test_few, Y_test_few = test_eval_params
evaluate_specific_params = (train_distribution, ms_classes, fs_classes)
# Evaluate on train data
train_preds_score, train_preds_argmax, train_reg_acc, train_pc_acc, train_wgt_acc \
= DragonTrainer.__evaluate(model_instance, X_train, Y_train, train_classes, evaluate_specific_params)
# Evaluate on val data
val_preds_score, val_preds_argmax, val_reg_acc, val_pc_acc, val_wgt_acc = \
DragonTrainer.__evaluate(model_instance, X_val, Y_val, val_classes, evaluate_specific_params)
# Evaluate on test data
test_preds_score, test_preds_argmax, test_reg_acc, test_pc_acc, test_wgt_acc = \
DragonTrainer.__evaluate(model_instance, X_test, Y_test, test_classes, evaluate_specific_params)
# Print Results
        res_df = pd.DataFrame(columns=['reg_acc', 'per_class_acc', 'wgt_acc'])
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 19 16:49:28 2018
@author: <NAME>
__________________________________________________
### MacPyver.vector ###
### The Swissknife like Python-Package for ###
### work in general and with GIS ###
__________________________________________________
"""
import os
import pandas as pd
from osgeo import ogr
import numpy as np
#import cPickle as pickle
class shp_attr_tbl():
''' put in full path of shp file
init creates list of fieldnames in the shape
with the method get_attr_tbl it reads in the dbf to a pandas dataframe
possible to read dbf's directly to dataframe, just put ending .dbf instead
of .shp
'''
def __init__(self,shp_name):
#dictionary to translate the geometry
_geometry_dic = {-2147483647:"Point25D",
-2147483646:"LineString25D",
-2147483645:"Polygon25D",
-2147483644:"MultiPoint25D",
-2147483643:"MultiLineString25D",
-2147483642:"MultiPolygon25D",
0: "Geometry",
1: "Point",
2: "Line",
3:"Polygon",
4:"MultiPoint",
5: "MultiLineString",
6: "MultiPolygon",
100: "No Geometry"}
self.name =shp_name.split(os.sep)[-1]
self.path = (os.sep).join(shp_name.split(os.sep)[:-1])
driver = ogr.GetDriverByName("ESRI Shapefile")
ds = driver.Open(shp_name)
layer = ds.GetLayer()
layerDefinition = layer.GetLayerDefn()
self.fieldnames = []
if not shp_name.endswith('.dbf'):
self.geometrytype = _geometry_dic[layer.GetGeomType()]
self.extent = layer.GetExtent()
self.spatialref = layer.GetSpatialRef().ExportToPrettyWkt()
self.featurecount = layer.GetFeatureCount()
for i in range(layerDefinition.GetFieldCount()):
self.fieldnames.append(layerDefinition.GetFieldDefn(i).GetName())
def get_attr_tbl(self, fields = None):
'''if no fields passed, it will read in all columns,
if some field passed, it will just read this subset
returned data is pandas dataframe'''
if not fields:
used_fields = self.fieldnames
else:
#create difference list
diffl = list(set(fields).difference(self.fieldnames))
#create intersection list
intersl = list(set(fields).intersection(self.fieldnames))
if diffl:
print ("ERROR: one or more fields are not in fieldnames:\n{0}".format(diffl))
print ("used matching fields to create the attribute table:\n{0}".format(intersl))
#create list of columns (fields) to use
used_fields = intersl
#create empty dictionary to store all values
dic = {}
for fi in used_fields:
dic[fi] = []
#reset pointer to begining of the file
driver = ogr.GetDriverByName("ESRI Shapefile")
ds = driver.Open(self.path + os.sep + self.name)
layer = ds.GetLayer()
#fill dic per row in layer / and per field
for feature in layer:
for fi in used_fields:
get_value = feature.GetField(fi)
#if missing value np.nan is assigned
if not get_value:
get_value = np.nan
dic[fi].append(get_value)
#save as pd in object
        self.attributes = pd.DataFrame(dic)
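
# Example usage (added): a minimal, hedged sketch of reading a shapefile's
# attribute table with the class above. The path below is hypothetical --
# point it at a real .shp (or .dbf) file before running.
if __name__ == "__main__":
    demo_shp = r"/path/to/some_layer.shp"  # hypothetical path
    tbl = shp_attr_tbl(demo_shp)
    print(tbl.geometrytype, tbl.featurecount)
    print(tbl.fieldnames)
    tbl.get_attr_tbl()  # read all columns into tbl.attributes
    print(tbl.attributes.head())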
import od_lib.definitions.path_definitions as path_definitions
import pandas as pd
import datetime
import os
# output directory
ELECTORAL_TERMS = path_definitions.ELECTORAL_TERMS
save_path = os.path.join(ELECTORAL_TERMS, "electoral_terms.csv")
if not os.path.exists(ELECTORAL_TERMS):
os.makedirs(ELECTORAL_TERMS)
electoral_terms = [
{
"id": 1,
"start_date": (
datetime.datetime.strptime("1949-09-07", "%Y-%m-%d")
- datetime.datetime(1970, 1, 1)
).total_seconds(),
"end_date": (
datetime.datetime.strptime("1953-10-05", "%Y-%m-%d")
- datetime.datetime(1970, 1, 1)
).total_seconds(),
},
{
"id": 2,
"start_date": (
datetime.datetime.strptime("1953-10-06", "%Y-%m-%d")
- datetime.datetime(1970, 1, 1)
).total_seconds(),
"end_date": (
datetime.datetime.strptime("1957-10-14", "%Y-%m-%d")
- datetime.datetime(1970, 1, 1)
).total_seconds(),
},
{
"id": 3,
"start_date": (
datetime.datetime.strptime("1957-10-15", "%Y-%m-%d")
- datetime.datetime(1970, 1, 1)
).total_seconds(),
"end_date": (
datetime.datetime.strptime("1961-10-16", "%Y-%m-%d")
- datetime.datetime(1970, 1, 1)
).total_seconds(),
},
{
"id": 4,
"start_date": (
datetime.datetime.strptime("1961-10-17", "%Y-%m-%d")
- datetime.datetime(1970, 1, 1)
).total_seconds(),
"end_date": (
datetime.datetime.strptime("1965-10-18", "%Y-%m-%d")
- datetime.datetime(1970, 1, 1)
).total_seconds(),
},
{
"id": 5,
"start_date": (
datetime.datetime.strptime("1965-10-19", "%Y-%m-%d")
- datetime.datetime(1970, 1, 1)
).total_seconds(),
"end_date": (
datetime.datetime.strptime("1969-10-19", "%Y-%m-%d")
- datetime.datetime(1970, 1, 1)
).total_seconds(),
},
{
"id": 6,
"start_date": (
datetime.datetime.strptime("1969-10-20", "%Y-%m-%d")
- datetime.datetime(1970, 1, 1)
).total_seconds(),
"end_date": (
datetime.datetime.strptime("1972-12-12", "%Y-%m-%d")
- datetime.datetime(1970, 1, 1)
).total_seconds(),
},
{
"id": 7,
"start_date": (
datetime.datetime.strptime("1972-12-13", "%Y-%m-%d")
- datetime.datetime(1970, 1, 1)
).total_seconds(),
"end_date": (
datetime.datetime.strptime("1976-12-13", "%Y-%m-%d")
- datetime.datetime(1970, 1, 1)
).total_seconds(),
},
{
"id": 8,
"start_date": (
datetime.datetime.strptime("1976-12-14", "%Y-%m-%d")
- datetime.datetime(1970, 1, 1)
).total_seconds(),
"end_date": (
datetime.datetime.strptime("1980-11-03", "%Y-%m-%d")
- datetime.datetime(1970, 1, 1)
).total_seconds(),
},
{
"id": 9,
"start_date": (
datetime.datetime.strptime("1980-11-04", "%Y-%m-%d")
- datetime.datetime(1970, 1, 1)
).total_seconds(),
"end_date": (
datetime.datetime.strptime("1983-03-28", "%Y-%m-%d")
- datetime.datetime(1970, 1, 1)
).total_seconds(),
},
{
"id": 10,
"start_date": (
datetime.datetime.strptime("1983-03-29", "%Y-%m-%d")
- datetime.datetime(1970, 1, 1)
).total_seconds(),
"end_date": (
datetime.datetime.strptime("1987-02-17", "%Y-%m-%d")
- datetime.datetime(1970, 1, 1)
).total_seconds(),
},
{
"id": 11,
"start_date": (
datetime.datetime.strptime("1987-02-18", "%Y-%m-%d")
- datetime.datetime(1970, 1, 1)
).total_seconds(),
"end_date": (
datetime.datetime.strptime("1990-12-19", "%Y-%m-%d")
- datetime.datetime(1970, 1, 1)
).total_seconds(),
},
{
"id": 12,
"start_date": (
datetime.datetime.strptime("1990-12-20", "%Y-%m-%d")
- datetime.datetime(1970, 1, 1)
).total_seconds(),
"end_date": (
datetime.datetime.strptime("1994-11-09", "%Y-%m-%d")
- datetime.datetime(1970, 1, 1)
).total_seconds(),
},
{
"id": 13,
"start_date": (
datetime.datetime.strptime("1994-11-10", "%Y-%m-%d")
- datetime.datetime(1970, 1, 1)
).total_seconds(),
"end_date": (
datetime.datetime.strptime("1998-10-25", "%Y-%m-%d")
- datetime.datetime(1970, 1, 1)
).total_seconds(),
},
{
"id": 14,
"start_date": (
datetime.datetime.strptime("1998-10-26", "%Y-%m-%d")
- datetime.datetime(1970, 1, 1)
).total_seconds(),
"end_date": (
datetime.datetime.strptime("2002-10-16", "%Y-%m-%d")
- datetime.datetime(1970, 1, 1)
).total_seconds(),
},
{
"id": 15,
"start_date": (
datetime.datetime.strptime("2002-10-17", "%Y-%m-%d")
- datetime.datetime(1970, 1, 1)
).total_seconds(),
"end_date": (
datetime.datetime.strptime("2005-10-17", "%Y-%m-%d")
- datetime.datetime(1970, 1, 1)
).total_seconds(),
},
{
"id": 16,
"start_date": (
datetime.datetime.strptime("2005-10-18", "%Y-%m-%d")
- datetime.datetime(1970, 1, 1)
).total_seconds(),
"end_date": (
datetime.datetime.strptime("2009-10-26", "%Y-%m-%d")
- datetime.datetime(1970, 1, 1)
).total_seconds(),
},
{
"id": 17,
"start_date": (
datetime.datetime.strptime("2009-10-27", "%Y-%m-%d")
- datetime.datetime(1970, 1, 1)
).total_seconds(),
"end_date": (
datetime.datetime.strptime("2013-10-21", "%Y-%m-%d")
- datetime.datetime(1970, 1, 1)
).total_seconds(),
},
{
"id": 18,
"start_date": (
datetime.datetime.strptime("2013-10-22", "%Y-%m-%d")
- datetime.datetime(1970, 1, 1)
).total_seconds(),
"end_date": (
datetime.datetime.strptime("2017-10-23", "%Y-%m-%d")
- datetime.datetime(1970, 1, 1)
).total_seconds(),
},
{
"id": 19,
"start_date": (
datetime.datetime.strptime("2017-10-24", "%Y-%m-%d")
- datetime.datetime(1970, 1, 1)
).total_seconds(),
"end_date": (
datetime.datetime.strptime("2021-10-24", "%Y-%m-%d")
- datetime.datetime(1970, 1, 1)
).total_seconds(),
},
]
electoral_terms_df = pd.DataFrame(electoral_terms)
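# Writing the table to the output path prepared at the top of the script is the
# assumed intent of save_path; start/end dates are stored as Unix epoch seconds,
# so they can be recovered later with pd.to_datetime(..., unit="s"), e.g.
# pd.to_datetime(pd.read_csv(save_path)["start_date"], unit="s").
electoral_terms_df.to_csv(save_path, index=False)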
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
# In[2]:
train = pd.read_csv("D:/ML/Dataset/MedicalInsurance/Train-1542865627584.csv")
beneficiary = pd.read_csv("D:/ML/Dataset/MedicalInsurance/Train_Beneficiarydata-1542865627584.csv")
inpatient = pd.read_csv("D:/ML/Dataset/MedicalInsurance/Train_Inpatientdata-1542865627584.csv")
outpatient = pd.read_csv("D:/ML/Dataset/MedicalInsurance/Train_Outpatientdata-1542865627584.csv")
tt = pd.read_csv("D:/ML/Dataset/MedicalInsurance/Test-1542969243754.csv")
tb = pd.read_csv("D:/ML/Dataset/MedicalInsurance/Test_Beneficiarydata-1542969243754.csv")
ti = pd.read_csv("D:/ML/Dataset/MedicalInsurance/Test_Inpatientdata-1542969243754.csv")
to = pd.read_csv("D:/ML/Dataset/MedicalInsurance/Test_Outpatientdata-1542969243754.csv")
# In[3]:
df_procedures1 = pd.DataFrame(columns = ['Procedures'])
df_procedures1['Procedures'] = pd.concat([inpatient["ClmProcedureCode_1"], inpatient["ClmProcedureCode_2"], inpatient["ClmProcedureCode_3"], inpatient["ClmProcedureCode_4"], inpatient["ClmProcedureCode_5"], inpatient["ClmProcedureCode_6"]], axis=0, sort=True).dropna()
df_procedures1['Procedures'].head(10)
# In[4]:
df_procedures1.shape
# In[5]:
grouped_procedure_df = df_procedures1['Procedures'].value_counts()
grouped_procedure_df
# In[6]:
df_diagnosis = pd.DataFrame(columns = ['Diagnosis'])
df_diagnosis['Diagnosis'] = pd.concat([inpatient["ClmDiagnosisCode_1"], inpatient["ClmDiagnosisCode_2"], inpatient["ClmDiagnosisCode_3"], inpatient["ClmDiagnosisCode_4"], inpatient["ClmDiagnosisCode_5"], inpatient["ClmDiagnosisCode_6"], inpatient["ClmDiagnosisCode_7"], inpatient["ClmDiagnosisCode_8"], inpatient["ClmDiagnosisCode_9"], inpatient["ClmDiagnosisCode_10"]], axis=0, sort=True).dropna()
df_diagnosis['Diagnosis'].head(10)
# In[7]:
df_diagnosis.shape
# In[8]:
grouped_diagnosis_df = df_diagnosis['Diagnosis'].value_counts()
grouped_diagnosis_df
# In[9]:
grouped_procedure_df1 = grouped_procedure_df.to_frame()
grouped_procedure_df1
# In[10]:
grouped_procedure_df1.columns = ['count']
grouped_procedure_df1
# In[11]:
grouped_procedure_df1['Procedure'] = grouped_procedure_df1.index
grouped_procedure_df1
# In[12]:
grouped_procedure_df1['Percentage'] = (grouped_procedure_df1['count']/sum(grouped_procedure_df1['count']))*100
grouped_procedure_df1['Percentage']
# In[13]:
grouped_diagnosis_df = grouped_diagnosis_df.to_frame()
grouped_diagnosis_df.columns = ['count']
grouped_diagnosis_df['Diagnosis'] = grouped_diagnosis_df.index
grouped_diagnosis_df['Percentage'] = (grouped_diagnosis_df['count']/sum(grouped_diagnosis_df['count']))*100
grouped_diagnosis_df['Percentage']
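# The count -> percentage transformation above is repeated for several code
# frequency tables in this notebook; a small helper like this sketch (added,
# not used by the cells themselves) captures the pattern:
def frequency_table(codes):
    counts = codes.value_counts().to_frame("count")
    counts["Percentage"] = counts["count"] / counts["count"].sum() * 100
    return counts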
# In[14]:
# taking only top 20
plot_procedure_df1 = grouped_procedure_df1.head(20)
plot_diagnosis_df1 = grouped_diagnosis_df.head(20)
# In[15]:
# Plotting the most commonly used diagnosis and procedures
from matplotlib import pyplot as plt
plot_procedure_df1['Procedure'] = plot_procedure_df1['Procedure'].astype(str)
plot_procedure_df1.sort_values(by=['Percentage'])
plot_procedure_df1.plot(x ='Procedure', y='Percentage', kind='bar', color ='green',
title='Procedure Distribution- Inpatient', figsize=(15,10));
# In[16]:
plot_diagnosis_df1['Diagnosis'] = plot_diagnosis_df1['Diagnosis'].astype(str)
plot_diagnosis_df1.sort_values(by=['Percentage'])
plot_diagnosis_df1.plot(x ='Diagnosis', y='Percentage', kind='bar', color ='green',
title='Diagnosis Distribution- Inpatient', figsize=(15,10));
# In[17]:
df_procedures2 = pd.DataFrame(columns = ['Procedures'])
df_procedures2['Procedures'] = pd.concat([outpatient["ClmProcedureCode_1"], outpatient["ClmProcedureCode_2"], outpatient["ClmProcedureCode_3"], outpatient["ClmProcedureCode_4"], outpatient["ClmProcedureCode_5"], outpatient["ClmProcedureCode_6"]], axis=0, sort=True).dropna()
df_procedures2['Procedures'].head(10)
# In[18]:
grouped_procedure_df2 = df_procedures2['Procedures'].value_counts()
# In[19]:
df_diagnosis2 = pd.DataFrame(columns = ['Diagnosis'])
df_diagnosis2['Diagnosis'] = pd.concat([outpatient["ClmDiagnosisCode_1"], outpatient["ClmDiagnosisCode_2"], outpatient["ClmDiagnosisCode_3"], outpatient["ClmDiagnosisCode_4"], outpatient["ClmDiagnosisCode_5"], outpatient["ClmDiagnosisCode_6"], outpatient["ClmDiagnosisCode_7"], outpatient["ClmDiagnosisCode_8"], outpatient["ClmDiagnosisCode_9"], outpatient["ClmDiagnosisCode_10"]], axis=0, sort=True).dropna()
df_diagnosis2['Diagnosis'].head(10)
grouped_diagnosis_df2 = df_diagnosis2['Diagnosis'].value_counts()
# In[20]:
grouped_procedure_df_op = grouped_procedure_df2.to_frame()
grouped_procedure_df_op.columns = ['count']
grouped_procedure_df_op['Procedure'] = grouped_procedure_df_op.index
grouped_procedure_df_op['Percentage'] = (grouped_procedure_df_op['count']/sum(grouped_procedure_df_op['count']))*100
grouped_procedure_df_op['Percentage']
# In[21]:
grouped_diagnosis_df_op = grouped_diagnosis_df2.to_frame()
grouped_diagnosis_df_op.columns = ['count']
grouped_diagnosis_df_op['Diagnosis'] = grouped_diagnosis_df_op.index
grouped_diagnosis_df_op['Percentage'] = (grouped_diagnosis_df_op['count']/sum(grouped_diagnosis_df_op['count']))*100
grouped_diagnosis_df_op['Percentage']
# In[22]:
# taking only top 20
plot_procedure_df2 = grouped_procedure_df_op.head(20)
plot_diagnosis_df2 = grouped_diagnosis_df_op.head(20)
# In[23]:
# Plotting the most commonly used diagnosis and procedures
from matplotlib import pyplot as plt
plot_procedure_df2['Procedure'] = plot_procedure_df2['Procedure'].astype(str)
plot_procedure_df2.sort_values(by=['Percentage'])
plot_procedure_df2.plot(x ='Procedure', y='Percentage', kind='bar', color ='yellow',
title='Procedure Distribution- Outpatient', figsize=(15,7));
# In[24]:
plot_diagnosis_df2['Diagnosis'] = plot_diagnosis_df2['Diagnosis'].astype(str)
plot_diagnosis_df2.sort_values(by=['Percentage'])
plot_diagnosis_df2.plot(x ='Diagnosis', y='Percentage', kind='bar', color ='yellow',
title='Diagnosis Distribution- Outpatient', figsize=(15,7))
# In[25]:
T_fraud = train['PotentialFraud'].value_counts()
grouped_train_df = T_fraud.to_frame()
grouped_train_df.columns = ['count']
grouped_train_df['Fraud'] = grouped_train_df.index
grouped_train_df['Percentage'] = (grouped_train_df['count']/sum(grouped_train_df['count']))*100
grouped_train_df['Percentage'].plot( kind='bar',color = "blue", title = 'Distribution')
# In[26]:
Train_f = pd.DataFrame(columns = ['PotentialFraud', 'Provider'])
Train_f = train.loc[(train['PotentialFraud'] == 'Yes')]
Train_f
# In[27]:
fraud_provider_ip_df = pd.merge(inpatient, Train_f, how='inner', on='Provider')
fraud_provider_ip_df
# In[28]:
len(fraud_provider_ip_df)
# In[29]:
(len(fraud_provider_ip_df)/len(inpatient)) * 100
# In[30]:
fraud_provider_op_df = pd.merge(outpatient, Train_f, how='inner', on='Provider')
fraud_provider_op_df
# In[31]:
len(fraud_provider_op_df)
# In[32]:
(len(fraud_provider_op_df)/len(outpatient))*100
# In[33]:
df_procedures2 = pd.DataFrame(columns = ['Procedures'])
df_procedures2['Procedures'] = pd.concat([fraud_provider_ip_df["ClmProcedureCode_1"], fraud_provider_ip_df["ClmProcedureCode_2"], fraud_provider_ip_df["ClmProcedureCode_3"], fraud_provider_ip_df["ClmProcedureCode_4"], fraud_provider_ip_df["ClmProcedureCode_5"], fraud_provider_ip_df["ClmProcedureCode_6"]], axis=0, sort=True).dropna()
df_procedures2['Procedures'].head(10)
# In[34]:
grouped_F_procedure_df = df_procedures2['Procedures'].value_counts()
grouped_F_procedure_df
# In[35]:
grouped_F_procedure_df2 = grouped_F_procedure_df.to_frame()
grouped_F_procedure_df2.columns = ['count']
grouped_F_procedure_df2['Procedure'] = grouped_F_procedure_df2.index
grouped_F_procedure_df2['Percentage'] = (grouped_F_procedure_df2['count']/sum(grouped_F_procedure_df2['count']))*100
grouped_F_procedure_df2['Percentage']
# In[36]:
df_diagnosis2 = pd.DataFrame(columns = ['Diagnosis'])
df_diagnosis2['Diagnosis'] = pd.concat([fraud_provider_ip_df["ClmDiagnosisCode_1"], fraud_provider_ip_df["ClmDiagnosisCode_2"], fraud_provider_ip_df["ClmDiagnosisCode_3"], fraud_provider_ip_df["ClmDiagnosisCode_4"], fraud_provider_ip_df["ClmDiagnosisCode_5"], fraud_provider_ip_df["ClmDiagnosisCode_6"], fraud_provider_ip_df["ClmDiagnosisCode_7"], fraud_provider_ip_df["ClmDiagnosisCode_8"], fraud_provider_ip_df["ClmDiagnosisCode_9"], fraud_provider_ip_df["ClmDiagnosisCode_10"]], axis=0, sort=True).dropna()
df_diagnosis2['Diagnosis'].head(10)
# In[37]:
grouped_F_diagnosis_df = df_diagnosis2['Diagnosis'].value_counts()
grouped_F_diagnosis_df
# In[38]:
grouped_F_diagnosis_df2 = grouped_F_diagnosis_df.to_frame()
grouped_F_diagnosis_df2.columns = ['count']
grouped_F_diagnosis_df2['Diagnosis'] = grouped_F_diagnosis_df2.index
grouped_F_diagnosis_df2['Percentage'] = (grouped_F_diagnosis_df2['count']/sum(grouped_F_diagnosis_df2['count']))*100
grouped_F_diagnosis_df2['Percentage']
# In[39]:
plot_F_procedure_df1 = grouped_F_procedure_df2.head(20)
plot_F_diagnosis_df1 = grouped_F_diagnosis_df2.head(20)
# In[40]:
plot_F_procedure_df1.plot(x ='Procedure', y='Percentage', kind = 'bar', color ='g', figsize=(15,7))
# In[41]:
plot_F_diagnosis_df1.plot(x ='Diagnosis', y='Percentage', kind = 'bar', color ='y', figsize=(15,7))
# In[42]:
df_procedures_op2 = pd.DataFrame(columns = ['Procedures'])
df_procedures_op2['Procedures'] = pd.concat([fraud_provider_op_df["ClmProcedureCode_1"], fraud_provider_op_df["ClmProcedureCode_2"], fraud_provider_op_df["ClmProcedureCode_3"], fraud_provider_op_df["ClmProcedureCode_4"], fraud_provider_op_df["ClmProcedureCode_5"], fraud_provider_op_df["ClmProcedureCode_6"]], axis=0, sort=True).dropna()
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 26 17:17:19 2019
@author: sdenaro
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
prices = pd.DataFrame()
#!/usr/bin/env python3
from itertools import product
import numpy as np
import pandas as pd
from scipy import stats
import torch
from tensorqtl import core
from src import logger
def QTL_pairwise(genotypes_df, phenotypes_df, residualizer=None, report_maf=False, return_r_matrix=False):
"""
Wrapper for `tensorqtl.core.calculate_corr` and reimplementation of `tensorqtl.cis.calculate_association`.
Sample names must be axis 0 for each input (i.e. index for pd.Series and columns for pd.DataFrame)
"""
if isinstance(genotypes_df, pd.Series): genotypes_df = genotypes_df.to_frame().T
if isinstance(phenotypes_df, pd.Series): phenotypes_df = phenotypes_df.to_frame().T
assert genotypes_df.columns.equals(phenotypes_df.columns)
# Prepare variables as torch tensors
genotypes_t = torch.tensor(genotypes_df.values, dtype=torch.float).to("cpu")
phenotypes_t = torch.tensor(phenotypes_df.values, dtype=torch.float).to("cpu")
core.impute_mean(genotypes_t)
dof = genotypes_t.shape[1] - 2 if residualizer is None else residualizer.dof
# Compute pairwise correlations and associated stats
r_nominal_t, genotype_var_t, phenotype_var_t = core.calculate_corr(genotypes_t, phenotypes_t, residualizer=residualizer, return_var=True)
std_ratio_t = torch.sqrt(phenotype_var_t.reshape(1,-1) / genotype_var_t.reshape(-1,1))
r_nominal_t = r_nominal_t.squeeze()
r2_nominal_t = r_nominal_t.double().pow(2)
slope_t = r_nominal_t * std_ratio_t.squeeze()
tstat_t = r_nominal_t * torch.sqrt(dof / (1 - r2_nominal_t))
slope_se_t = (slope_t.double() / tstat_t).float()
if return_r_matrix:
return pd.DataFrame(r_nominal_t.numpy(), index=genotypes_df.index, columns=phenotypes_df.index)
# Prepare results as dataframe.
genotype_ids, phenotype_ids = zip(*product(genotypes_df.index, phenotypes_df.index))
results = pd.DataFrame({
"variant_id": genotype_ids,
"phenotype_id": phenotype_ids,
"tstat": tstat_t.flatten().numpy(),
"slope": slope_t.flatten().numpy(),
"slope_se": slope_se_t.flatten().numpy(),
"r": r_nominal_t.flatten().numpy(),
"r2": r2_nominal_t.flatten().numpy()
})
results["pval_nominal"] = 2*stats.t.cdf(-np.abs(results["tstat"]), dof)
if report_maf:
# calculate MAF
n2 = 2 * genotypes_t.shape[1] # total number of alleles
af_t = genotypes_t.sum(1) / n2 # allele frequency
ix_t = af_t <= 0.5
maf_t = torch.where(ix_t, af_t, 1 - af_t) # minor allele frequency
# calculate MA samples and counts
m = genotypes_t > 0.5
a = m.sum(1).int()
b = (genotypes_t < 1.5).sum(1).int()
ma_samples_t = torch.where(ix_t, a, b) # number of samples with a minor allele
a = (genotypes_t * m.float()).sum(1).int()
ma_count_t = torch.where(ix_t, a, n2-a) # total number of minor alleles
results["maf"] = maf_t.flatten().numpy()
results["ma_samples"] = ma_samples_t.flatten().numpy()
results["ma_count"] = ma_count_t.flatten().numpy()
return results.sort_values("pval_nominal")
#--------------------------------------------------------------------------------------------------#
def QTL_diff(genotypes_df, phenotypes_df, covariates_df, condition_s, report_maf=True):
if isinstance(genotypes_df, pd.Series):
genotypes_df = genotypes_df.to_frame().T
if isinstance(phenotypes_df, pd.Series):
phenotypes_df = phenotypes_df.to_frame().T
als_qtl = QTL_pairwise(
genotypes_df = genotypes_df.loc[:,condition_s == "ALS"],
phenotypes_df = phenotypes_df.loc[:,condition_s == "ALS"],
covariates_df = covariates_df.loc[:,condition_s == "ALS"] if covariates_df is not None else None,
report_maf = report_maf
).drop(columns=["ma_count"], errors="ignore")
ctr_qtl = QTL_pairwise(
genotypes_df = genotypes_df.loc[:,condition_s == "CTR"],
phenotypes_df = phenotypes_df.loc[:,condition_s == "CTR"],
covariates_df = covariates_df.loc[:,condition_s == "CTR"] if covariates_df is not None else None,
report_maf = report_maf
).drop(columns=["ma_count"], errors="ignore")
merged_qtl = als_qtl.merge(ctr_qtl, on=["variant_id", "phenotype_id"], suffixes=["_ALS", "_CTR"])
# Compute differential statistics
merged_qtl["z"] = (merged_qtl["slope_ALS"] - merged_qtl["slope_CTR"]) / (merged_qtl["slope_se_ALS"] + merged_qtl["slope_se_CTR"])
merged_qtl["pval"] = stats.norm.sf(abs(merged_qtl["z"]))*2
return merged_qtl
def QTL_by_pairs(G, omic_df, pairs, covariates_df, report_maf=False, condition_s=None):
"""Iterate over either genotype or omic (rather than performing full pairwise)."""
phen_id = pairs.columns[0]
# Iterate over the feature that requires fewer QTL calls
if pairs["variant_id"].nunique() < pairs[phen_id].nunique():
logger.write("Iterating over variants...")
grouped_pairs = pairs.groupby("variant_id")
else:
logger.write("Iterating over phenotypes...")
grouped_pairs = pairs.groupby(phen_id)
results = []
for i,feature in enumerate(grouped_pairs.groups.keys()):
logger.update("{} of {} features tested".format(i,grouped_pairs.ngroups))
df = grouped_pairs.get_group(feature)
genotypes_df = G.get_genotypes(df["variant_id"].unique())
phenotypes_df = omic_df.loc[df[phen_id].unique()]
if condition_s is None:
res = QTL_pairwise(genotypes_df, phenotypes_df, covariates_df, report_maf)
else:
res = QTL_diff(genotypes_df, phenotypes_df, covariates_df, condition_s, report_maf)
results.append(res)
logger.flush()
    return pd.concat(results)
from itertools import product
from typing import Iterator, Optional
import numpy as np
import pandas as pd
from glycan import Glycan, PTMComposition
class Glycoprotein:
"""
A protein with glycans.
:ivar dict glycosylation_sites: glycosylation sites
:ivar int sites: number of glycosylation sites
.. automethod:: __init__
.. automethod:: __str__
"""
def __init__(self,
sites: int,
library: Optional[pd.DataFrame]=None) -> None:
"""
Create a new glycoprotein.
:param int sites: number of glycosylation sites
:param pd.DataFrame library: dataframe describing a glycan library;
            must contain two columns (name and composition)
:return: nothing
:rtype: None
"""
self.sites = sites
self.glycan_library = []
if library is not None:
for _, row in library.iterrows():
                if pd.isnull(row.iloc[1]):
                    # composition column is empty; the original handling is not shown
                    # in this excerpt -- skipping the row is an assumed placeholder
                    continue
# ' % kmergrammar
# ' % <NAME> mm2842
# ' % 15th May 2017
# ' # Introduction
# ' Some of the code below is still under active development
# ' ## Required libraries
# + name = 'import_libraries', echo=False
import os
import sys
import numpy as np
import pandas as pd
import sqlalchemy
import logging
import time
from math import log
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from itertools import product
from sklearn import metrics
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.externals import joblib
# + name= 'hello_world', echo=False
def hello_world():
print("Aksunai qanuipit!")
def createKmerSet(kmersize):
"""
write all possible kmers
:param kmersize: integer, 8
:return uniq_kmers: list of sorted unique kmers
"""
kmerSet = set()
nucleotides = ["a", "c", "g", "t"]
kmerall = product(nucleotides, repeat=kmersize)
for i in kmerall:
kmer = ''.join(i)
kmerSet.add(kmer)
uniq_kmers = sorted(list(kmerSet))
return uniq_kmers
def compute_kmer_entropy(kmer):
"""
compute shannon entropy for each kmer
:param kmer: string
:return entropy: float
"""
prob = [float(kmer.count(c)) / len(kmer) for c in dict.fromkeys(list(kmer))]
entropy = - sum([p * log(p) / log(2.0) for p in prob])
return round(entropy, 2)
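# Worked example (added): entropy is 0.0 for a homopolymer and maximal (2.0 bits)
# when all four nucleotides are equally frequent, which is what the
# low-complexity filter in make_stopwords() below relies on:
#   compute_kmer_entropy("aaaaaaaa")  -> 0.0
#   compute_kmer_entropy("aaaacccc")  -> 1.0
#   compute_kmer_entropy("acgtacgt")  -> 2.0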
def make_stopwords(kmersize):
"""
write filtered out kmers
:param kmersize: integer, 8
:return stopwords: list of sorted low-complexity kmers
"""
kmersize_filter = {5: 1.3, 6: 1.3, 7: 1.3, 8: 1.3, 9: 1.3, 10: 1.3}
limit_entropy = kmersize_filter.get(kmersize)
kmerSet = set()
nucleotides = ["a", "c", "g", "t"]
kmerall = product(nucleotides, repeat=kmersize)
for n in kmerall:
kmer = ''.join(n)
if compute_kmer_entropy(kmer) < limit_entropy:
kmerSet.add(make_newtoken(kmer))
else:
continue
stopwords = sorted(list(kmerSet))
return stopwords
def createNewtokenSet(kmersize):
"""
write all possible newtokens
:param kmersize: integer, 8
:return uniq_newtokens: list of sorted unique newtokens
"""
newtokenSet = set()
uniq_kmers = createKmerSet(kmersize)
for kmer in uniq_kmers:
newtoken = make_newtoken(kmer)
newtokenSet.add(newtoken)
uniq_newtokens = sorted(list(newtokenSet))
return uniq_newtokens
def make_newtoken(kmer):
"""
write a collapsed kmer and kmer reverse complementary as a newtoken
:param kmer: string e.g., "AT"
:return newtoken: string e.g., "atnta"
:param kmer: string e.g., "TA"
:return newtoken: string e.g., "atnta"
"""
kmer = str(kmer).lower()
newtoken = "n".join(sorted([kmer, kmer.translate(str.maketrans('tagc', 'atcg'))[::-1]]))
return newtoken
def write_ngrams(sequence):
"""
write a bag of newtokens of size n
:param sequence: string e.g., "ATCG"
:param (intern) kmerlength e.g., 2
:return newtoken_string: string e.g., "atnta" "gatc" "cgcg"
"""
seq = str(sequence).lower()
finalstart = (len(seq) - kmerlength) + 1
allkmers = [seq[start:(start + kmerlength)] for start in range(0, finalstart)]
tokens = [make_newtoken(kmer) for kmer in allkmers if len(kmer) == kmerlength and "n" not in kmer]
newtoken_string = " ".join(tokens)
return newtoken_string
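# Illustrative sketch (added): how sequences collapse to strand-symmetric tokens.
# write_ngrams() reads the module-level `kmerlength`, which is only set from the
# command line further below, so this helper is not invoked automatically.
def _demo_tokens():
    print(make_newtoken("AT"))       # a k-mer and its reverse complement map to one token
    print(write_ngrams("ATCGATCG"))  # bag of newtokens for a short sequence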
def save_plot_prc(precision, recall, avg_prec, figure_file, name):
"""
make plot for precission recall
:param precission: precission
:param recall: recall
:param avg_prec: avg_prec
:param figure_file: figure_file
:param name: name
:return plot precission recall curve
"""
plt.clf()
title = 'Precision Recall Curve - double strand ' + name
plt.title(title)
    plt.plot(recall, precision, label='Precision = %0.2f' % avg_prec)
plt.legend(loc='lower right')
plt.plot([0, 1], [0, 1], 'r--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.savefig(figure_file)
def save_plot_roc(false_positive_rate, true_positive_rate, roc_auc, figure_file, name):
"""
make plot for roc_auc
:param false_positive_rate: false_positive_rate
:param true_positive_rate: true_positive_rate
:param roc_auc: roc_auc
:param figure_file: figure_file
:param name: name
:return roc_auc
"""
plt.clf()
title = 'Receiver Operating Characteristic - double strand ' + name
plt.title(title)
plt.plot(false_positive_rate, true_positive_rate, 'b', label='AUC = %0.2f' % roc_auc)
plt.legend(loc='lower right')
plt.plot([0, 1], [0, 1], 'r--')
plt.xlim([-0.1, 1.2])
plt.ylim([-0.1, 1.2])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.savefig(figure_file)
if sys.argv[1] == "-help":
print(
"Usage: python kgrammar_bag-of-k-mer_training_testing.py [kmersize, integer] [mode filtered, 'True', or 'False' (i.e., mode full)] [dataset_name, string]")
print("Example: python kgrammar_bag-of-k-mer_training_testing.py 8 False FEA4")
quit()
else:
kmersize = sys.argv[1] # e.g 8
if sys.argv[2] == 'True':
filtered = True
full = False
mode = "_mode_filtered_"
elif sys.argv[2] == 'False':
filtered = False
full = True
mode = "_mode_full_"
dataset_name = sys.argv[3] # e.g "KN1"
kmerlength = int(kmersize)
newtoken_size = 1 + (kmerlength * 2)
pathname = os.path.dirname(sys.argv[0])
WORKING_DIR = os.path.abspath(pathname)
all_tokens = createNewtokenSet(kmerlength)
if kmerlength > 4:
stpwrds = make_stopwords(kmerlength)
else:
filtered = False
full = True
mode = "_mode_full_"
print("for k < 5 only full mode is available!")
expected_tokens = len(all_tokens)
run_id = str(int(time.time()))
file_name = WORKING_DIR + '/output/bag-of-k-mers/' + dataset_name + '/kgrammar_bag-of-k-mers_model_' + run_id + '_' + dataset_name + '_' + str(
kmerlength) + '_' + mode + '.txt'
logging.basicConfig(level=logging.INFO, filename=file_name, filemode="a+",
format="%(asctime)-15s %(levelname)-8s %(message)s")
logging.info("kmer_grammar_bag-of-k-mers RUN ID")
logging.info(run_id)
logging.info("WORKING_DIR")
logging.info(WORKING_DIR)
logging.info("input: kmerlength")
logging.info(str(kmersize))
logging.info("input: dataset")
logging.info(str(dataset_name))
logging.info("input: filtered")
logging.info(filtered)
inengine = 'sqlite:///' + WORKING_DIR + '/input_databases/' + dataset_name + '/data_model.db'
dbcon = sqlalchemy.create_engine(inengine)
logging.info(inengine)
print('*' * 80)
print("Kgrammer run id: ", run_id)
print("-d %s -k %d -filtered %s" % (dataset_name, kmerlength, str(sys.argv[2])))
trainquery = "SELECT * FROM train ORDER BY RANDOM()"
dftrain = pd.read_sql_query(trainquery, dbcon)
dftrain.columns = ["chr_num", "left_idx", "right_idx", "dna_string", "bound"]
print("training set is ready")
testquery = "SELECT * FROM test ORDER BY RANDOM()"
dftest = pd.read_sql_query(testquery, dbcon)
dftest.columns = ["chr_num", "left_idx", "right_idx", "dna_string", "bound"]
print("test set is ready")
print("Collecting tokens")
dftrain["tokens"] = dftrain["dna_string"].apply(write_ngrams)
dftest["tokens"] = dftest["dna_string"].apply(write_ngrams)
train_tokens = dftrain["tokens"].tolist()
test_tokens = dftest["tokens"].tolist()
print("Collecting labels")
train_labels = dftrain["bound"].tolist()
test_labels = dftest["bound"].tolist()
unique_train_labels = len(list(set(train_labels)))
unique_test_labels = len(list(set(test_labels)))
# Check that labels are as many as expected for binary classification
if unique_train_labels < 2 or unique_test_labels < 2:
print("ERROR: Expected 2 train and test labels. Got %d train labels and %d test labels" % (
unique_train_labels, unique_test_labels))
logging.info("Unique train labels = %d" % unique_train_labels)
logging.info("Unique test labels = %d" % unique_test_labels)
print("log file: " + WORKING_DIR + '/' + file_name)
quit()
Y_DEV = np.asarray(train_labels)
Y_holdout = np.asarray(test_labels)
print("Building a vocabulary from tokens")
tmpvectorizer = TfidfVectorizer(min_df=1, max_df=1.0, sublinear_tf=True, use_idf=True)
X_TFIDF_ALL = tmpvectorizer.fit_transform(all_tokens) # newtoken sequences to numeric index.
vcblry = tmpvectorizer.get_feature_names()
if full:
print("keeping all low-complexity k-mers")
kmer_names = vcblry
feature_names = np.asarray(kmer_names) # key transformation to use the fancy index into the report
else:
print("removing %d low-complexity k-mers" % len(stpwrds))
kmer_names = [x for x in vcblry if x not in stpwrds]
feature_names = np.asarray(kmer_names) # key transformation to use the fancy index into the report
# Check that tokens are as many as expected math.pow(4, kmerlength)/2
if len(kmer_names) > expected_tokens:
print("ERROR: Expected %d tokens. Obtained %d tokens" % (expected_tokens, len(kmer_names)))
logging.info("Expecting %d tokens" % expected_tokens)
logging.info("Feature index contains %d tokens" % len(kmer_names))
logging.info("ERROR: expected %d tokens, got %d tokens" % (expected_tokens, len(kmer_names)))
logging.info("ERROR: More features than expected!")
print("log file: " + WORKING_DIR + '/' + file_name)
quit()
else:
print("Expected %d tokens. Obtained %d tokens" % (expected_tokens, len(kmer_names)))
logging.info("Feature index contains %d tokens" % len(kmer_names))
print("Extracting features from the training data using TfidfVectorizer")
vectorizer = TfidfVectorizer(min_df=1, max_df=1.0, sublinear_tf=True, use_idf=True,
vocabulary=kmer_names) # vectorizer for kmer frequencies
X_TFIDF_DEV = vectorizer.fit_transform(train_tokens)
print("train_samples: %d, n_features: %d" % X_TFIDF_DEV.shape)
print("Positive n_labels: %d Negative n_labels: %d" % (train_labels.count(0), train_labels.count(1)))
logging.info("Train dataset")
logging.info("n_samples: %d, n_features: %d" % X_TFIDF_DEV.shape)
logging.info("Positive n_labels: %d Negative n_labels: %d" % (test_labels.count(0), test_labels.count(1)))
print("Extracting features from the holdout data using TfidfVectorizer")
X_TFIDF_test = vectorizer.fit_transform(test_tokens)
print("test_samples: %d, n_features: %d" % X_TFIDF_test.shape)
print("Positive n_labels: %d Negative n_labels: %d" % (train_labels.count(0), train_labels.count(1)))
logging.info("Test dataset")
logging.info("n_samples: %d, n_features: %d" % X_TFIDF_test.shape)
logging.info("Positive n_labels: %d Negative n_labels: %d" % (test_labels.count(0), test_labels.count(1)))
print("Fiting a LogisticRegression (LR) model to the training set")
TFIDF_LR = LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True,
intercept_scaling=1, max_iter=100, multi_class='ovr', n_jobs=1,
penalty='l2', random_state=None, solver='liblinear', tol=0.0001,
verbose=0, warm_start=False)
logging.info(TFIDF_LR)
TFIDF_LR.fit(X_TFIDF_DEV, Y_DEV)
print("Predicting labels for holdout set")
LR_hold_TFIDF_pred = TFIDF_LR.predict(X_TFIDF_test) # y_pred
LR_hold_TFIDF_prob = TFIDF_LR.predict_proba(X_TFIDF_test)[:, 1] # y_score
print("Evaluating model")
print(metrics.classification_report(Y_holdout, LR_hold_TFIDF_pred)) # y_true, y_pred
print("Accuracy")
print(metrics.accuracy_score(Y_holdout, LR_hold_TFIDF_pred)) # y_true, y_pred
logging.info("LR evaluation")
logging.info(metrics.classification_report(Y_holdout, LR_hold_TFIDF_pred)) # y_true, y_pred
logging.info("Accuracy")
logging.info(metrics.accuracy_score(Y_holdout, LR_hold_TFIDF_pred)) # y_true, y_pred
logging.info("ROC_AUC")
logging.info(metrics.roc_auc_score(Y_holdout, LR_hold_TFIDF_prob)) # y_true, y_score
fpr, tpr, thresholds = metrics.roc_curve(Y_holdout, LR_hold_TFIDF_prob, pos_label=1)
roc_auc = metrics.auc(fpr, tpr)
roc_figure_file = WORKING_DIR + "/output/bag-of-k-mers/" + dataset_name + "/kgrammar_bag-of-k-mer_model_roc_" + mode + dataset_name + "_" + kmersize + "_" + run_id + ".png"
save_plot_roc(fpr, tpr, roc_auc, roc_figure_file, dataset_name)
precision, recall, thresholds = metrics.precision_recall_curve(Y_holdout, LR_hold_TFIDF_prob, pos_label=1)
avg_prc = metrics.average_precision_score(Y_holdout, LR_hold_TFIDF_prob)
prc_figure_file = WORKING_DIR + "/output/bag-of-k-mers/" + dataset_name + "/kgrammar_bag-of-k-mer_model_prc" + mode + dataset_name + "_" + kmersize + "_" + run_id + ".png"
save_plot_prc(precision, recall, avg_prc, prc_figure_file, dataset_name)
# Export the kmer weights from the LR classifier to a sqlite3 database
if hasattr(TFIDF_LR, 'coef_'):
top = np.argsort(TFIDF_LR.coef_[0])[-5:] # select the top 5 index
botton = np.argsort(TFIDF_LR.coef_[0])[:5] # select the bottom 5 index
logging.info("database table LR_results")
logging.info("top 5 positive kmers")
logging.info(" ".join([i.split('n')[0].upper() for i in feature_names[top]]))
logging.info(" ".join([i.split('n')[1].upper() for i in feature_names[top]]))
logging.info("top 5 negative kmers")
logging.info(" ".join([i.split('n')[0].upper() for i in feature_names[botton]]))
logging.info(" ".join([i.split('n')[1].upper() for i in feature_names[botton]]))
print("Saving data to database table LR_results")
print('*' * 80)
print("%s: %s" % ("pos kmers", " ".join([i.split('n')[0].upper() for i in feature_names[top]])))
print("%s: %s" % ("pos kmers", " ".join([i.split('n')[1].upper() for i in feature_names[top]])))
print() # making room
print("%s: %s" % ("neg kmers", " ".join([i.split('n')[0] for i in feature_names[botton]])))
print("%s: %s" % ("neg kmers", " ".join([i.split('n')[1] for i in feature_names[botton]])))
print('*' * 80)
print() # making room
LR_weights = []
for idx, kmer_score in enumerate(TFIDF_LR.coef_[0]):
features = feature_names[idx].split('n')
LR_weights.append({'kmer': features[0].upper(), 'revcomp': features[1].upper(), 'score': kmer_score})
LR_weights_feature = | pd.DataFrame(LR_weights) | pandas.DataFrame |
import pandas as pd
from frozendict import frozendict
from copy import copy
import uuid
from pm4pymdl.objects.mdl.exporter import exporter as mdl_exporter
class Shared:
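    # Shared in-memory caches: per-table object records plus the cross-table key mappings
    # (document -> related document/item ids) consumed later by write_events() to explode events.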
TSTCT = {}
EKBE_belnr_ebeln = {}
EKPO_matnr_ebeln = {}
EKPO_ebeln_banfn = {}
EKPO_ebeln_ebelp = {}
EKPO_objects = list()
MSEG_mblnr_matnr = {}
MSEG_mblnr_zeile = {}
MSEG_mblnr_ebeln = {}
MSEG_mblnr_ebeln_ebelp = {}
MSEG_objects = list()
RSEG_belnr_matnr = {}
RSEG_belnr_ebeln_ebelp = {}
RSEG_objects = list()
BSEG_belnr_augbl = {}
BSEG_belnr_buzei = {}
BSEG_objects = list()
MARA_objects = list()
LFA1_objects = list()
EBAN_events = {}
EKKO_events = {}
MKPF_events = {}
RBKP_events = {}
BKPF_events = {}
events = []
def read_tstct():
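    # TSTCT: SAP transaction code texts; builds the TCODE -> description lookup used by get_activity().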
df = pd.read_csv("TSTCT.tsv", sep="\t", dtype={"SPRSL": str, "TCODE": str, "TTEXT": str})
    stream = df.to_dict('records')
for el in stream:
Shared.TSTCT[el["TCODE"]] = el["TTEXT"]
def get_activity(tcode):
if tcode in Shared.TSTCT:
return Shared.TSTCT[tcode]
def read_ekbe():
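    # EKBE: SAP purchasing document history; maps a follow-on document number (BELNR) to its purchase orders (EBELN).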
df = pd.read_csv("EKBE.tsv", sep="\t", dtype={"EBELN": str, "EBELP": str, "BELNR": str})
    stream = df.to_dict('records')
for el in stream:
if str(el["BELNR"]).lower() != "nan":
if not el["BELNR"] in Shared.EKBE_belnr_ebeln:
Shared.EKBE_belnr_ebeln[el["BELNR"]] = set()
Shared.EKBE_belnr_ebeln[el["BELNR"]].add(el["EBELN"])
def read_ekpo():
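    # EKPO: SAP purchase order items; collects item objects and the material/requisition/item mappings per purchase order.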
df = pd.read_csv("EKPO.tsv", sep="\t",
dtype={"EBELN": str, "EBELP": str, "MATNR": str, "BANFN": str, "BNFPO": str, "NETPR": float,
"PEINH": float, "NETWR": float, "BTWR": float})
stream = df.to_dict('r')
for el in stream:
if str(el["MATNR"]).lower() != "nan":
if not el["MATNR"] in Shared.EKPO_matnr_ebeln:
Shared.EKPO_matnr_ebeln[el["MATNR"]] = set()
Shared.EKPO_matnr_ebeln[el["MATNR"]].add(el["EBELN"])
if str(el["BANFN"]).lower() != "nan":
if not el["EBELN"] in Shared.EKPO_ebeln_banfn:
                Shared.EKPO_ebeln_banfn[el["EBELN"]] = set()
            Shared.EKPO_ebeln_banfn[el["EBELN"]].add(el["BANFN"])
if not el["EBELN"] in Shared.EKPO_ebeln_ebelp:
Shared.EKPO_ebeln_ebelp[el["EBELN"]] = set()
Shared.EKPO_ebeln_ebelp[el["EBELN"]].add(el["EBELN"] + "_" + el["EBELP"])
Shared.EKPO_objects.append(
{"object_id": el["EBELN"] + "_" + el["EBELP"], "object_type": "EBELN_EBELP", "object_matnr": el["MATNR"],
"object_netpr": el["NETPR"], "object_peinh": el["PEINH"], "object_netwr": el["NETWR"],
"object_brtwr": el["BRTWR"], "object_table": "EKPO", "object_ebeln": el["EBELN"],
"object_ebelp": el["EBELP"]})
print("read ekpo")
def read_mseg():
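    # MSEG: SAP material document items (goods movements); collects item objects and their links to materials and purchase order items.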
df = pd.read_csv("MSEG.tsv", sep="\t",
dtype={"MBLNR": str, "ZEILE": str, "MATNR": str, "LIFNR": str, "KUNNR": str, "EBELN": str,
"EBELP": str})
    stream = df.to_dict('records')
for el in stream:
if str(el["MATNR"]).lower() != "nan":
if not el["MBLNR"] in Shared.MSEG_mblnr_matnr:
Shared.MSEG_mblnr_matnr[el["MBLNR"]] = set()
Shared.MSEG_mblnr_matnr[el["MBLNR"]].add(el["MATNR"])
if not el["MBLNR"] in Shared.MSEG_mblnr_zeile:
Shared.MSEG_mblnr_zeile[el["MBLNR"]] = set()
Shared.MSEG_mblnr_zeile[el["MBLNR"]].add(el["MBLNR"] + "_" + el["ZEILE"])
if str(el["EBELN"]).lower() != "nan" and str(el["EBELP"]).lower() != "nan":
if not el["MBLNR"] in Shared.MSEG_mblnr_ebeln:
Shared.MSEG_mblnr_ebeln[el["MBLNR"]] = set()
Shared.MSEG_mblnr_ebeln[el["MBLNR"]].add(el["EBELN"])
if not el["MBLNR"] in Shared.MSEG_mblnr_ebeln_ebelp:
Shared.MSEG_mblnr_ebeln_ebelp[el["MBLNR"]] = set()
Shared.MSEG_mblnr_ebeln_ebelp[el["MBLNR"]].add(el["EBELN"] + "_" + el["EBELP"])
Shared.MSEG_objects.append(
{"object_id": el["MBLNR"] + "_" + el["ZEILE"], "object_type": "MBLNR_ZEILE", "object_matnr": el["MATNR"],
"object_lifnr": el["LIFNR"], "object_kunnr": el["KUNNR"], "object_table": "MSEG",
"object_mblnr": el["MBLNR"], "object_zeile": el["ZEILE"]})
print("read mseg")
def read_rseg():
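    # RSEG: SAP incoming invoice items; collects item objects and their links to materials and purchase order items.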
df = pd.read_csv("RSEG.tsv", sep="\t",
dtype={"BELNR": str, "EBELN": str, "EBELP": str, "MATNR": str, "WRBTR": float})
    stream = df.to_dict('records')
for el in stream:
if str(el["MATNR"]).lower() != "nan":
if not el["BELNR"] in Shared.RSEG_belnr_matnr:
Shared.RSEG_belnr_matnr[el["BELNR"]] = set()
Shared.RSEG_belnr_matnr[el["BELNR"]].add(el["MATNR"])
if not el["BELNR"] in Shared.RSEG_belnr_ebeln_ebelp:
Shared.RSEG_belnr_ebeln_ebelp[el["BELNR"]] = set()
Shared.RSEG_belnr_ebeln_ebelp[el["BELNR"]].add(el["BELNR"] + "_" + el["EBELN"] + "_" + el["EBELP"])
Shared.RSEG_objects.append(
{"object_id": el["BELNR"] + "_" + el["EBELN"] + "_" + el["EBELP"], "object_type": "BELNR_EBELN_EBELP",
"object_matnr": el["MATNR"], "object_wrbtr": el["WRBTR"], "object_table": "RSEG",
"object_belnr": el["BELNR"], "object_ebeln": el["EBELN"], "object_ebelp": el["EBELP"],
"object_ebeln_ebelp": el["EBELN"] + "_" + el["EBELP"]})
print("read rseg")
def read_bseg():
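    # BSEG: SAP accounting document segments; collects line-item objects and the document -> clearing document (AUGBL) mapping.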
df = pd.read_csv("BSEG.tsv", sep="\t", dtype={"BELNR": str, "BUZEI": str, "AUGBL": str, "WRBTR": str})
    stream = df.to_dict("records")
for el in stream:
if str(el["AUGBL"]).lower() != "nan":
if not el["BELNR"] in Shared.BSEG_belnr_augbl:
Shared.BSEG_belnr_augbl[el["BELNR"]] = set()
Shared.BSEG_belnr_augbl[el["BELNR"]].add(el["AUGBL"])
if not el["BELNR"] in Shared.BSEG_belnr_buzei:
Shared.BSEG_belnr_buzei[el["BELNR"]] = set()
Shared.BSEG_belnr_buzei[el["BELNR"]].add(el["BELNR"] + "_" + el["BUZEI"])
Shared.BSEG_objects.append(
{"object_id": el["BELNR"] + "_" + el["BUZEI"], "object_type": "BELNR_BUZEI", "object_augbl": el["AUGBL"],
"object_wrbtr": el["WRBTR"], "object_table": "BSEG", "object_belnr": el["BELNR"],
"object_buzei": el["BUZEI"]})
print("read bseg")
def read_mara():
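    # MARA: SAP general material master data; one object per material (MATNR).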
df = pd.read_csv("MARA.tsv", sep="\t",
dtype={"MATNR": str, "ERSDA": str, "ERNAM": str, "MBRSH": str, "MATKL": str, "NTGEW": str,
"VOLUMN": str, "TRAGR": str})
# MATNR str
# ERSDA str
# ERNAM str
# MBRSH str
# MATKL str
# NTGEW str
# VOLUMN str
# TRAGR str
    stream = df.to_dict("records")
for el in stream:
Shared.MARA_objects.append(
{"object_id": el["MATNR"], "object_type": "MATNR", "object_table": "MARA", "object_ersda": el["ERSDA"],
"object_mbrsh": el["MBRSH"], "object_matkl": el["MATKL"], "object_ntgew": el["NTGEW"],
"object_volum": el["VOLUM"], "object_tragr": el["TRAGR"]})
print("read mara")
def read_lfa1():
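    # LFA1: SAP vendor master (general section); one object per vendor (LIFNR).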
df = pd.read_csv("LFA1.tsv", sep="\t", dtype={"LIFNR": str, "LAND1": str, "NAME1": str, "ORT01": str, "REGIO": str})
    stream = df.to_dict("records")
for el in stream:
Shared.LFA1_objects.append(
{"object_id": el["LIFNR"], "object_type": "LIFNR", "object_table": "LFA1", "object_land1": el["LAND1"],
"object_name1": el["NAME1"], "object_ort01": el["ORT01"], "object_regio": el["REGIO"]})
print("read lfa1")
def read_eban():
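    # EBAN: SAP purchase requisitions; emits a "Create Purchase Requisition" (ME51N) event per row, grouped by requisition (BANFN).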
df = pd.read_csv("EBAN.tsv", sep="\t", dtype={"BANFN": str, "BNFPO": str, "ERNAM": str, "ERDAT": str, "MATNR": str})
df["ERDAT"] = pd.to_datetime(df["ERDAT"], format="%d.%m.%Y", errors='coerce')
    stream = df.to_dict('records')
for el in stream:
if not el["BANFN"] in Shared.EBAN_events:
Shared.EBAN_events[el["BANFN"]] = list()
Shared.EBAN_events[el["BANFN"]].append({"event_activity": get_activity("ME51N"), "event_timestamp": el["ERDAT"],
"event_table": "EBAN"})
Shared.EBAN_events[el["BANFN"]] = sorted(Shared.EBAN_events[el["BANFN"]], key=lambda x: x["event_timestamp"])
def read_ekko():
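    # EKKO: SAP purchase order headers; emits a "Create Purchase Order" (ME21N) event per row, grouped by order (EBELN).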
df = pd.read_csv("EKKO.tsv", sep="\t", dtype={"EBELN": str, "AEDAT": str, "ERNAM": str, "LIFNR": str})
df["AEDAT"] = pd.to_datetime(df["AEDAT"], format="%d.%m.%Y", errors='coerce')
    stream = df.to_dict('records')
for el in stream:
if not el["EBELN"] in Shared.EKKO_events:
Shared.EKKO_events[el["EBELN"]] = list()
Shared.EKKO_events[el["EBELN"]].append(
{"event_activity": get_activity("ME21N"), "event_timestamp": el["AEDAT"], "event_table": "EKKO"})
Shared.EKKO_events[el["EBELN"]] = sorted(Shared.EKKO_events[el["EBELN"]], key=lambda x: x["event_timestamp"])
def read_mkpf():
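    # MKPF: SAP material document headers; emits goods-movement events named after the recorded transaction code.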
df = pd.read_csv("MKPF.tsv", sep="\t",
dtype={"MBLNR": str, "CPUDT": str, "CPUTM": str, "USNAM": str, "TCODE": str, "TCODE2": str})
df["event_timestamp"] = df["CPUDT"] + " " + df["CPUTM"]
df["event_timestamp"] = pd.to_datetime(df["event_timestamp"], format="%d.%m.%Y %H:%M:%S", errors='coerce')
    stream = df.to_dict('records')
for el in stream:
if str(el["TCODE"]).lower() != "nan":
if not el["MBLNR"] in Shared.MKPF_events:
Shared.MKPF_events[el["MBLNR"]] = list()
Shared.MKPF_events[el["MBLNR"]].append(
{"event_activity": get_activity(el["TCODE"]), "event_timestamp": el["event_timestamp"],
"event_resource": el["USNAM"], "event_table": "MKPF"})
Shared.MKPF_events[el["MBLNR"]] = sorted(Shared.MKPF_events[el["MBLNR"]],
key=lambda x: x["event_timestamp"])
def read_rbkp():
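    # RBKP: SAP invoice receipt headers; emits invoice events named after the recorded transaction code.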
df = pd.read_csv("RBKP.tsv", sep="\t", dtype={"BELNR": str, "USNAM": str, "TCODE": str, "CPUDT": str, "CPUTM": str})
df["event_timestamp"] = df["CPUDT"] + " " + df["CPUTM"]
df["event_timestamp"] = pd.to_datetime(df["event_timestamp"], format="%d.%m.%Y %H:%M:%S", errors='coerce')
    stream = df.to_dict('records')
for el in stream:
if str(el["TCODE"]).lower() != "nan":
if not el["BELNR"] in Shared.RBKP_events:
Shared.RBKP_events[el["BELNR"]] = list()
Shared.RBKP_events[el["BELNR"]].append(
{"event_activity": get_activity(el["TCODE"]), "event_timestamp": el["event_timestamp"],
"event_resource": el["USNAM"], "event_table": "RBKP"})
Shared.RBKP_events[el["BELNR"]] = sorted(Shared.RBKP_events[el["BELNR"]],
key=lambda x: x["event_timestamp"])
def read_bkpf():
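    # BKPF: SAP accounting document headers, restricted to documents whose number also appears in RBKP; emits posting events named after the recorded transaction code.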
df = pd.read_csv("BKPF.tsv", sep="\t", dtype={"BELNR": str, "CPUDT": str, "CPUTM": str, "USNAM": str, "TCODE": str})
rbkp_df = pd.read_csv("RBKP.tsv", sep="\t",
dtype={"BELNR": str, "USNAM": str, "TCODE": str, "CPUDT": str, "CPUTM": str})
df = df[df["BELNR"].isin(rbkp_df["BELNR"])]
df["event_timestamp"] = df["CPUDT"] + " " + df["CPUTM"]
df["event_timestamp"] = pd.to_datetime(df["event_timestamp"], format="%d.%m.%Y %H:%M:%S", errors='coerce')
    stream = df.to_dict('records')
for el in stream:
if str(el["TCODE"]).lower() != "nan":
if not el["BELNR"] in Shared.BKPF_events:
Shared.BKPF_events[el["BELNR"]] = list()
Shared.BKPF_events[el["BELNR"]].append(
{"event_activity": get_activity(el["TCODE"]), "event_timestamp": el["event_timestamp"],
"event_resource": el["USNAM"], "event_table": "BKPF"})
Shared.BKPF_events[el["BELNR"]] = sorted(Shared.BKPF_events[el["BELNR"]],
key=lambda x: x["event_timestamp"])
def write_events():
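    # Flatten the per-table event dictionaries into Shared.events: every event row is copied once
    # per related object identifier (EBELN, BANFN, MATNR, ...), producing the "exploded" event table
    # expected by the MDL/OCEL exporter.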
for evk in Shared.EBAN_events:
evs = Shared.EBAN_events[evk]
i = 0
while i < len(evs):
ev = evs[i]
ev["event_id"] = str(uuid.uuid4())
nev = copy(ev)
nev["BANFN"] = evk
Shared.events.append(nev)
i = i + 1
for evk in Shared.EKKO_events:
evs = Shared.EKKO_events[evk]
i = 0
while i < len(evs):
ev = evs[i]
ev["event_id"] = str(uuid.uuid4())
nev = copy(ev)
nev["EBELN"] = evk
Shared.events.append(nev)
if i == 0:
if evk in Shared.EKPO_ebeln_banfn:
for it in Shared.EKPO_ebeln_banfn[evk]:
nev = copy(ev)
nev["BANFN"] = it
Shared.events.append(nev)
if evk in Shared.EKPO_ebeln_ebelp:
for it in Shared.EKPO_ebeln_ebelp[evk]:
nev = copy(ev)
nev["EBELN_EBELP"] = it
Shared.events.append(nev)
if i == len(evs) - 1:
if evk in Shared.EKBE_belnr_ebeln:
for doc in Shared.EKBE_belnr_ebeln[evk]:
nev = copy(ev)
nev["MBLNR"] = doc
Shared.events.append(nev)
i = i + 1
for evk in Shared.MKPF_events:
evs = Shared.MKPF_events[evk]
i = 0
while i < len(evs):
ev = evs[i]
ev["event_id"] = str(uuid.uuid4())
nev = copy(ev)
nev["MBLNR"] = evk
Shared.events.append(nev)
if i == 0:
"""
if evk in Shared.EKPO_matnr_ebeln:
for ord in Shared.EKPO_matnr_ebeln[evk]:
nev = copy(ev)
nev["EBELN"] = ord
Shared.events.append(nev)
"""
if evk in Shared.MSEG_mblnr_ebeln:
for it in Shared.MSEG_mblnr_ebeln[evk]:
nev = copy(ev)
nev["EBELN"] = it
Shared.events.append(nev)
if evk in Shared.MSEG_mblnr_ebeln_ebelp:
for it in Shared.MSEG_mblnr_ebeln_ebelp[evk]:
nev = copy(ev)
nev["EBELN_EBELP"] = it
Shared.events.append(nev)
if evk in Shared.MSEG_mblnr_matnr:
for mat in Shared.MSEG_mblnr_matnr[evk]:
nev = copy(ev)
nev["MATNR"] = mat
Shared.events.append(nev)
if evk in Shared.MSEG_mblnr_zeile:
for it in Shared.MSEG_mblnr_zeile[evk]:
nev = copy(ev)
nev["MBLNR_ZEILE"] = it
Shared.events.append(nev)
i = i + 1
for evk in Shared.RBKP_events:
evs = Shared.RBKP_events[evk]
i = 0
while i < len(evs):
ev = evs[i]
ev["event_id"] = str(uuid.uuid4())
nev = copy(ev)
nev["BELNR"] = evk
Shared.events.append(nev)
if i == 0:
if evk in Shared.RSEG_belnr_matnr:
for mat in Shared.RSEG_belnr_matnr[evk]:
nev = copy(ev)
nev["MATNR"] = mat
Shared.events.append(nev)
if evk in Shared.RSEG_belnr_ebeln_ebelp:
for it in Shared.RSEG_belnr_ebeln_ebelp[evk]:
nev = copy(ev)
nev["BELNR_EBELN_EBELP"] = it
Shared.events.append(nev)
nev = copy(ev)
nev["EBELN_EBELP"] = it.split("_")[1] + "_" + it.split("_")[2]
Shared.events.append(nev)
if evk in Shared.EKBE_belnr_ebeln:
for it in Shared.EKBE_belnr_ebeln[evk]:
nev = copy(ev)
nev["EBELN"] = it
Shared.events.append(nev)
i = i + 1
for evk in Shared.BKPF_events:
evs = Shared.BKPF_events[evk]
i = 0
while i < len(evs):
ev = evs[i]
ev["event_id"] = str(uuid.uuid4())
nev = copy(ev)
nev["BELNR"] = evk
Shared.events.append(nev)
if i == 0:
if evk in Shared.BSEG_belnr_augbl:
for it in Shared.BSEG_belnr_augbl[evk]:
nev = copy(ev)
nev["AUGBL"] = it
Shared.events.append(nev)
if evk in Shared.BSEG_belnr_buzei:
for it in Shared.BSEG_belnr_buzei[evk]:
nev = copy(ev)
nev["BELNR_BUZEI"] = it
Shared.events.append(nev)
i = i + 1
if __name__ == "__main__":
read_bseg()
read_tstct()
read_eban()
read_bkpf()
read_ekbe()
read_ekpo()
read_mseg()
read_rseg()
read_mara()
read_lfa1()
read_ekko()
read_mkpf()
read_rbkp()
write_events()
Shared.events = sorted(Shared.events, key=lambda x: x["event_timestamp"])
print("written events")
events_df = pd.DataFrame(Shared.events)
print("got dataframe")
events_df.type = "exploded"
ekpo_objects = pd.DataFrame(Shared.EKPO_objects)
mseg_objects = pd.DataFrame(Shared.MSEG_objects)
rseg_objects = pd.DataFrame(Shared.RSEG_objects)
mara_objects = | pd.DataFrame(Shared.MARA_objects) | pandas.DataFrame |
import pytest
import numpy as np
import pandas
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
import matplotlib
import modin.pandas as pd
from modin.pandas.utils import to_pandas
from numpy.testing import assert_array_equal
from .utils import (
random_state,
RAND_LOW,
RAND_HIGH,
df_equals,
df_is_empty,
arg_keys,
name_contains,
test_data_values,
test_data_keys,
test_data_with_duplicates_values,
test_data_with_duplicates_keys,
numeric_dfs,
no_numeric_dfs,
test_func_keys,
test_func_values,
query_func_keys,
query_func_values,
agg_func_keys,
agg_func_values,
numeric_agg_funcs,
quantiles_keys,
quantiles_values,
indices_keys,
indices_values,
axis_keys,
axis_values,
bool_arg_keys,
bool_arg_values,
int_arg_keys,
int_arg_values,
)
# TODO remove once modin-project/modin#469 is resolved
agg_func_keys.remove("str")
agg_func_values.remove(str)
pd.DEFAULT_NPARTITIONS = 4
# Force matplotlib to not use any Xwindows backend.
matplotlib.use("Agg")
class TestDFPartOne:
# Test inter df math functions
def inter_df_math_helper(self, modin_df, pandas_df, op):
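        # Pattern used throughout: run the operation on pandas first; if pandas raises, assert that
        # modin raises the same exception type, otherwise compare the two results frame-by-frame.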
        # Test dataframe to dataframe
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
# Test dataframe to int
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
# Test dataframe to float
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
# Test transposed dataframes to float
try:
pandas_result = getattr(pandas_df.T, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df.T, op)(4.0)
else:
modin_result = getattr(modin_df.T, op)(4.0)
df_equals(modin_result, pandas_result)
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
# Test dataframe to different dataframe shape
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
# Test dataframe to list
list_test = random_state.randint(RAND_LOW, RAND_HIGH, size=(modin_df.shape[1]))
try:
pandas_result = getattr(pandas_df, op)(list_test, axis=1)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(list_test, axis=1)
else:
modin_result = getattr(modin_df, op)(list_test, axis=1)
df_equals(modin_result, pandas_result)
# Test dataframe to series
series_test_modin = modin_df[modin_df.columns[0]]
series_test_pandas = pandas_df[pandas_df.columns[0]]
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Test dataframe to series with different index
series_test_modin = modin_df[modin_df.columns[0]].reset_index(drop=True)
series_test_pandas = pandas_df[pandas_df.columns[0]].reset_index(drop=True)
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Level test
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "add")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_div(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "div")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_divide(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "divide")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_floordiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "floordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mod(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mul(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_multiply(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "multiply")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pow(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# TODO: Revert to others once we have an efficient way of preprocessing for positive
# values
try:
pandas_df = pandas_df.abs()
except Exception:
pass
else:
modin_df = modin_df.abs()
self.inter_df_math_helper(modin_df, pandas_df, "pow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_sub(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "sub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_subtract(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "subtract")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_truediv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "truediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___div__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__div__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___add__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__add__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___radd__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__radd__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__mul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rmul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rmul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___pow__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__pow__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rpow__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rpow__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___sub__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__sub__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___floordiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__floordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rfloordiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rfloordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___truediv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__truediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rtruediv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rtruediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mod__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__mod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rmod__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rmod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rdiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rdiv__")
# END test inter df math functions
# Test comparison of inter operation functions
def comparison_inter_ops_helper(self, modin_df, pandas_df, op):
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4)
except TypeError:
with pytest.raises(TypeError):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4.0)
except TypeError:
with pytest.raises(TypeError):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)("a")
except TypeError:
with pytest.raises(TypeError):
repr(getattr(modin_df, op)("a"))
else:
modin_result = getattr(modin_df, op)("a")
df_equals(modin_result, pandas_result)
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_eq(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "eq")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ge(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "ge")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_gt(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "gt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_le(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "le")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_lt(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "lt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ne(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "ne")
# END test comparison of inter operation functions
# Test dataframe right operations
def inter_df_math_right_ops_helper(self, modin_df, pandas_df, op):
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_radd(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "radd")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rdiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rdiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rfloordiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rfloordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmod(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rmod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmul(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rmul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rpow(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# TODO: Revert to others once we have an efficient way of preprocessing for positive values
# We need to check that negative integers are not used efficiently
if "100x100" not in request.node.name:
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rpow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rsub(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rsub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rtruediv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rtruediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rsub__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "__rsub__")
# END test dataframe right operations
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_abs(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.abs()
except Exception as e:
with pytest.raises(type(e)):
modin_df.abs()
else:
modin_result = modin_df.abs()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_prefix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
test_prefix = "TEST"
new_modin_df = modin_df.add_prefix(test_prefix)
new_pandas_df = pandas_df.add_prefix(test_prefix)
df_equals(new_modin_df.columns, new_pandas_df.columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap(self, request, data, testfunc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
x = 2
modin_df.applymap(x)
try:
pandas_result = pandas_df.applymap(testfunc)
except Exception as e:
with pytest.raises(type(e)):
modin_df.applymap(testfunc)
else:
modin_result = modin_df.applymap(testfunc)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap_numeric(self, request, data, testfunc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
try:
pandas_result = pandas_df.applymap(testfunc)
except Exception as e:
with pytest.raises(type(e)):
modin_df.applymap(testfunc)
else:
modin_result = modin_df.applymap(testfunc)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_suffix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
test_suffix = "TEST"
new_modin_df = modin_df.add_suffix(test_suffix)
new_pandas_df = pandas_df.add_suffix(test_suffix)
df_equals(new_modin_df.columns, new_pandas_df.columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_at(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# We skip nan datasets because nan != nan
if "nan" not in request.node.name:
key1 = modin_df.columns[0]
            # Scalar
assert modin_df.at[0, key1] == pandas_df.at[0, key1]
# Series
df_equals(modin_df.loc[0].at[key1], pandas_df.loc[0].at[key1])
# Write Item
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.at[1, key1] = modin_df.at[0, key1]
pandas_df_copy.at[1, key1] = pandas_df.at[0, key1]
df_equals(modin_df_copy, pandas_df_copy)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_axes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
for modin_axis, pd_axis in zip(modin_df.axes, pandas_df.axes):
assert np.array_equal(modin_axis, pd_axis)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_copy(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
# pandas_df is unused but there so there won't be confusing list comprehension
# stuff in the pytest.mark.parametrize
new_modin_df = modin_df.copy()
assert new_modin_df is not modin_df
assert np.array_equal(
new_modin_df._query_compiler._modin_frame._partitions,
modin_df._query_compiler._modin_frame._partitions,
)
assert new_modin_df is not modin_df
df_equals(new_modin_df, modin_df)
# Shallow copy tests
modin_df = pd.DataFrame(data)
modin_df_cp = modin_df.copy(False)
modin_df[modin_df.columns[0]] = 0
df_equals(modin_df, modin_df_cp)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dtypes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = | pandas.DataFrame(data) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Read the txt files containing the raw design tables from Chen, Sun and Wu (1993), format them and store them
in a new excel file, with one sheet per run size.
Created on Wed Jan 19 15:57:58 2022
@author: <NAME> - alexandre dot bohyn [at] kuleuven dot be
"""
import os
# % Packages
import re
import pandas as pd
# Function to format the file
def format_file(fname: str):
    # Create a DataFrame to collect the formatted designs
designs_dict = | pd.DataFrame() | pandas.DataFrame |
import asyncio
from collections import defaultdict, namedtuple
from dataclasses import dataclass, fields as dataclass_fields
from datetime import date, datetime, timedelta, timezone
from enum import Enum
from itertools import chain, repeat
import logging
import pickle
from typing import Collection, Dict, Generator, Iterable, Iterator, KeysView, List, \
Mapping, Optional, Sequence, Set, Tuple, Union
import aiomcache
import numpy as np
import pandas as pd
from pandas.core.common import flatten
from sqlalchemy import sql
from sqlalchemy.orm import aliased
from sqlalchemy.orm.attributes import InstrumentedAttribute
from sqlalchemy.sql.elements import BinaryExpression
from athenian.api import metadata
from athenian.api.async_utils import gather, read_sql_query
from athenian.api.cache import cached, CancelCache, short_term_exptime
from athenian.api.controllers.logical_repos import coerce_logical_repos
from athenian.api.controllers.miners.filters import JIRAFilter, LabelFilter
from athenian.api.controllers.miners.github.commit import BRANCH_FETCH_COMMITS_COLUMNS, \
DAG, fetch_precomputed_commit_history_dags, fetch_repository_commits_no_branch_dates
from athenian.api.controllers.miners.github.dag_accelerated import searchsorted_inrange
from athenian.api.controllers.miners.github.label import fetch_labels_to_filter
from athenian.api.controllers.miners.github.logical import split_logical_repositories
from athenian.api.controllers.miners.github.precomputed_prs import \
discover_inactive_merged_unreleased_prs, MergedPRFactsLoader, OpenPRFactsLoader, \
update_unreleased_prs
from athenian.api.controllers.miners.github.release_load import ReleaseLoader
from athenian.api.controllers.miners.github.release_match import PullRequestToReleaseMapper, \
ReleaseToPullRequestMapper
from athenian.api.controllers.miners.github.released_pr import matched_by_column
from athenian.api.controllers.miners.jira.issue import generate_jira_prs_query
from athenian.api.controllers.miners.types import DeploymentConclusion, MinedPullRequest, \
nonemax, nonemin, PRParticipants, PRParticipationKind, PullRequestFacts, PullRequestFactsMap
from athenian.api.controllers.prefixer import Prefixer
from athenian.api.controllers.settings import LogicalRepositorySettings, ReleaseMatch, \
ReleaseSettings
from athenian.api.db import add_pdb_misses, Database, DatabaseLike
from athenian.api.defer import AllEvents, defer
from athenian.api.int_to_str import int_to_str
from athenian.api.models.metadata.github import Base, NodePullRequestJiraIssues, \
PullRequest, PullRequestComment, PullRequestCommit, PullRequestLabel, PullRequestReview, \
PullRequestReviewComment, PullRequestReviewRequest, PushCommit, Release
from athenian.api.models.metadata.jira import Component, Issue
from athenian.api.models.persistentdata.models import DeploymentNotification
from athenian.api.models.precomputed.models import GitHubPullRequestDeployment
from athenian.api.tracing import sentry_span
@dataclass
class PRDataFrames(Mapping[str, pd.DataFrame]):
"""Set of dataframes with all the PR data we can reach."""
prs: pd.DataFrame
commits: pd.DataFrame
releases: pd.DataFrame
jiras: pd.DataFrame
reviews: pd.DataFrame
review_comments: pd.DataFrame
review_requests: pd.DataFrame
comments: pd.DataFrame
labels: pd.DataFrame
deployments: pd.DataFrame
def __iter__(self) -> Iterator[str]:
"""Implement iter() - return an iterator over the field names."""
return iter((f.name for f in dataclass_fields(self)))
def __getitem__(self, key: str) -> pd.DataFrame:
"""Implement self[key]."""
try:
return getattr(self, key)
except AttributeError:
raise KeyError(key) from None
def __setitem__(self, key: str, value: pd.DataFrame) -> None:
"""Implement self[key] = value."""
for f in dataclass_fields(self):
if key == f.name:
break
else:
raise KeyError(key)
setattr(self, key, value)
def __len__(self) -> int:
"""Implement len()."""
return len(dataclass_fields(self))
class PullRequestMiner:
"""Load all the information related to Pull Requests from the metadata DB. Iterate over it \
to access individual PR objects."""
CACHE_TTL = short_term_exptime
log = logging.getLogger("%s.PullRequestMiner" % metadata.__package__)
ReleaseMappers = namedtuple("ReleaseMappers", [
"map_releases_to_prs", "map_prs_to_releases", "load_releases"])
mappers = ReleaseMappers(
map_releases_to_prs=ReleaseToPullRequestMapper.map_releases_to_prs,
map_prs_to_releases=PullRequestToReleaseMapper.map_prs_to_releases,
load_releases=ReleaseLoader.load_releases,
)
def __init__(self, dfs: PRDataFrames):
"""Initialize a new instance of `PullRequestMiner`."""
self._dfs = dfs
@property
def dfs(self) -> PRDataFrames:
"""Return the bound dataframes with PR information."""
return self._dfs
def __len__(self) -> int:
"""Return the number of loaded pull requests."""
return len(self._dfs.prs)
def __iter__(self) -> Generator[MinedPullRequest, None, None]:
"""Iterate over the individual pull requests."""
assert self._dfs.prs.index.nlevels == 2
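        # Strategy: pre-group every companion dataframe by PR node id (plus repository for releases),
        # then advance all group iterators in lockstep with the sorted prs index so that each
        # MinedPullRequest is assembled in a single pass.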
df_fields = [f.name for f in dataclass_fields(MinedPullRequest) if f.name != "pr"]
dfs = []
grouped_df_iters = []
index_backup = []
for k in df_fields:
plural = k.endswith("s")
df = getattr(self._dfs, k if plural else (k + "s")) # type: pd.DataFrame
dfs.append(df)
# our very own groupby() allows us to call take() with reduced overhead
node_ids = df.index.get_level_values(0).values.astype(int, copy=False)
with_repos = k == "release"
if df.index.nlevels > 1:
# the second level adds determinism to the iteration order
second_level = df.index.get_level_values(1).values
node_ids_bytes = int_to_str(node_ids)
if second_level.dtype == int:
order_keys = np.char.add(node_ids_bytes, int_to_str(second_level))
else:
order_keys = np.char.add(node_ids_bytes,
second_level.astype("S", copy=False))
else:
order_keys = node_ids
df_order = np.argsort(order_keys)
if not with_repos:
unique_node_ids, node_ids_unique_counts = np.unique(node_ids, return_counts=True)
offsets = np.zeros(len(node_ids_unique_counts) + 1, dtype=int)
np.cumsum(node_ids_unique_counts, out=offsets[1:])
groups = self._iter_by_split(df_order, offsets)
grouped_df_iters.append(iter(zip(unique_node_ids, repeat(None), groups)))
else:
_, unique_counts = np.unique(order_keys, return_counts=True)
node_ids = node_ids[df_order]
repos = df.index.get_level_values(1).values[df_order].astype("U")
offsets = np.zeros(len(unique_counts) + 1, dtype=int)
np.cumsum(unique_counts, out=offsets[1:])
groups = self._iter_by_split(df_order, offsets)
grouped_df_iters.append(iter(zip(
node_ids[offsets[:-1]], repos[offsets[:-1]], groups)))
if plural:
index_backup.append(df.index)
df.index = df.index.droplevel(0)
else:
index_backup.append(None)
try:
grouped_df_states = []
for i in grouped_df_iters:
try:
grouped_df_states.append(next(i))
except StopIteration:
grouped_df_states.append((None, None, None))
empty_df_cache = {}
pr_columns = [PullRequest.node_id.name, PullRequest.repository_full_name.name]
pr_columns.extend(self._dfs.prs.columns)
if not self._dfs.prs.index.is_monotonic_increasing:
raise IndexError("PRs index must be pre-sorted ascending: "
"prs.sort_index(inplace=True)")
for pr_tuple in self._dfs.prs.itertuples():
(pr_node_id, repo), *pr_tuple = pr_tuple
items = {"pr": dict(zip(pr_columns, [pr_node_id, repo] + pr_tuple))}
for i, (k, (state_pr_node_id, state_repo, gdf), git, df) in enumerate(zip(
df_fields, grouped_df_states, grouped_df_iters, dfs)):
while state_pr_node_id is not None and (
state_pr_node_id < pr_node_id
or (state_pr_node_id == pr_node_id
and state_repo is not None
and state_repo < repo)):
try:
state_pr_node_id, state_repo, gdf = next(git)
except StopIteration:
state_pr_node_id, state_repo, gdf = None, None, None
grouped_df_states[i] = state_pr_node_id, state_repo, gdf
if state_pr_node_id == pr_node_id and \
(state_repo is None or state_repo == repo):
if not k.endswith("s"):
# much faster than items.iloc[gdf[0]]
gdf = {c: v for c, v in zip(df.columns, df._data.fast_xs(gdf[0]))}
else:
gdf = df.take(gdf)
items[k] = gdf
else:
try:
items[k] = empty_df_cache[k]
except KeyError:
if k.endswith("s"):
empty_val = df.iloc[:0].copy()
else:
empty_val = {c: None for c in df.columns}
items[k] = empty_df_cache[k] = empty_val
yield MinedPullRequest(**items)
finally:
for df, index in zip(dfs, index_backup):
if index is not None:
df.index = index
def drop(self, node_ids: Collection[int]) -> pd.Index:
"""
Remove PRs from the given collection of PR node IDs in-place.
Node IDs don't have to be all present.
:return: Actually removed node IDs.
"""
removed = self._dfs.prs.index.get_level_values(0).intersection(node_ids)
if removed.empty:
return removed
self._dfs.prs.drop(removed, inplace=True)
for df in self._dfs.values():
df.drop(removed, inplace=True, errors="ignore",
level=0 if isinstance(df.index, pd.MultiIndex) else None)
return removed
def _deserialize_mine_cache(buffer: bytes) -> Tuple[PRDataFrames,
PullRequestFactsMap,
Set[str],
PRParticipants,
LabelFilter,
JIRAFilter,
Dict[str, ReleaseMatch],
asyncio.Event]:
stuff = pickle.loads(buffer)
event = asyncio.Event()
event.set()
return (*stuff, event)
@sentry_span
def _postprocess_cached_prs(
result: Tuple[PRDataFrames,
PullRequestFactsMap,
Set[str],
PRParticipants,
LabelFilter,
JIRAFilter,
bool,
Dict[str, ReleaseMatch],
asyncio.Event],
date_to: date,
repositories: Set[str],
participants: PRParticipants,
labels: LabelFilter,
jira: JIRAFilter,
with_jira_map: bool,
pr_blacklist: Optional[Tuple[Collection[int], Dict[str, List[int]]]],
truncate: bool,
**_) -> Tuple[PRDataFrames,
PullRequestFactsMap,
Set[str],
PRParticipants,
LabelFilter,
JIRAFilter,
bool,
Dict[str, ReleaseMatch],
asyncio.Event]:
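        # Reject the cached entry unless it covers the requested repositories, participants, labels
        # and JIRA filter; otherwise trim the cached dataframes down to the current request.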
dfs, _, cached_repositories, cached_participants, cached_labels, cached_jira, \
cached_with_jira_map, _, _ = result
if with_jira_map and not cached_with_jira_map:
raise CancelCache()
cls = PullRequestMiner
if (repositories - cached_repositories or
not cls._check_participants_compatibility(cached_participants, participants) or
not cached_labels.compatible_with(labels) or
not cached_jira.compatible_with(jira)):
raise CancelCache()
to_remove = set()
if pr_blacklist is not None:
to_remove.update(pr_blacklist[0])
if no_logical_repos := (coerce_logical_repos(repositories).keys() == repositories):
to_remove.update(dfs.prs.index.get_level_values(0).values[
np.in1d(dfs.prs.index.get_level_values(1).values,
list(repositories), assume_unique=True, invert=True),
])
time_to = None if truncate else pd.Timestamp(date_to, tzinfo=timezone.utc)
to_remove.update(cls._find_drop_by_participants(dfs, participants, time_to))
to_remove.update(cls._find_drop_by_labels(dfs, labels))
to_remove.update(cls._find_drop_by_jira(dfs, jira))
cls._drop(dfs, to_remove)
if not no_logical_repos:
dfs.prs = dfs.prs.take(np.flatnonzero(
np.in1d(dfs.prs.index.get_level_values(1).values,
list(repositories), assume_unique=True),
))
return result
@classmethod
@sentry_span
@cached(
exptime=lambda cls, **_: cls.CACHE_TTL,
serialize=lambda r: pickle.dumps(r[:-1]),
deserialize=_deserialize_mine_cache,
key=lambda date_from, date_to, exclude_inactive, release_settings, logical_settings, updated_min, updated_max, pr_blacklist, truncate, **_: ( # noqa
date_from.toordinal(), date_to.toordinal(), exclude_inactive,
release_settings, logical_settings,
updated_min.timestamp() if updated_min is not None else None,
updated_max.timestamp() if updated_max is not None else None,
",".join(map(str, sorted(pr_blacklist[0]) if pr_blacklist is not None else [])),
truncate,
),
postprocess=_postprocess_cached_prs,
)
async def _mine(cls,
date_from: date,
date_to: date,
repositories: Set[str],
participants: PRParticipants,
labels: LabelFilter,
jira: JIRAFilter,
with_jira_map: bool,
branches: pd.DataFrame,
default_branches: Dict[str, str],
exclude_inactive: bool,
release_settings: ReleaseSettings,
logical_settings: LogicalRepositorySettings,
updated_min: Optional[datetime],
updated_max: Optional[datetime],
pr_blacklist: Optional[Tuple[Collection[int], Dict[str, List[int]]]],
truncate: bool,
prefixer: Prefixer,
account: int,
meta_ids: Tuple[int, ...],
mdb: Database,
pdb: Database,
rdb: Database,
cache: Optional[aiomcache.Client],
) -> Tuple[PRDataFrames,
PullRequestFactsMap,
Set[str],
PRParticipants,
LabelFilter,
JIRAFilter,
bool,
Dict[str, ReleaseMatch],
asyncio.Event]:
assert isinstance(date_from, date) and not isinstance(date_from, datetime)
assert isinstance(date_to, date) and not isinstance(date_to, datetime)
assert isinstance(repositories, set)
assert isinstance(mdb, Database)
assert isinstance(pdb, Database)
assert isinstance(rdb, Database)
assert (updated_min is None) == (updated_max is None)
time_from, time_to = (pd.Timestamp(t, tzinfo=timezone.utc) for t in (date_from, date_to))
pr_blacklist_expr = ambiguous = None
if pr_blacklist is not None:
pr_blacklist, ambiguous = pr_blacklist
if len(pr_blacklist) > 0:
pr_blacklist_expr = PullRequest.node_id.notin_any_values(pr_blacklist)
if logical_settings.has_logical_prs():
physical_repos = coerce_logical_repos(repositories).keys()
else:
physical_repos = repositories
pdags = await fetch_precomputed_commit_history_dags(physical_repos, account, pdb, cache)
fetch_branch_dags_task = asyncio.create_task(
cls._fetch_branch_dags(
physical_repos, pdags, branches, account, meta_ids, mdb, pdb, cache),
name="_fetch_branch_dags",
)
# the heaviest task should always go first
tasks = [
cls.mappers.map_releases_to_prs(
repositories, branches, default_branches, time_from, time_to,
participants.get(PRParticipationKind.AUTHOR, []),
participants.get(PRParticipationKind.MERGER, []),
jira, release_settings, logical_settings, updated_min, updated_max, pdags,
prefixer, account, meta_ids, mdb, pdb, rdb, cache, pr_blacklist_expr, None,
truncate=truncate),
cls.fetch_prs(
time_from, time_to, physical_repos, participants, labels, jira,
exclude_inactive, pr_blacklist_expr, None, branches, pdags, account, meta_ids,
mdb, pdb, cache, updated_min=updated_min, updated_max=updated_max,
fetch_branch_dags_task=fetch_branch_dags_task),
cls.map_deployments_to_prs(
physical_repos, time_from, time_to, participants,
labels, jira, updated_min, updated_max, prefixer, branches, pdags,
account, meta_ids, mdb, pdb, cache, pr_blacklist,
fetch_branch_dags_task=fetch_branch_dags_task),
]
# the following is a very rough approximation regarding updated_min/max:
        # we load all or none of the inactive merged PRs
# see also: load_precomputed_done_candidates() which generates `ambiguous`
if not exclude_inactive and (updated_min is None or updated_min <= time_from):
tasks.append(cls._fetch_inactive_merged_unreleased_prs(
time_from, time_to, repositories, participants, labels, jira, default_branches,
release_settings, logical_settings.has_logical_prs(),
prefixer, account, meta_ids, mdb, pdb, cache))
# we don't load inactive released undeployed PRs because nobody needs them
(
(released_prs, releases, release_settings, matched_bys,
release_dags, precomputed_observed),
(prs, branch_dags, _),
deployed_prs,
*unreleased,
) = await gather(*tasks)
del pr_blacklist_expr
deployed_releases_task = None
if not deployed_prs.empty:
covered_prs = prs.index.union(released_prs.index)
if unreleased:
covered_prs = covered_prs.union(unreleased[0].index)
new_prs = deployed_prs.index.difference(covered_prs)
if not new_prs.empty:
new_prs = deployed_prs[[
PullRequest.merged_at.name, PullRequest.repository_full_name.name,
]].loc[new_prs]
min_deployed_merged = new_prs[PullRequest.merged_at.name].min()
if min_deployed_merged < time_from:
deployed_releases_task = asyncio.create_task(
cls.mappers.load_releases(
new_prs[PullRequest.repository_full_name.name].unique(),
branches, default_branches, min_deployed_merged, time_from,
release_settings, logical_settings, prefixer, account, meta_ids,
mdb, pdb, rdb, cache),
name="PullRequestMiner.mine/deployed_releases",
)
concatenated = [prs, released_prs, deployed_prs, *unreleased]
missed_prs = cls._extract_missed_prs(ambiguous, pr_blacklist, deployed_prs, matched_bys)
if missed_prs:
add_pdb_misses(pdb, "PullRequestMiner.mine/blacklist",
sum(len(v) for v in missed_prs.values()))
# these PRs are released by branch and not by tag, and we require by tag.
# we have not fetched them yet because they are in pr_blacklist
# and they are in pr_blacklist because we have previously loaded them in
# load_precomputed_done_candidates();
# now fetch only these `missed_prs`, respecting the filters.
pr_whitelist = PullRequest.node_id.in_(
list(chain.from_iterable(missed_prs.values())))
tasks = [
cls.mappers.map_releases_to_prs(
missed_prs, branches, default_branches, time_from, time_to,
participants.get(PRParticipationKind.AUTHOR, []),
participants.get(PRParticipationKind.MERGER, []),
jira, release_settings, logical_settings, updated_min, updated_max, pdags,
prefixer, account, meta_ids, mdb, pdb, rdb, cache, None, pr_whitelist,
truncate, precomputed_observed=precomputed_observed),
cls.fetch_prs(
time_from, time_to, missed_prs.keys(), participants, labels, jira,
exclude_inactive, None, pr_whitelist, branches, branch_dags, account, meta_ids,
mdb, pdb, cache, updated_min=updated_min, updated_max=updated_max,
fetch_branch_dags_task=fetch_branch_dags_task),
]
missed_released_prs, (missed_prs, *_) = await gather(*tasks)
concatenated.extend([missed_released_prs, missed_prs])
fetch_branch_dags_task.cancel() # 99.999% that it was awaited, but still
prs = pd.concat(concatenated, copy=False)
prs.reset_index(inplace=True)
prs.drop_duplicates([PullRequest.node_id.name, PullRequest.repository_full_name.name],
inplace=True)
prs.set_index(PullRequest.node_id.name, inplace=True)
prs.sort_index(inplace=True)
if unreleased:
unreleased = np.array([
unreleased[0].index.values,
unreleased[0][PullRequest.repository_full_name.name].values,
], dtype=object).T
tasks = [
# bypass the useless inner caching by calling _mine_by_ids directly
cls._mine_by_ids(
prs, unreleased, repositories, time_to, releases, matched_bys,
branches, default_branches, release_dags, release_settings, logical_settings,
prefixer, account, meta_ids, mdb, pdb, rdb, cache,
truncate=truncate, with_jira=with_jira_map,
extra_releases_task=deployed_releases_task,
physical_repositories=physical_repos),
OpenPRFactsLoader.load_open_pull_request_facts(prs, repositories, account, pdb),
]
(dfs, unreleased_facts, unreleased_prs_event), open_facts = await gather(
*tasks, op="PullRequestMiner.mine/external_data")
to_drop = cls._find_drop_by_participants(dfs, participants, None if truncate else time_to)
to_drop |= cls._find_drop_by_labels(dfs, labels)
if exclude_inactive:
to_drop |= cls._find_drop_by_inactive(dfs, time_from, time_to)
cls._drop(dfs, to_drop)
facts = open_facts
for k, v in unreleased_facts.items(): # merged unreleased PR precomputed facts
if v is not None: # it can be None because the pdb table is filled in two steps
facts[k] = v
dfs.prs = split_logical_repositories(
dfs.prs, dfs.labels, repositories, logical_settings)
return dfs, facts, repositories, participants, labels, jira, with_jira_map, matched_bys, \
unreleased_prs_event
_deserialize_mine_cache = staticmethod(_deserialize_mine_cache)
_postprocess_cached_prs = staticmethod(_postprocess_cached_prs)
def _deserialize_mine_by_ids_cache(
buffer: bytes) -> Tuple[PRDataFrames,
PullRequestFactsMap,
asyncio.Event]:
dfs, facts = pickle.loads(buffer)
event = asyncio.Event()
event.set()
return dfs, facts, event
@classmethod
@cached(
exptime=lambda cls, **_: cls.CACHE_TTL,
serialize=lambda r: pickle.dumps(r[:-1]),
deserialize=_deserialize_mine_by_ids_cache,
key=lambda prs, unreleased, releases, time_to, logical_settings, truncate=True, with_jira=True, **_: ( # noqa
",".join(map(str, prs.index.values)),
",".join(map(str, unreleased)),
",".join(map(str, releases[Release.node_id.name].values)),
time_to.timestamp(),
logical_settings,
truncate,
with_jira,
),
)
async def mine_by_ids(cls,
prs: pd.DataFrame,
unreleased: Collection[Tuple[int, str]],
logical_repositories: Union[Set[str], KeysView[str]],
time_to: datetime,
releases: pd.DataFrame,
matched_bys: Dict[str, ReleaseMatch],
branches: pd.DataFrame,
default_branches: Dict[str, str],
dags: Dict[str, DAG],
release_settings: ReleaseSettings,
logical_settings: LogicalRepositorySettings,
prefixer: Prefixer,
account: int,
meta_ids: Tuple[int, ...],
mdb: Database,
pdb: Database,
rdb: Database,
cache: Optional[aiomcache.Client],
truncate: bool = True,
with_jira: bool = True,
physical_repositories: Optional[Union[Set[str], KeysView[str]]] = None,
) -> Tuple[PRDataFrames,
PullRequestFactsMap,
asyncio.Event]:
"""
Fetch PR metadata for certain PRs.
:param prs: pandas DataFrame with fetched PullRequest-s. Only the details about those PRs \
will be loaded from the DB.
:param truncate: Do not load anything after `time_to`.
:param with_jira: Value indicating whether to load the mapped JIRA issues.
:return: 1. List of mined DataFrame-s. \
2. mapping to PullRequestFacts of unreleased merged PRs. \
3. Synchronization for updating the pdb table with merged unreleased PRs.
"""
return await cls._mine_by_ids(
prs, unreleased, logical_repositories, time_to, releases, matched_bys,
branches, default_branches, dags, release_settings, logical_settings, prefixer,
account, meta_ids, mdb, pdb, rdb, cache, truncate=truncate, with_jira=with_jira,
physical_repositories=physical_repositories)
_deserialize_mine_by_ids_cache = staticmethod(_deserialize_mine_by_ids_cache)
@classmethod
@sentry_span
async def _mine_by_ids(cls,
prs: pd.DataFrame,
unreleased: Collection[Tuple[int, str]],
logical_repositories: Union[Set[str], KeysView[str]],
time_to: datetime,
releases: pd.DataFrame,
matched_bys: Dict[str, ReleaseMatch],
branches: pd.DataFrame,
default_branches: Dict[str, str],
dags: Dict[str, DAG],
release_settings: ReleaseSettings,
logical_settings: LogicalRepositorySettings,
prefixer: Prefixer,
account: int,
meta_ids: Tuple[int, ...],
mdb: Database,
pdb: Database,
rdb: Database,
cache: Optional[aiomcache.Client],
truncate: bool = True,
with_jira: bool = True,
extra_releases_task: Optional[asyncio.Task] = None,
physical_repositories: Optional[Union[Set[str], KeysView[str]]] = None,
) -> Tuple[PRDataFrames,
PullRequestFactsMap,
asyncio.Event]:
assert prs.index.nlevels == 1
node_ids = prs.index if len(prs) > 0 else set()
facts = {} # precomputed PullRequestFacts about merged unreleased PRs
unreleased_prs_event: asyncio.Event = None
merged_unreleased_indexes = []
@sentry_span
async def fetch_reviews():
return await cls._read_filtered_models(
PullRequestReview, node_ids, time_to, meta_ids, mdb,
columns=[PullRequestReview.submitted_at, PullRequestReview.state,
PullRequestReview.user_login, PullRequestReview.user_node_id],
created_at=truncate)
@sentry_span
async def fetch_review_comments():
return await cls._read_filtered_models(
PullRequestReviewComment, node_ids, time_to, meta_ids, mdb,
columns=[PullRequestReviewComment.created_at, PullRequestReviewComment.user_login,
PullRequestReviewComment.user_node_id],
created_at=truncate)
@sentry_span
async def fetch_review_requests():
return await cls._read_filtered_models(
PullRequestReviewRequest, node_ids, time_to, meta_ids, mdb,
columns=[PullRequestReviewRequest.created_at],
created_at=truncate)
@sentry_span
async def fetch_comments():
return await cls._read_filtered_models(
PullRequestComment, node_ids, time_to, meta_ids, mdb,
columns=[PullRequestComment.created_at, PullRequestComment.user_login,
PullRequestComment.user_node_id],
created_at=truncate)
@sentry_span
async def fetch_commits():
return await cls._read_filtered_models(
PullRequestCommit, node_ids, time_to, meta_ids, mdb,
columns=[PullRequestCommit.authored_date, PullRequestCommit.committed_date,
PullRequestCommit.author_login, PullRequestCommit.committer_login,
PullRequestCommit.author_user_id, PullRequestCommit.committer_user_id],
created_at=truncate)
@sentry_span
async def fetch_labels():
return await cls._read_filtered_models(
PullRequestLabel, node_ids, time_to, meta_ids, mdb,
columns=[sql.func.lower(PullRequestLabel.name).label(PullRequestLabel.name.name),
PullRequestLabel.description,
PullRequestLabel.color],
created_at=False)
fetch_labels_task = asyncio.create_task(
fetch_labels(), name="PullRequestMiner.mine_by_ids/fetch_labels")
@sentry_span
async def map_releases():
anyhow_merged_mask = prs[PullRequest.merged_at.name].notnull().values
if truncate:
merged_mask = (prs[PullRequest.merged_at.name] < time_to).values
nonlocal merged_unreleased_indexes
merged_unreleased_indexes = np.flatnonzero(anyhow_merged_mask & ~merged_mask)
else:
merged_mask = anyhow_merged_mask
if len(unreleased):
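                # Build composite "<node id><repo>" byte keys so that merged but unreleased
                # PRs can be excluded per logical repository rather than by node ID alone.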
prs_index = np.char.add(
int_to_str(prs.index.values),
(prs_repos := prs[PullRequest.repository_full_name.name].values.astype("S")),
)
if isinstance(unreleased, np.ndarray):
unreleased_index = np.char.add(
int_to_str(unreleased[:, 0].astype(int)),
unreleased[:, 1].astype(prs_repos.dtype),
)
else:
unreleased_index = np.char.add(
int_to_str(np.fromiter((p[0] for p in unreleased), int, len(unreleased))),
np.array([p[1] for p in unreleased], dtype=prs_repos.dtype),
)
merged_mask &= np.in1d(prs_index, unreleased_index, invert=True)
merged_prs = prs.take(np.flatnonzero(merged_mask))
nonlocal releases
if extra_releases_task is not None:
await extra_releases_task
extra_releases, _ = extra_releases_task.result()
releases = releases.append(extra_releases, ignore_index=True)
labels = None
if logical_settings.has_logical_prs():
nonlocal physical_repositories
if physical_repositories is None:
physical_repositories = coerce_logical_repos(logical_repositories).keys()
if logical_settings.has_prs_by_label(physical_repositories):
await fetch_labels_task
labels = fetch_labels_task.result()
merged_prs = split_logical_repositories(
merged_prs, labels, logical_repositories, logical_settings)
else:
merged_prs = split_logical_repositories(merged_prs, None, set(), logical_settings)
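            # Map the merged PRs to their releases and, in parallel, load the precomputed
            # facts of merged-but-unreleased PRs from pdb.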
df_facts, other_facts = await gather(
cls.mappers.map_prs_to_releases(
merged_prs, releases, matched_bys, branches, default_branches, time_to,
dags, release_settings, prefixer, account, meta_ids, mdb, pdb, cache,
labels=labels),
MergedPRFactsLoader.load_merged_unreleased_pull_request_facts(
prs.take(np.flatnonzero(anyhow_merged_mask & ~merged_mask)),
nonemax(releases[Release.published_at.name].nonemax(), time_to),
LabelFilter.empty(), matched_bys, default_branches, release_settings,
prefixer, account, pdb),
)
nonlocal facts
nonlocal unreleased_prs_event
df, facts, unreleased_prs_event = df_facts
facts.update(other_facts)
return df
async def _fetch_labels():
await fetch_labels_task
return fetch_labels_task.result()
@sentry_span
async def fetch_jira():
_map = aliased(NodePullRequestJiraIssues, name="m")
_issue = aliased(Issue, name="i")
_issue_epic = aliased(Issue, name="e")
selected = [
PullRequest.node_id, _issue.key, _issue.title, _issue.type, _issue.status,
_issue.created, _issue.updated, _issue.resolved, _issue.labels, _issue.components,
_issue.acc_id, _issue_epic.key.label("epic"),
]
if not with_jira:
df = pd.DataFrame(columns=[col.name for col in selected
if col not in (_issue.acc_id, _issue.components)])
df[PullRequest.node_id.name] = df[PullRequest.node_id.name].astype(int)
return df.set_index([PullRequest.node_id.name, _issue.key.name])
df = await read_sql_query(
sql.select(selected).select_from(sql.join(
PullRequest, sql.join(
_map, sql.join(_issue, _issue_epic, sql.and_(
_issue.epic_id == _issue_epic.id,
_issue.acc_id == _issue_epic.acc_id), isouter=True),
sql.and_(_map.jira_id == _issue.id,
_map.jira_acc == _issue.acc_id)),
sql.and_(PullRequest.node_id == _map.node_id,
PullRequest.acc_id == _map.node_acc),
)).where(sql.and_(PullRequest.node_id.in_(node_ids),
PullRequest.acc_id.in_(meta_ids),
_issue.is_deleted.is_(False))),
mdb, columns=selected, index=[PullRequest.node_id.name, _issue.key.name])
if df.empty:
df.drop([Issue.acc_id.name, Issue.components.name], inplace=True, axis=1)
return df
components = df[[Issue.acc_id.name, Issue.components.name]] \
.groupby(Issue.acc_id.name, sort=False).aggregate(lambda s: set(flatten(s)))
rows = await mdb.fetch_all(
sql.select([Component.acc_id, Component.id, Component.name])
.where(sql.or_(*(sql.and_(Component.id.in_(vals),
Component.acc_id == int(acc))
for acc, vals in zip(components.index.values,
components[Issue.components.name].values)))))
cmap = {}
for r in rows:
cmap.setdefault(r[0], {})[r[1]] = r[2].lower()
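            # cmap: per JIRA account, component ID -> lower-cased component name;
            # the resolved names are appended to the issue labels below.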
df[Issue.labels.name] = (
df[Issue.labels.name].apply(lambda i: [s.lower() for s in (i or [])])
+
df[[Issue.acc_id.name, Issue.components.name]]
.apply(lambda row: ([cmap[row[Issue.acc_id.name]][c]
for c in row[Issue.components.name]]
if row[Issue.components.name] is not None else []),
axis=1)
)
df.drop([Issue.acc_id.name, Issue.components.name], inplace=True, axis=1)
return df
# the order is important: it provides the best performance
# we launch coroutines from the heaviest to the lightest
dfs = await gather(
fetch_commits(),
map_releases(),
fetch_jira(),
fetch_reviews(),
fetch_review_comments(),
fetch_review_requests(),
fetch_comments(),
_fetch_labels(),
cls.fetch_pr_deployments(node_ids, prefixer, account, pdb, rdb),
)
dfs = PRDataFrames(prs, *dfs)
if len(merged_unreleased_indexes):
# if we truncate and there are PRs merged after `time_to`
merged_unreleased_prs = prs.take(merged_unreleased_indexes)
label_matches = np.flatnonzero(np.in1d(
dfs.labels.index.get_level_values(0).values,
merged_unreleased_prs.index.values))
labels = {}
for k, v in zip(dfs.labels.index.get_level_values(0).values[label_matches],
dfs.labels[PullRequestLabel.name.name].values[label_matches]):
try:
labels[k].append(v)
except KeyError:
labels[k] = [v]
other_unreleased_prs_event = asyncio.Event()
unreleased_prs_event = AllEvents(unreleased_prs_event, other_unreleased_prs_event)
merged_unreleased_prs = split_logical_repositories(
merged_unreleased_prs, dfs.labels, logical_repositories, logical_settings)
await defer(update_unreleased_prs(
merged_unreleased_prs, pd.DataFrame(), time_to, labels, matched_bys,
default_branches, release_settings, account, pdb, other_unreleased_prs_event),
"update_unreleased_prs/truncate(%d)" % len(merged_unreleased_indexes))
return dfs, facts, unreleased_prs_event
@classmethod
@sentry_span
async def mine(cls,
date_from: date,
date_to: date,
time_from: datetime,
time_to: datetime,
repositories: Set[str],
participants: PRParticipants,
labels: LabelFilter,
jira: JIRAFilter,
with_jira_map: bool,
branches: pd.DataFrame,
default_branches: Dict[str, str],
exclude_inactive: bool,
release_settings: ReleaseSettings,
logical_settings: LogicalRepositorySettings,
prefixer: Prefixer,
account: int,
meta_ids: Tuple[int, ...],
mdb: Database,
pdb: Database,
rdb: Database,
cache: Optional[aiomcache.Client],
updated_min: Optional[datetime] = None,
updated_max: Optional[datetime] = None,
pr_blacklist: Optional[Tuple[Collection[int], Dict[str, List[int]]]] = None,
truncate: bool = True,
) -> Tuple["PullRequestMiner",
PullRequestFactsMap,
Dict[str, ReleaseMatch],
asyncio.Event]:
"""
Mine metadata about pull requests according to the numerous filters.
:param account: State DB account ID.
:param meta_ids: Metadata (GitHub) account IDs.
:param date_from: Fetch PRs created starting from this date, inclusive.
:param date_to: Fetch PRs created ending with this date, inclusive.
:param time_from: Precise timestamp of since when PR events are allowed to happen.
:param time_to: Precise timestamp of until when PR events are allowed to happen.
:param repositories: PRs must belong to these repositories (prefix excluded).
:param participants: PRs must have these user IDs in the specified participation roles \
(OR aggregation). An empty dict means everybody.
:param labels: PRs must be labeled according to this filter's include & exclude sets.
:param jira: JIRA filters for those PRs that are matched with JIRA issues.
:param with_jira_map: Value indicating whether we must load JIRA issues mapped to PRs. \
This is independent from filtering PRs by `jira`.
:param branches: Preloaded DataFrame with branches in the specified repositories.
:param default_branches: Mapping from repository names to their default branch names.
        :param exclude_inactive: PRs must have at least one event in the given time frame.
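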
:param release_settings: Release match settings of the account.
:param logical_settings: Logical repository settings of the account.
:param updated_min: PRs must have the last update timestamp not older than it.
        :param updated_max: PRs must have the last update timestamp not newer than it.
:param mdb: Metadata db instance.
:param pdb: Precomputed db instance.
:param rdb: Persistentdata db instance.
:param cache: memcached client to cache the collected data.
:param pr_blacklist: completely ignore the existence of these PR node IDs. \
The second tuple element is the ambiguous PRs: released by branch \
while there were no tag releases and the strategy is `tag_or_branch`.
:param truncate: activate the "time machine" and erase everything after `time_to`.
:return: 1. New `PullRequestMiner` with the PRs satisfying to the specified filters. \
2. Precomputed facts about unreleased pull requests. \
This is an optimization which breaks the abstraction a bit. \
3. `matched_bys` - release matches for each repository. \
4. Synchronization for updating the pdb table with merged unreleased PRs. \
Another abstraction leakage that we have to deal with.
"""
date_from_with_time = datetime.combine(date_from, datetime.min.time(), tzinfo=timezone.utc)
date_to_with_time = datetime.combine(date_to, datetime.min.time(), tzinfo=timezone.utc)
assert time_from >= date_from_with_time
assert time_to <= date_to_with_time
dfs, facts, _, _, _, _, _, matched_bys, event = await cls._mine(
date_from, date_to, repositories, participants, labels, jira, with_jira_map, branches,
default_branches, exclude_inactive, release_settings, logical_settings,
updated_min, updated_max, pr_blacklist, truncate, prefixer, account, meta_ids,
mdb, pdb, rdb, cache)
cls._truncate_prs(dfs, time_from, time_to)
return cls(dfs), facts, matched_bys, event
@classmethod
@sentry_span
async def fetch_prs(cls,
time_from: Optional[datetime],
time_to: datetime,
repositories: Union[Set[str], KeysView[str]],
participants: PRParticipants,
labels: LabelFilter,
jira: JIRAFilter,
exclude_inactive: bool,
pr_blacklist: Optional[BinaryExpression],
pr_whitelist: Optional[BinaryExpression],
branches: pd.DataFrame,
dags: Optional[Dict[str, DAG]],
account: int,
meta_ids: Tuple[int, ...],
mdb: Database,
pdb: Database,
cache: Optional[aiomcache.Client],
columns=PullRequest,
updated_min: Optional[datetime] = None,
updated_max: Optional[datetime] = None,
fetch_branch_dags_task: Optional[asyncio.Task] = None,
with_labels: bool = False,
) -> Tuple[pd.DataFrame, Dict[str, DAG], Optional[pd.DataFrame]]:
"""
Query pull requests from mdb that satisfy the given filters.
Note: we cannot filter by regular PR labels here due to the DB schema limitations,
so the caller is responsible for fetching PR labels and filtering by them afterward.
Besides, we cannot filter by participation roles different from AUTHOR and MERGER.
Note: we cannot load PRs that closed before time_from but released between
`time_from` and `time_to`. Hence the caller should map_releases_to_prs separately.
There can be duplicates: PR closed between `time_from` and `time_to` and released
between `time_from` and `time_to`.
Note: we cannot load PRs that closed before time_from but deployed between
`time_from` and `time_to`. Hence the caller should map_deployments_to_prs separately.
There can be duplicates: PR closed between `time_from` and `time_to` and deployed
between `time_from` and `time_to`.
We have to resolve the merge commits of rebased PRs so that they do not appear
force-push-dropped.
:return: pandas DataFrame with the PRs indexed by node_id; \
commit DAGs that contain the branch heads; \
(if was required) DataFrame with PR labels.
"""
assert isinstance(mdb, Database)
assert isinstance(pdb, Database)
pr_list_coro = cls._fetch_prs_by_filters(
time_from, time_to, repositories, participants, labels, jira, exclude_inactive,
pr_blacklist, pr_whitelist, meta_ids, mdb, cache, columns=columns,
updated_min=updated_min, updated_max=updated_max,
)
if columns is not PullRequest and PullRequest.merge_commit_id not in columns and \
PullRequest.merge_commit_sha not in columns:
prs, labels = await pr_list_coro
return prs, dags, labels if with_labels else None
if fetch_branch_dags_task is None:
fetch_branch_dags_task = cls._fetch_branch_dags(
repositories, dags, branches, account, meta_ids, mdb, pdb, cache)
dags, (prs, labels) = await gather(fetch_branch_dags_task, pr_list_coro)
async def load_labels():
if not with_labels:
return None
if labels is not None:
return labels
return await fetch_labels_to_filter(prs.index.values, meta_ids, mdb)
prs, labels = await gather(
cls.mark_dead_prs(prs, branches, dags, meta_ids, mdb, columns),
load_labels(),
)
return prs, dags, labels
@classmethod
async def mark_dead_prs(cls,
prs: pd.DataFrame,
branches: pd.DataFrame,
dags: Dict[str, DAG],
meta_ids: Tuple[int, ...],
mdb: Database,
columns=PullRequest,
) -> pd.DataFrame:
"""
Add and fill "dead" column in the `prs` DataFrame.
        A PR is considered dead (force-push-dropped) if it does not exist in the commit DAG and \
we cannot detect its rebased clone.
"""
prs["dead"] = False
if branches.empty:
return prs
merged_prs = prs.take(np.nonzero((
prs[PullRequest.merged_at.name] <= datetime.now(timezone.utc) - timedelta(hours=1)
).values)[0])
# timedelta(hours=1) must match the `exptime` of `fetch_repository_commits()`
# commits DAGs are cached and may be not fully up to date, so otherwise some PRs may
# appear as wrongly force push dropped; see also: DEV-554
if merged_prs.empty:
return prs
pr_numbers = merged_prs[PullRequest.number.name].values
assert merged_prs.index.nlevels == 1
pr_node_ids = merged_prs.index.values
pr_repos = merged_prs[PullRequest.repository_full_name.name].values
repo_order = np.argsort(pr_repos)
unique_pr_repos, pr_repo_counts = np.unique(pr_repos, return_counts=True)
pr_merge_hashes = \
merged_prs[PullRequest.merge_commit_sha.name].values.astype("S40")[repo_order]
pos = 0
queries = []
dead = []
acc_id_cond = PushCommit.acc_id.in_(meta_ids)
min_commit_date = merged_prs[PullRequest.merged_at.name].min()
committed_date_cond = PushCommit.committed_date >= min_commit_date
substr = sql.func.substr(PushCommit.message, 1, 32)
sqlite = mdb.url.dialect == "sqlite"
for repo, n_prs in zip(unique_pr_repos, pr_repo_counts):
begin_pos = pos
end_pos = pos + n_prs
pos += n_prs
repo_pr_merge_hashes = pr_merge_hashes[begin_pos:end_pos]
dag_hashes = dags[repo][0]
if len(dag_hashes) == 0:
# no branches found in `fetch_repository_commits()`
continue
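            # Merge commit hashes missing from the DAG mark candidate force-push-dropped PRs;
            # we then query mdb for a rebased twin commit whose message starts with the
            # default "Merge pull request #N from ..." text.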
not_found = dag_hashes[
searchsorted_inrange(dag_hashes, repo_pr_merge_hashes)
] != repo_pr_merge_hashes
indexes = repo_order[begin_pos:end_pos][not_found]
dead.extend(dead_node_ids := pr_node_ids[indexes])
repo_cond = PushCommit.repository_full_name == repo
for pr_node_id, n in zip(dead_node_ids, pr_numbers[indexes]):
if sqlite:
# SQLite does not support parameter recycling
acc_id_cond = PushCommit.acc_id.in_(meta_ids)
committed_date_cond = PushCommit.committed_date >= min_commit_date
substr = sql.func.substr(PushCommit.message, 1, 32)
repo_cond = PushCommit.repository_full_name == repo
queries.append(
sql.select([PushCommit.node_id.label("commit_node_id"),
PushCommit.sha.label("sha"),
sql.literal_column("'" + repo + "'").label("repo"),
sql.literal_column(str(pr_node_id)).label("pr_node_id"),
PushCommit.committed_date,
PushCommit.pushed_date])
.where(sql.and_(acc_id_cond,
repo_cond,
committed_date_cond,
substr.like("Merge pull request #%d from %%" % n))))
if not queries:
return prs
prs.loc[dead, "dead"] = True
# we may have MANY queries here and Postgres responds with StatementTooComplexError
# split them by 100-sized batches to stay below the resource limits
batch_size = 100
tasks = []
for batch_index in range(0, len(queries), batch_size):
batch = queries[batch_index:batch_index + batch_size]
if len(batch) == 1:
query = batch[0]
else:
query = sql.union_all(*batch)
tasks.append(read_sql_query(query, mdb, [
"commit_node_id", "sha", "repo", "pr_node_id",
PushCommit.committed_date, PushCommit.pushed_date,
]))
resolveds = await gather(*tasks, op="mark_dead_prs commit SQL UNION ALL-s")
resolved = pd.concat(resolveds)
# look up the candidates in the DAGs
pr_repos = resolved["repo"].values
repo_order = np.argsort(pr_repos)
unique_pr_repos, pr_repo_counts = np.unique(pr_repos, return_counts=True)
pr_merge_hashes = resolved["sha"].values.astype("S")[repo_order]
pos = 0
alive_indexes = []
for repo, n_prs in zip(unique_pr_repos, pr_repo_counts):
begin_pos = pos
end_pos = pos + n_prs
pos += n_prs
repo_pr_merge_hashes = pr_merge_hashes[begin_pos:end_pos]
dag_hashes = dags[repo][0]
found = dag_hashes[
searchsorted_inrange(dag_hashes, repo_pr_merge_hashes)
] == repo_pr_merge_hashes
alive_indexes.extend(repo_order[begin_pos:end_pos][found])
if (resolved := resolved.take(alive_indexes)).empty:
return prs
# take the commit that was committed the latest; if there are multiple, prefer the one
# with pushed_date = null
resolved.sort_values([PushCommit.committed_date.name, PushCommit.pushed_date.name],
ascending=False, inplace=True, na_position="first")
resolved.drop_duplicates("pr_node_id", inplace=True)
# patch the commit IDs and the hashes
alive_node_ids = resolved["pr_node_id"].values
if columns is PullRequest or PullRequest.merge_commit_id in columns:
prs.loc[alive_node_ids, PullRequest.merge_commit_id.name] = \
resolved["commit_node_id"].values
if columns is PullRequest or PullRequest.merge_commit_sha in columns:
prs.loc[alive_node_ids, PullRequest.merge_commit_sha.name] = resolved["sha"].values
prs.loc[alive_node_ids, "dead"] = False
return prs
@classmethod
async def _fetch_branch_dags(cls,
repositories: Iterable[str],
dags: Optional[Dict[str, DAG]],
branches: pd.DataFrame,
account: int,
meta_ids: Tuple[int, ...],
mdb: Database,
pdb: Database,
cache: Optional[aiomcache.Client],
) -> Dict[str, DAG]:
if dags is None:
dags = await fetch_precomputed_commit_history_dags(
repositories, account, pdb, cache)
return await fetch_repository_commits_no_branch_dates(
dags, branches, BRANCH_FETCH_COMMITS_COLUMNS, True, account, meta_ids,
mdb, pdb, cache)
@classmethod
@sentry_span
async def _fetch_prs_by_filters(cls,
time_from: Optional[datetime],
time_to: datetime,
repositories: Set[str],
participants: PRParticipants,
labels: LabelFilter,
jira: JIRAFilter,
exclude_inactive: bool,
pr_blacklist: Optional[BinaryExpression],
pr_whitelist: Optional[BinaryExpression],
meta_ids: Tuple[int, ...],
mdb: Database,
cache: Optional[aiomcache.Client],
columns=PullRequest,
updated_min: Optional[datetime] = None,
updated_max: Optional[datetime] = None,
) -> Tuple[pd.DataFrame, Optional[pd.DataFrame]]:
assert (updated_min is None) == (updated_max is None)
filters = [
(sql.case(
[(PullRequest.closed, PullRequest.closed_at)],
else_=sql.text("'3000-01-01'"), # backed up with a DB index
) >= time_from) if time_from is not None else sql.true(),
PullRequest.created_at < time_to,
PullRequest.acc_id.in_(meta_ids),
PullRequest.hidden.is_(False),
PullRequest.repository_full_name.in_(repositories),
]
if exclude_inactive and updated_min is None:
# this does not provide 100% guarantee because it can be after time_to,
# we need to properly filter later
filters.append(PullRequest.updated_at >= time_from)
if updated_min is not None:
filters.append(PullRequest.updated_at.between(updated_min, updated_max))
if pr_blacklist is not None:
filters.append(pr_blacklist)
if pr_whitelist is not None:
filters.append(pr_whitelist)
if len(participants) == 1:
if PRParticipationKind.AUTHOR in participants:
filters.append(PullRequest.user_login.in_(
participants[PRParticipationKind.AUTHOR]))
elif PRParticipationKind.MERGER in participants:
filters.append(
PullRequest.merged_by_login.in_(participants[PRParticipationKind.MERGER]))
elif len(participants) == 2 and PRParticipationKind.AUTHOR in participants and \
PRParticipationKind.MERGER in participants:
filters.append(sql.or_(
PullRequest.user_login.in_(participants[PRParticipationKind.AUTHOR]),
PullRequest.merged_by_login.in_(participants[PRParticipationKind.MERGER]),
))
if columns is PullRequest:
selected_columns = [PullRequest]
remove_acc_id = False
else:
selected_columns = columns = list(columns)
if remove_acc_id := (PullRequest.acc_id not in selected_columns):
selected_columns.append(PullRequest.acc_id)
if PullRequest.merge_commit_id in columns or PullRequest.merge_commit_sha in columns:
# needed to resolve rebased merge commits
if PullRequest.number not in selected_columns:
selected_columns.append(PullRequest.number)
if labels:
singles, multiples = LabelFilter.split(labels.include)
embedded_labels_query = not multiples
if all_in_labels := (set(singles + list(chain.from_iterable(multiples)))):
filters.append(
sql.exists().where(sql.and_(
PullRequestLabel.acc_id == PullRequest.acc_id,
PullRequestLabel.pull_request_node_id == PullRequest.node_id,
sql.func.lower(PullRequestLabel.name).in_(all_in_labels),
)))
if labels.exclude:
filters.append(
sql.not_(sql.exists().where(sql.and_(
PullRequestLabel.acc_id == PullRequest.acc_id,
PullRequestLabel.pull_request_node_id == PullRequest.node_id,
sql.func.lower(PullRequestLabel.name).in_(labels.exclude),
))))
if not jira:
query = sql.select(selected_columns).where(sql.and_(*filters))
else:
query = await generate_jira_prs_query(
filters, jira, None, mdb, cache, columns=selected_columns)
prs = await read_sql_query(query, mdb, columns, index=PullRequest.node_id.name)
if remove_acc_id:
del prs[PullRequest.acc_id.name]
if PullRequest.closed.name in prs:
cls.adjust_pr_closed_merged_timestamps(prs)
_, first_encounters = np.unique(prs.index.values, return_index=True)
if len(first_encounters) < len(prs):
prs = prs.take(first_encounters)
if not labels or embedded_labels_query:
return prs, None
df_labels = await fetch_labels_to_filter(prs.index, meta_ids, mdb)
left = cls.find_left_by_labels(
prs.index, df_labels.index, df_labels[PullRequestLabel.name.name].values, labels)
prs = prs.take(np.flatnonzero(prs.index.isin(left)))
return prs, df_labels
@staticmethod
def adjust_pr_closed_merged_timestamps(prs_df: pd.DataFrame) -> None:
"""Force set `closed_at` and `merged_at` to NULL if not `closed`. Remove `closed`."""
not_closed = ~prs_df[PullRequest.closed.name].values
prs_df.loc[not_closed, PullRequest.closed_at.name] = pd.NaT
prs_df.loc[not_closed, PullRequest.merged_at.name] = pd.NaT
prs_df.drop(columns=PullRequest.closed.name, inplace=True)
@classmethod
@sentry_span
async def _fetch_inactive_merged_unreleased_prs(
cls,
time_from: datetime,
time_to: datetime,
repos: Union[Set[str], KeysView[str]],
participants: PRParticipants,
labels: LabelFilter,
jira: JIRAFilter,
default_branches: Dict[str, str],
release_settings: ReleaseSettings,
has_logical_repos: bool,
prefixer: Prefixer,
account: int,
meta_ids: Tuple[int, ...],
mdb: Database,
pdb: Database,
cache: Optional[aiomcache.Client]) -> pd.DataFrame:
node_id_map = await discover_inactive_merged_unreleased_prs(
time_from, time_to, repos, participants, labels, default_branches, release_settings,
prefixer, account, pdb, cache)
if not jira:
return await read_sql_query(sql.select([PullRequest])
.where(PullRequest.node_id.in_(node_id_map)),
mdb, PullRequest, index=PullRequest.node_id.name)
df = await cls.filter_jira(node_id_map, jira, meta_ids, mdb, cache)
if not has_logical_repos:
return df
append = defaultdict(list)
node_ids = df.index.values
repository_full_names = df[PullRequest.repository_full_name.name].values
for i, (pr_node_id, physical_repo) in enumerate(zip(node_ids, repository_full_names)):
logical_repos = node_id_map[pr_node_id]
if physical_repo != (first_logical_repo := logical_repos[0]):
repository_full_names[i] = first_logical_repo
for logical_repo in logical_repos[1:]:
append[logical_repo].append(i)
if append:
chunks = []
for logical_repo, indexes in append.items():
subdf = df.take(indexes)
subdf[PullRequest.repository_full_name.name] = logical_repo
chunks.append(subdf)
df = pd.concat([df] + chunks)
return df
@classmethod
@sentry_span
async def filter_jira(cls,
pr_node_ids: Collection[int],
jira: JIRAFilter,
meta_ids: Tuple[int, ...],
mdb: Database,
cache: Optional[aiomcache.Client],
columns=PullRequest) -> pd.DataFrame:
"""Filter PRs by JIRA properties."""
assert jira
filters = [PullRequest.node_id.in_(pr_node_ids)]
query = await generate_jira_prs_query(filters, jira, meta_ids, mdb, cache, columns=columns)
query = query.with_statement_hint(f"Rows(pr repo #{len(pr_node_ids)})")
return await read_sql_query(
query, mdb, columns, index=PullRequest.node_id.name)
@classmethod
@sentry_span
async def fetch_pr_deployments(cls,
pr_node_ids: Iterable[int],
prefixer: Prefixer,
account: int,
pdb: Database,
rdb: Database,
) -> pd.DataFrame:
"""Load the deployments for each PR node ID."""
ghprd = GitHubPullRequestDeployment
cols = [ghprd.pull_request_id, ghprd.deployment_name, ghprd.repository_id]
df = await read_sql_query(
sql.select(cols)
.where(sql.and_(ghprd.acc_id == account,
ghprd.pull_request_id.in_any_values(pr_node_ids))),
con=pdb, columns=cols, index=ghprd.deployment_name.name)
cols = [DeploymentNotification.name,
DeploymentNotification.environment,
DeploymentNotification.conclusion,
DeploymentNotification.finished_at]
details = await read_sql_query(
sql.select(cols)
.where(sql.and_(DeploymentNotification.account_id == account,
DeploymentNotification.name.in_(df.index.values))),
con=rdb, columns=cols, index=DeploymentNotification.name.name,
)
details.index.name = ghprd.deployment_name.name
df = df.join(details)
df.reset_index(inplace=True)
df.set_index([ghprd.pull_request_id.name, ghprd.deployment_name.name], inplace=True)
repo_node_to_name = prefixer.repo_node_to_name.get
df[PullRequest.repository_full_name.name] = \
[repo_node_to_name(r) for r in df[ghprd.repository_id.name].values]
return df
@staticmethod
def _check_participants_compatibility(cached_participants: PRParticipants,
participants: PRParticipants) -> bool:
if not cached_participants:
return True
if not participants:
return False
for k, v in participants.items():
if v - cached_participants.get(k, set()):
return False
return True
@classmethod
@sentry_span
def _remove_spurious_prs(cls, time_from: datetime, dfs: PRDataFrames) -> None:
old_releases = np.where(dfs.releases[Release.published_at.name] < time_from)[0]
if len(old_releases) == 0:
return
cls._drop(dfs, dfs.releases.index[old_releases])
@classmethod
def _drop(cls, dfs: PRDataFrames, pr_ids: Collection[int]) -> None:
if len(pr_ids) == 0:
return
for df in dfs.values():
df.drop(pr_ids,
level=0 if isinstance(df.index, pd.MultiIndex) else None,
inplace=True,
errors="ignore")
@classmethod
@sentry_span
def _find_drop_by_participants(cls,
dfs: PRDataFrames,
participants: PRParticipants,
time_to: Optional[datetime],
) -> pd.Index:
if not participants:
return pd.Index([])
if time_to is not None:
for df_name, col in (("commits", PullRequestCommit.committed_date),
("reviews", PullRequestReview.created_at),
("review_comments", PullRequestReviewComment.created_at),
("review_requests", PullRequestReviewRequest.created_at),
("comments", PullRequestComment.created_at)):
df = getattr(dfs, df_name)
setattr(dfs, df_name, df.take(np.where(df[col.name] < time_to)[0]))
passed = []
dict_iter = (
(dfs.prs, PullRequest.user_login, None, PRParticipationKind.AUTHOR),
(dfs.prs, PullRequest.merged_by_login, PullRequest.merged_at, PRParticipationKind.MERGER), # noqa
(dfs.releases, Release.author, Release.published_at, PRParticipationKind.RELEASER),
)
for df, part_col, date_col, pk in dict_iter:
col_parts = participants.get(pk)
if not col_parts:
continue
mask = df[part_col.name].isin(col_parts)
if time_to is not None and date_col is not None:
mask &= df[date_col.name] < time_to
passed.append(df.index.get_level_values(0).take(np.flatnonzero(mask)))
reviewers = participants.get(PRParticipationKind.REVIEWER)
if reviewers:
ulkr = PullRequestReview.user_login.name
ulkp = PullRequest.user_login.name
user_logins = pd.merge(dfs.reviews[ulkr].droplevel(1), dfs.prs[ulkp],
left_index=True, right_index=True, how="left", copy=False)
ulkr += "_x"
ulkp += "_y"
passed.append(user_logins.index.take(np.where(
(user_logins[ulkr] != user_logins[ulkp]) & user_logins[ulkr].isin(reviewers),
)[0]).unique())
for df, col, pk in (
(dfs.comments, PullRequestComment.user_login, PRParticipationKind.COMMENTER),
(dfs.commits, PullRequestCommit.author_login, PRParticipationKind.COMMIT_AUTHOR),
(dfs.commits, PullRequestCommit.committer_login, PRParticipationKind.COMMIT_COMMITTER)): # noqa
col_parts = participants.get(pk)
if not col_parts:
continue
passed.append(df.index.get_level_values(0).take(np.flatnonzero(
df[col.name].isin(col_parts))).unique())
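        # Participation filters are OR-aggregated: union the collected index sets pairwise
        # and drop only the PRs that matched none of the requested roles.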
while len(passed) > 1:
new_passed = []
for i in range(0, len(passed), 2):
if i + 1 < len(passed):
new_passed.append(passed[i].union(passed[i + 1]))
else:
new_passed.append(passed[i])
passed = new_passed
return dfs.prs.index.get_level_values(0).difference(passed[0])
@classmethod
@sentry_span
def _find_drop_by_labels(cls, dfs: PRDataFrames, labels: LabelFilter) -> pd.Index:
if not labels:
return pd.Index([])
df_labels_index = dfs.labels.index.get_level_values(0)
df_labels_names = dfs.labels[PullRequestLabel.name.name].values
pr_node_ids = dfs.prs.index.get_level_values(0)
left = cls.find_left_by_labels(pr_node_ids, df_labels_index, df_labels_names, labels)
if not labels.include:
return df_labels_index.difference(left)
return pr_node_ids.difference(left)
@classmethod
def find_left_by_labels(cls,
full_index: pd.Index,
df_labels_index: pd.Index,
df_labels_names: Sequence[str],
labels: LabelFilter) -> pd.Index:
"""
Post-filter PRs by their loaded labels.
:param full_index: All the PR node IDs, not just those that correspond to labeled PRs.
:param df_labels_index: (PR node ID, label name) DataFrame index. There may be several \
rows for the same PR node ID.
:param df_labels_names: (PR node ID, label name) DataFrame column.
"""
left_include = left_exclude = None
if labels.include:
singles, multiples = LabelFilter.split(labels.include)
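            # `singles` match if any one label is present; every label of a `multiples`
            # group must be present on the same PR for that group to match.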
left_include = df_labels_index.take(
np.nonzero(np.in1d(df_labels_names, singles))[0],
).unique()
for group in multiples:
passed = df_labels_index
for label in group:
passed = passed.intersection(
df_labels_index.take(np.nonzero(df_labels_names == label)[0]))
if passed.empty:
break
left_include = left_include.union(passed)
if labels.exclude:
left_exclude = full_index.difference(df_labels_index.take(
np.nonzero(np.in1d(df_labels_names, list(labels.exclude)))[0],
).unique())
if labels.include:
if labels.exclude:
left = left_include.intersection(left_exclude)
else:
left = left_include
else:
left = left_exclude
return left
@classmethod
@sentry_span
def _find_drop_by_jira(cls, dfs: PRDataFrames, jira: JIRAFilter) -> pd.Index:
if not jira:
return pd.Index([])
left = []
jira_index = dfs.jiras.index.get_level_values(0)
pr_node_ids = dfs.prs.index.get_level_values(0)
if jira.labels:
df_labels_names = dfs.jiras[Issue.labels.name].values
df_labels_index = pd.Index(np.repeat(jira_index, [len(v) for v in df_labels_names]))
df_labels_names = list(pd.core.common.flatten(df_labels_names))
# if jira.labels.include is empty, we effectively drop all unmapped PRs
# that is the desired behavior
left.append(cls.find_left_by_labels(
pr_node_ids, df_labels_index, df_labels_names, jira.labels))
if jira.epics:
left.append(jira_index.take(np.where(
dfs.jiras["epic"].isin(jira.epics))[0]).unique())
if jira.issue_types:
left.append(dfs.jiras.index.get_level_values(0).take(np.where(
dfs.jiras[Issue.type.name].str.lower().isin(jira.issue_types))[0]).unique())
result = left[0]
for other in left[1:]:
result = result.intersection(other)
return pr_node_ids.difference(result)
@classmethod
@sentry_span
def _find_drop_by_inactive(cls,
dfs: PRDataFrames,
time_from: datetime,
time_to: datetime) -> pd.Index:
activities = [
dfs.prs[PullRequest.created_at.name],
dfs.prs[PullRequest.closed_at.name],
dfs.commits[PullRequestCommit.committed_date.name],
dfs.review_requests[PullRequestReviewRequest.created_at.name],
dfs.reviews[PullRequestReview.created_at.name],
dfs.comments[PullRequestComment.created_at.name],
dfs.releases[Release.published_at.name],
dfs.deployments[DeploymentNotification.finished_at.name],
]
for df in activities:
if df.index.nlevels > 1:
df.index = df.index.droplevel(1)
df.name = "timestamp"
        activities = pd.concat(activities, copy=False)
import os
import subprocess
import re
import json
import time
import pandas as pd
from keyboard import press
from shutil import copy
from distutils.dir_util import copy_tree
class Script(object):
"""Master object for holding and modifying .cmd script settings,
creating .cmd files, and running them through Vensim/Vengine"""
def __init__(self, controlfile):
print("Initialising", self)
for k, v in controlfile['simsettings'].items():
self.__setattr__(k, v if isinstance(v, str) else v.copy())
self.runcmd = "MENU>RUN_OPTIMIZE|o\n"
self.savecmd = f"MENU>VDF2TAB|!|!|{self.savelist}|\n"
self.basename = controlfile['baserunname']
self.cmdtext = []
def copy_model_files(self, dirname):
"""Create subdirectory and copy relevant model files to it,
then change working directory to subdirectory"""
os.makedirs(dirname, exist_ok=True)
os.chdir(f"./{dirname}")
# Copy needed files from the working directory into the sub-directory
for s in ['model', 'payoff', 'optparm', 'sensitivity', 'savelist', 'senssavelist']:
if getattr(self, s):
copy(f"../{getattr(self, s)}", "./")
for slist in ['data', 'changes']:
for file in getattr(self, slist):
copy(f"../{file}", "./")
def add_suffixes(self, settingsfxs):
"""Cleanly modifies .cmd script settings with specified suffixes"""
for s, sfx in settingsfxs.items():
if hasattr(self, s):
self.__setattr__(s, getattr(self, s)[:-4] + sfx + getattr(self, s)[-4:])
def update_changes(self, chglist):
"""Reformats chglist as needed to extend changes settings;
see compile_script for details"""
# Combines and flattens list of paired change names & suffixes
flatlist = [i for s in
[[f"{self.basename}_{n}_{sfx}.out" for n in name]
if isinstance(name, list) else [f"{self.basename}_{name}_{sfx}.out"]
for name, sfx in chglist] for i in s]
self.changes.extend(flatlist)
def write_script(self, scriptname):
"""Compiles and writes actual .cmd script file"""
self.cmdtext.extend(["SPECIAL>NOINTERACTION\n",
f"SPECIAL>LOADMODEL|{self.model}\n"])
for s in ['payoff', 'sensitivity', 'optparm', 'savelist', 'senssavelist']:
if hasattr(self, s):
self.cmdtext.append(f"SIMULATE>{s}|{getattr(self, s)}\n")
if hasattr(self, 'data'):
            datatext = ','.join(self.data)
            self.cmdtext.append(f"SIMULATE>DATA|\"{datatext}\"\n")
if hasattr(self, 'changes'):
self.cmdtext.append(f"SIMULATE>READCIN|{self.changes[0]}\n")
for file in self.changes[1:]:
self.cmdtext.append(f"SIMULATE>ADDCIN|{file}\n")
self.cmdtext.extend(["\n", f"SIMULATE>RUNNAME|{scriptname}\n",
self.runcmd, self.savecmd,
"SPECIAL>CLEARRUNS\n", "MENU>EXIT\n"])
with open(f"{scriptname}.cmd", 'w') as scriptfile:
scriptfile.writelines(self.cmdtext)
def run_script(self, scriptname, controlfile, subdir, logfile):
"""Runs .cmd script file using function robust to
Vengine errors, and returns payoff value if applicable"""
return run_vengine_script(scriptname, controlfile['vensimpath'],
controlfile['timelimit'], '.log', check_opt, logfile)
class CtyScript(Script):
"""Script subclass for country optimization runs"""
def __init__(self, controlfile):
super().__init__(controlfile)
self.genparams = controlfile['genparams'].copy()
def prep_subdir(self, scriptname, controlfile, subdir):
"""Creates subdirectory for country-specific files and output"""
self.copy_model_files(subdir)
copy(f"../{scriptname}.cmd", "./")
self.genparams.append(f"[{subdir}]")
for file in self.changes:
clean_outfile(file, self.genparams)
def run_script(self, scriptname, controlfile, subdir, logfile):
self.prep_subdir(scriptname, controlfile, subdir)
run_vengine_script(scriptname, controlfile['vensimpath'],
controlfile['timelimit'], '.log', check_opt, logfile)
copy(f"./{scriptname}.out", "..") # Copy the .out file to parent directory
os.chdir("..")
class CtyMCScript(CtyScript):
"""Script subclass for country MCMC optimizations"""
def run_script(self, scriptname, controlfile, subdir, logfile):
self.prep_subdir(scriptname, controlfile, subdir)
run_vengine_script(scriptname, controlfile['vensimpath'],
controlfile['timelimit'], '_MCMC_points.tab', check_MC, logfile)
# Create downsample and copy to parent directory
downsample(scriptname, controlfile['samplefrac'])
copy(f"./{scriptname}_MCMC_sample_frac.tab", "..")
copy(f"./{scriptname}.out", "..") # Copy the .out file to parent directory
os.chdir("..")
class LongScript(Script):
"""Script subclass for long calibration runs e.g. all-params"""
def run_script(self, scriptname, controlfile, subdir, logfile):
return run_vengine_script(scriptname, controlfile['vensimpath'],
controlfile['timelimit']*5, '.log', check_opt, logfile)
class ScenScript(Script):
"""Script subclass for scenario analysis with .cin files"""
def update_changes(self, chglist):
scen = chglist.pop()
super().update_changes(chglist)
self.changes.append(scen)
chglist.append(scen)
def run_script(self, scriptname, controlfile, subdir, logfile):
return run_vengine_script(scriptname, controlfile['vensimpath'],
controlfile['timelimit'], '.vdf', check_run, logfile)
class ScenRunScript(ScenScript):
"""Script subclass for scenario analysis runs (not optimizations)"""
def __init__(self, controlfile):
super().__init__(controlfile)
self.runcmd = "MENU>RUN|o\n"
class ScenSensScript(ScenScript):
"""Script subclass for scenario sensitivity analysis"""
def __init__(self, controlfile):
super().__init__(controlfile)
self.sensitivity = self.basename + '_full.vsc'
self.runcmd = "MENU>RUN_SENSITIVITY|o\n"
self.savecmd = f"MENU>SENS2FILE|!|!|%#[\n"
class SMSensScript(ScenScript):
"""Script subclass for submodel sensitivity analysis"""
def __init__(self, controlfile):
super().__init__(controlfile)
self.runcmd = "MENU>RUN_SENSITIVITY|o\n"
self.savecmd = f"MENU>SENS2FILE|!|!|>T\n"
def compile_script(controlfile, scriptclass, name, namesfx, settingsfxs,
logfile, chglist=[], subdir=None):
"""Master function for assembling & running .cmd script
Parameters
----------
controlfile : JSON object
Master control file specifying sim settings, runname, etc.
scriptclass : Script object
Type of script object to instantiate, depending on run type
name : str
namesfx : str
Along with `name`, specifies name added to baserunname for run
settingsfxs : dict of str
Dict of suffixes to append to filenames in simsettings; use to
distinguish versions of e.g. .mdl, .voc, .vpd etc. files
    logfile : str of filename/path
        Logfile to which run progress and errors are recorded
chglist : list of tuples of (str or list, str)
Specifies changes files to be used in script; specify as tuples
corresponding to `name`, `namesfx` of previous run .out to use;
tuples can also take a list of `names` as first element, taking
each with the same second element; `chglist` can also take one
non-tuple str as its last element, which will be added directly
(e.g. for policy scenario .cin files)
subdir : str, optional
Name of subdirectory to create/use for run, if applicable
Returns
-------
float
Payoff value of the script run, if applicable, else 0
"""
mainscript = scriptclass(controlfile)
mainscript.add_suffixes(settingsfxs)
mainscript.update_changes(chglist)
scriptname = f"{mainscript.basename}_{name}_{namesfx}"
mainscript.write_script(scriptname)
return mainscript.run_script(scriptname, controlfile, subdir, logfile)
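# Example compile_script call (illustrative only; the control file `cf`, run names and
# suffixes below are hypothetical):
#   payoff = compile_script(cf, CtyScript, "USA", "iter1",
#                           {'model': '_USA', 'optparm': '_c'}, "run.log",
#                           chglist=[('main', 'iter0')], subdir="USA")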
def write_log(string, logfile):
"""Writes printed script output to a logfile"""
with open(logfile,'a') as f:
f.write(string + "\n")
print(string)
def check_opt(scriptname, logfile):
"""Check function for use with run_vengine_script for optimizations"""
if check_zeroes(scriptname):
write_log(f"Help! {scriptname} is being repressed!", logfile)
return not check_zeroes(scriptname)
def check_MC(scriptname, logfile, threshold=0.01):
"""Check function for use with run_vengine_script for MCMC"""
if abs(compare_payoff(scriptname, logfile)) >= threshold:
write_log(f"{scriptname} is a self-perpetuating autocracy! re-running MC...", logfile)
return False
return True
def check_run(scriptname, logfile):
"""Check function for use with run_vengine_script for normal & sens runs"""
if not os.path.exists(f"./{scriptname}.vdf"):
write_log(f"Help! {scriptname} is being repressed!", logfile)
return os.path.exists(f"./{scriptname}.vdf")
def run_vengine_script(scriptname, vensimpath, timelimit, checkfile, check_func, logfile):
"""Call Vensim with command script using subprocess; monitor output
file for changes to see if Vensim has stalled out, and restart if
it does, or otherwise bugs out; return payoff if applicable"""
write_log(f"Initialising {scriptname}!", logfile)
while True:
proc = subprocess.Popen(f"{vensimpath} \"./{scriptname}.cmd\"")
time.sleep(2)
press('enter') # Necessary to bypass the popup message in Vengine
while True:
try:
# Break out of loop if run completes within specified timelimit
proc.wait(timeout=timelimit)
break
except subprocess.TimeoutExpired:
try:
# If run not complete before timelimit, check to see if still ongoing
write_log(f"Checking for {scriptname}{checkfile}...", logfile)
timelag = time.time() - os.path.getmtime(f"./{scriptname}{checkfile}")
if timelag < (timelimit):
write_log(f"At {time.ctime()}, {round(timelag,3)}s since last output, "
"continuing...", logfile)
continue
else:
# If output isn't being written, kill and restart run
proc.kill()
write_log(f"At {time.ctime()}, {round(timelag,3)}s since last output. "
"Calibration timed out!", logfile)
break
except FileNotFoundError:
# If output isn't being written, kill and restart run
proc.kill()
write_log("Calibration timed out!", logfile)
break
if proc.returncode != 1: # Note that Vengine returns 1 on MENU>EXIT, not 0!
write_log(f"Return code is {proc.returncode}", logfile)
write_log("Vensim! Trying again...", logfile)
continue
try:
# Ensure output is not bugged (specifics depend on type of run)
if check_func(scriptname, logfile):
break
except FileNotFoundError:
write_log("Outfile not found! That's it, I'm dead.", logfile)
pass
time.sleep(2)
if os.path.exists(f"./{scriptname}.out"):
payoffvalue = read_payoff(f"{scriptname}.out")
write_log(f"Payoff for {scriptname} is {payoffvalue}, calibration complete!", logfile)
return payoffvalue
return 0 # Set default payoff value for simtypes that don't generate one
def modify_mdl(country, modelname, newmodelname):
"""Opens .mdl as text, identifies Rgn subscript, and replaces
with appropriate country name"""
with open(modelname,'r') as f:
filedata = f.read()
rgnregex = re.compile(r"Rgn(\s)*?:(\n)?[\s\S]*?(\n\t~)")
newdata = rgnregex.sub(f"Rgn:\n\t{country}\n\t~", filedata)
with open(newmodelname,'w') as f:
f.write(newdata)
def split_voc(vocname, fractolfactor, mcsettings):
"""Splits .VOC file into multiple versions, for main, country, initial,
full model, general MCMC, and country MCMC calibration"""
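    # e.g. "params.voc" yields params_m.voc, params_c.voc, params_i.voc, params_f.voc,
    # params_mmc.voc and params_cmc.voc alongside the original file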
with open(vocname,'r') as f0:
filedata = f0.readlines()
vocmain = [line for line in filedata if line[0] == ':' or '[Rgn]' not in line]
voccty = [line for line in filedata if line[0] == ':' or '[Rgn]' in line]
vocfull = filedata.copy()
vocinit = voccty.copy()
    # Identify and multiply fractional tolerance by fractolfactor for initial runs
for l, line in enumerate(vocinit):
if ':FRACTIONAL_TOLERANCE' in line:
fractol = float(line.split('=')[1])
vocinit[l] = f":FRACTIONAL_TOLERANCE={min(fractol*fractolfactor,0.1)}\n"
# Set restarts to 1 for vocs besides initial
for voc in (vocmain, voccty, vocfull):
for l, line in enumerate(voc):
if ':RESTART_MAX' in line:
voc[l] = ':RESTART_MAX=1\n'
vocmainmc = ''.join(vocmain)
vocctymc = ''.join(voccty)
# Make necessary substitutions for MCMC settings
for k,v in mcsettings.items():
vocmainmc = re.sub(f":{re.escape(k)}=.*", f":{k}={v}", vocmainmc)
vocctymc = re.sub(f":{re.escape(k)}=.*", f":{k}={v}", vocctymc)
# Write various voc versions to separate .voc files
for fname, suffix in zip([vocmain, voccty, vocinit, vocfull, vocmainmc, vocctymc],
['m', 'c', 'i', 'f', 'mmc', 'cmc']):
with open(f"{vocname[:-4]}_{suffix}.voc", 'w') as f:
f.writelines(fname)
def check_zeroes(scriptname):
"""Check if an .out file has any parameters set to zero (indicates Vengine error),
return True if any parameters zeroed OR if # runs = # restarts, and False otherwise"""
filename = f"{scriptname}.out"
with open(filename,'r') as f0:
filedata = f0.readlines()
checklist = []
for line in filedata:
if line[0] != ':':
if ' = 0 ' in line:
checklist.append(True)
else:
checklist.append(False)
elif ':RESTART_MAX' in line:
restarts = re.findall(r'\d+', line)[0]
# Ensure number of simulations != number of restarts
if f"After {restarts} simulations" in filedata[0]:
checklist.append(True)
return any(checklist)
def clean_outfile(outfilename, linekey):
"""Clean an outfile to include only lines containing a string in [linekey]
Note that [linekey] should be a list of strings to keep"""
with open(outfilename,'r') as f:
filedata = f.readlines()
newdata = [line for line in filedata if any(k in line for k in linekey)]
with open(outfilename, 'w') as f:
f.writelines(newdata)
def create_mdls(controlfile, logfile):
"""Creates copies of the base .mdl file for each country in list (and one main copy)
and splits .VOC files"""
model = controlfile['simsettings']['model']
    countrylist = controlfile['countrylist']
    for c in countrylist:
newmodel = model[:-4] + f'_{c}.mdl'
modify_mdl(c, model, newmodel)
mainmodel = model[:-4] + '_main.mdl'
c_list = [f'{c}\\\n\t\t' if i % 8 == 7 else c for i,c in enumerate(countrylist)]
countrylist_str = str(c_list)[1:-1].replace("'","")
modify_mdl(countrylist_str, model, mainmodel)
split_voc(controlfile['simsettings']['optparm'],
controlfile['fractolfactor'], controlfile['mcsettings'])
write_log("Files are ready! moving to calibration", logfile)
def read_payoff(outfile, line=1):
"""Identifies payoff value from .OUT or .REP file -
use line 1 (default) for .OUT, or use line 0 for .REP"""
with open(outfile) as f:
payoffline = f.readlines()[line]
payoffvalue = [float(s) for s in re.findall(r'-?\d+\.?\d+[eE+-]*\d+', payoffline)][0]
return payoffvalue
def compare_payoff(scriptname, logfile):
"""Returns the difference in payoffs between .OUT and .REP file,
which should be zero in most cases except when MCMC bugs out"""
difference = read_payoff(f"{scriptname}.out") - read_payoff(f"{scriptname}.rep", 0)
write_log(f".OUT and .REP payoff difference is {difference}", logfile)
return difference
def increment_seed(vocfile, logfile):
"""Increments random number seed in a .VOC file by 1"""
with open(vocfile, 'r') as f:
vocdata = f.read()
seedregex = re.compile(r':SEED=\d+')
try:
i = int(re.search(r'\d+', re.search(seedregex, vocdata).group()).group())
newdata = seedregex.sub(f":SEED={i+1}", vocdata)
with open(vocfile, 'w') as f:
f.write(newdata)
    except AttributeError:
write_log("No seed found, skipping incrementing.", logfile)
def downsample(scriptname, samplefrac):
"""Downsamples an MCMC _sample tab file according to specified
samplefrac, then deletes MCMC _sample and _points files to free
up disk space (files can be VERY large otherwise!)"""
rawdf = pd.read_csv(f"{scriptname}_MCMC_sample.tab", sep='\t')
newdf = rawdf.sample(frac=samplefrac)
newdf.to_csv(f"{scriptname}_MCMC_sample_frac.tab", sep='\t', index=False)
os.remove(f"{scriptname}_MCMC_sample.tab")
os.remove(f"{scriptname}_MCMC_points.tab")
def merge_samples(baserunname, countrylist):
"""Combines downsampled MCMC outputs into a single sensitivity input
tabfile and creates .vsc file using it for sensitivity control"""
filelist = [f"{baserunname}_{c}_MC_MCMC_sample_frac.tab" for c in countrylist]
dflist = []
for f in filelist:
ctydf = pd.read_csv(f, sep='\t')
dflist.append(ctydf)
    sensdf = pd.concat(dflist, axis=1)
import pytest
from mapping import mappings
from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas as pd
from pandas import Timestamp as TS
import numpy as np
from pandas.tseries.offsets import BDay
@pytest.fixture
def dates():
return pd.Series(
[TS('2016-10-20'), TS('2016-11-21'), TS('2016-12-20')],
index=['CLX16', 'CLZ16', 'CLF17']
)
def test_not_in_roll_one_generic_static_roller(dates):
dt = dates.iloc[0]
contract_dates = dates.iloc[0:2]
sd, ed = (dt + BDay(-8), dt + BDay(-7))
timestamps = pd.date_range(sd, ed, freq='b')
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
trans = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
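    # Roll transition: rows are business-day offsets relative to the anchor date; the
    # front weight falls 1.0 -> 0.5 -> 0.0 while the back weight rises correspondingly.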
midx = pd.MultiIndex.from_product([timestamps, ['CLX16']])
midx.names = ['date', 'contract']
cols = pd.Index([0], name='generic')
wts_exp = pd.DataFrame([1.0, 1.0], index=midx, columns=cols)
# with DatetimeIndex
wts = mappings.roller(timestamps, contract_dates,
mappings.static_transition, transition=trans)
assert_frame_equal(wts, wts_exp)
# with tuple
wts = mappings.roller(tuple(timestamps), contract_dates,
mappings.static_transition, transition=trans)
assert_frame_equal(wts, wts_exp)
def test_not_in_roll_one_generic_static_transition(dates):
contract_dates = dates.iloc[0:2]
ts = dates.iloc[0] + BDay(-8)
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLX16', 1.0, ts)]
assert wts == wts_exp
def test_non_numeric_column_static_transition(dates):
contract_dates = dates.iloc[0:2]
ts = dates.iloc[0] + BDay(-8)
cols = pd.MultiIndex.from_product([["CL1"], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [("CL1", 'CLX16', 1.0, ts)]
assert wts == wts_exp
def test_finished_roll_pre_expiry_static_transition(dates):
contract_dates = dates.iloc[0:2]
ts = dates.iloc[0] + BDay(-2)
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-9, -8]
transition = pd.DataFrame([[1.0, 0.0], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLZ16', 1.0, ts)]
assert wts == wts_exp
def test_not_in_roll_one_generic_filtering_front_contracts_static_transition(dates): # NOQA
contract_dates = dates.iloc[0:2]
ts = dates.iloc[1] + BDay(-8)
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLZ16', 1.0, ts)]
assert wts == wts_exp
def test_roll_with_holiday(dates):
contract_dates = dates.iloc[-2:]
ts = pd.Timestamp("2016-11-17")
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
holidays = [np.datetime64("2016-11-18")]
# the holiday moves the roll schedule up one day, since Friday is
# excluded as a day
wts = mappings.static_transition(ts, contract_dates, transition,
holidays)
wts_exp = [(0, 'CLZ16', 0.5, ts), (0, 'CLF17', 0.5, ts)]
assert wts == wts_exp
def test_not_in_roll_one_generic_zero_weight_back_contract_no_contract_static_transition(dates): # NOQA
contract_dates = dates.iloc[0:1]
ts = dates.iloc[0] + BDay(-8)
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLX16', 1.0, ts)]
assert wts == wts_exp
def test_aggregate_weights():
ts = pd.Timestamp("2015-01-01")
wts_list = [(0, 'CLX16', 1.0, ts), (1, 'CLZ16', 1.0, ts)]
wts = mappings.aggregate_weights(wts_list)
idx = pd.MultiIndex.from_product([[ts], ["CLX16", "CLZ16"]],
names=["date", "contract"])
cols = pd.Index([0, 1], name="generic")
wts_exp = pd.DataFrame([[1.0, 0], [0, 1.0]], index=idx, columns=cols)
assert_frame_equal(wts, wts_exp)
def test_aggregate_weights_drop_date():
ts = pd.Timestamp("2015-01-01")
wts_list = [(0, 'CLX16', 1.0, ts), (1, 'CLZ16', 1.0, ts)]
wts = mappings.aggregate_weights(wts_list, drop_date=True)
idx = pd.Index(["CLX16", "CLZ16"], name="contract")
cols = pd.Index([0, 1], name="generic")
wts_exp = pd.DataFrame([[1.0, 0], [0, 1.0]], index=idx, columns=cols)
assert_frame_equal(wts, wts_exp)
def test_static_bad_transitions(dates):
contract_dates = dates.iloc[[0]]
ts = dates.iloc[0] + BDay(-8)
# transition does not contain 'front' column
cols = pd.MultiIndex.from_product([[0], ['not_front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
with pytest.raises(ValueError):
mappings.static_transition(ts, contract_dates, transition)
# transition does not sum to one across rows
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.0], [0.0, 1.0]],
index=idx, columns=cols)
with pytest.raises(ValueError):
mappings.static_transition(ts, contract_dates, transition)
# transition is not monotonic increasing in back
transition = pd.DataFrame([[0.7, 0.3], [0.8, 0.2], [0.0, 1.0]],
index=idx, columns=cols)
with pytest.raises(ValueError):
mappings.static_transition(ts, contract_dates, transition)
def test_no_roll_date_two_generics_static_transition(dates):
dt = dates.iloc[0]
contract_dates = dates
ts = dt + BDay(-8)
cols = pd.MultiIndex.from_product([[0, 1], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5],
[0.0, 1.0, 0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLX16', 1.0, ts), (1, 'CLZ16', 1.0, ts)]
assert wts == wts_exp
def test_not_in_roll_two_generics_static_roller(dates):
dt = dates.iloc[0]
contract_dates = dates.iloc[0:3]
sd, ed = (dt + BDay(-8), dt + BDay(-7))
timestamps = pd.date_range(sd, ed, freq='b')
cols = pd.MultiIndex.from_product([[0, 1], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5],
[0.0, 1.0, 0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.roller(timestamps, contract_dates,
mappings.static_transition,
transition=transition)
midx = pd.MultiIndex.from_product([timestamps, ['CLX16', 'CLZ16']])
midx.names = ['date', 'contract']
cols = pd.Index([0, 1], name='generic')
wts_exp = pd.DataFrame([[1.0, 0.0], [0.0, 1.0],
[1.0, 0.0], [0.0, 1.0]], index=midx,
columns=cols)
assert_frame_equal(wts, wts_exp)
def test_during_roll_two_generics_one_day_static_transition(dates):
contract_dates = dates
ts = dates.iloc[0] + BDay(-1)
cols = pd.MultiIndex.from_product([[0, 1], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5],
[0.0, 1.0, 0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLX16', 0.5, ts), (0, 'CLZ16', 0.5, ts),
(1, 'CLZ16', 0.5, ts), (1, 'CLF17', 0.5, ts)]
assert wts == wts_exp


def test_invalid_contract_dates():
ts = [pd.Timestamp("2016-10-19")]
cols = pd.MultiIndex.from_product([[0, 1], ['front', 'back']])
idx = [-1, 0]
trans = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0]],
index=idx, columns=cols)
non_unique_index = pd.Series([pd.Timestamp('2016-10-20'),
pd.Timestamp('2016-11-21')],
index=['instr1', 'instr1'])
with pytest.raises(ValueError):
mappings.roller(ts, non_unique_index, mappings.static_transition,
transition=trans)
non_unique_vals = pd.Series([pd.Timestamp('2016-10-20'),
pd.Timestamp('2016-10-20')],
index=['instr1', 'instr2'])
with pytest.raises(ValueError):
mappings.roller(ts, non_unique_vals, mappings.static_transition,
transition=trans)
non_monotonic_vals = pd.Series([pd.Timestamp('2016-10-20'),
pd.Timestamp('2016-10-19')],
index=['instr1', 'instr2'])
with pytest.raises(ValueError):
mappings.static_transition(ts[0], non_monotonic_vals, transition=trans)
not_enough_vals = pd.Series([pd.Timestamp('2016-10-19')],
index=['instr1'])
with pytest.raises(IndexError):
mappings.static_transition(ts[0], not_enough_vals, transition=trans)


def test_during_roll_two_generics_one_day_static_roller(dates):
dt = dates.iloc[0]
contract_dates = dates
timestamps = pd.DatetimeIndex([dt + BDay(-1)])
cols = | pd.MultiIndex.from_product([[0, 1], ['front', 'back']]) | pandas.MultiIndex.from_product |
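
A minimal sketch (not part of the dataset) of how the mapping calls exercised in the tests above fit together. The import path, contract names, and roll dates are assumptions; only the call signatures shown in the tests are relied on.

import pandas as pd
from pandas.tseries.offsets import BDay
from mapping import mappings  # assumed import path for the 'mappings' module used above

# Roll dates per contract: the index (contract names) and the values must both be
# unique, and the values monotonically increasing (see test_invalid_contract_dates).
contract_dates = pd.Series(
    [pd.Timestamp("2016-10-20"), pd.Timestamp("2016-11-21"), pd.Timestamp("2016-12-20")],
    index=["CLX16", "CLZ16", "CLF17"],
)

# One generic (0): fully in the front contract until two business days before the
# roll date, 50/50 one day before, fully rolled into the back contract at offset 0.
cols = pd.MultiIndex.from_product([[0], ["front", "back"]])
transition = pd.DataFrame(
    [[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]], index=[-2, -1, 0], columns=cols
)

# Weights for each business day in the window, one row per (date, contract).
timestamps = pd.date_range(
    contract_dates.iloc[0] - BDay(8), contract_dates.iloc[0] - BDay(1), freq="B"
)
weights = mappings.roller(
    timestamps, contract_dates, mappings.static_transition, transition=transition
)
print(weights)  # MultiIndex (date, contract) rows, one column per generic
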
import pytest
import numpy as np
import pandas as pd
from systrade.trading.brokers import PaperBroker
T_START = pd.to_datetime('2019/07/10-09:30:00:000000', format='%Y/%m/%d-%H:%M:%S:%f')
T_END = pd.to_datetime('2019/07/10-10:00:00:000000', format='%Y/%m/%d-%H:%M:%S:%f')
TIMEINDEX = pd.date_range(start=T_START,end=T_END,freq='1min')
DATA_DF = pd.DataFrame(data={'tick0':np.arange(len(TIMEINDEX)) ,
'tick1':np.arange(len(TIMEINDEX)-1,-1,-1)},
index=TIMEINDEX)
# DATA_DF = pd.DataFrame(data={'tick0':np.arange(len(TIMEINDEX))},
# index=TIMEINDEX)


class TestPaperBroker:
def test_init(self):
testseries = pd.Series(np.arange(10))
with pytest.raises(TypeError):
broker = PaperBroker(testseries)
with pytest.raises(TypeError):
broker = PaperBroker(DATA_DF,slippage_time=1.0)
with pytest.raises(TypeError):
broker = PaperBroker(DATA_DF,transaction_cost=lambda x: x**2)
with pytest.raises(ValueError):
broker = PaperBroker(DATA_DF,transaction_cost=-0.5)
with pytest.raises(TypeError):
broker = PaperBroker(DATA_DF,spread_pct=lambda x: x**2)
with pytest.raises(ValueError):
broker = PaperBroker(DATA_DF,spread_pct=-0.5)
with pytest.raises(ValueError):
broker = PaperBroker(DATA_DF,spread_pct=200)
def test_next_extant_time(self):
broker = PaperBroker(DATA_DF)
t_get = pd.to_datetime('2019/07/10-09:35:05:000000', format='%Y/%m/%d-%H:%M:%S:%f')
t_out = broker.next_extant_time(t_get)
t_expect = pd.to_datetime('2019/07/10-09:36:00:000000', format='%Y/%m/%d-%H:%M:%S:%f')
assert t_out==t_expect
t_get = pd.to_datetime('2019/07/10-11:35:00:000000', format='%Y/%m/%d-%H:%M:%S:%f')
with pytest.raises(ValueError):
t_out = broker.next_extant_time(t_get)
def test_get_timeindex_subset(self):
broker = PaperBroker(DATA_DF)
t0 = pd.to_datetime('2019/07/10-09:29:00:000000', format='%Y/%m/%d-%H:%M:%S:%f')
t1 = pd.to_datetime('2019/07/10-09:36:00:000000', format='%Y/%m/%d-%H:%M:%S:%f')
with pytest.raises(ValueError):
tind = broker.get_timeindex_subset(t0,t1)
t0 = pd.to_datetime('2019/07/10-09:34:00:000000', format='%Y/%m/%d-%H:%M:%S:%f')
t1 = pd.to_datetime('2019/07/10-11:36:00:000000', format='%Y/%m/%d-%H:%M:%S:%f')
with pytest.raises(ValueError):
tind = broker.get_timeindex_subset(t0,t1)
with pytest.raises(TypeError):
tind = broker.get_timeindex_subset(0,t1)
with pytest.raises(TypeError):
tind = broker.get_timeindex_subset(t0,1)
t1 = pd.to_datetime('2019/07/10-09:36:00:000000', format='%Y/%m/%d-%H:%M:%S:%f')
tind = broker.get_timeindex_subset(t0,t1)
print(tind)
print(pd.date_range(t0,t1,freq='1min'))
assert np.array_equal(tind.values,pd.date_range(t0,t1,freq='1min').values)
def test_get_firstlast_times(self):
broker = PaperBroker(DATA_DF)
t0,t1 = broker.get_firstlast_times()
assert t0==T_START
assert t1==T_END
def test_get_tick_list(self):
broker = PaperBroker(DATA_DF)
ticks = broker.get_tick_list()
assert ticks == ['tick0','tick1']
def test_get_price_list(self):
broker = PaperBroker(DATA_DF)
t0 = T_START
t1 = T_START + pd.DateOffset(minutes=5)
with pytest.raises(ValueError):
prices = broker.get_price_list('badtick',t0,t1)
with pytest.raises(ValueError):
prices = broker.get_price_list(['badtick'],t0,t1)
prices = broker.get_price_list('tick0',t0,t1)
assert np.array_equal(prices['tick0'].values , np.arange(6) )
prices = broker.get_price_list(['tick0','tick1'],t0,t1)
assert np.array_equal(prices['tick0'].values , np.arange(6) )
assert np.array_equal(prices['tick1'].values ,
np.arange(len(TIMEINDEX)-1,len(TIMEINDEX)-7,-1) )
def test_get_unslipped_price(self):
broker = PaperBroker(DATA_DF)
t_get = T_START+pd.DateOffset(minutes=5)
with pytest.raises(ValueError):
pp = broker.get_unslipped_price('badtick',t_get)
price = broker.get_unslipped_price('tick0',t_get)
assert price == 5
def test_get_price(self):
broker = PaperBroker(DATA_DF,
slippage_time= | pd.DateOffset(seconds=30) | pandas.DateOffset |
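
A similar hedged sketch for the PaperBroker tests above: the constructor keywords and method calls mirror the tests, while the price data and numeric settings are placeholder assumptions.

import numpy as np
import pandas as pd
from systrade.trading.brokers import PaperBroker

index = pd.date_range("2019-07-10 09:30", "2019-07-10 10:00", freq="1min")
data = pd.DataFrame({"tick0": np.arange(len(index))}, index=index)

broker = PaperBroker(
    data,
    slippage_time=pd.DateOffset(seconds=30),  # a time offset, not a plain float (see test_init)
    transaction_cost=0.1,                     # non-negative number
    spread_pct=0.5,                           # small non-negative percentage
)

t0, t1 = broker.get_firstlast_times()
print(broker.get_tick_list())                 # ['tick0']
window = broker.get_price_list("tick0", t0, t0 + pd.DateOffset(minutes=5))
print(window["tick0"].values)                 # the first six prices
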
import os
from contextlib import contextmanager
import numpy as np
import pandas as pd
import pyarrow
import pyarrow.parquet as pq
import tarfile
from .tables import Table
from .cohort import ProcedureCohort
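
# Maps each raw COMPASS/SWAN CSV to the renamed columns used downstream
# ('days_from_dob', 'name', 'value', 'time', ...); a value of None appears to
# mean that the table's original column names are kept as-is.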
NEW_COLUMNS = {
'Table1_Encounter_Info.csv': None,
'Table2_Flowsheet_status.csv' : {
'flowsheet_days_since_birth' : 'days_from_dob',
'display_name' : 'name',
'flowsheet_value': 'value',
# 'flowsheet_unit' : 'unit',
'flowsheet_time' : 'time',
}, 'Table2_Flowsheet.csv' : {
'flowsheet_days_since_birth' : 'days_from_dob',
'display_name' : 'name',
'flowsheet_value': 'value',
# 'flowsheet_unit' : 'unit',
'flowsheet_time' : 'time',
}, 'Table3_Lab.csv' : {
'lab_collection_days_since_birth' : 'days_from_dob',
'lab_component_name' : 'name',
'lab_result_value': 'value',
'lab_result_unit' : 'unit',
'lab_collection_time' : 'time'
}, 'Table6_Procedures.csv' : {
'order_name' : 'procedure'
},
}


class SWAN():
def __init__(self, root_dir = '/data/compass/SWAN'):
self.root = root_dir
self.raw_dir = os.path.join(self.root,'raw')
self.encounters = Table(os.path.join(self.raw_dir,'Table1_Encounter_Info.csv'))
self.flowsheet = Table(os.path.join(self.raw_dir, 'Table2_Flowsheet.csv'))
self.labs = Table(os.path.join(self.raw_dir, 'Table3_Lab.csv'))
self.procedures = Table(os.path.join(self.raw_dir,'Table6_Procedures.csv'))
self.diagnosis = Table(os.path.join(self.raw_dir,'Table7_DX.csv'))
self.transfusion = Table(os.path.join(self.raw_dir,'Table5_Blood_Transfusion.csv'))
self.medications = Table(os.path.join(self.raw_dir,'Table4_Administered_Medications.csv'))
def sel(self, procedures, labs=None, flowsheet=None, encounter_id=None):
output_col_order = [
'person_id','encounter_id','procedure',
'days_from_dob_procstart', 'days_from_dob','day','time',
'name','value']
out_dfs = []
# Grab list of encounters from procedures, then fetch labs and flowsheet data from those encounters
proc_df = self.procedures.sel(order_name=list(procedures))
proc_df.days_from_dob_procstart = | pd.to_numeric(proc_df.days_from_dob_procstart, errors='coerce') | pandas.to_numeric |
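
A hypothetical usage sketch for the SWAN wrapper above (not part of the dataset): the root directory, procedure and lab names are placeholders, the raw CSV files are assumed to exist on disk, and what sel() returns is not shown in the excerpt.

swan = SWAN(root_dir="/data/compass/SWAN")              # loads the raw Table*.csv files via Table()
cabg_orders = swan.procedures.sel(order_name=["CABG"])  # Table.sel, as used inside SWAN.sel
combined = swan.sel(procedures=["CABG"], labs=["HEMOGLOBIN"])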