prompt (string, 19 to 1.03M chars) | completion (string, 4 to 2.12k chars) | api (string, 8 to 90 chars)
---|---|---|
Each record below gives the prompt code, the expected completion, and the fully qualified API it exercises; the three fields are separated by lines containing a single `|`.
import numpy as np
import statsmodels
import pandas as pd
import statsmodels.formula.api as smf
import statsmodels.stats.api as sms
import sys
pvalues10 = pd.read_csv("data/passage_CpG_iterations/Heteroskedactiy_pvalues_FDR1.csv")
pval_BP = pd.DataFrame(pvalues10["0"])
pval_diff = pd.DataFrame(pvalues10["1"])
mean_db = pd.DataFrame(pvalues10["2"])
fdr_BP = pd.DataFrame(pvalues10["3"])
fdr_diff =
|
pd.DataFrame(pvalues10["4"])
|
pandas.DataFrame
|
# coding: utf-8
import pandas as pd
import numpy as np
# import matplotlib.pyplot as plt
# import seaborn as sns
import glob
import sys
import argparse as argp
# Parses command line argument for filepath of data to clean
parser = argp.ArgumentParser(description='Clean and aggregate Aagos data.')
parser.add_argument("-f", type=str, required=True, help="filepath to where aagos data is stored. Should be the path into the dir where all run dirs are stored")
parser.add_argument("-n", type=int, required=True, help="number of replicates for this run of Aagos")
parser.add_argument("-l", help="only selects the last generation of snapshot orgs", action="store_true")
args = parser.parse_args()
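# Example invocation (illustrative only; the script name and path are hypothetical, the flags are those defined above):
#   python clean_aagos_data.py -f data/aagos_runs -n 30 -l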
filepath = args.f
filename = ''
num_replicates = args.n
if filepath == '':
sys.exit("No filepath was provided! Please provide filepath to Aagos data")
# script should look through fitness, representative_org and statistics file
num_files = 1 # number of files that this data script should clean out
# get path for all data files
files = glob.glob(filepath + '/m_*/*')
# stores all the data in this vector
dataframes_stats = []
for f in files:
# get each replicate
if len(files) < num_replicates:
error_msg = "ERROR: incomplete data. A replicate from run " + f + " is missing!"
sys.exit(error_msg)
filename = f.split('/')[1]
replicate = f.split('/')[-1]
curr = f.split('/')[-2]
mut_rates = curr.split('_')
currdata = glob.glob(f + '/*.csv')
curr_dataframes = []
# for each file in replicate, grab the data
for c in currdata:
if('snapshot' not in c): # skip non-snapshot files; only the snapshot data is used here
#print("skipping non-snapshot file ", c)
continue
all_dat = pd.read_csv(c, index_col="update")
if args.l:
curr_dataframes.append(all_dat.loc[40000:])
else:
curr_dataframes.append(all_dat)
# Error check (from a previous issue): make sure every expected file is present, otherwise exit with an error
if len(curr_dataframes) < num_files:
num_missing = (num_files - len(curr_dataframes))
error_msg = "there are missing files in the data! " + str(num_missing) + " file[s] are missing from directory " + f
sys.exit(error_msg)
#continue
merged =
|
pd.concat(curr_dataframes, axis=1)
|
pandas.concat
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 6 10:25:47 2019
@author: <NAME>
Input files: host and pathogen quantification tables (e.g. pathogen_quant_salmon.tsv, host_quantification_uniquely_mapped_htseq.tsv), raw read statistics, star statistics
Output file: tsv file
Description: Used to collect mapping and quantification statistics from STAR, HTSeq or Salmon results.
"""
import argparse
import pandas as pd
# function to sum up number of mapped reads from quantification table
def mapping_stats(quantification_table_path,gene_attribute,organism):
# read quantification table
col_names = pd.read_csv(quantification_table_path, sep = '\t', nrows=0).columns
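# nrows=0 above reads just the header row, so col_names holds only the column names;
# the gene attribute column is then read as str and all remaining columns as float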
types_dict = {gene_attribute: str}
types_dict.update({col: float for col in col_names if col not in types_dict})
quantification_table = pd.read_csv(quantification_table_path, sep = '\t',index_col=0,dtype=types_dict)
quantification_table = quantification_table.fillna(0)
# initialize dict.
quant_sum = {}
for sample_name in quantification_table: # iterate over columns of quantification_table
if 'NumReads' in sample_name: # for each column (sample) with 'NumReads' sum up the number of reads and add into quant_sum dict.
quant_sum.update({sample_name:sum(quantification_table[sample_name])})
# create data frame from dict.
total_counts_pd = pd.DataFrame.from_dict(quant_sum,orient='index')
total_counts_pd.columns = [organism]
return total_counts_pd
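# Illustrative use (hypothetical path and attribute name):
#   mapping_stats("pathogen_quant_salmon.tsv", "gene_id", "pathogen")
# returns a one-column DataFrame (column "pathogen") indexed by the *_NumReads sample columns.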
parser = argparse.ArgumentParser(description="""collects and generates mapping statistics""")
parser.add_argument("-q_p", "--quantification_table_pathogen", metavar='<quantification_table_pathogen>', default='.', help="Path to pathogen quantification table; Salmon and HTSeq")
parser.add_argument("-q_h", "--quantification_table_host", metavar='<quantification_table_host>', default='.', help="Path to host quantification table; Salmon and HTSeq")
parser.add_argument("-total_raw", "--total_no_raw_reads",metavar='<total_no_raw_reads>',default='.',help="Path to table with total number of raw reads for each sample; Salmon and STAR")
parser.add_argument("-total_processed", "--total_no_processed_reads", metavar='<total_no_processed_reads>', help="Path to table with total number of processed reads by STAR or Salmon")
parser.add_argument("-m_u", "--mapped_uniquely", metavar='<stats mapped uniquely >', default='.', help="Path to STAR mapping stats of uniquely mapped reads; STAR")
parser.add_argument("-m_m", "--mapped_multi", metavar='<stats multi mapped >', default='.', help="Path to STAR mapping stats of multi mapped reads; STAR")
parser.add_argument("-c_m", "--cross_mapped", metavar='<stats cross mapped >', default='.', help="Path to STAR mapping stats of cross_mapped reads; STAR")
parser.add_argument("-star", "--star_stats", metavar='<stats star >', default='.', help="Path to mapping statistics of STAR; HTSeq")
parser.add_argument("-star_pr", "--star_processed", metavar='<stats star_processed >', default='.',help="Path to STAR stats of processed reads; Salmon in alignment-based mode")
parser.add_argument("-a", "--gene_attribute", default='.', help="gene attribute used in quantification; Salmon and HTSeq")
parser.add_argument("-t", "--tool", metavar='<tool>', help="salmon, salmon_alignment, htseq or star")
parser.add_argument("-o", "--output_dir", metavar='<output>', help="output dir",default='.')
args = parser.parse_args()
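# Example invocation (illustrative only; script and file names are hypothetical, the flags are those defined above):
#   python collect_mapping_stats.py -q_p pathogen_quant_salmon.tsv -q_h host_quant.tsv \
#       -total_raw total_raw_reads.tsv -total_processed processed_reads.tsv -a gene_id -t salmon -o results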
# collect statistics for Salmon Selective Alignment mode
if args.tool == 'salmon':
# collect assigned pathogen reads
pathogen_total_counts = mapping_stats(args.quantification_table_pathogen,args.gene_attribute,'pathogen')
# collect assigned host reads
host_total_counts = mapping_stats(args.quantification_table_host,args.gene_attribute,'host')
# combine host and pathogen mapped reads
combined_total_mapped_reads = pd.concat([pathogen_total_counts, host_total_counts], axis=1)
# rename colnames - remove '_NumReads' suffix
new_index1 = [sample.split('_NumReads')[0] for sample in combined_total_mapped_reads.index]
combined_total_mapped_reads.index = new_index1
# calculate total mapped reads
combined_total_mapped_reads['total_mapped_reads'] = combined_total_mapped_reads.sum(axis=1)
if args.total_no_raw_reads.endswith('.tsv'): # if tsv table is defined in total_no_raw_reads argument
# read total number of raw reads
total_reads = pd.read_csv(args.total_no_raw_reads,sep="\t",index_col=0, names=['total_raw_reads'])
# read total number of reads processed by Salmon
processed_reads_salmon = pd.read_csv(args.total_no_processed_reads,sep="\t",index_col=0, names=['processed_reads'])
# combine statistics
results_df = pd.concat([combined_total_mapped_reads, processed_reads_salmon, total_reads], axis=1)
# calculate unmapped reads
results_df['unmapped_reads'] = results_df['processed_reads'] - results_df['total_mapped_reads']
# calculate trimmed reads
results_df['trimmed_reads'] = results_df['total_raw_reads'] - results_df['processed_reads']
else: # if tsv table is not defined in total_no_raw_reads argument
# read total number of reads processed by Salmon
processed_reads_salmon = pd.read_csv(args.total_no_processed_reads,sep="\t",index_col=0, names=['processed_reads'])
results_df =
|
pd.concat([combined_total_mapped_reads, processed_reads_salmon], axis=1)
|
pandas.concat
|
import nose
import unittest
import os
import sys
import warnings
from datetime import datetime
import numpy as np
from pandas import (Series, DataFrame, Panel, MultiIndex, bdate_range,
date_range, Index)
from pandas.io.pytables import HDFStore, get_store, Term, IncompatibilityWarning
import pandas.util.testing as tm
from pandas.tests.test_series import assert_series_equal
from pandas.tests.test_frame import assert_frame_equal
from pandas import concat, Timestamp
try:
import tables
except ImportError:
raise nose.SkipTest('no pytables')
from distutils.version import LooseVersion
_default_compressor = LooseVersion(tables.__version__) >= '2.2' \
and 'blosc' or 'zlib'
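# the legacy "cond and a or b" idiom above selects 'blosc' when PyTables >= 2.2, else 'zlib'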
_multiprocess_can_split_ = False
class TestHDFStore(unittest.TestCase):
path = '__test__.h5'
scratchpath = '__scratch__.h5'
def setUp(self):
self.store = HDFStore(self.path)
def tearDown(self):
self.store.close()
os.remove(self.path)
def test_factory_fun(self):
try:
with get_store(self.scratchpath) as tbl:
raise ValueError('blah')
except ValueError:
pass
with get_store(self.scratchpath) as tbl:
tbl['a'] = tm.makeDataFrame()
with get_store(self.scratchpath) as tbl:
self.assertEquals(len(tbl), 1)
self.assertEquals(type(tbl['a']), DataFrame)
os.remove(self.scratchpath)
def test_keys(self):
self.store['a'] = tm.makeTimeSeries()
self.store['b'] = tm.makeStringSeries()
self.store['c'] = tm.makeDataFrame()
self.store['d'] = tm.makePanel()
self.store['foo/bar'] = tm.makePanel()
self.assertEquals(len(self.store), 5)
self.assert_(set(self.store.keys()) == set(['/a', '/b', '/c', '/d', '/foo/bar']))
def test_repr(self):
repr(self.store)
self.store['a'] = tm.makeTimeSeries()
self.store['b'] = tm.makeStringSeries()
self.store['c'] = tm.makeDataFrame()
self.store['d'] = tm.makePanel()
self.store['foo/bar'] = tm.makePanel()
self.store.append('e', tm.makePanel())
repr(self.store)
str(self.store)
def test_contains(self):
self.store['a'] = tm.makeTimeSeries()
self.store['b'] = tm.makeDataFrame()
self.store['foo/bar'] = tm.makeDataFrame()
self.assert_('a' in self.store)
self.assert_('b' in self.store)
self.assert_('c' not in self.store)
self.assert_('foo/bar' in self.store)
self.assert_('/foo/bar' in self.store)
self.assert_('/foo/b' not in self.store)
self.assert_('bar' not in self.store)
def test_versioning(self):
self.store['a'] = tm.makeTimeSeries()
self.store['b'] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
self.store.remove('df1')
self.store.append('df1', df[:10])
self.store.append('df1', df[10:])
self.assert_(self.store.root.a._v_attrs.pandas_version == '0.10')
self.assert_(self.store.root.b._v_attrs.pandas_version == '0.10')
self.assert_(self.store.root.df1._v_attrs.pandas_version == '0.10')
# write a file and wipe its versioning
self.store.remove('df2')
self.store.append('df2', df)
self.store.get_node('df2')._v_attrs.pandas_version = None
self.store.select('df2')
self.store.select('df2', [ Term('index','>',df.index[2]) ])
def test_meta(self):
raise nose.SkipTest('no meta')
meta = { 'foo' : [ 'I love pandas ' ] }
s = tm.makeTimeSeries()
s.meta = meta
self.store['a'] = s
self.assert_(self.store['a'].meta == meta)
df = tm.makeDataFrame()
df.meta = meta
self.store['b'] = df
self.assert_(self.store['b'].meta == meta)
# this should work, but because slicing doesn't propagate meta it doesn't
self.store.remove('df1')
self.store.append('df1', df[:10])
self.store.append('df1', df[10:])
results = self.store['df1']
#self.assert_(getattr(results,'meta',None) == meta)
# no meta
df = tm.makeDataFrame()
self.store['b'] = df
self.assert_(hasattr(self.store['b'],'meta') == False)
def test_reopen_handle(self):
self.store['a'] = tm.makeTimeSeries()
self.store.open('w', warn=False)
self.assert_(self.store.handle.isopen)
self.assertEquals(len(self.store), 0)
def test_flush(self):
self.store['a'] = tm.makeTimeSeries()
self.store.flush()
def test_get(self):
self.store['a'] = tm.makeTimeSeries()
left = self.store.get('a')
right = self.store['a']
tm.assert_series_equal(left, right)
left = self.store.get('/a')
right = self.store['/a']
tm.assert_series_equal(left, right)
self.assertRaises(KeyError, self.store.get, 'b')
def test_put(self):
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
self.store['a'] = ts
self.store['b'] = df[:10]
self.store['foo/bar/bah'] = df[:10]
self.store['foo'] = df[:10]
self.store['/foo'] = df[:10]
self.store.put('c', df[:10], table=True)
# not OK, not a table
self.assertRaises(ValueError, self.store.put, 'b', df[10:], append=True)
# node does not currently exist, test _is_table_type returns False in
# this case
self.assertRaises(ValueError, self.store.put, 'f', df[10:], append=True)
# OK
self.store.put('c', df[10:], append=True)
# overwrite table
self.store.put('c', df[:10], table=True, append=False)
tm.assert_frame_equal(df[:10], self.store['c'])
def test_put_string_index(self):
index = Index([ "I am a very long string index: %s" % i for i in range(20) ])
s = Series(np.arange(20), index = index)
df = DataFrame({ 'A' : s, 'B' : s })
self.store['a'] = s
tm.assert_series_equal(self.store['a'], s)
self.store['b'] = df
tm.assert_frame_equal(self.store['b'], df)
# mixed length
index = Index(['abcdefghijklmnopqrstuvwxyz1234567890'] + [ "I am a very long string index: %s" % i for i in range(20) ])
s = Series(np.arange(21), index = index)
df = DataFrame({ 'A' : s, 'B' : s })
self.store['a'] = s
tm.assert_series_equal(self.store['a'], s)
self.store['b'] = df
tm.assert_frame_equal(self.store['b'], df)
def test_put_compression(self):
df = tm.makeTimeDataFrame()
self.store.put('c', df, table=True, compression='zlib')
tm.assert_frame_equal(self.store['c'], df)
# can't compress if table=False
self.assertRaises(ValueError, self.store.put, 'b', df,
table=False, compression='zlib')
def test_put_compression_blosc(self):
tm.skip_if_no_package('tables', '2.2', app='blosc support')
df = tm.makeTimeDataFrame()
# can't compress if table=False
self.assertRaises(ValueError, self.store.put, 'b', df,
table=False, compression='blosc')
self.store.put('c', df, table=True, compression='blosc')
tm.assert_frame_equal(self.store['c'], df)
def test_put_integer(self):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal)
def test_append(self):
df = tm.makeTimeDataFrame()
self.store.remove('df1')
self.store.append('df1', df[:10])
self.store.append('df1', df[10:])
tm.assert_frame_equal(self.store['df1'], df)
self.store.remove('df2')
self.store.put('df2', df[:10], table=True)
self.store.append('df2', df[10:])
tm.assert_frame_equal(self.store['df2'], df)
self.store.remove('df3')
self.store.append('/df3', df[:10])
self.store.append('/df3', df[10:])
tm.assert_frame_equal(self.store['df3'], df)
# this is allowed, but you almost always don't want to do it
warnings.filterwarnings('ignore', category=tables.NaturalNameWarning)
self.store.remove('/df3 foo')
self.store.append('/df3 foo', df[:10])
self.store.append('/df3 foo', df[10:])
tm.assert_frame_equal(self.store['df3 foo'], df)
warnings.filterwarnings('always', category=tables.NaturalNameWarning)
# panel
wp = tm.makePanel()
self.store.remove('wp1')
self.store.append('wp1', wp.ix[:,:10,:])
self.store.append('wp1', wp.ix[:,10:,:])
tm.assert_panel_equal(self.store['wp1'], wp)
# ndim
p4d = tm.makePanel4D()
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:])
self.store.append('p4d', p4d.ix[:,:,10:,:])
tm.assert_panel4d_equal(self.store['p4d'], p4d)
# test using axis labels
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=['items','major_axis','minor_axis'])
self.store.append('p4d', p4d.ix[:,:,10:,:], axes=['items','major_axis','minor_axis'])
tm.assert_panel4d_equal(self.store['p4d'], p4d)
# test using different number of items on each axis
p4d2 = p4d.copy()
p4d2['l4'] = p4d['l1']
p4d2['l5'] = p4d['l1']
self.store.remove('p4d2')
self.store.append('p4d2', p4d2, axes=['items','major_axis','minor_axis'])
tm.assert_panel4d_equal(self.store['p4d2'], p4d2)
# test using different order of items on the non-index axes
self.store.remove('wp1')
wp_append1 = wp.ix[:,:10,:]
self.store.append('wp1', wp_append1)
wp_append2 = wp.ix[:,10:,:].reindex(items = wp.items[::-1])
self.store.append('wp1', wp_append2)
tm.assert_panel_equal(self.store['wp1'], wp)
def test_append_frame_column_oriented(self):
# column oriented
df = tm.makeTimeDataFrame()
self.store.remove('df1')
self.store.append('df1', df.ix[:,:2], axes = ['columns'])
self.store.append('df1', df.ix[:,2:])
tm.assert_frame_equal(self.store['df1'], df)
result = self.store.select('df1', 'columns=A')
expected = df.reindex(columns=['A'])
tm.assert_frame_equal(expected, result)
# this isn't supported
self.assertRaises(Exception, self.store.select, 'df1', ('columns=A', Term('index','>',df.index[4])))
# selection on the non-indexable
result = self.store.select('df1', ('columns=A', Term('index','=',df.index[0:4])))
expected = df.reindex(columns=['A'],index=df.index[0:4])
tm.assert_frame_equal(expected, result)
def test_ndim_indexables(self):
""" test using ndim tables in new ways"""
p4d = tm.makePanel4D()
def check_indexers(key, indexers):
for i,idx in enumerate(indexers):
self.assert_(getattr(getattr(self.store.root,key).table.description,idx)._v_pos == i)
# append then change (will take existing schema)
indexers = ['items','major_axis','minor_axis']
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=indexers)
self.store.append('p4d', p4d.ix[:,:,10:,:])
tm.assert_panel4d_equal(self.store.select('p4d'),p4d)
check_indexers('p4d',indexers)
# same as above, but try to append with different axes
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=indexers)
self.store.append('p4d', p4d.ix[:,:,10:,:], axes=['labels','items','major_axis'])
tm.assert_panel4d_equal(self.store.select('p4d'),p4d)
check_indexers('p4d',indexers)
# pass incorrect number of axes
self.store.remove('p4d')
self.assertRaises(Exception, self.store.append, 'p4d', p4d.ix[:,:,:10,:], axes=['major_axis','minor_axis'])
# different than default indexables #1
indexers = ['labels','major_axis','minor_axis']
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=indexers)
self.store.append('p4d', p4d.ix[:,:,10:,:])
tm.assert_panel4d_equal(self.store['p4d'], p4d)
check_indexers('p4d',indexers)
# different than default indexables #2
indexers = ['major_axis','labels','minor_axis']
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=indexers)
self.store.append('p4d', p4d.ix[:,:,10:,:])
tm.assert_panel4d_equal(self.store['p4d'], p4d)
check_indexers('p4d',indexers)
# partial selection
result = self.store.select('p4d',['labels=l1'])
expected = p4d.reindex(labels = ['l1'])
tm.assert_panel4d_equal(result, expected)
# partial selection2
result = self.store.select('p4d',[Term('labels=l1'), Term('items=ItemA'), Term('minor_axis=B')])
expected = p4d.reindex(labels = ['l1'], items = ['ItemA'], minor_axis = ['B'])
tm.assert_panel4d_equal(result, expected)
# non-existent partial selection
result = self.store.select('p4d',[Term('labels=l1'), Term('items=Item1'), Term('minor_axis=B')])
expected = p4d.reindex(labels = ['l1'], items = [], minor_axis = ['B'])
tm.assert_panel4d_equal(result, expected)
def test_append_with_strings(self):
wp = tm.makePanel()
wp2 = wp.rename_axis(dict([ (x,"%s_extra" % x) for x in wp.minor_axis ]), axis = 2)
self.store.append('s1', wp, min_itemsize = 20)
self.store.append('s1', wp2)
expected = concat([ wp, wp2], axis = 2)
expected = expected.reindex(minor_axis = sorted(expected.minor_axis))
tm.assert_panel_equal(self.store['s1'], expected)
# test dict format
self.store.append('s2', wp, min_itemsize = { 'minor_axis' : 20 })
self.store.append('s2', wp2)
expected = concat([ wp, wp2], axis = 2)
expected = expected.reindex(minor_axis = sorted(expected.minor_axis))
tm.assert_panel_equal(self.store['s2'], expected)
# apply the wrong field (similar to #1)
self.store.append('s3', wp, min_itemsize = { 'major_axis' : 20 })
self.assertRaises(Exception, self.store.append, 's3')
# test truncation of bigger strings
self.store.append('s4', wp)
self.assertRaises(Exception, self.store.append, 's4', wp2)
# avoid truncation on elements
df = DataFrame([[123,'asdqwerty'], [345,'dggnhebbsdfbdfb']])
self.store.append('df_big',df, min_itemsize = { 'values' : 1024 })
tm.assert_frame_equal(self.store.select('df_big'), df)
# appending smaller string ok
df2 = DataFrame([[124,'asdqy'], [346,'dggnhefbdfb']])
self.store.append('df_big',df2)
expected = concat([ df, df2 ])
tm.assert_frame_equal(self.store.select('df_big'), expected)
# avoid truncation on elements
df = DataFrame([[123,'as<PASSWORD>'], [345,'dggnhebbsdfbdfb']])
self.store.append('df_big2',df, min_itemsize = { 'values' : 10 })
tm.assert_frame_equal(self.store.select('df_big2'), df)
# bigger string on next append
self.store.append('df_new',df, min_itemsize = { 'values' : 16 })
df_new = DataFrame([[124,'abcdefqhij'], [346, 'abcdefghijklmnopqrtsuvwxyz']])
self.assertRaises(Exception, self.store.append, 'df_new',df_new)
def test_create_table_index(self):
wp = tm.makePanel()
self.store.append('p5', wp)
self.store.create_table_index('p5')
assert(self.store.handle.root.p5.table.cols.major_axis.is_indexed == True)
assert(self.store.handle.root.p5.table.cols.minor_axis.is_indexed == False)
# default optlevels
assert(self.store.handle.root.p5.table.cols.major_axis.index.optlevel == 6)
assert(self.store.handle.root.p5.table.cols.major_axis.index.kind == 'medium')
# let's change the indexing scheme
self.store.create_table_index('p5')
assert(self.store.handle.root.p5.table.cols.major_axis.index.optlevel == 6)
assert(self.store.handle.root.p5.table.cols.major_axis.index.kind == 'medium')
self.store.create_table_index('p5', optlevel=9)
assert(self.store.handle.root.p5.table.cols.major_axis.index.optlevel == 9)
assert(self.store.handle.root.p5.table.cols.major_axis.index.kind == 'medium')
self.store.create_table_index('p5', kind='full')
assert(self.store.handle.root.p5.table.cols.major_axis.index.optlevel == 9)
assert(self.store.handle.root.p5.table.cols.major_axis.index.kind == 'full')
self.store.create_table_index('p5', optlevel=1, kind='light')
assert(self.store.handle.root.p5.table.cols.major_axis.index.optlevel == 1)
assert(self.store.handle.root.p5.table.cols.major_axis.index.kind == 'light')
df = tm.makeTimeDataFrame()
self.store.append('f', df[:10])
self.store.append('f', df[10:])
self.store.create_table_index('f')
# try to index a non-table
self.store.put('f2', df)
self.assertRaises(Exception, self.store.create_table_index, 'f2')
# try to change the version supports flag
from pandas.io import pytables
pytables._table_supports_index = False
self.assertRaises(Exception, self.store.create_table_index, 'f')
# test out some versions
original = tables.__version__
for v in ['2.2','2.2b']:
pytables._table_mod = None
pytables._table_supports_index = False
tables.__version__ = v
self.assertRaises(Exception, self.store.create_table_index, 'f')
for v in ['2.3.1','2.3.1b','2.4dev','2.4',original]:
pytables._table_mod = None
pytables._table_supports_index = False
tables.__version__ = v
self.store.create_table_index('f')
pytables._table_mod = None
pytables._table_supports_index = False
tables.__version__ = original
def test_big_table(self):
raise nose.SkipTest('no big table')
# create and write a big table
wp = Panel(np.random.randn(20, 1000, 1000), items= [ 'Item%s' % i for i in xrange(20) ],
major_axis=date_range('1/1/2000', periods=1000), minor_axis = [ 'E%s' % i for i in xrange(1000) ])
wp.ix[:,100:200,300:400] = np.nan
try:
store = HDFStore(self.scratchpath)
store._debug_memory = True
store.append('wp',wp)
recons = store.select('wp')
finally:
store.close()
os.remove(self.scratchpath)
def test_append_diff_item_order(self):
raise nose.SkipTest('append diff item order')
wp = tm.makePanel()
wp1 = wp.ix[:, :10, :]
wp2 = wp.ix[['ItemC', 'ItemB', 'ItemA'], 10:, :]
self.store.put('panel', wp1, table=True)
self.assertRaises(Exception, self.store.put, 'panel', wp2,
append=True)
def test_table_index_incompatible_dtypes(self):
df1 = DataFrame({'a': [1, 2, 3]})
df2 = DataFrame({'a': [4, 5, 6]},
index=date_range('1/1/2000', periods=3))
self.store.put('frame', df1, table=True)
self.assertRaises(Exception, self.store.put, 'frame', df2,
table=True, append=True)
def test_table_values_dtypes_roundtrip(self):
df1 = DataFrame({'a': [1, 2, 3]}, dtype = 'f8')
self.store.append('df1', df1)
assert df1.dtypes == self.store['df1'].dtypes
df2 = DataFrame({'a': [1, 2, 3]}, dtype = 'i8')
self.store.append('df2', df2)
assert df2.dtypes == self.store['df2'].dtypes
# incompatible dtype
self.assertRaises(Exception, self.store.append, 'df2', df1)
def test_table_mixed_dtypes(self):
# frame
def _make_one_df():
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['bool3'] = True
df['int1'] = 1
df['int2'] = 2
return df.consolidate()
df1 = _make_one_df()
self.store.append('df1_mixed', df1)
tm.assert_frame_equal(self.store.select('df1_mixed'), df1)
# panel
def _make_one_panel():
wp = tm.makePanel()
wp['obj1'] = 'foo'
wp['obj2'] = 'bar'
wp['bool1'] = wp['ItemA'] > 0
wp['bool2'] = wp['ItemB'] > 0
wp['int1'] = 1
wp['int2'] = 2
return wp.consolidate()
p1 = _make_one_panel()
self.store.append('p1_mixed', p1)
tm.assert_panel_equal(self.store.select('p1_mixed'), p1)
# ndim
def _make_one_p4d():
wp = tm.makePanel4D()
wp['obj1'] = 'foo'
wp['obj2'] = 'bar'
wp['bool1'] = wp['l1'] > 0
wp['bool2'] = wp['l2'] > 0
wp['int1'] = 1
wp['int2'] = 2
return wp.consolidate()
p4d = _make_one_p4d()
self.store.append('p4d_mixed', p4d)
tm.assert_panel4d_equal(self.store.select('p4d_mixed'), p4d)
def test_remove(self):
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
self.store['a'] = ts
self.store['b'] = df
self.store.remove('a')
self.assertEquals(len(self.store), 1)
tm.assert_frame_equal(df, self.store['b'])
self.store.remove('b')
self.assertEquals(len(self.store), 0)
# pathing
self.store['a'] = ts
self.store['b/foo'] = df
self.store.remove('foo')
self.store.remove('b/foo')
self.assertEquals(len(self.store), 1)
self.store['a'] = ts
self.store['b/foo'] = df
self.store.remove('b')
self.assertEquals(len(self.store), 1)
# __delitem__
self.store['a'] = ts
self.store['b'] = df
del self.store['a']
del self.store['b']
self.assertEquals(len(self.store), 0)
def test_remove_where(self):
# non-existence
crit1 = Term('index','>','foo')
self.store.remove('a', where=[crit1])
# try to remove non-table (with crit)
# non-table ok (where = None)
wp = tm.makePanel()
self.store.put('wp', wp, table=True)
self.store.remove('wp', [('minor_axis', ['A', 'D'])])
rs = self.store.select('wp')
expected = wp.reindex(minor_axis = ['B','C'])
tm.assert_panel_equal(rs,expected)
# empty where
self.store.remove('wp')
self.store.put('wp', wp, table=True)
# deleted number (entire table)
n = self.store.remove('wp', [])
assert(n == 120)
# non - empty where
self.store.remove('wp')
self.store.put('wp', wp, table=True)
self.assertRaises(Exception, self.store.remove,
'wp', ['foo'])
# selecting non-table with a where
#self.store.put('wp2', wp, table=False)
#self.assertRaises(Exception, self.store.remove,
# 'wp2', [('column', ['A', 'D'])])
def test_remove_crit(self):
wp = tm.makePanel()
# group row removal
date4 = wp.major_axis.take([ 0,1,2,4,5,6,8,9,10 ])
crit4 = Term('major_axis',date4)
self.store.put('wp3', wp, table=True)
n = self.store.remove('wp3', where=[crit4])
assert(n == 36)
result = self.store.select('wp3')
expected = wp.reindex(major_axis = wp.major_axis-date4)
tm.assert_panel_equal(result, expected)
# upper half
self.store.put('wp', wp, table=True)
date = wp.major_axis[len(wp.major_axis) // 2]
crit1 = Term('major_axis','>',date)
crit2 = Term('minor_axis',['A', 'D'])
n = self.store.remove('wp', where=[crit1])
assert(n == 56)
n = self.store.remove('wp', where=[crit2])
assert(n == 32)
result = self.store['wp']
expected = wp.truncate(after=date).reindex(minor=['B', 'C'])
tm.assert_panel_equal(result, expected)
# individual row elements
self.store.put('wp2', wp, table=True)
date1 = wp.major_axis[1:3]
crit1 = Term('major_axis',date1)
self.store.remove('wp2', where=[crit1])
result = self.store.select('wp2')
expected = wp.reindex(major_axis=wp.major_axis-date1)
tm.assert_panel_equal(result, expected)
date2 = wp.major_axis[5]
crit2 = Term('major_axis',date2)
self.store.remove('wp2', where=[crit2])
result = self.store['wp2']
expected = wp.reindex(major_axis=wp.major_axis-date1-Index([date2]))
tm.assert_panel_equal(result, expected)
date3 = [wp.major_axis[7],wp.major_axis[9]]
crit3 = Term('major_axis',date3)
self.store.remove('wp2', where=[crit3])
result = self.store['wp2']
expected = wp.reindex(major_axis=wp.major_axis-date1-Index([date2])-Index(date3))
tm.assert_panel_equal(result, expected)
# corners
self.store.put('wp4', wp, table=True)
n = self.store.remove('wp4', where=[Term('major_axis','>',wp.major_axis[-1])])
result = self.store.select('wp4')
tm.assert_panel_equal(result, wp)
def test_terms(self):
wp = tm.makePanel()
p4d = tm.makePanel4D()
self.store.put('wp', wp, table=True)
self.store.put('p4d', p4d, table=True)
# some invalid terms
terms = [
[ 'minor', ['A','B'] ],
[ 'index', ['20121114'] ],
[ 'index', ['20121114', '20121114'] ],
]
for t in terms:
self.assertRaises(Exception, self.store.select, 'wp', t)
self.assertRaises(Exception, Term.__init__)
self.assertRaises(Exception, Term.__init__, 'blah')
self.assertRaises(Exception, Term.__init__, 'index')
self.assertRaises(Exception, Term.__init__, 'index', '==')
self.assertRaises(Exception, Term.__init__, 'index', '>', 5)
# panel
result = self.store.select('wp',[ Term('major_axis<20000108'), Term('minor_axis', '=', ['A','B']) ])
expected = wp.truncate(after='20000108').reindex(minor=['A', 'B'])
tm.assert_panel_equal(result, expected)
# p4d
result = self.store.select('p4d',[ Term('major_axis<20000108'), Term('minor_axis', '=', ['A','B']), Term('items', '=', ['ItemA','ItemB']) ])
expected = p4d.truncate(after='20000108').reindex(minor=['A', 'B'],items=['ItemA','ItemB'])
tm.assert_panel4d_equal(result, expected)
# valid terms
terms = [
dict(field = 'major_axis', op = '>', value = '20121114'),
('major_axis', '20121114'),
('major_axis', '>', '20121114'),
(('major_axis', ['20121114','20121114']),),
('major_axis', datetime(2012,11,14)),
'major_axis>20121114',
'major_axis>20121114',
'major_axis>20121114',
(('minor_axis', ['A','B']),),
(('minor_axis', ['A','B']),),
((('minor_axis', ['A','B']),),),
(('items', ['ItemA','ItemB']),),
('items=ItemA'),
]
for t in terms:
self.store.select('wp', t)
self.store.select('p4d', t)
# valid for p4d only
terms = [
(('labels', '=', ['l1','l2']),),
Term('labels', '=', ['l1','l2']),
]
for t in terms:
self.store.select('p4d', t)
def test_series(self):
s = tm.makeStringSeries()
self._check_roundtrip(s, tm.assert_series_equal)
ts = tm.makeTimeSeries()
self._check_roundtrip(ts, tm.assert_series_equal)
ts2 = Series(ts.index, Index(ts.index, dtype=object))
self._check_roundtrip(ts2, tm.assert_series_equal)
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object),
dtype=object))
self._check_roundtrip(ts3, tm.assert_series_equal)
def test_sparse_series(self):
s = tm.makeStringSeries()
s[3:5] = np.nan
ss = s.to_sparse()
self._check_roundtrip(ss, tm.assert_series_equal,
check_series_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_roundtrip(ss2, tm.assert_series_equal,
check_series_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_roundtrip(ss3, tm.assert_series_equal,
check_series_type=True)
def test_sparse_frame(self):
s = tm.makeDataFrame()
s.ix[3:5, 1:3] = np.nan
s.ix[8:10, -2] = np.nan
ss = s.to_sparse()
self._check_double_roundtrip(ss, tm.assert_frame_equal,
check_frame_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_double_roundtrip(ss2, tm.assert_frame_equal,
check_frame_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_double_roundtrip(ss3, tm.assert_frame_equal,
check_frame_type=True)
def test_sparse_panel(self):
items = ['x', 'y', 'z']
p = Panel(dict((i, tm.makeDataFrame().ix[:2, :2]) for i in items))
sp = p.to_sparse()
self._check_double_roundtrip(sp, tm.assert_panel_equal,
check_panel_type=True)
sp2 = p.to_sparse(kind='integer')
self._check_double_roundtrip(sp2, tm.assert_panel_equal,
check_panel_type=True)
sp3 = p.to_sparse(fill_value=0)
self._check_double_roundtrip(sp3, tm.assert_panel_equal,
check_panel_type=True)
def test_float_index(self):
# GH #454
index = np.random.randn(10)
s = Series(np.random.randn(10), index=index)
self._check_roundtrip(s, tm.assert_series_equal)
def test_tuple_index(self):
# GH #492
col = np.arange(10)
idx = [(0.,1.), (2., 3.), (4., 5.)]
data = np.random.randn(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
self._check_roundtrip(DF, tm.assert_frame_equal)
def test_index_types(self):
values = np.random.randn(2)
func = lambda l, r : tm.assert_series_equal(l, r, True, True, True)
ser = Series(values, [0, 'y'])
self._check_roundtrip(ser, func)
ser = Series(values, [datetime.today(), 0])
self._check_roundtrip(ser, func)
ser = Series(values, ['y', 0])
self._check_roundtrip(ser, func)
from datetime import date
ser = Series(values, [date.today(), 'a'])
self._check_roundtrip(ser, func)
ser = Series(values, [1.23, 'b'])
self._check_roundtrip(ser, func)
ser = Series(values, [1, 1.53])
self._check_roundtrip(ser, func)
ser = Series(values, [1, 5])
self._check_roundtrip(ser, func)
ser = Series(values, [datetime(2012, 1, 1), datetime(2012, 1, 2)])
self._check_roundtrip(ser, func)
def test_timeseries_preepoch(self):
if sys.version_info[0] == 2 and sys.version_info[1] < 7:
raise nose.SkipTest
dr = bdate_range('1/1/1940', '1/1/1960')
ts = Series(np.random.randn(len(dr)), index=dr)
try:
self._check_roundtrip(ts, tm.assert_series_equal)
except OverflowError:
raise nose.SkipTest('known failure on some windows platforms')
def test_frame(self):
df = tm.makeDataFrame()
# put in some random NAs
df.values[0, 0] = np.nan
df.values[5, 3] = np.nan
self._check_roundtrip_table(df, tm.assert_frame_equal)
self._check_roundtrip(df, tm.assert_frame_equal)
self._check_roundtrip_table(df, tm.assert_frame_equal,
compression=True)
self._check_roundtrip(df, tm.assert_frame_equal,
compression=True)
tdf = tm.makeTimeDataFrame()
self._check_roundtrip(tdf, tm.assert_frame_equal)
self._check_roundtrip(tdf, tm.assert_frame_equal,
compression=True)
# not consolidated
df['foo'] = np.random.randn(len(df))
self.store['df'] = df
recons = self.store['df']
self.assert_(recons._data.is_consolidated())
# empty
self._check_roundtrip(df[:0], tm.assert_frame_equal)
def test_empty_series_frame(self):
s0 = Series()
s1 = Series(name='myseries')
df0 = DataFrame()
df1 = DataFrame(index=['a', 'b', 'c'])
df2 = DataFrame(columns=['d', 'e', 'f'])
self._check_roundtrip(s0, tm.assert_series_equal)
self._check_roundtrip(s1, tm.assert_series_equal)
self._check_roundtrip(df0, tm.assert_frame_equal)
self._check_roundtrip(df1, tm.assert_frame_equal)
self._check_roundtrip(df2, tm.assert_frame_equal)
def test_can_serialize_dates(self):
rng = [x.date() for x in bdate_range('1/1/2000', '1/30/2000')]
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
self._check_roundtrip(frame, tm.assert_frame_equal)
def test_timezones(self):
rng = date_range('1/1/2000', '1/30/2000', tz='US/Eastern')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
try:
store = HDFStore(self.scratchpath)
store['frame'] = frame
recons = store['frame']
self.assert_(recons.index.equals(rng))
self.assertEquals(rng.tz, recons.index.tz)
finally:
store.close()
os.remove(self.scratchpath)
def test_fixed_offset_tz(self):
rng = date_range('1/1/2000 00:00:00-07:00', '1/30/2000 00:00:00-07:00')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
try:
store = HDFStore(self.scratchpath)
store['frame'] = frame
recons = store['frame']
self.assert_(recons.index.equals(rng))
self.assertEquals(rng.tz, recons.index.tz)
finally:
store.close()
os.remove(self.scratchpath)
def test_store_hierarchical(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['foo', 'bar'])
frame = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
self._check_roundtrip(frame, tm.assert_frame_equal)
self._check_roundtrip(frame.T, tm.assert_frame_equal)
self._check_roundtrip(frame['A'], tm.assert_series_equal)
# check that the names are stored
try:
store = HDFStore(self.scratchpath)
store['frame'] = frame
recons = store['frame']
assert(recons.index.names == ['foo', 'bar'])
finally:
store.close()
os.remove(self.scratchpath)
def test_store_index_name(self):
df = tm.makeDataFrame()
df.index.name = 'foo'
try:
store = HDFStore(self.scratchpath)
store['frame'] = df
recons = store['frame']
assert(recons.index.name == 'foo')
finally:
store.close()
os.remove(self.scratchpath)
def test_store_series_name(self):
df = tm.makeDataFrame()
series = df['A']
try:
store = HDFStore(self.scratchpath)
store['series'] = series
recons = store['series']
assert(recons.name == 'A')
finally:
store.close()
os.remove(self.scratchpath)
def test_store_mixed(self):
def _make_one():
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['int1'] = 1
df['int2'] = 2
return df.consolidate()
df1 = _make_one()
df2 = _make_one()
self._check_roundtrip(df1, tm.assert_frame_equal)
self._check_roundtrip(df2, tm.assert_frame_equal)
self.store['obj'] = df1
tm.assert_frame_equal(self.store['obj'], df1)
self.store['obj'] = df2
tm.assert_frame_equal(self.store['obj'], df2)
# check that can store Series of all of these types
self._check_roundtrip(df1['obj1'], tm.assert_series_equal)
self._check_roundtrip(df1['bool1'], tm.assert_series_equal)
self._check_roundtrip(df1['int1'], tm.assert_series_equal)
# try with compression
self._check_roundtrip(df1['obj1'], tm.assert_series_equal,
compression=True)
self._check_roundtrip(df1['bool1'], tm.assert_series_equal,
compression=True)
self._check_roundtrip(df1['int1'], tm.assert_series_equal,
compression=True)
self._check_roundtrip(df1, tm.assert_frame_equal,
compression=True)
def test_wide(self):
wp = tm.makePanel()
self._check_roundtrip(wp, tm.assert_panel_equal)
def test_wide_table(self):
wp = tm.makePanel()
self._check_roundtrip_table(wp, tm.assert_panel_equal)
def test_wide_table_dups(self):
wp = tm.makePanel()
try:
store = HDFStore(self.scratchpath)
store._quiet = True
store.put('panel', wp, table=True)
store.put('panel', wp, table=True, append=True)
recons = store['panel']
tm.assert_panel_equal(recons, wp)
finally:
store.close()
os.remove(self.scratchpath)
def test_long(self):
def _check(left, right):
tm.assert_panel_equal(left.to_panel(), right.to_panel())
wp = tm.makePanel()
self._check_roundtrip(wp.to_frame(), _check)
# empty
# self._check_roundtrip(wp.to_frame()[:0], _check)
def test_longpanel(self):
pass
def test_overwrite_node(self):
self.store['a'] =
|
tm.makeTimeDataFrame()
|
pandas.util.testing.makeTimeDataFrame
|
import os
import pandas as pd
import argparse
mainAppRepo = os.path.dirname(os.path.abspath(__file__)) + '/'
# SITE NAME
def get_site_name_from_site_number(site_number):
sites = pd.read_csv(mainAppRepo + 'data/study_sites.txt',
sep=',', header=0, index_col=0) #\\s+
site_name = sites.index._data[site_number]
return site_name
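# Illustrative use (assumes data/study_sites.txt exists): get_site_name_from_site_number(0)
# returns the name of the first site listed, since sites are looked up by row position.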
# H indicator CSV file
def get_csv_file_with_indicator_for_a_context(site_number, chronicle, approx, folder):
indicator = "H"
site_name = get_site_name_from_site_number(site_number)
file_name = "Exps_" + indicator + "_Indicator_" + site_name + "_Chronicle"+ str(chronicle) + "_Approx" + str(approx) + ".csv"
indicator_file = folder + "/" + site_name + "/" + file_name
try:
dfp = pd.read_csv(indicator_file, sep=",")
except:
print("File does not exist")
dfp =
|
pd.DataFrame()
|
pandas.DataFrame
|
import typing
import pytest
from flytekit.core import context_manager
from flytekit.core.context_manager import FlyteContext, FlyteContextManager, Image, ImageConfig
from flytekit.core.type_engine import TypeEngine
from flytekit.models import literals
from flytekit.models.literals import StructuredDatasetMetadata
from flytekit.models.types import SchemaType, SimpleType, StructuredDatasetType
try:
from typing import Annotated
except ImportError:
from typing_extensions import Annotated
import pandas as pd
import pyarrow as pa
from flytekit import kwtypes
from flytekit.types.structured.structured_dataset import (
PARQUET,
StructuredDataset,
StructuredDatasetDecoder,
StructuredDatasetEncoder,
StructuredDatasetTransformerEngine,
convert_schema_type_to_structured_dataset_type,
extract_cols_and_format,
protocol_prefix,
)
my_cols = kwtypes(w=typing.Dict[str, typing.Dict[str, int]], x=typing.List[typing.List[int]], y=int, z=str)
fields = [("some_int", pa.int32()), ("some_string", pa.string())]
arrow_schema = pa.schema(fields)
serialization_settings = context_manager.SerializationSettings(
project="proj",
domain="dom",
version="123",
image_config=ImageConfig(Image(name="name", fqn="asdf/fdsa", tag="123")),
env={},
)
def test_protocol():
assert protocol_prefix("s3://my-s3-bucket/file") == "s3"
assert protocol_prefix("/file") == "/"
def generate_pandas() -> pd.DataFrame:
return pd.DataFrame({"name": ["Tom", "Joseph"], "age": [20, 22]})
def test_types_pandas():
pt = pd.DataFrame
lt = TypeEngine.to_literal_type(pt)
assert lt.structured_dataset_type is not None
assert lt.structured_dataset_type.format == PARQUET
assert lt.structured_dataset_type.columns == []
def test_annotate_extraction():
xyz = Annotated[pd.DataFrame, "myformat"]
a, b, c, d = extract_cols_and_format(xyz)
assert a is pd.DataFrame
assert b is None
assert c == "myformat"
assert d is None
a, b, c, d = extract_cols_and_format(pd.DataFrame)
assert a is pd.DataFrame
assert b is None
assert c is None
assert d is None
def test_types_annotated():
pt = Annotated[pd.DataFrame, my_cols]
lt = TypeEngine.to_literal_type(pt)
assert len(lt.structured_dataset_type.columns) == 4
assert lt.structured_dataset_type.columns[0].literal_type.map_value_type.map_value_type.simple == SimpleType.INTEGER
assert (
lt.structured_dataset_type.columns[1].literal_type.collection_type.collection_type.simple == SimpleType.INTEGER
)
assert lt.structured_dataset_type.columns[2].literal_type.simple == SimpleType.INTEGER
assert lt.structured_dataset_type.columns[3].literal_type.simple == SimpleType.STRING
pt = Annotated[pd.DataFrame, PARQUET, arrow_schema]
lt = TypeEngine.to_literal_type(pt)
assert lt.structured_dataset_type.external_schema_type == "arrow"
assert "some_string" in str(lt.structured_dataset_type.external_schema_bytes)
pt = Annotated[pd.DataFrame, kwtypes(a=None)]
with pytest.raises(AssertionError, match="type None is currently not supported by StructuredDataset"):
TypeEngine.to_literal_type(pt)
def test_types_sd():
pt = StructuredDataset
lt = TypeEngine.to_literal_type(pt)
assert lt.structured_dataset_type is not None
pt = Annotated[StructuredDataset, my_cols]
lt = TypeEngine.to_literal_type(pt)
assert len(lt.structured_dataset_type.columns) == 4
pt = Annotated[StructuredDataset, my_cols, "csv"]
lt = TypeEngine.to_literal_type(pt)
assert len(lt.structured_dataset_type.columns) == 4
assert lt.structured_dataset_type.format == "csv"
pt = Annotated[StructuredDataset, {}, "csv"]
lt = TypeEngine.to_literal_type(pt)
assert len(lt.structured_dataset_type.columns) == 0
assert lt.structured_dataset_type.format == "csv"
def test_retrieving():
assert StructuredDatasetTransformerEngine.get_encoder(pd.DataFrame, "/", PARQUET) is not None
with pytest.raises(ValueError):
# We don't have a default "" format encoder
StructuredDatasetTransformerEngine.get_encoder(pd.DataFrame, "/", "")
class TempEncoder(StructuredDatasetEncoder):
def __init__(self, protocol):
super().__init__(MyDF, protocol)
def encode(self):
...
StructuredDatasetTransformerEngine.register(TempEncoder("gs"), default_for_type=False)
with pytest.raises(ValueError):
StructuredDatasetTransformerEngine.register(TempEncoder("gs://"), default_for_type=False)
class TempEncoder:
pass
with pytest.raises(TypeError, match="We don't support this type of handler"):
StructuredDatasetTransformerEngine.register(TempEncoder, default_for_type=False)
def test_to_literal():
ctx = FlyteContextManager.current_context()
lt = TypeEngine.to_literal_type(pd.DataFrame)
df = generate_pandas()
fdt = StructuredDatasetTransformerEngine()
lit = fdt.to_literal(ctx, df, python_type=pd.DataFrame, expected=lt)
assert lit.scalar.structured_dataset.metadata.structured_dataset_type.format == PARQUET
assert lit.scalar.structured_dataset.metadata.structured_dataset_type.format == PARQUET
sd_with_literal_and_df = StructuredDataset(df)
sd_with_literal_and_df._literal_sd = lit
with pytest.raises(ValueError, match="Shouldn't have specified both literal"):
fdt.to_literal(ctx, sd_with_literal_and_df, python_type=StructuredDataset, expected=lt)
sd_with_nothing = StructuredDataset()
with pytest.raises(ValueError, match="If dataframe is not specified"):
fdt.to_literal(ctx, sd_with_nothing, python_type=StructuredDataset, expected=lt)
sd_with_uri = StructuredDataset(uri="s3://some/extant/df.parquet")
lt = TypeEngine.to_literal_type(Annotated[StructuredDataset, {}, "new-df-format"])
lit = fdt.to_literal(ctx, sd_with_uri, python_type=StructuredDataset, expected=lt)
assert lit.scalar.structured_dataset.uri == "s3://some/extant/df.parquet"
assert lit.scalar.structured_dataset.metadata.structured_dataset_type.format == "new-df-format"
class MyDF(pd.DataFrame):
...
def test_fill_in_literal_type():
class TempEncoder(StructuredDatasetEncoder):
def __init__(self, fmt: str):
super().__init__(MyDF, "tmpfs://", supported_format=fmt)
def encode(
self,
ctx: FlyteContext,
structured_dataset: StructuredDataset,
structured_dataset_type: StructuredDatasetType,
) -> literals.StructuredDataset:
return literals.StructuredDataset(uri="")
StructuredDatasetTransformerEngine.register(TempEncoder("myavro"), default_for_type=True)
lt = TypeEngine.to_literal_type(MyDF)
assert lt.structured_dataset_type.format == "myavro"
ctx = FlyteContextManager.current_context()
fdt = StructuredDatasetTransformerEngine()
sd = StructuredDataset(dataframe=42)
l = fdt.to_literal(ctx, sd, MyDF, lt)
# Test that the literal type is filled in even though the encode function above doesn't do it.
assert l.scalar.structured_dataset.metadata.structured_dataset_type.format == "myavro"
# Test that looking up encoders/decoders falls back to the "" encoder/decoder
empty_format_temp_encoder = TempEncoder("")
StructuredDatasetTransformerEngine.register(empty_format_temp_encoder, default_for_type=False)
res = StructuredDatasetTransformerEngine.get_encoder(MyDF, "tmpfs", "rando")
assert res is empty_format_temp_encoder
def test_sd():
sd = StructuredDataset(dataframe="hi")
sd.uri = "my uri"
assert sd.file_format == PARQUET
with pytest.raises(ValueError, match="No dataframe type set"):
sd.all()
with pytest.raises(ValueError, match="No dataframe type set."):
sd.iter()
class MockPandasDecodingHandlers(StructuredDatasetDecoder):
def decode(
self,
ctx: FlyteContext,
flyte_value: literals.StructuredDataset,
) -> typing.Union[typing.Generator[pd.DataFrame, None, None]]:
yield pd.DataFrame({"Name": ["Tom", "Joseph"], "Age": [20, 22]})
StructuredDatasetTransformerEngine.register(
MockPandasDecodingHandlers(pd.DataFrame, "tmpfs"), default_for_type=False
)
sd = StructuredDataset()
sd._literal_sd = literals.StructuredDataset(
uri="tmpfs://somewhere", metadata=StructuredDatasetMetadata(StructuredDatasetType(format=""))
)
assert isinstance(sd.open(pd.DataFrame).iter(), typing.Generator)
with pytest.raises(ValueError):
sd.open(pd.DataFrame).all()
class MockPandasDecodingHandlers(StructuredDatasetDecoder):
def decode(
self,
ctx: FlyteContext,
flyte_value: literals.StructuredDataset,
) -> pd.DataFrame:
|
pd.DataFrame({"Name": ["Tom", "Joseph"], "Age": [20, 22]})
|
pandas.DataFrame
|
#!/usr/bin/env python
"""
Spectral conversion tools
"""
import wget
import pathlib
import os.path
import pandas as pd
import numpy as np
from astropy.io.votable import parse, parse_single_table
import matplotlib.pyplot as plt
import ska
def load_svo_transmission(filter_id, path=ska.PATH_CACHE):
"""Load a filter transmission curve from SVO Filter Service
http://svo2.cab.inta-csic.es/theory/fps/index.php?mode=voservice
Parameters
==========
filter_id: str
The filter unique ID (see SVO filter service)
path : str
The path to a directory in which filters are stored
Returns
=======
pd.DataFrame
Filter transmission curve from SVO Filter Profile Service
"""
# Load filter VOTable
VOFilter = load_svo_filter(filter_id)
# Return transmission curve
return pd.DataFrame(data=VOFilter.get_first_table().array.data)
def load_svo_filter(filter_id, path=ska.PATH_CACHE):
"""Load a filter VOTable from SVO Filter Service
http://svo2.cab.inta-csic.es/theory/fps/index.php?mode=voservice
Parameters
==========
filter_id: str
The filter unique ID (see SVO filter service)
path : str
The path to a directory in which filters are stored
Returns
=======
VOFilter : astropy.io.votable.tree.VOTableFile
Filter VOTable from SVO Filter Profile Service
"""
# Download filter VOTable if not present in cache
if not os.path.isfile(ska.PATH_CACHE + "/" + filter_id + ".xml"):
_ = get_svo_filter(filter_id)
# Read, parse, return VOTable
VOFilter = parse(ska.PATH_CACHE + "/" + filter_id + ".xml")
return VOFilter
def get_svo_filter(filter_id, path=ska.PATH_CACHE):
"""Retrieve a filter VOTable from SVO Filter Service
http://svo2.cab.inta-csic.es/theory/fps/index.php?mode=voservice
Parameters
==========
filter_id: str
The filter unique ID (see SVO filter service)
path : str
The path to a directory in which filters will be stored
Returns
=======
str
The path to the filter
"""
# SVO Base URL for queries
url = f"http://svo2.cab.inta-csic.es/theory/fps3/fps.php?ID={filter_id}"
# Parse filter name
parts = filter_id.split("/")
if len(parts) > 1:
rep = path + "/" + parts[0] + "/"
name = parts[1]
else:
rep = path + "/"
name = filter_id
out = rep + name + ".xml"
# Create directory and download VOTable
if not os.path.isfile(out):
pathlib.Path(rep).mkdir(parents=True, exist_ok=True)
wget.download(url, out=out)
# Return path to filter VOTable
return out
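# Typical chain (illustrative filter ID): get_svo_filter() caches the VOTable on disk,
# load_svo_filter() parses it, and load_svo_transmission() returns the curve as a DataFrame:
#   trans = load_svo_transmission("2MASS/2MASS.J")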
def compute_flux(spectrum, filter_id):
"""Computes the flux of a spectrum in a given band.
Parameters
----------
spectrum : pd.DataFrame
Wavelength: in Angstrom
Flux: Flux density (erg/cm2/s/ang)
filter_id: str
The filter unique ID (see SVO filter service)
Returns
-------
float
The computed mean flux density
"""
# Transmission curve
VOFilter = load_svo_filter(filter_id)
trans = load_svo_transmission(filter_id)
# Integration grid is built from the transmission curve
trans = trans[trans["Transmission"] >= 1e-5]
lambda_min = trans["Wavelength"].min()
lambda_max = trans["Wavelength"].max()
# Wavelength range to integrate over
lambda_int = np.arange(lambda_min, lambda_max, 0.5)
# Detector type
# Photon counter
try:
VOFilter.get_field_by_id("DetectorType")
factor = lambda_int
# Energy counter
except:
factor = lambda_int * 0 + 1
# Interpolate over the transmission range
interpol_transmission = np.interp(
lambda_int, trans["Wavelength"], trans["Transmission"]
)
interpol_spectrum = np.interp(lambda_int, spectrum["Wavelength"], spectrum["Flux"])
# Compute the flux by integrating over wavelength.
nom = np.trapz(
interpol_spectrum * interpol_transmission * factor,
lambda_int,
)
denom = np.trapz(interpol_transmission * factor, lambda_int)
flux = nom / denom
return flux
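# In short, compute_flux returns the transmission-weighted mean flux density:
#   flux = trapz(F(lambda) * T(lambda) * w(lambda)) / trapz(T(lambda) * w(lambda))
# with w(lambda) = lambda for photon-counting detectors and w(lambda) = 1 otherwise.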
def compute_color(spectrum, filter_id_1, filter_id_2, phot_sys="AB", vega=None):
"""Computes filter_1-filter_2 color of spectrum in the requested system.
Parameters
==========
spectrum : pd.DataFrame
Source flux density (erg/cm2/s/ang)
filter_id_1: str
The first filter unique ID (see SVO filter service)
filter_id_2: str
The second filter unique ID (see SVO filter service)
phot_sys : str
Photometric system in which to report the color (default=AB)
vega : pd.DataFrame
Spectrum of Vega. Columns must be
Wavelength: in Angstrom
Flux: Flux density (erg/cm2/s/ang)
Returns
=======
float
The requested color
"""
# Compute fluxes in each filter
flux1 = compute_flux(spectrum, filter_id_1)
flux2 = compute_flux(spectrum, filter_id_2)
# Magnitude in AB photometric system
if phot_sys == "AB":
# Get Pivot wavelength for both filters
VOFilter_1 = load_svo_filter(filter_id_1)
pivot_1 = VOFilter_1.get_field_by_id("WavelengthPivot").value
VOFilter_2 = load_svo_filter(filter_id_2)
pivot_2 = VOFilter_2.get_field_by_id("WavelengthPivot").value
# Compute and return the color
return -2.5 * np.log10(flux1 / flux2) - 5 * np.log10(pivot_1 / pivot_2)
# Magnitude in Vega photometric system
elif phot_sys == "Vega":
# Read Vega spectrum if not provided
if vega is None:
vega =
|
pd.read_csv(ska.PATH_VEGA)
|
pandas.read_csv
|
"""General functions for working with observations.
"""
import collections
import os
from shapely.geometry import Point
import pandas as pd
from gisutils import df2shp, project
from mfsetup.obs import make_obsname
from mfsetup.units import convert_length_units, convert_volume_units, convert_time_units
from mapgwm.utils import makedirs, assign_geographic_obsgroups, cull_data_to_active_area
def format_site_ids(iterable, add_leading_zeros=False):
"""Cast site ids to strings"""
str_ids = []
for id in iterable:
if add_leading_zeros:
str_ids.append(format_usgs_sw_site_id(id))
else:
str_ids.append(str(id))
return str_ids
def format_usgs_sw_site_id(stationID):
"""Add leading zeros to NWIS surface water sites, if they are missing.
See https://help.waterdata.usgs.gov/faq/sites/do-station-numbers-have-any-particular-meaning.
Zeros are only added to numeric site numbers less than 15 characters in length.
"""
if not str(stationID).startswith('0') and str(stationID).isdigit() and \
0 < int(str(stationID)[0]) < 10 and len(str(stationID)) < 15:
return '0{}'.format(stationID)
return str(stationID)
def preprocess_obs(data, metadata=None, data_columns=['flow'],
start_date=None, active_area=None,
active_area_id_column=None,
active_area_feature_id=None,
source_crs=4269, dest_crs=5070,
datetime_col='datetime',
site_no_col='site_no',
line_id_col='line_id',
x_coord_col='x',
y_coord_col='y',
name_col='name',
qualifier_column=None,
default_qualifier='measured',
obstype='flow',
include_sites=None,
include_line_ids=None,
source_length_units='ft',
source_time_units='s',
dest_length_units='m',
dest_time_units='d',
geographic_groups=None,
geographic_groups_col=None,
max_obsname_len=None,
add_leading_zeros_to_sw_site_nos=False,
column_renames=None,
outfile=None,
):
"""Preprocess observation data, for example, from NWIS or another data source that
outputs time series in CSV format with site locations and identifiers.
* Data are reprojected from a `source_crs` (Coordinate reference system; assumed to be in geographic coordinates)
to the CRS of the model (`dest_crs`)
* Data are culled to a `start_date` and optionally, a polygon or set of polygons defining the model area
* length and time units are converted to those of the groundwater model.
* Prefixes for observation names (with an optional length limit) that identify the location are generated
* Preliminary observation groups can also be assigned, based on geographic areas defined by polygons
(`geographic_groups` parameter)
Parameters
----------
data : csv file or DataFrame
Time series of observations.
Columns:
===================== ======================================
site_no site identifier
datetime measurement dates/times
x x-coordinate of site
y y-coordinate of site
data_columns Columns of observed values
qualifier_column Optional column with qualifiers for values
===================== ======================================
Notes:
* x and y columns can alternatively be in the metadata table
* data_columns are denoted in `data_columns`; multiple
columns can be included to process base flow and total flow, or
other statistics in tandem
* For example, `qualifier_column` may have "estimated" or "measured"
flags denoting whether streamflows were derived from measured values
or statistical estimates.
metadata : csv file or DataFrame
Observation site information.
May include columns:
================= ================================================================================
site_no site identifier
x x-coordinate of site
y y-coordinate of site
name name of site
line_id_col Identifier for a line in a hydrography dataset that the site is associated with.
================= ================================================================================
Notes:
* other columns in metadata will be passed through to the metadata output
data_columns : list of strings
Columns in data with values or their statistics.
By default, ['q_cfs']
start_date : str (YYYY-mm-dd)
Simulation start date (cull observations before this date)
active_area : str
Shapefile with polygon to cull observations to. Automatically reprojected
to dest_crs if the shapefile includes a .prj file.
by default, None.
active_area_id_column : str, optional
Column in active_area with feature ids.
By default, None, in which case all features are used.
active_area_feature_id : str, optional
ID of feature to use for active area
By default, None, in which case all features are used.
source_crs : obj
Coordinate reference system of the head observation locations.
A Python int, dict, str, or :class:`pyproj.crs.CRS` instance
passed to :meth:`pyproj.crs.CRS.from_user_input`
Can be any of:
- PROJ string
- Dictionary of PROJ parameters
- PROJ keyword arguments for parameters
- JSON string with PROJ parameters
- CRS WKT string
- An authority string [i.e. 'epsg:4326']
- An EPSG integer code [i.e. 4326]
- A tuple of ("auth_name": "auth_code") [i.e ('epsg', '4326')]
- An object with a `to_wkt` method.
- A :class:`pyproj.crs.CRS` class
By default, epsg:4269
dest_crs : obj
Coordinate reference system of the model. Same input types
as ``source_crs``.
By default, epsg:5070
datetime_col : str, optional
Column name in data with observation date/times,
by default 'datetime'
site_no_col : str, optional
Column name in data and metadata with site identifiers,
by default 'site_no'
line_id_col : str, optional
Column name in data or metadata with identifiers for
hydrography lines associated with observation sites.
by default 'line_id'
x_coord_col : str, optional
Column name in data or metadata with x-coordinates,
by default 'x'
y_coord_col : str, optional
Column name in data or metadata with y-coordinates,
by default 'y'
name_col : str, optional
Column name in data or metadata with observation site names,
by default 'name'
qualifier_column : str, optional
Column name in data with observation qualifiers, such
as "measured" or "estimated"
by default None
default_qualifier : str, optional
Default qualifier to populate qualifier_column if it
is None. By default, "measured"
obstype : str, optional
Modflow-6 observation type (e.g. 'downstream-flow' or 'stage').
The last part of the name (after the last hyphen) is used as a suffix in the output
``obsprefix`` column. E.g. 07275000-flow for downstream or upstream-flow at site 07275000.
By default, 'flow'
include_sites : list-like, optional
Limit output to these sites.
by default, None (include all sites)
include_line_ids : list-like, optional
Limit output to these sites, represented by line identifiers.
by default, None (include all sites)
source_length_units : str, 'm3', 'm', 'cubic meters', 'ft3', etc.
Length or volume units of the source data. By default, 'ft'
source_time_units : str, 's', 'seconds', 'days', etc.
Time units of the source data. By default, 's'
dest_length_units : str, 'm3', 'cubic meters', 'ft3', etc.
Length or volume units of the output (model). By default, 'm'
dest_time_units : str, 's', 'seconds', 'days', etc.
Time units of the output (model). By default, 'd'
geographic_groups : file, dict or list-like
Option to group observations by area(s) of interest. Can
be a shapefile, list of shapefiles, or dictionary of shapely polygons.
A 'group' column will be created in the metadata, and observation
sites within each polygon will be assigned the group name
associated with that polygon.
For example::
geographic_groups='../source_data/extents/CompositeHydrographArea.shp'
geographic_groups=['../source_data/extents/CompositeHydrographArea.shp']
geographic_groups={'cha': <shapely Polygon>}
Where 'cha' is an observation group name for observations located within the
the area defined by CompositeHydrographArea.shp. For shapefiles,
group names are provided in a `geographic_groups_col`.
geographic_groups_col : str
Field name in the `geographic_groups` shapefile(s) containing the
observation group names associated with each polygon.
max_obsname_len : int or None
Maximum length for observation name prefix. A limit of 13
allows for a PEST obsnme of 20 characters or less with
<prefix>_yyyydd or <prefix>_<per>d<per>
(e.g. <prefix>_2d1 for a difference between stress periods 2 and 1)
If None, observation names will not be truncated. PEST++ does not have
a limit on observation name length.
add_leading_zeros_to_sw_site_nos : bool
Whether or not to pad site numbers using the
:func:~`mapgwm.swflows.format_usgs_sw_site_id` function.
By default, False.
column_renames : dict, optional
Option to rename columns in the data or metadata that are different than those listed above.
For example, if the data file has a 'SITE_NO' column instead of 'site_no'::
column_renames={'SITE_NO': 'site_no'}
by default None, in which case the renames listed above will be used.
Note that the renames must be the same as those listed above for
:func:`mapgwm.swflows.preprocess_obs` to work.
outfile : str
Where output file will be written. Metadata are written to a file
with the same name, with an additional "_info" suffix prior to
the file extension.
Returns
-------
data : DataFrame
Preprocessed time series
metadata : DataFrame
Preprocessed metadata
References
----------
`The PEST++ Manual <https://github.com/usgs/pestpp/tree/master/documentation>`
Notes
-----
"""
# outputs
if outfile is not None:
outpath, filename = os.path.split(outfile)
makedirs(outpath)
outname, ext = os.path.splitext(outfile)
out_info_csvfile = outname + '_info.csv'
out_data_csvfile = outfile
out_shapefile = outname + '_info.shp'
# read the source data
if not isinstance(data, pd.DataFrame):
df = pd.read_csv(data, dtype={site_no_col: object})
# -*- coding: utf-8 -*-
from collections import OrderedDict
from datetime import date, datetime, timedelta
import numpy as np
import pytest
from pandas.compat import product, range
import pandas as pd
from pandas import (
Categorical, DataFrame, Grouper, Index, MultiIndex, Series, concat,
date_range)
from pandas.api.types import CategoricalDtype as CDT
from pandas.core.reshape.pivot import crosstab, pivot_table
import pandas.util.testing as tm
@pytest.fixture(params=[True, False])
def dropna(request):
return request.param
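# (added note) pytest runs any test that requests the `dropna` fixture twice,
# once with dropna=True and once with dropna=False.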
class TestPivotTable(object):
def setup_method(self, method):
self.data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
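# (added note) self.data holds 11 rows with string key columns A, B, C and
# three random float value columns D, E, F; most tests in this class reuse it.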
def test_pivot_table(self):
index = ['A', 'B']
columns = 'C'
table = pivot_table(self.data, values='D',
index=index, columns=columns)
table2 = self.data.pivot_table(
values='D', index=index, columns=columns)
tm.assert_frame_equal(table, table2)
# this works
pivot_table(self.data, values='D', index=index)
if len(index) > 1:
assert table.index.names == tuple(index)
else:
assert table.index.name == index[0]
if len(columns) > 1:
assert table.columns.names == columns
else:
assert table.columns.name == columns[0]
expected = self.data.groupby(
index + [columns])['D'].agg(np.mean).unstack()
tm.assert_frame_equal(table, expected)
def test_pivot_table_nocols(self):
df = DataFrame({'rows': ['a', 'b', 'c'],
'cols': ['x', 'y', 'z'],
'values': [1, 2, 3]})
rs = df.pivot_table(columns='cols', aggfunc=np.sum)
xp = df.pivot_table(index='cols', aggfunc=np.sum).T
tm.assert_frame_equal(rs, xp)
rs = df.pivot_table(columns='cols', aggfunc={'values': 'mean'})
xp = df.pivot_table(index='cols', aggfunc={'values': 'mean'}).T
tm.assert_frame_equal(rs, xp)
def test_pivot_table_dropna(self):
df = DataFrame({'amount': {0: 60000, 1: 100000, 2: 50000, 3: 30000},
'customer': {0: 'A', 1: 'A', 2: 'B', 3: 'C'},
'month': {0: 201307, 1: 201309, 2: 201308, 3: 201310},
'product': {0: 'a', 1: 'b', 2: 'c', 3: 'd'},
'quantity': {0: 2000000, 1: 500000,
2: 1000000, 3: 1000000}})
pv_col = df.pivot_table('quantity', 'month', [
'customer', 'product'], dropna=False)
pv_ind = df.pivot_table(
'quantity', ['customer', 'product'], 'month', dropna=False)
m = MultiIndex.from_tuples([('A', 'a'), ('A', 'b'), ('A', 'c'),
('A', 'd'), ('B', 'a'), ('B', 'b'),
('B', 'c'), ('B', 'd'), ('C', 'a'),
('C', 'b'), ('C', 'c'), ('C', 'd')],
names=['customer', 'product'])
tm.assert_index_equal(pv_col.columns, m)
tm.assert_index_equal(pv_ind.index, m)
def test_pivot_table_categorical(self):
cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
result = pd.pivot_table(df, values='values', index=['A', 'B'],
dropna=True)
exp_index = pd.MultiIndex.from_arrays(
[cat1, cat2],
names=['A', 'B'])
expected = DataFrame(
{'values': [1, 2, 3, 4]},
index=exp_index)
tm.assert_frame_equal(result, expected)
def test_pivot_table_dropna_categoricals(self, dropna):
# GH 15193
categories = ['a', 'b', 'c', 'd']
df = DataFrame({'A': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c'],
'B': [1, 2, 3, 1, 2, 3, 1, 2, 3],
'C': range(0, 9)})
df['A'] = df['A'].astype(CDT(categories, ordered=False))
result = df.pivot_table(index='B', columns='A', values='C',
dropna=dropna)
expected_columns = Series(['a', 'b', 'c'], name='A')
expected_columns = expected_columns.astype(
CDT(categories, ordered=False))
expected_index = Series([1, 2, 3], name='B')
expected = DataFrame([[0, 3, 6],
[1, 4, 7],
[2, 5, 8]],
index=expected_index,
columns=expected_columns,)
if not dropna:
# add back the non observed to compare
expected = expected.reindex(
columns=Categorical(categories)).astype('float')
tm.assert_frame_equal(result, expected)
def test_pivot_with_non_observable_dropna(self, dropna):
# gh-21133
df = pd.DataFrame(
{'A': pd.Categorical([np.nan, 'low', 'high', 'low', 'high'],
categories=['low', 'high'],
ordered=True),
'B': range(5)})
result = df.pivot_table(index='A', values='B', dropna=dropna)
expected = pd.DataFrame(
{'B': [2, 3]},
index=pd.Index(
pd.Categorical.from_codes([0, 1],
categories=['low', 'high'],
ordered=True),
name='A'))
tm.assert_frame_equal(result, expected)
# gh-21378
df = pd.DataFrame(
{'A': pd.Categorical(['left', 'low', 'high', 'low', 'high'],
categories=['low', 'high', 'left'],
ordered=True),
'B': range(5)})
result = df.pivot_table(index='A', values='B', dropna=dropna)
expected = pd.DataFrame(
{'B': [2, 3, 0]},
index=pd.Index(
pd.Categorical.from_codes([0, 1, 2],
categories=['low', 'high', 'left'],
ordered=True),
name='A'))
tm.assert_frame_equal(result, expected)
def test_pass_array(self):
result = self.data.pivot_table(
'D', index=self.data.A, columns=self.data.C)
expected = self.data.pivot_table('D', index='A', columns='C')
tm.assert_frame_equal(result, expected)
def test_pass_function(self):
result = self.data.pivot_table('D', index=lambda x: x // 5,
columns=self.data.C)
expected = self.data.pivot_table('D', index=self.data.index // 5,
columns='C')
tm.assert_frame_equal(result, expected)
def test_pivot_table_multiple(self):
index = ['A', 'B']
columns = 'C'
table = pivot_table(self.data, index=index, columns=columns)
expected = self.data.groupby(index + [columns]).agg(np.mean).unstack()
tm.assert_frame_equal(table, expected)
def test_pivot_dtypes(self):
# can convert dtypes
f = DataFrame({'a': ['cat', 'bat', 'cat', 'bat'], 'v': [
1, 2, 3, 4], 'i': ['a', 'b', 'a', 'b']})
assert f.dtypes['v'] == 'int64'
z = pivot_table(f, values='v', index=['a'], columns=[
'i'], fill_value=0, aggfunc=np.sum)
result = z.get_dtype_counts()
expected = Series(dict(int64=2))
tm.assert_series_equal(result, expected)
# cannot convert dtypes
f = DataFrame({'a': ['cat', 'bat', 'cat', 'bat'], 'v': [
1.5, 2.5, 3.5, 4.5], 'i': ['a', 'b', 'a', 'b']})
assert f.dtypes['v'] == 'float64'
z = pivot_table(f, values='v', index=['a'], columns=[
'i'], fill_value=0, aggfunc=np.mean)
result = z.get_dtype_counts()
expected = Series(dict(float64=2))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('columns,values',
[('bool1', ['float1', 'float2']),
('bool1', ['float1', 'float2', 'bool1']),
('bool2', ['float1', 'float2', 'bool1'])])
def test_pivot_preserve_dtypes(self, columns, values):
# GH 7142 regression test
v = np.arange(5, dtype=np.float64)
df = DataFrame({'float1': v, 'float2': v + 2.0,
'bool1': v <= 2, 'bool2': v <= 3})
df_res = df.reset_index().pivot_table(
index='index', columns=columns, values=values)
result = dict(df_res.dtypes)
expected = {col: np.dtype('O') if col[0].startswith('b')
else np.dtype('float64') for col in df_res}
assert result == expected
def test_pivot_no_values(self):
# GH 14380
idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-01-02',
'2011-01-01', '2011-01-02'])
df = pd.DataFrame({'A': [1, 2, 3, 4, 5]},
index=idx)
res = df.pivot_table(index=df.index.month, columns=df.index.day)
exp_columns = pd.MultiIndex.from_tuples([('A', 1), ('A', 2)])
exp = pd.DataFrame([[2.5, 4.0], [2.0, np.nan]],
index=[1, 2], columns=exp_columns)
tm.assert_frame_equal(res, exp)
df = pd.DataFrame({'A': [1, 2, 3, 4, 5],
'dt': pd.date_range('2011-01-01', freq='D',
periods=5)},
index=idx)
res = df.pivot_table(index=df.index.month,
columns=pd.Grouper(key='dt', freq='M'))
exp_columns = pd.MultiIndex.from_tuples([('A',
pd.Timestamp('2011-01-31'))])
exp_columns.names = [None, 'dt']
exp = pd.DataFrame([3.25, 2.0],
index=[1, 2], columns=exp_columns)
tm.assert_frame_equal(res, exp)
res = df.pivot_table(index=pd.Grouper(freq='A'),
columns=pd.Grouper(key='dt', freq='M'))
exp = pd.DataFrame([3],
index=pd.DatetimeIndex(['2011-12-31']),
columns=exp_columns)
tm.assert_frame_equal(res, exp)
def test_pivot_multi_values(self):
result = pivot_table(self.data, values=['D', 'E'],
index='A', columns=['B', 'C'], fill_value=0)
expected = pivot_table(self.data.drop(['F'], axis=1),
index='A', columns=['B', 'C'], fill_value=0)
tm.assert_frame_equal(result, expected)
def test_pivot_multi_functions(self):
f = lambda func: pivot_table(self.data, values=['D', 'E'],
index=['A', 'B'], columns='C',
aggfunc=func)
result = f([np.mean, np.std])
means = f(np.mean)
stds = f(np.std)
expected = concat([means, stds], keys=['mean', 'std'], axis=1)
tm.assert_frame_equal(result, expected)
# margins not supported??
f = lambda func: pivot_table(self.data, values=['D', 'E'],
index=['A', 'B'], columns='C',
aggfunc=func, margins=True)
result = f([np.mean, np.std])
means = f(np.mean)
stds = f(np.std)
expected = concat([means, stds], keys=['mean', 'std'], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_index_with_nan(self, method):
# GH 3588
nan = np.nan
df = DataFrame({'a': ['R1', 'R2', nan, 'R4'],
'b': ['C1', 'C2', 'C3', 'C4'],
'c': [10, 15, 17, 20]})
if method:
result = df.pivot('a', 'b', 'c')
else:
result = pd.pivot(df, 'a', 'b', 'c')
expected = DataFrame([[nan, nan, 17, nan], [10, nan, nan, nan],
[nan, 15, nan, nan], [nan, nan, nan, 20]],
index=Index([nan, 'R1', 'R2', 'R4'], name='a'),
columns=Index(['C1', 'C2', 'C3', 'C4'], name='b'))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(df.pivot('b', 'a', 'c'), expected.T)
# GH9491
df = DataFrame({'a': pd.date_range('2014-02-01', periods=6, freq='D'),
'c': 100 + np.arange(6)})
df['b'] = df['a'] - pd.Timestamp('2014-02-02')
df.loc[1, 'a'] = df.loc[3, 'a'] = nan
df.loc[1, 'b'] = df.loc[4, 'b'] = nan
if method:
pv = df.pivot('a', 'b', 'c')
else:
pv = pd.pivot(df, 'a', 'b', 'c')
assert pv.notna().values.sum() == len(df)
for _, row in df.iterrows():
assert pv.loc[row['a'], row['b']] == row['c']
if method:
result = df.pivot('b', 'a', 'c')
else:
result = pd.pivot(df, 'b', 'a', 'c')
tm.assert_frame_equal(result, pv.T)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_tz(self, method):
# GH 5878
df = DataFrame({'dt1': [datetime(2013, 1, 1, 9, 0),
datetime(2013, 1, 2, 9, 0),
datetime(2013, 1, 1, 9, 0),
datetime(2013, 1, 2, 9, 0)],
'dt2': [datetime(2014, 1, 1, 9, 0),
datetime(2014, 1, 1, 9, 0),
datetime(2014, 1, 2, 9, 0),
datetime(2014, 1, 2, 9, 0)],
'data1': np.arange(4, dtype='int64'),
'data2': np.arange(4, dtype='int64')})
df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d, tz='US/Pacific'))
df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d, tz='Asia/Tokyo'))
exp_col1 = Index(['data1', 'data1', 'data2', 'data2'])
exp_col2 = pd.DatetimeIndex(['2014/01/01 09:00',
'2014/01/02 09:00'] * 2,
name='dt2', tz='Asia/Tokyo')
exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 2, 0, 2], [1, 3, 1, 3]],
index=pd.DatetimeIndex(['2013/01/01 09:00',
'2013/01/02 09:00'],
name='dt1',
tz='US/Pacific'),
columns=exp_col)
if method:
pv = df.pivot(index='dt1', columns='dt2')
else:
pv = pd.pivot(df, index='dt1', columns='dt2')
tm.assert_frame_equal(pv, expected)
expected = DataFrame([[0, 2], [1, 3]],
index=pd.DatetimeIndex(['2013/01/01 09:00',
'2013/01/02 09:00'],
name='dt1',
tz='US/Pacific'),
columns=pd.DatetimeIndex(['2014/01/01 09:00',
'2014/01/02 09:00'],
name='dt2',
tz='Asia/Tokyo'))
if method:
pv = df.pivot(index='dt1', columns='dt2', values='data1')
else:
pv = pd.pivot(df, index='dt1', columns='dt2', values='data1')
tm.assert_frame_equal(pv, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_periods(self, method):
df = DataFrame({'p1': [pd.Period('2013-01-01', 'D'),
pd.Period('2013-01-02', 'D'),
pd.Period('2013-01-01', 'D'),
pd.Period('2013-01-02', 'D')],
'p2': [pd.Period('2013-01', 'M'),
pd.Period('2013-01', 'M'),
pd.Period('2013-02', 'M'),
pd.Period('2013-02', 'M')],
'data1': np.arange(4, dtype='int64'),
'data2': np.arange(4, dtype='int64')})
exp_col1 = Index(['data1', 'data1', 'data2', 'data2'])
exp_col2 = pd.PeriodIndex(['2013-01', '2013-02'] * 2,
name='p2', freq='M')
exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 2, 0, 2], [1, 3, 1, 3]],
index=pd.PeriodIndex(['2013-01-01', '2013-01-02'],
name='p1', freq='D'),
columns=exp_col)
if method:
pv = df.pivot(index='p1', columns='p2')
else:
pv = pd.pivot(df, index='p1', columns='p2')
tm.assert_frame_equal(pv, expected)
expected = DataFrame([[0, 2], [1, 3]],
index=pd.PeriodIndex(['2013-01-01', '2013-01-02'],
name='p1', freq='D'),
columns=pd.PeriodIndex(['2013-01', '2013-02'],
name='p2', freq='M'))
if method:
pv = df.pivot(index='p1', columns='p2', values='data1')
else:
pv = pd.pivot(df, index='p1', columns='p2', values='data1')
tm.assert_frame_equal(pv, expected)
@pytest.mark.parametrize('values', [
['baz', 'zoo'], np.array(['baz', 'zoo']),
pd.Series(['baz', 'zoo']), pd.Index(['baz', 'zoo'])
])
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_list_like_values(self, values, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
if method:
result = df.pivot(index='foo', columns='bar', values=values)
else:
result = pd.pivot(df, index='foo', columns='bar', values=values)
data = [[1, 2, 3, 'x', 'y', 'z'],
[4, 5, 6, 'q', 'w', 't']]
index = Index(data=['one', 'two'], name='foo')
columns = MultiIndex(levels=[['baz', 'zoo'], ['A', 'B', 'C']],
codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],
names=[None, 'bar'])
expected = DataFrame(data=data, index=index,
columns=columns, dtype='object')
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('values', [
['bar', 'baz'], np.array(['bar', 'baz']),
pd.Series(['bar', 'baz']), pd.Index(['bar', 'baz'])
])
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_list_like_values_nans(self, values, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
if method:
result = df.pivot(index='zoo', columns='foo', values=values)
else:
result = pd.pivot(df, index='zoo', columns='foo', values=values)
data = [[np.nan, 'A', np.nan, 4],
[np.nan, 'C', np.nan, 6],
[np.nan, 'B', np.nan, 5],
['A', np.nan, 1, np.nan],
['B', np.nan, 2, np.nan],
['C', np.nan, 3, np.nan]]
index = Index(data=['q', 't', 'w', 'x', 'y', 'z'], name='zoo')
columns = MultiIndex(levels=[['bar', 'baz'], ['one', 'two']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[None, 'foo'])
expected = DataFrame(data=data, index=index,
columns=columns, dtype='object')
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(reason='MultiIndexed unstack with tuple names fails'
'with KeyError GH#19966')
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_multiindex(self, method):
# issue #17160
index = Index(data=[0, 1, 2, 3, 4, 5])
data = [['one', 'A', 1, 'x'],
['one', 'B', 2, 'y'],
['one', 'C', 3, 'z'],
['two', 'A', 4, 'q'],
['two', 'B', 5, 'w'],
['two', 'C', 6, 't']]
columns = MultiIndex(levels=[['bar', 'baz'], ['first', 'second']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
df = DataFrame(data=data, index=index, columns=columns, dtype='object')
if method:
result = df.pivot(index=('bar', 'first'),
columns=('bar', 'second'),
values=('baz', 'first'))
else:
result = pd.pivot(df,
index=('bar', 'first'),
columns=('bar', 'second'),
values=('baz', 'first'))
data = {'A': Series([1, 4], index=['one', 'two']),
'B': Series([2, 5], index=['one', 'two']),
'C': Series([3, 6], index=['one', 'two'])}
expected = DataFrame(data)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_tuple_of_values(self, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
with pytest.raises(KeyError):
# tuple is seen as a single column name
if method:
df.pivot(index='zoo', columns='foo', values=('bar', 'baz'))
else:
pd.pivot(df, index='zoo', columns='foo', values=('bar', 'baz'))
def test_margins(self):
def _check_output(result, values_col, index=['A', 'B'],
columns=['C'],
margins_col='All'):
col_margins = result.loc[result.index[:-1], margins_col]
expected_col_margins = self.data.groupby(index)[values_col].mean()
tm.assert_series_equal(col_margins, expected_col_margins,
check_names=False)
assert col_margins.name == margins_col
result = result.sort_index()
index_margins = result.loc[(margins_col, '')].iloc[:-1]
expected_ix_margins = self.data.groupby(columns)[values_col].mean()
tm.assert_series_equal(index_margins, expected_ix_margins,
check_names=False)
assert index_margins.name == (margins_col, '')
grand_total_margins = result.loc[(margins_col, ''), margins_col]
expected_total_margins = self.data[values_col].mean()
assert grand_total_margins == expected_total_margins
# column specified
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C',
margins=True, aggfunc=np.mean)
_check_output(result, 'D')
# Set a different margins_name (not 'All')
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C',
margins=True, aggfunc=np.mean,
margins_name='Totals')
_check_output(result, 'D', margins_col='Totals')
# no column specified
table = self.data.pivot_table(index=['A', 'B'], columns='C',
margins=True, aggfunc=np.mean)
for value_col in table.columns.levels[0]:
_check_output(table[value_col], value_col)
# no col
# to help with a buglet
self.data.columns = [k * 2 for k in self.data.columns]
table = self.data.pivot_table(index=['AA', 'BB'], margins=True,
aggfunc=np.mean)
for value_col in table.columns:
totals = table.loc[('All', ''), value_col]
assert totals == self.data[value_col].mean()
# no rows
rtable = self.data.pivot_table(columns=['AA', 'BB'], margins=True,
aggfunc=np.mean)
assert isinstance(rtable, Series)
table = self.data.pivot_table(index=['AA', 'BB'], margins=True,
aggfunc='mean')
for item in ['DD', 'EE', 'FF']:
totals = table.loc[('All', ''), item]
assert totals == self.data[item].mean()
def test_margins_dtype(self):
# GH 17013
df = self.data.copy()
df[['D', 'E', 'F']] = np.arange(len(df) * 3).reshape(len(df), 3)
mi_val = list(product(['bar', 'foo'], ['one', 'two'])) + [('All', '')]
mi = MultiIndex.from_tuples(mi_val, names=('A', 'B'))
expected = DataFrame({'dull': [12, 21, 3, 9, 45],
'shiny': [33, 0, 36, 51, 120]},
index=mi).rename_axis('C', axis=1)
expected['All'] = expected['dull'] + expected['shiny']
result = df.pivot_table(values='D', index=['A', 'B'],
columns='C', margins=True,
aggfunc=np.sum, fill_value=0)
tm.assert_frame_equal(expected, result)
@pytest.mark.xfail(reason='GH#17035 (len of floats is casted back to '
'floats)')
def test_margins_dtype_len(self):
mi_val = list(product(['bar', 'foo'], ['one', 'two'])) + [('All', '')]
mi = MultiIndex.from_tuples(mi_val, names=('A', 'B'))
expected = DataFrame({'dull': [1, 1, 2, 1, 5],
'shiny': [2, 0, 2, 2, 6]},
index=mi).rename_axis('C', axis=1)
expected['All'] = expected['dull'] + expected['shiny']
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C', margins=True,
aggfunc=len, fill_value=0)
tm.assert_frame_equal(expected, result)
def test_pivot_integer_columns(self):
# caused by upstream bug in unstack
d = date.min
data = list(product(['foo', 'bar'], ['A', 'B', 'C'], ['x1', 'x2'],
[d + timedelta(i)
for i in range(20)], [1.0]))
df = DataFrame(data)
table = df.pivot_table(values=4, index=[0, 1, 3], columns=[2])
df2 = df.rename(columns=str)
table2 = df2.pivot_table(
values='4', index=['0', '1', '3'], columns=['2'])
tm.assert_frame_equal(table, table2, check_names=False)
def test_pivot_no_level_overlap(self):
# GH #1181
data = DataFrame({'a': ['a', 'a', 'a', 'a', 'b', 'b', 'b', 'b'] * 2,
'b': [0, 0, 0, 0, 1, 1, 1, 1] * 2,
'c': (['foo'] * 4 + ['bar'] * 4) * 2,
'value': np.random.randn(16)})
table = data.pivot_table('value', index='a', columns=['b', 'c'])
grouped = data.groupby(['a', 'b', 'c'])['value'].mean()
expected = grouped.unstack('b').unstack('c').dropna(axis=1, how='all')
tm.assert_frame_equal(table, expected)
def test_pivot_columns_lexsorted(self):
n = 10000
dtype = np.dtype([
("Index", object),
("Symbol", object),
("Year", int),
("Month", int),
("Day", int),
("Quantity", int),
("Price", float),
])
products = np.array([
('SP500', 'ADBE'),
('SP500', 'NVDA'),
('SP500', 'ORCL'),
('NDQ100', 'AAPL'),
('NDQ100', 'MSFT'),
('NDQ100', 'GOOG'),
('FTSE', 'DGE.L'),
('FTSE', 'TSCO.L'),
('FTSE', 'GSK.L'),
], dtype=[('Index', object), ('Symbol', object)])
items = np.empty(n, dtype=dtype)
iproduct = np.random.randint(0, len(products), n)
items['Index'] = products['Index'][iproduct]
items['Symbol'] = products['Symbol'][iproduct]
dr = pd.date_range(date(2000, 1, 1),
date(2010, 12, 31))
dates = dr[np.random.randint(0, len(dr), n)]
items['Year'] = dates.year
items['Month'] = dates.month
items['Day'] = dates.day
items['Price'] = np.random.lognormal(4.0, 2.0, n)
df = DataFrame(items)
pivoted = df.pivot_table('Price', index=['Month', 'Day'],
columns=['Index', 'Symbol', 'Year'],
aggfunc='mean')
assert pivoted.columns.is_monotonic
def test_pivot_complex_aggfunc(self):
f = OrderedDict([('D', ['std']), ('E', ['sum'])])
expected = self.data.groupby(['A', 'B']).agg(f).unstack('B')
result = self.data.pivot_table(index='A', columns='B', aggfunc=f)
tm.assert_frame_equal(result, expected)
def test_margins_no_values_no_cols(self):
# Regression test on pivot table: no values or cols passed.
result = self.data[['A', 'B']].pivot_table(
index=['A', 'B'], aggfunc=len, margins=True)
result_list = result.tolist()
assert sum(result_list[:-1]) == result_list[-1]
def test_margins_no_values_two_rows(self):
# Regression test on pivot table: no values passed but rows are a
# multi-index
result = self.data[['A', 'B', 'C']].pivot_table(
index=['A', 'B'], columns='C', aggfunc=len, margins=True)
assert result.All.tolist() == [3.0, 1.0, 4.0, 3.0, 11.0]
def test_margins_no_values_one_row_one_col(self):
# Regression test on pivot table: no values passed but row and col
# defined
result = self.data[['A', 'B']].pivot_table(
index='A', columns='B', aggfunc=len, margins=True)
assert result.All.tolist() == [4.0, 7.0, 11.0]
def test_margins_no_values_two_row_two_cols(self):
# Regression test on pivot table: no values passed but rows and cols
# are multi-indexed
self.data['D'] = ['a', 'b', 'c', 'd',
'e', 'f', 'g', 'h', 'i', 'j', 'k']
result = self.data[['A', 'B', 'C', 'D']].pivot_table(
index=['A', 'B'], columns=['C', 'D'], aggfunc=len, margins=True)
assert result.All.tolist() == [3.0, 1.0, 4.0, 3.0, 11.0]
def test_pivot_table_with_margins_set_margin_name(self):
# see gh-3335
for margin_name in ['foo', 'one', 666, None, ['a', 'b']]:
with pytest.raises(ValueError):
# multi-index index
pivot_table(self.data, values='D', index=['A', 'B'],
columns=['C'], margins=True,
margins_name=margin_name)
with pytest.raises(ValueError):
# multi-index column
pivot_table(self.data, values='D', index=['C'],
columns=['A', 'B'], margins=True,
margins_name=margin_name)
with pytest.raises(ValueError):
# non-multi-index index/column
pivot_table(self.data, values='D', index=['A'],
columns=['B'], margins=True,
margins_name=margin_name)
def test_pivot_timegrouper(self):
df = DataFrame({
'Branch': 'A A A A A A A B'.split(),
'Buyer': '<NAME> <NAME>'.split(),
'Quantity': [1, 3, 5, 1, 8, 1, 9, 3],
'Date': [datetime(2013, 1, 1),
datetime(2013, 1, 1),
datetime(2013, 10, 1),
datetime(2013, 10, 2),
datetime(2013, 10, 1),
datetime(2013, 10, 2),
datetime(2013, 12, 2),
datetime(2013, 12, 2), ]}).set_index('Date')
expected = DataFrame(np.array([10, 18, 3], dtype='int64')
.reshape(1, 3),
index=[datetime(2013, 12, 31)],
columns='<NAME>'.split())
expected.index.name = 'Date'
expected.columns.name = 'Buyer'
result = pivot_table(df, index=Grouper(freq='A'), columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer', columns=Grouper(freq='A'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
expected = DataFrame(np.array([1, np.nan, 3, 9, 18, np.nan])
.reshape(2, 3),
index=[datetime(2013, 1, 1),
datetime(2013, 7, 1)],
columns='<NAME>'.split())
expected.index.name = 'Date'
expected.columns.name = 'Buyer'
result = pivot_table(df, index=Grouper(freq='6MS'), columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer', columns=Grouper(freq='6MS'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
# passing the name
df = df.reset_index()
result = pivot_table(df, index=Grouper(freq='6MS', key='Date'),
columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer',
columns=Grouper(freq='6MS', key='Date'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
pytest.raises(KeyError, lambda: pivot_table(
df, index=Grouper(freq='6MS', key='foo'),
columns='Buyer', values='Quantity', aggfunc=np.sum))
pytest.raises(KeyError, lambda: pivot_table(
df, index='Buyer',
columns=Grouper(freq='6MS', key='foo'),
values='Quantity', aggfunc=np.sum))
# passing the level
df = df.set_index('Date')
result = pivot_table(df, index=Grouper(freq='6MS', level='Date'),
columns='Buyer', values='Quantity',
aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer',
columns=Grouper(freq='6MS', level='Date'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
pytest.raises(ValueError, lambda: pivot_table(
df, index=Grouper(freq='6MS', level='foo'),
columns='Buyer', values='Quantity', aggfunc=np.sum))
pytest.raises(ValueError, lambda: pivot_table(
df, index='Buyer',
columns=Grouper(freq='6MS', level='foo'),
values='Quantity', aggfunc=np.sum))
# double grouper
df = DataFrame({
'Branch': 'A A A A A A A B'.split(),
'Buyer': '<NAME>'.split(),
'Quantity': [1, 3, 5, 1, 8, 1, 9, 3],
'Date': [datetime(2013, 11, 1, 13, 0), datetime(2013, 9, 1, 13, 5),
datetime(2013, 10, 1, 20, 0),
datetime(2013, 10, 2, 10, 0),
datetime(2013, 11, 1, 20, 0),
datetime(2013, 10, 2, 10, 0),
datetime(2013, 10, 2, 12, 0),
datetime(2013, 12, 5, 14, 0)],
'PayDay': [datetime(2013, 10, 4, 0, 0),
datetime(2013, 10, 15, 13, 5),
datetime(2013, 9, 5, 20, 0),
datetime(2013, 11, 2, 10, 0),
datetime(2013, 10, 7, 20, 0),
datetime(2013, 9, 5, 10, 0),
datetime(2013, 12, 30, 12, 0),
datetime(2013, 11, 20, 14, 0), ]})
result = pivot_table(df, index=Grouper(freq='M', key='Date'),
columns=Grouper(freq='M', key='PayDay'),
values='Quantity', aggfunc=np.sum)
expected = DataFrame(np.array([np.nan, 3, np.nan, np.nan,
6, np.nan, 1, 9,
np.nan, 9, np.nan, np.nan, np.nan,
np.nan, 3, np.nan]).reshape(4, 4),
index=[datetime(2013, 9, 30),
datetime(2013, 10, 31),
datetime(2013, 11, 30),
datetime(2013, 12, 31)],
columns=[datetime(2013, 9, 30),
datetime(2013, 10, 31),
datetime(2013, 11, 30),
datetime(2013, 12, 31)])
expected.index.name = 'Date'
expected.columns.name = 'PayDay'
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=Grouper(freq='M', key='PayDay'),
columns=Grouper(freq='M', key='Date'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
tuples = [(datetime(2013, 9, 30), datetime(2013, 10, 31)),
(datetime(2013, 10, 31),
datetime(2013, 9, 30)),
(datetime(2013, 10, 31),
datetime(2013, 11, 30)),
(datetime(2013, 10, 31),
datetime(2013, 12, 31)),
(datetime(2013, 11, 30),
datetime(2013, 10, 31)),
(datetime(2013, 12, 31), datetime(2013, 11, 30)), ]
idx = MultiIndex.from_tuples(tuples, names=['Date', 'PayDay'])
expected = DataFrame(np.array([3, np.nan, 6, np.nan, 1, np.nan,
9, np.nan, 9, np.nan,
np.nan, 3]).reshape(6, 2),
index=idx, columns=['A', 'B'])
expected.columns.name = 'Branch'
result = pivot_table(
df, index=[Grouper(freq='M', key='Date'),
Grouper(freq='M', key='PayDay')], columns=['Branch'],
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=['Branch'],
columns=[Grouper(freq='M', key='Date'),
Grouper(freq='M', key='PayDay')],
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
def test_pivot_datetime_tz(self):
dates1 = ['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00',
'2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00']
dates2 = ['2013-01-01 15:00:00', '2013-01-01 15:00:00',
'2013-01-01 15:00:00',
'2013-02-01 15:00:00', '2013-02-01 15:00:00',
'2013-02-01 15:00:00']
df = DataFrame({'label': ['a', 'a', 'a', 'b', 'b', 'b'],
'dt1': dates1, 'dt2': dates2,
'value1': np.arange(6, dtype='int64'),
'value2': [1, 2] * 3})
df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d, tz='US/Pacific'))
df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d, tz='Asia/Tokyo'))
exp_idx = pd.DatetimeIndex(['2011-07-19 07:00:00',
'2011-07-19 08:00:00',
'2011-07-19 09:00:00'],
tz='US/Pacific', name='dt1')
exp_col1 = Index(['value1', 'value1'])
exp_col2 = Index(['a', 'b'], name='label')
exp_col = MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 3], [1, 4], [2, 5]],
index=exp_idx, columns=exp_col)
result = pivot_table(df, index=['dt1'], columns=[
'label'], values=['value1'])
tm.assert_frame_equal(result, expected)
exp_col1 = Index(['sum', 'sum', 'sum', 'sum',
'mean', 'mean', 'mean', 'mean'])
exp_col2 = Index(['value1', 'value1', 'value2', 'value2'] * 2)
exp_col3 = pd.DatetimeIndex(['2013-01-01 15:00:00',
'2013-02-01 15:00:00'] * 4,
tz='Asia/Tokyo', name='dt2')
exp_col = MultiIndex.from_arrays([exp_col1, exp_col2, exp_col3])
expected = DataFrame(np.array([[0, 3, 1, 2, 0, 3, 1, 2],
[1, 4, 2, 1, 1, 4, 2, 1],
[2, 5, 1, 2, 2, 5, 1, 2]],
dtype='int64'),
index=exp_idx,
columns=exp_col)
result = pivot_table(df, index=['dt1'], columns=['dt2'],
values=['value1', 'value2'],
aggfunc=[np.sum, np.mean])
tm.assert_frame_equal(result, expected)
def test_pivot_dtaccessor(self):
# GH 8103
dates1 = ['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00',
'2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00']
dates2 = ['2013-01-01 15:00:00', '2013-01-01 15:00:00',
'2013-01-01 15:00:00',
'2013-02-01 15:00:00', '2013-02-01 15:00:00',
'2013-02-01 15:00:00']
df = DataFrame({'label': ['a', 'a', 'a', 'b', 'b', 'b'],
'dt1': dates1, 'dt2': dates2,
'value1': np.arange(6, dtype='int64'),
'value2': [1, 2] * 3})
df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d))
df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d))
result = pivot_table(df, index='label', columns=df['dt1'].dt.hour,
values='value1')
exp_idx = Index(['a', 'b'], name='label')
expected = DataFrame({7: [0, 3], 8: [1, 4], 9: [2, 5]},
index=exp_idx,
columns=Index([7, 8, 9], name='dt1'))
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=df['dt2'].dt.month,
columns=df['dt1'].dt.hour,
values='value1')
expected = DataFrame({7: [0, 3], 8: [1, 4], 9: [2, 5]},
index=Index([1, 2], name='dt2'),
columns=Index([7, 8, 9], name='dt1'))
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=df['dt2'].dt.year.values,
columns=[df['dt1'].dt.hour, df['dt2'].dt.month],
values='value1')
exp_col = MultiIndex.from_arrays(
[[7, 7, 8, 8, 9, 9], [1, 2] * 3], names=['dt1', 'dt2'])
expected = DataFrame(np.array([[0, 3, 1, 4, 2, 5]], dtype='int64'),
index=[2013], columns=exp_col)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=np.array(['X', 'X', 'X',
'X', 'Y', 'Y']),
columns=[df['dt1'].dt.hour, df['dt2'].dt.month],
values='value1')
expected = DataFrame(np.array([[0, 3, 1, np.nan, 2, np.nan],
[np.nan, np.nan, np.nan,
4, np.nan, 5]]),
index=['X', 'Y'], columns=exp_col)
tm.assert_frame_equal(result, expected)
def test_daily(self):
rng = date_range('1/1/2000', '12/31/2004', freq='D')
ts = Series(np.random.randn(len(rng)), index=rng)
annual = pivot_table(DataFrame(ts), index=ts.index.year,
columns=ts.index.dayofyear)
annual.columns = annual.columns.droplevel(0)
doy = np.asarray(ts.index.dayofyear)
for i in range(1, 367):
subset = ts[doy == i]
subset.index = subset.index.year
result = annual[i].dropna()
tm.assert_series_equal(result, subset, check_names=False)
assert result.name == i
def test_monthly(self):
rng = date_range('1/1/2000', '12/31/2004', freq='M')
ts = Series(np.random.randn(len(rng)), index=rng)
annual = pivot_table(pd.DataFrame(ts), index=ts.index.year,
columns=ts.index.month)
annual.columns = annual.columns.droplevel(0)
month = ts.index.month
for i in range(1, 13):
subset = ts[month == i]
subset.index = subset.index.year
result = annual[i].dropna()
tm.assert_series_equal(result, subset, check_names=False)
assert result.name == i
def test_pivot_table_with_iterator_values(self):
# GH 12017
aggs = {'D': 'sum', 'E': 'mean'}
pivot_values_list = pd.pivot_table(
self.data, index=['A'], values=list(aggs.keys()), aggfunc=aggs,
)
pivot_values_keys = pd.pivot_table(
self.data, index=['A'], values=aggs.keys(), aggfunc=aggs,
)
tm.assert_frame_equal(pivot_values_keys, pivot_values_list)
agg_values_gen = (value for value in aggs.keys())
pivot_values_gen = pd.pivot_table(
self.data, index=['A'], values=agg_values_gen, aggfunc=aggs,
)
tm.assert_frame_equal(pivot_values_gen, pivot_values_list)
def test_pivot_table_margins_name_with_aggfunc_list(self):
# GH 13354
margins_name = 'Weekly'
costs = pd.DataFrame(
{'item': ['bacon', 'cheese', 'bacon', 'cheese'],
'cost': [2.5, 4.5, 3.2, 3.3],
'day': ['M', 'M', 'T', 'T']}
)
table = costs.pivot_table(
index="item", columns="day", margins=True,
margins_name=margins_name, aggfunc=[np.mean, max]
)
ix = pd.Index(
['bacon', 'cheese', margins_name], dtype='object', name='item'
)
tups = [('mean', 'cost', 'M'), ('mean', 'cost', 'T'),
('mean', 'cost', margins_name), ('max', 'cost', 'M'),
('max', 'cost', 'T'), ('max', 'cost', margins_name)]
cols = pd.MultiIndex.from_tuples(tups, names=[None, None, 'day'])
expected = pd.DataFrame(table.values, index=ix, columns=cols)
tm.assert_frame_equal(table, expected)
@pytest.mark.xfail(reason='GH#17035 (np.mean of ints is casted back to '
'ints)')
def test_categorical_margins(self, observed):
# GH 10989
df = pd.DataFrame({'x': np.arange(8),
'y': np.arange(8) // 4,
'z': np.arange(8) % 2})
expected = pd.DataFrame([[1.0, 2.0, 1.5], [5, 6, 5.5], [3, 4, 3.5]])
expected.index = Index([0, 1, 'All'], name='y')
expected.columns = Index([0, 1, 'All'], name='z')
table = df.pivot_table('x', 'y', 'z', dropna=observed, margins=True)
tm.assert_frame_equal(table, expected)
@pytest.mark.xfail(reason='GH#17035 (np.mean of ints is casted back to '
'ints)')
def test_categorical_margins_category(self, observed):
df = pd.DataFrame({'x': np.arange(8),
'y': np.arange(8) // 4,
'z': np.arange(8) % 2})
expected = pd.DataFrame([[1.0, 2.0, 1.5], [5, 6, 5.5], [3, 4, 3.5]])
expected.index = Index([0, 1, 'All'], name='y')
expected.columns = Index([0, 1, 'All'], name='z')
df.y = df.y.astype('category')
df.z = df.z.astype('category')
table = df.pivot_table('x', 'y', 'z', dropna=observed, margins=True)
tm.assert_frame_equal(table, expected)
def test_categorical_aggfunc(self, observed):
# GH 9534
df = pd.DataFrame({"C1": ["A", "B", "C", "C"],
"C2": ["a", "a", "b", "b"],
"V": [1, 2, 3, 4]})
df["C1"] = df["C1"].astype("category")
result = df.pivot_table("V", index="C1", columns="C2",
dropna=observed, aggfunc="count")
expected_index = pd.CategoricalIndex(['A', 'B', 'C'],
categories=['A', 'B', 'C'],
ordered=False,
name='C1')
expected_columns = pd.Index(['a', 'b'], name='C2')
expected_data = np.array([[1., np.nan],
[1., np.nan],
[np.nan, 2.]])
expected = pd.DataFrame(expected_data,
index=expected_index,
columns=expected_columns)
tm.assert_frame_equal(result, expected)
def test_categorical_pivot_index_ordering(self, observed):
# GH 8731
df = pd.DataFrame({'Sales': [100, 120, 220],
'Month': ['January', 'January', 'January'],
'Year': [2013, 2014, 2013]})
months = ['January', 'February', 'March', 'April', 'May', 'June',
'July', 'August', 'September', 'October', 'November',
'December']
df['Month'] = df['Month'].astype('category').cat.set_categories(months)
result = df.pivot_table(values='Sales',
index='Month',
columns='Year',
dropna=observed,
aggfunc='sum')
expected_columns = pd.Int64Index([2013, 2014], name='Year')
expected_index = pd.CategoricalIndex(['January'],
categories=months,
ordered=False,
name='Month')
expected = pd.DataFrame([[320, 120]],
index=expected_index,
columns=expected_columns)
if not observed:
result = result.dropna().astype(np.int64)
tm.assert_frame_equal(result, expected)
def test_pivot_table_not_series(self):
# GH 4386
# pivot_table always returns a DataFrame
# when values is not list like and columns is None
# and aggfunc is not instance of list
df = DataFrame({'col1': [3, 4, 5],
'col2': ['C', 'D', 'E'],
'col3': [1, 3, 9]})
result = df.pivot_table('col1', index=['col3', 'col2'], aggfunc=np.sum)
m = MultiIndex.from_arrays([[1, 3, 9],
['C', 'D', 'E']],
names=['col3', 'col2'])
expected = DataFrame([3, 4, 5],
index=m, columns=['col1'])
tm.assert_frame_equal(result, expected)
result = df.pivot_table(
'col1', index='col3', columns='col2', aggfunc=np.sum
)
expected = DataFrame([[3, np.NaN, np.NaN],
[np.NaN, 4, np.NaN],
[np.NaN, np.NaN, 5]],
index=Index([1, 3, 9], name='col3'),
columns=Index(['C', 'D', 'E'], name='col2'))
tm.assert_frame_equal(result, expected)
result = df.pivot_table('col1', index='col3', aggfunc=[np.sum])
m = MultiIndex.from_arrays([['sum'],
['col1']])
expected = DataFrame([3, 4, 5],
index=Index([1, 3, 9], name='col3'),
columns=m)
tm.assert_frame_equal(result, expected)
def test_pivot_margins_name_unicode(self):
# issue #13292
greek = u'\u0394\u03bf\u03ba\u03b9\u03bc\u03ae'
frame = pd.DataFrame({'foo': [1, 2, 3]})
table = pd.pivot_table(frame, index=['foo'], aggfunc=len, margins=True,
margins_name=greek)
index = pd.Index([1, 2, 3, greek], dtype='object', name='foo')
expected = pd.DataFrame(index=index)
tm.assert_frame_equal(table, expected)
def test_pivot_string_as_func(self):
# GH #18713
# for correctness purposes
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar',
'bar', 'bar', 'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two', 'one', 'one',
'one', 'two', 'two', 'two', 'one'],
'C': range(11)})
result = pivot_table(data, index='A', columns='B', aggfunc='sum')
mi = MultiIndex(levels=[['C'], ['one', 'two']],
codes=[[0, 0], [0, 1]], names=[None, 'B'])
expected = DataFrame({('C', 'one'): {'bar': 15, 'foo': 13},
('C', 'two'): {'bar': 7, 'foo': 20}},
columns=mi).rename_axis('A')
tm.assert_frame_equal(result, expected)
result = pivot_table(data, index='A', columns='B',
aggfunc=['sum', 'mean'])
mi = MultiIndex(levels=[['sum', 'mean'], ['C'], ['one', 'two']],
codes=[[0, 0, 1, 1], [0, 0, 0, 0], [0, 1, 0, 1]],
names=[None, None, 'B'])
expected = DataFrame({('mean', 'C', 'one'): {'bar': 5.0, 'foo': 3.25},
('mean', 'C', 'two'): {'bar': 7.0,
'foo': 6.666666666666667},
('sum', 'C', 'one'): {'bar': 15, 'foo': 13},
('sum', 'C', 'two'): {'bar': 7, 'foo': 20}},
columns=mi).rename_axis('A')
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('f, f_numpy',
[('sum', np.sum),
('mean', np.mean),
('std', np.std),
(['sum', 'mean'], [np.sum, np.mean]),
(['sum', 'std'], [np.sum, np.std]),
(['std', 'mean'], [np.std, np.mean])])
def test_pivot_string_func_vs_func(self, f, f_numpy):
# GH #18713
# for consistency purposes
result = pivot_table(self.data, index='A', columns='B', aggfunc=f)
expected = pivot_table(self.data, index='A', columns='B',
aggfunc=f_numpy)
tm.assert_frame_equal(result, expected)
@pytest.mark.slow
def test_pivot_number_of_levels_larger_than_int32(self):
# GH 20601
df = DataFrame({'ind1': np.arange(2 ** 16),
'ind2': np.arange(2 ** 16),
'count': 0})
with pytest.raises(ValueError, match='int32 overflow'):
df.pivot_table(index='ind1', columns='ind2',
values='count', aggfunc='count')
class TestCrosstab(object):
def setup_method(self, method):
df = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
self.df = df.append(df, ignore_index=True)
def test_crosstab_single(self):
df = self.df
result = crosstab(df['A'], df['C'])
expected = df.groupby(['A', 'C']).size().unstack()
tm.assert_frame_equal(result, expected.fillna(0).astype(np.int64))
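# (added note) The assertion above spells out the crosstab/groupby
# equivalence: crosstab(df['A'], df['C']) matches
# df.groupby(['A', 'C']).size().unstack().fillna(0).astype(np.int64).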
def test_crosstab_multiple(self):
df = self.df
result = crosstab(df['A'], [df['B'], df['C']])
expected = df.groupby(['A', 'B', 'C']).size()
expected = expected.unstack(
'B').unstack('C').fillna(0).astype(np.int64)
tm.assert_frame_equal(result, expected)
result = crosstab([df['B'], df['C']], df['A'])
expected = df.groupby(['B', 'C', 'A']).size()
expected = expected.unstack('A').fillna(0).astype(np.int64)
tm.assert_frame_equal(result, expected)
def test_crosstab_ndarray(self):
a = np.random.randint(0, 5, size=100)
b = np.random.randint(0, 3, size=100)
c = np.random.randint(0, 10, size=100)
df = DataFrame({'a': a, 'b': b, 'c': c})
result = crosstab(a, [b, c], rownames=['a'], colnames=('b', 'c'))
expected = crosstab(df['a'], [df['b'], df['c']])
tm.assert_frame_equal(result, expected)
result = crosstab([b, c], a, colnames=['a'], rownames=('b', 'c'))
expected = crosstab([df['b'], df['c']], df['a'])
tm.assert_frame_equal(result, expected)
# assign arbitrary names
result = crosstab(self.df['A'].values, self.df['C'].values)
assert result.index.name == 'row_0'
assert result.columns.name == 'col_0'
def test_crosstab_non_aligned(self):
# GH 17005
a = pd.Series([0, 1, 1], index=['a', 'b', 'c'])
b = pd.Series([3, 4, 3, 4, 3], index=['a', 'b', 'c', 'd', 'f'])
c = np.array([3, 4, 3])
expected = pd.DataFrame([[1, 0], [1, 1]],
index=Index([0, 1], name='row_0'),
columns=Index([3, 4], name='col_0'))
result = crosstab(a, b)
tm.assert_frame_equal(result, expected)
result = crosstab(a, c)
tm.assert_frame_equal(result, expected)
def test_crosstab_margins(self):
a = np.random.randint(0, 7, size=100)
b = np.random.randint(0, 3, size=100)
c = np.random.randint(0, 5, size=100)
df = DataFrame({'a': a, 'b': b, 'c': c})
result = crosstab(a, [b, c], rownames=['a'], colnames=('b', 'c'),
margins=True)
assert result.index.names == ('a',)
assert result.columns.names == ['b', 'c']
all_cols = result['All', '']
exp_cols = df.groupby(['a']).size().astype('i8')
# to keep index.name
exp_margin = Series([len(df)], index=Index(['All'], name='a'))
exp_cols = exp_cols.append(exp_margin)
exp_cols.name = ('All', '')
tm.assert_series_equal(all_cols, exp_cols)
all_rows = result.loc['All']
exp_rows = df.groupby(['b', 'c']).size().astype('i8')
exp_rows = exp_rows.append(Series([len(df)], index=[('All', '')]))
exp_rows.name = 'All'
exp_rows = exp_rows.reindex(all_rows.index)
exp_rows = exp_rows.fillna(0).astype(np.int64)
tm.assert_series_equal(all_rows, exp_rows)
def test_crosstab_margins_set_margin_name(self):
# GH 15972
a = np.random.randint(0, 7, size=100)
b = np.random.randint(0, 3, size=100)
c = np.random.randint(0, 5, size=100)
df = DataFrame({'a': a, 'b': b, 'c': c})
result = crosstab(a, [b, c], rownames=['a'], colnames=('b', 'c'),
margins=True, margins_name='TOTAL')
assert result.index.names == ('a',)
assert result.columns.names == ['b', 'c']
all_cols = result['TOTAL', '']
exp_cols = df.groupby(['a']).size().astype('i8')
# to keep index.name
exp_margin = Series([len(df)], index=Index(['TOTAL'], name='a'))
exp_cols = exp_cols.append(exp_margin)
exp_cols.name = ('TOTAL', '')
tm.assert_series_equal(all_cols, exp_cols)
all_rows = result.loc['TOTAL']
exp_rows = df.groupby(['b', 'c']).size().astype('i8')
exp_rows = exp_rows.append(Series([len(df)], index=[('TOTAL', '')]))
exp_rows.name = 'TOTAL'
exp_rows = exp_rows.reindex(all_rows.index)
exp_rows = exp_rows.fillna(0).astype(np.int64)
tm.assert_series_equal(all_rows, exp_rows)
for margins_name in [666, None, ['a', 'b']]:
with pytest.raises(ValueError):
crosstab(a, [b, c], rownames=['a'], colnames=('b', 'c'),
margins=True, margins_name=margins_name)
def test_crosstab_pass_values(self):
a = np.random.randint(0, 7, size=100)
b = np.random.randint(0, 3, size=100)
c = np.random.randint(0, 5, size=100)
values = np.random.randn(100)
table = crosstab([a, b], c, values, aggfunc=np.sum,
rownames=['foo', 'bar'], colnames=['baz'])
df = DataFrame({'foo': a, 'bar': b, 'baz': c, 'values': values})
expected = df.pivot_table('values', index=['foo', 'bar'],
columns='baz', aggfunc=np.sum)
tm.assert_frame_equal(table, expected)
def test_crosstab_dropna(self):
# GH 3820
a = np.array(['foo', 'foo', 'foo', 'bar',
'bar', 'foo', 'foo'], dtype=object)
b = np.array(['one', 'one', 'two', 'one',
'two', 'two', 'two'], dtype=object)
c = np.array(['dull', 'dull', 'dull', 'dull',
'dull', 'shiny', 'shiny'], dtype=object)
res = pd.crosstab(a, [b, c], rownames=['a'],
colnames=['b', 'c'], dropna=False)
m = MultiIndex.from_tuples([('one', 'dull'), ('one', 'shiny'),
('two', 'dull'), ('two', 'shiny')],
names=['b', 'c'])
tm.assert_index_equal(res.columns, m)
def test_crosstab_no_overlap(self):
# GH 10291
s1 = pd.Series([1, 2, 3], index=[1, 2, 3])
s2 = pd.Series([4, 5, 6], index=[4, 5, 6])
actual = crosstab(s1, s2)
expected = pd.DataFrame()
tm.assert_frame_equal(actual, expected)
def test_margin_dropna(self):
# GH 12577
# pivot_table counts null into margin ('All')
# when margins=true and dropna=true
df = pd.DataFrame({'a': [1, 2, 2, 2, 2, np.nan],
'b': [3, 3, 4, 4, 4, 4]})
actual = pd.crosstab(df.a, df.b, margins=True, dropna=True)
expected = pd.DataFrame([[1, 0, 1], [1, 3, 4], [2, 3, 5]])
expected.index = Index([1.0, 2.0, 'All'], name='a')
expected.columns = Index([3, 4, 'All'], name='b')
tm.assert_frame_equal(actual, expected)
df = DataFrame({'a': [1, np.nan, np.nan, np.nan, 2, np.nan],
'b': [3, np.nan, 4, 4, 4, 4]})
actual = pd.crosstab(df.a, df.b, margins=True, dropna=True)
expected = pd.DataFrame([[1, 0, 1], [0, 1, 1], [1, 1, 2]])
expected.index = Index([1.0, 2.0, 'All'], name='a')
expected.columns = Index([3.0, 4.0, 'All'], name='b')
tm.assert_frame_equal(actual, expected)
df = DataFrame({'a': [1, np.nan, np.nan, np.nan, np.nan, 2],
'b': [3, 3, 4, 4, 4, 4]})
actual = pd.crosstab(df.a, df.b, margins=True, dropna=True)
expected = pd.DataFrame([[1, 0, 1], [0, 1, 1], [1, 1, 2]])
expected.index = Index([1.0, 2.0, 'All'], name='a')
expected.columns = Index([3, 4, 'All'], name='b')
#!/usr/bin/env python
# coding: utf-8
# ***********************************************************************
#
# V2W-BERT: A Python library for vulnerability classification
# <NAME> (<EMAIL>) : Purdue University
# <NAME> (<EMAIL>): Pacific Northwest National Laboratory
#
# ***********************************************************************
#
#
# Copyright © 2022, Battelle Memorial Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ## Download and Preprocess Latest Dataset
#
# In this script we first download all CVEs to-date. Use the NVD and Mitre hierarchy documents to prepare a train test validation set.
# ## Import libraries
# In[199]:
import os
import requests, zipfile, io
import pickle
import pandas as pd
import numpy as np
# Here, I have disabled a false alarm that would otherwise trip later in the project.
pd.options.mode.chained_assignment = None
# The datetime library will let me filter the data by reporting date.
from datetime import datetime, timedelta
# Since the NVD data is housed in JavaScript Object Notation (JSON) format, I will need the json_normalize function to access and manipulate the information.
from pandas.io.json import json_normalize
import sys
import torch
import re
from ipynb.fs.full.Dataset import Data
# In[200]:
# Expanding view area to facilitate data manipulation.
pd.set_option('display.max_rows', 20)
pd.set_option('display.max_columns', 100)
# In[201]:
import argparse
from argparse import ArgumentParser
def get_configuration():
parser = ArgumentParser()
parser.add_argument('--dir', type=str, default='Dataset')
parser.add_argument('--from_year', type=int, default=2020)
parser.add_argument('--to_year', type=int, default=2022)
parser.add_argument('--from_train_year', type=int, default=1990)
parser.add_argument('--to_train_year', type=int, default=2020)
parser.add_argument('--from_test_year', type=int, default=2021)
parser.add_argument('--to_test_year', type=int, default=2021)
parser.add_argument('--from_val_year', type=int, default=2022)
parser.add_argument('--to_val_year', type=int, default=2022)
parser.add_argument('-f') ## dummy argument so argparse works inside a Jupyter notebook
args = parser.parse_args()
dict_args = vars(args)
return args, dict_args
args, dict_args=get_configuration()
print(dict_args)
print(args.dir)
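# Example (illustrative): with the argparse defaults above, dict_args is
# {'dir': 'Dataset', 'from_year': 2020, 'to_year': 2022,
#  'from_train_year': 1990, 'to_train_year': 2020,
#  'from_test_year': 2021, 'to_test_year': 2021,
#  'from_val_year': 2022, 'to_val_year': 2022, 'f': None}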
# In[ ]:
# ### Configuration
# In[202]:
class DataPath():
def __init__(self, args, dataset_dir='',results_dir=''):
#File locations
self.PATH_TO_DATASETS_DIRECTORY = dataset_dir+'/NVD/raw/'
self.PATH_TO_RESULTS_DIRECTORY = results_dir+'/NVD/processed/'
self.NVD_CVE_FILE=self.PATH_TO_RESULTS_DIRECTORY+'NVD_CVE_data.csv'
self.Graph_FILE=self.PATH_TO_RESULTS_DIRECTORY+'GRAPH_data'
self.GRAPHVIZ_HIERARCHY=self.PATH_TO_RESULTS_DIRECTORY+'Hierarchy'
self.MITRE_CWE_FILE=self.PATH_TO_DATASETS_DIRECTORY+'CWE_RC_1000.csv'
self.NVD_CWE_FILE=self.PATH_TO_RESULTS_DIRECTORY+'NVD_CWE_data.csv'
self.MASK_FILE = self.PATH_TO_RESULTS_DIRECTORY+'NVD_data'
self.MERGED_NVD_CVE_FILE=self.PATH_TO_RESULTS_DIRECTORY+'NVD_CVE.csv'
self.FILTERED_NVD_CWE_FILE=self.PATH_TO_RESULTS_DIRECTORY+'NVD_CWE.csv'
self.YEARS=list(range(args.from_year,args.to_year+1))
self.TRAIN_YEARS=list(range(args.from_train_year,args.to_train_year+1))
self.VAL_YEARS=list(range(args.from_val_year,args.to_val_year+1))
self.TEST_YEARS=list(range(args.from_test_year,args.to_test_year+1))
if not os.path.exists(self.PATH_TO_DATASETS_DIRECTORY):
print("Creating directory: ",self.PATH_TO_DATASETS_DIRECTORY)
os.makedirs(self.PATH_TO_DATASETS_DIRECTORY)
if not os.path.exists(self.PATH_TO_RESULTS_DIRECTORY):
print("Creating directory: ",self.PATH_TO_RESULTS_DIRECTORY)
os.makedirs(self.PATH_TO_RESULTS_DIRECTORY)
class Config(DataPath):
def __init__(self,args, dataset_dir='',results_dir=''):
super(Config, self).__init__(args, dataset_dir, results_dir)
self.CLUSTER_LABEL=0
self.download()
def download(self):
for year in self.YEARS:
if not os.path.exists(self.PATH_TO_DATASETS_DIRECTORY+'nvdcve-1.1-'+str(year)+'.json'):
url = 'https://nvd.nist.gov/feeds/json/cve/1.1/nvdcve-1.1-'+str(year)+'.json.zip'
print("Downloading: ",url)
r = requests.get(url)
z = zipfile.ZipFile(io.BytesIO(r.content))
z.extractall(self.PATH_TO_DATASETS_DIRECTORY)
print("CVEs downloaded")
if not os.path.exists(self.MITRE_CWE_FILE):
url = 'https://drive.google.com/uc?export=download&id=1-phSamb4RbxyoBc3AQ2xxKMSsK2DwPyn'
print("Downloading: ",url)
r = requests.get(url)
z = zipfile.ZipFile(io.BytesIO(r.content))
z.extractall(self.PATH_TO_DATASETS_DIRECTORY)
print("CWEs downloaded")
config=Config(args,dataset_dir=args.dir,results_dir=args.dir)
# ### Process CVEs
# In[203]:
def getDataFrame(config):
df = []
counter=0
for year in config.YEARS:
yearly_data = pd.read_json(config.PATH_TO_DATASETS_DIRECTORY+'nvdcve-1.1-'+str(year)+'.json')
if counter == 0:
df = yearly_data
else:
df = df.append(yearly_data)
counter+=1
return df
# In[204]:
def removeREJECT(description):
series=[]
for x in description:
try:
if "REJECT" in (json_normalize(x)["value"])[0]:
series.append(False)
else:
series.append(True)
except:
series.append(False)
return pd.Series(series,index=description.index)
# In[205]:
def removeUnknownCWE(description):
series=[]
for x in description:
try:
if x == "UNKNOWN" or x == "NONE":
series.append(False)
else:
series.append(True)
except:
series.append(False)
return pd.Series(series,index=description.index)
# In[206]:
def getCVEDescription(df):
CVE_entry = []
CVE_index = df["cve.description.description_data"].index
for x in df["cve.description.description_data"]:
try:
raw_CVE_entry = json_normalize(x)["value"][0]
clean_CVE_entry = str(raw_CVE_entry)
CVE_entry.append(clean_CVE_entry)
except:
CVE_entry.append("NONE")
CVE_entry = pd.Series(CVE_entry, index = CVE_index)
return CVE_entry
# In[207]:
# Defining a function which I will use below
def consolidate_unknowns(x):
if x == "NVD-CWE-Other" or x == "NVD-CWE-noinfo":
return "UNKNOWN"
else:
return x
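# In[ ]:
# Quick illustration of the helper above (hypothetical category strings):
print(consolidate_unknowns("NVD-CWE-noinfo"), consolidate_unknowns("CWE-79"))  # UNKNOWN CWE-79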
# In[208]:
def getCWEs(df):
CWE_entry = []
CWE_index = df["cve.problemtype.problemtype_data"].index
for x in df["cve.problemtype.problemtype_data"]:
try:
CWE_normalized_json_step_1 = json_normalize(x)
CWE_normalized_json_step_2 = CWE_normalized_json_step_1["description"][0]
CWEs=[]
#print(json_normalize(CWE_normalized_json_step_2)["value"])
for CWE in json_normalize(CWE_normalized_json_step_2)["value"]:
#CWEs.append(consolidate_unknowns(str(CWE)))
CWEs.append(str(CWE))
CWE_entry.append(CWEs)
except:
CWE_entry.append(['NONE'])
CWE_entry = pd.Series(CWE_entry, index = CWE_index)
return CWE_entry
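# In[ ]:
# Minimal illustration of getCWEs on a synthetic record (not real NVD data), assuming the
# nested NVD layout of a list holding a 'description' list of {'value': ...} dicts:
_toy = pd.DataFrame({"cve.problemtype.problemtype_data":
                     [[{"description": [{"value": "CWE-79"}, {"value": "CWE-89"}]}]]})
print(getCWEs(_toy)[0])  # expected output: ['CWE-79', 'CWE-89']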
# In[209]:
def ProcessDataset(config):
print("Loading data from file---")
df=getDataFrame(config)
CVE_Items = json_normalize(df["CVE_Items"])
df = pd.concat([df.reset_index(), CVE_Items], axis=1)
df = df.drop(["index", "CVE_Items"], axis=1)
df = df.rename(columns={"cve.CVE_data_meta.ID": "CVE ID"})
CVE_ID = df["CVE ID"]
df.drop(labels=["CVE ID"], axis=1,inplace = True)
df.insert(0, "CVE ID", CVE_ID)
##remove description with REJECT
print("Removing REJECTs---")
df=df[removeREJECT(df["cve.description.description_data"])]
##Extract CVE description
CVE_description=getCVEDescription(df)
df.insert(1, "CVE Description", CVE_description)
##Extract CWEs
print("Extracting CWEs---")
CWE_entry=getCWEs(df)
df.insert(2, "CWE Code", CWE_entry)
# ##Remove CWEs we don't know true label
# print("Removing Unknown CWEs---")
# df=df[removeUnknownCWE(df["CWE Code 1"])]
# Converting the data to pandas date-time format
df["publishedDate"] = pd.to_datetime(df["publishedDate"])
return df
# ### ProcessCWEs
# In[210]:
def processAndSaveCVE(config, LOAD_SAVED=True):
if not os.path.exists(config.NVD_CVE_FILE) or LOAD_SAVED==False:
df=ProcessDataset(config)
df=df[['publishedDate', 'CVE ID', 'CVE Description', 'CWE Code']]
df.to_csv(config.NVD_CVE_FILE,index=False)
else:
df=pd.read_csv(config.NVD_CVE_FILE)
return df
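# In[ ]:
# Typical call pattern for the cached loader above (sketch only; LOAD_SAVED=True reuses the
# CSV written by a previous run instead of re-parsing the yearly JSON feeds):
# df_cve = processAndSaveCVE(config, LOAD_SAVED=True)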
# In[211]:
def ProcessCWE_NVD(config):
# Importing BeautifulSoup and an xml parser to scrape the CWE definitions from the NVD web site
from bs4 import BeautifulSoup
import lxml.etree
# loading the NVD CWE Definitions page and scraping it for the first table that appears
NVD_CWE_description_url = requests.get("https://nvd.nist.gov/vuln/categories")
CWE_definitions_page_soup = BeautifulSoup(NVD_CWE_description_url.content, "html.parser")
table = CWE_definitions_page_soup.find_all('table')[0]
df_CWE_definitions = pd.read_html(str(table))[0]
return df_CWE_definitions
# In[212]:
def ProcessCWE_MITRE(config):
print('Loading CWE file : {0}'.format(config.MITRE_CWE_FILE))
#df_CWE_definitions = pd.read_csv(config.MITRE_CWE_FILE, quotechar='"',delimiter=',', encoding='latin1',index_col=False)
df_CWE_definitions = pd.read_csv(config.MITRE_CWE_FILE, delimiter=',', encoding='latin1',index_col=False)
return df_CWE_definitions
# In[213]:
def processAndSaveCWE(config, LOAD_SAVED=True):
if not os.path.exists(config.MITRE_CWE_FILE) or LOAD_SAVED==False:
df_CWE_MITRE=ProcessCWE_MITRE(config)
df_CWE_MITRE.to_csv(config.MITRE_CWE_FILE,index=False)
else:
df_CWE_MITRE=
|
pd.read_csv(config.MITRE_CWE_FILE, index_col=False)
|
pandas.read_csv
|
from lxml import html
import requests
import pandas as pd
from datetime import datetime
pd.set_option('display.max_columns', 20)
|
pd.set_option('display.width', 1000)
|
pandas.set_option
|
import math
import os
import lapras
import numpy as np
import pandas as pd
from collections import OrderedDict
from sklearn import model_selection
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.metrics import roc_auc_score, roc_curve
from tensorflow.python.keras.callbacks import Callback
from tensorflow.python.keras.metrics import AUC, Precision
import tensorflow as tf
from hyperopt import fmin, tpe, hp
import hyperopt
import pickle
class WideDeepBinary():
def __init__(self, static_continue_X_cols:list, static_discrete_X_cols:list, rnn_continue_X_cols:list, ts_step: int):
"""
Column names don't matter, but you should know what are you modeling.
Args:
static_continue_X_cols: 静态连续特征列名
static_discrete_X_cols: 静态离散特征列名
rnn_continue_X_cols: 时序连续特征列名
ts_step: 时间序列步长
"""
self.static_continue_X_cols = static_continue_X_cols
self.static_discrete_X_cols = static_discrete_X_cols
self.rnn_continue_X_cols = rnn_continue_X_cols
self.ts_step = ts_step
self.embedding_dim = self.rnn_cells = self.activation = self.dropout = \
self.hidden_units = self.model = None
self.le_dict = {}
self.scalar_basic = None
self.scalar_rnn = None
def compile(self, embedding_dim=4, rnn_cells=64, hidden_units=[64,16], activation='relu',
dropout=0.3, loss=tf.keras.losses.BinaryCrossentropy(),
optimizer=tf.keras.optimizers.Adam(1e-4), metrics=[Precision(), AUC()], summary=True, **kwargs):
"""
Args:
embedding_dim: 静态离散特征embedding参数,表示输出向量维度,输入词典维度由训练数据自动计算产生
rnn_cells: 时序连续特征输出神经元个数
hidden_units: MLP层神经元参数,最少2层
activation: MLP层激活函数
dropout: dropout系数
loss: 损失函数
optimizer: 优化器
metrics: 效果度量函数
summary: 是否输出summary信息
"""
self.embedding_dim = embedding_dim
self.rnn_cells = rnn_cells
self.hidden_units = hidden_units
self.activation = activation
self.dropout = dropout
if not self.scalar_rnn:
print("数据Scalar尚未初始化,请先调用pre_processing方法进行数据预处理,然后才能编译模型!")
return
        # Define the input layers
input_features = OrderedDict()
input_features['input_rnn_continue'] = tf.keras.layers.Input(shape=(self.ts_step, len(self.rnn_continue_X_cols)), name='input_rnn_continue') # 连续时间序列数据
if self.static_continue_X_cols:
input_features['input_static_continue'] = tf.keras.layers.Input(shape=len(self.static_continue_X_cols), name='input_static_continue') # 连续静态数据
for col in self.static_discrete_X_cols:
input_features[col] = tf.keras.layers.Input(shape=1, name=col) # 静态离散特征
        # Build the network structure
rnn_dense = [tf.keras.layers.LSTM(units=self.rnn_cells)(input_features['input_rnn_continue'])]
static_dense = []
if self.static_continue_X_cols:
static_dense = [input_features['input_static_continue']]
static_discrete = []
for col in self.static_discrete_X_cols:
vol_size = len(self.le_dict[col].classes_) + 1
vec = tf.keras.layers.Embedding(vol_size, self.embedding_dim)(input_features[col])
static_discrete.append(tf.reshape(vec, [-1, self.embedding_dim]))
concated_vec = rnn_dense + static_dense + static_discrete
if len(concated_vec) == 1:
x = concated_vec[0]
else:
x = tf.keras.layers.concatenate(concated_vec, axis=1)
        # After concatenating the features, add the fully-connected (MLP) layers
for i in range(len(hidden_units)):
x = tf.keras.layers.Dense(hidden_units[i], activation=activation)(x)
x = tf.keras.layers.Dropout(dropout)(x)
output = tf.keras.layers.Dense(1, activation='sigmoid', name='action')(x)
inputs_list = list(input_features.values())
self.model = tf.keras.models.Model(inputs=inputs_list, outputs=output)
self.model.compile(loss=loss, optimizer=optimizer, metrics=metrics, **kwargs)
if summary:
self.model.summary()
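    # Illustrative wiring of this class (sketch only; the column names, ts_step and the
    # DataFrames below are placeholders, not part of this module):
    # model = WideDeepBinary(static_continue_X_cols=['age'],
    #                        static_discrete_X_cols=['city'],
    #                        rnn_continue_X_cols=['amount', 'balance'],
    #                        ts_step=12)
    # model.pre_processing(basic_df, rnn_df, id_label='user_id', ts_label='month',
    #                      training=True, y_label='y')   # fits the scalers/encoders first
    # model.compile(embedding_dim=4, rnn_cells=64, hidden_units=[64, 16])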
def pre_processing(self, basic_df:pd.DataFrame, rnn_df:pd.DataFrame, id_label:str, ts_label:str, training=True,
y_label='y', test_size=0.2, fill_na=0, **kwargs):
"""
对原始数据进行预处理,输入为pandas dataframe,输出为直接入模的numpy array
Args:
basic_df: 静态数据的Dataframe
rnn_df: 时序数据的Dataframe
id_label: id的列名
ts_label: 时序标签的列名
training: 是否为训练步骤,True or False
y_label: Y标签列名 training为True时起作用
test_size: 测试集比例 training为True时起作
fill_na: 缺失值填充
"""
if type(basic_df) != pd.DataFrame or type(rnn_df) != pd.DataFrame:
raise ValueError("Error: Input X data must be Pandas.DataFrame format.\n输入数据必须是Pandas DataFrame格式!")
if len(basic_df)*self.ts_step != len(rnn_df):
raise ValueError("Error: Some of the train data size is different from others, please check it again."
"\n时间序列数据记录数没对齐!")
try:
basic_df[self.static_continue_X_cols]
basic_df[self.static_discrete_X_cols]
rnn_df[self.rnn_continue_X_cols]
except:
raise ValueError("Error: Some of the declared columns is not in the input data, please check it again."
"\n声明的列名在数据中不存在!")
# 对连续型数据填充缺失值
basic_df = basic_df.fillna(fill_na)
rnn_df = rnn_df.fillna(fill_na)
# 区分训练和预测场景
if training:
# 划分训练集和验证集
train_id, test_id, _, _ = model_selection.train_test_split(basic_df[[id_label, y_label]],
basic_df[y_label], test_size=test_size, random_state=2020)
# 划分训练集和测试集——静态和时序宽表
train_basic_df = basic_df[basic_df[id_label].isin(train_id[id_label])]
test_basic_df = basic_df[basic_df[id_label].isin(test_id[id_label])]
train_rnn_df = rnn_df[rnn_df[id_label].isin(train_id[id_label])]
test_rnn_df = rnn_df[rnn_df[id_label].isin(test_id[id_label])]
# 排序
train_basic_df = train_basic_df.sort_values(id_label)
test_basic_df = test_basic_df.sort_values(id_label)
train_rnn_df = train_rnn_df.sort_values([id_label, ts_label])
test_rnn_df = test_rnn_df.sort_values([id_label, ts_label])
if self.static_continue_X_cols:
self.scalar_basic = StandardScaler()
self.scalar_basic.fit(train_basic_df[self.static_continue_X_cols])
basic_X_train_transform = self.scalar_basic.transform(train_basic_df[self.static_continue_X_cols])
basic_X_test_transform = self.scalar_basic.transform(test_basic_df[self.static_continue_X_cols])
self.scalar_rnn = StandardScaler()
self.scalar_rnn.fit(train_rnn_df[self.rnn_continue_X_cols])
rnn_X_train_transform = self.scalar_rnn.transform(train_rnn_df[self.rnn_continue_X_cols])
rnn_X_test_transform = self.scalar_rnn.transform(test_rnn_df[self.rnn_continue_X_cols])
# 对离散特征进行LabelEncoder编码
if self.static_discrete_X_cols:
category_X_train = pd.DataFrame(columns=self.static_discrete_X_cols)
category_X_test =
|
pd.DataFrame(columns=self.static_discrete_X_cols)
|
pandas.DataFrame
|
""" Problem 1
Unsupervised Learning
MovieLens 100k dataset
Matrix Factorization with side information
This is the collaboratively homework 3
Base on <NAME> examples
by Unsupervised Learning Class 2021
Texas Tech University - Costa Rica
"""
import os
# Reduse tensorflow logs in terminal after model works fine
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import time
import pickle
import numpy as np
import pandas as pd
import sklearn.metrics
import tensorflow as tf
import matplotlib.pyplot as plt
# Load and prepare data
# Load the training and test data from the Pickle file
with open("movielens_dataset.pickle", "rb") as f:
train_data, train_labels, test_data, test_labels = pickle.load(f)
# Get the side info
with open("movielens_side_info.pickle_v2", "rb") as f:
users_side_info, movies_side_info = pickle.load(f)
# Standardize scale of columns in user side-info table
for col in users_side_info.columns:
if col == "User_ID":
continue
users_side_info[col] = (users_side_info[col] - users_side_info[col].mean())/users_side_info[col].std()
# Standardize scale of columns in movies side-info table
for col in movies_side_info.columns:
if col == "Movie_ID":
continue
movies_side_info[col] = (movies_side_info[col] - movies_side_info[col].mean())/movies_side_info[col].std()
# Get the sizes
n_users = max(train_data["User_ID"])
n_movies = max(train_data["Movie_ID"])
# Create a user vector
train_user = train_data.loc[:,["User_ID"]]
test_user = test_data.loc[:,["User_ID"]]
# Merge the user side info
cols = users_side_info.columns # Keep all side-info columns
#cols = ["User_ID"] # Don't keep any side-info columns
train_user = pd.merge(train_user, users_side_info[cols], on="User_ID", how='left')
test_user = pd.merge(test_user, users_side_info[cols], on="User_ID", how='left')
# Create a movies vector
train_movie = train_data.loc[:,["Movie_ID"]]
test_movie = test_data.loc[:,["Movie_ID"]]
# Merge the movie side info
cols = movies_side_info.columns # Keep all side-info columns
#cols = ["Movie_ID"]; # Don't keep any side-info columns
train_movie = pd.merge(train_movie, movies_side_info[cols], on="Movie_ID", how='left')
test_movie = pd.merge(test_movie, movies_side_info[cols], on="Movie_ID", how='left')
# Reset the train label indices, to be consistent with the merged tables
train_labels = train_labels.reset_index(drop=True)
test_labels = test_labels.reset_index(drop=True)
# One-hot encode User_ID
# To avoid issues with missing users, we will cast the columns to a specific list of categories
user_list = range(1, n_users+1)
train_user["User_ID"] = train_user["User_ID"].astype(pd.api.types.CategoricalDtype(user_list))
test_user["User_ID"] = test_user["User_ID"].astype(pd.api.types.CategoricalDtype(user_list))
train_user = pd.get_dummies(train_user, columns=["User_ID"])
test_user =
|
pd.get_dummies(test_user, columns=["User_ID"])
|
pandas.get_dummies
|
import argparse
import os
import re
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from scipy import stats
MIN_TEXT_SIZE = 1000
MAX_TEXT_SIZE = 100000
def relative_file(*paths):
return os.path.join(os.path.dirname(__file__), *paths)
def read_csv(filename):
print('reading:', filename)
return pd.read_csv(filename)
parser = argparse.ArgumentParser()
parser.add_argument(
    '--output', help='path to the output directory', default=relative_file('../results')
)
parser.add_argument(
'--countries',
    help='path to the country metadata',
default=relative_file('../data/pmc_metadata.affiliations.countries.csv'),
)
parser.add_argument(
'--size',
    help='path to the text size metadata',
default=relative_file('../data/pmc_articles.text_size.csv'),
)
parser.add_argument(
'--pubtype',
    help='path to the publication type metadata',
default=relative_file('../data/pmc_metadata.entrez.csv'),
)
parser.add_argument(
'--scores',
    help='path to the LDA scores data',
default=relative_file('../data/pmc_articles.lda_coherence.csv'),
)
args = parser.parse_args()
sns.set_style('whitegrid')
def savefig(ax, plot_name):
plot_name = os.path.join(args.output, plot_name)
print('writing:', plot_name)
try:
ax.figure.savefig(plot_name, bbox_inches='tight')
except AttributeError:
ax.savefig(plot_name, bbox_inches='tight')
plt.close()
loc_df = read_csv(args.countries)[['PMCID', 'country']]
pubtype_df = read_csv(args.pubtype)
pubtype_df = pubtype_df[pubtype_df.lang == 'eng']
text_size_df = read_csv(args.size)
df = read_csv(args.scores)
df['PMCID'] = df.filename.str.split('.').str[0]
df = df.merge(loc_df, on=['PMCID'], how='inner')
df = df.merge(pubtype_df, on=['PMCID'], how='inner')
df = df.merge(text_size_df.copy(), on=['PMCID'], how='inner')
df['is_english'] = df.country.isin({'US', 'UK', 'Canada', 'Australia'})
df['short_text'] = df.text_size < MIN_TEXT_SIZE
df['text_size_bin'] = df['text_size'].apply(lambda x: round(x, -2))
ax = sns.relplot(kind='scatter', data=df, x='text_size', y='score', hue='is_english')
plt.axvline(MIN_TEXT_SIZE)
plt.axvline(MAX_TEXT_SIZE)
ax.set(xscale='log')
savefig(ax, 'pmc.lda_coherence.text_size.scatter.png')
# now drop low text size
df = df[(df.text_size >= MIN_TEXT_SIZE) & (df.text_size <= MAX_TEXT_SIZE)].copy()
print(df[(df.score == 1) & (df.text_size >= 1000)])
# create stats for sheets output
ttest = stats.ttest_ind(df[df.is_english].text_size, df[~df.is_english].text_size, equal_var=False)
ttest_scores = [('text_size', ttest.statistic, ttest.pvalue)]
ttest = stats.ttest_ind(df[df.is_english].score, df[~df.is_english].score, equal_var=False)
ttest_scores.append(('lda_coherence', ttest.statistic, ttest.pvalue))
ttest_scores =
|
pd.DataFrame(ttest_scores, columns=['measure', 'ttest_statistic', 'ttest_pvalue'])
|
pandas.DataFrame
|
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pandas as pd
import pylab as pl
import numpy as np
from scipy import ndimage
from scipy.cluster import hierarchy
from scipy.spatial import distance_matrix
from sklearn import manifold, datasets, preprocessing, metrics
from sklearn.cluster import AgglomerativeClustering
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.datasets._samples_generator import make_blobs
from io import StringIO
from math import sqrt
import pydotplus
import itertools
# Storing the movie information into a pandas dataframe
movies_df = pd.read_csv('movies.csv')
# Storing the user information into a pandas dataframe
ratings_df = pd.read_csv('ratings.csv')
# Using regular expressions to find a year stored between parentheses
# We specify the parantheses so we don't conflict with movies that have years in their titles
movies_df['year'] = movies_df.title.str.extract('(\(\d\d\d\d\))', expand=False)
# Removing the parentheses
movies_df['year'] = movies_df.year.str.extract('(\d\d\d\d)', expand=False)
# Removing the years from the 'title' column
movies_df['title'] = movies_df.title.str.replace('(\(\d\d\d\d\))', '')
# Applying the strip function to get rid of any ending whitespace characters that may have appeared
movies_df['title'] = movies_df['title'].apply(lambda x: x.strip())
# Dropping the genres column
movies_df = movies_df.drop('genres', 1)
# Drop removes a specified row or column from a dataframe
ratings_df = ratings_df.drop('timestamp', 1)
userInput = [
{'title': 'Breakfast Club, The', 'rating': 5},
{'title': 'Toy Story', 'rating': 3.5},
{'title': 'Jumanji', 'rating': 2},
{'title': "Pulp Fiction", 'rating': 5},
{'title': 'Akira', 'rating': 4.5}
]
inputMovies = pd.DataFrame(userInput)
# Filtering out the movies by title
inputId = movies_df[movies_df['title'].isin(inputMovies['title'].tolist())]
# Then merging it so we can get the movieId. It's implicitly merging it by title.
inputMovies = pd.merge(inputId, inputMovies)
# Dropping information we won't use from the input dataframe
inputMovies = inputMovies.drop('year', 1)
# Filtering out users that have watched movies that the input has watched and storing it
userSubset = ratings_df[ratings_df['movieId'].isin(
inputMovies['movieId'].tolist())]
# Groupby creates several sub dataframes where they all have the same value in the column specified as the parameter
userSubsetGroup = userSubset.groupby(['userId'])
# Sorting it so users with the most movies in common with the input will have priority
userSubsetGroup = sorted(
userSubsetGroup, key=lambda x: len(x[1]), reverse=True)
userSubsetGroup = userSubsetGroup[0:100]
# Store the Pearson Correlation in a dictionary, where the key is the user Id and the value is the coefficient
pearsonCorrelationDict = {}
# For every user group in our subset
for name, group in userSubsetGroup:
# Let's start by sorting the input and current user group so the values aren't mixed up later on
group = group.sort_values(by='movieId')
inputMovies = inputMovies.sort_values(by='movieId')
# Get the N for the formula
nRatings = len(group)
# Get the review scores for the movies that they both have in common
temp_df = inputMovies[inputMovies['movieId'].isin(
group['movieId'].tolist())]
# And then store them in a temporary buffer variable in a list format to facilitate future calculations
tempRatingList = temp_df['rating'].tolist()
# Let's also put the current user group reviews in a list format
tempGroupList = group['rating'].tolist()
# Now let's calculate the pearson correlation between two users, so called, x and y
Sxx = sum([i**2 for i in tempRatingList]) - \
pow(sum(tempRatingList), 2)/float(nRatings)
Syy = sum([i**2 for i in tempGroupList]) - \
pow(sum(tempGroupList), 2)/float(nRatings)
Sxy = sum(i*j for i, j in zip(tempRatingList, tempGroupList)) - \
sum(tempRatingList)*sum(tempGroupList)/float(nRatings)
# If the denominator is different than zero, then divide, else, 0 correlation.
if Sxx != 0 and Syy != 0:
pearsonCorrelationDict[name] = Sxy/sqrt(Sxx*Syy)
else:
pearsonCorrelationDict[name] = 0
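# Sanity check of the Sxx/Syy/Sxy formulation above on toy ratings (illustrative numbers
# only): the result should match numpy's Pearson correlation coefficient.
_x = [5.0, 3.5, 2.0, 5.0, 4.5]
_y = [4.0, 3.0, 2.5, 4.5, 4.0]
_n = len(_x)
_Sxx = sum(i**2 for i in _x) - pow(sum(_x), 2)/float(_n)
_Syy = sum(j**2 for j in _y) - pow(sum(_y), 2)/float(_n)
_Sxy = sum(i*j for i, j in zip(_x, _y)) - sum(_x)*sum(_y)/float(_n)
print(_Sxy/sqrt(_Sxx*_Syy), np.corrcoef(_x, _y)[0, 1])  # the two values should agree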
pearsonDF = pd.DataFrame.from_dict(pearsonCorrelationDict, orient='index')
pearsonDF.columns = ['similarityIndex']
pearsonDF['userId'] = pearsonDF.index
pearsonDF.index = range(len(pearsonDF))
topUsers = pearsonDF.sort_values(by='similarityIndex', ascending=False)[0:50]
topUsersRating = topUsers.merge(
ratings_df, left_on='userId', right_on='userId', how='inner')
topUsersRating.head()
# Multiplies the similarity by the user's ratings
topUsersRating['weightedRating'] = topUsersRating['similarityIndex'] * \
topUsersRating['rating']
topUsersRating.head()
# Applies a sum to the topUsers after grouping it up by userId
tempTopUsersRating = topUsersRating.groupby(
'movieId').sum()[['similarityIndex', 'weightedRating']]
tempTopUsersRating.columns = ['sum_similarityIndex', 'sum_weightedRating']
tempTopUsersRating.head()
# Creates an empty dataframe
recommendation_df =
|
pd.DataFrame()
|
pandas.DataFrame
|
#!/usr/bin/env python
# -- coding: utf-8 --
# PAQUETES PARA CORRER OP.
import netCDF4
import pandas as pd
import numpy as np
import datetime as dt
import json
import wmf.wmf as wmf
import hydroeval
import glob
import MySQLdb
#modulo pa correr modelo
import hidrologia
from sklearn.linear_model import LinearRegression
import math
import os
#spatial
import cartopy.crs as crs
import geopandas as gpd
from shapely.geometry import Point  # needed below when 'mask' is given as a shapefile path
import pyproj
from pyproj import transform
from cartopy.feature import ShapelyFeature
import cartopy.crs as ccrs
from cartopy.io.shapereader import Reader
from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import seaborn as sns
sns.set(style="whitegrid")
sns.set_context('notebook', font_scale=1.13)
#FORMATO
# fuente
import matplotlib
matplotlib.use('Agg')
import pylab as pl
#avoid warnings
import warnings
warnings.filterwarnings('ignore')
#---------------
#Funciones base.
#---------------
def get_rutesList(rutas):
    ''' Opens the text file at the path 'rutas' and returns a list with the lines of that file.
    Base function.
    #Arguments
    rutas: string, path to the file.
    '''
f = open(rutas,'r')
L = f.readlines()
f.close()
return L
def set_modelsettings(ConfigList):
ruta_modelset = get_ruta(ConfigList,'ruta_proj')+get_ruta(ConfigList,'ruta_modelset')
# model settings Json
with open(ruta_modelset, 'r') as f:
model_set = json.load(f)
# Model set
wmf.models.max_aquifer = wmf.models.max_gravita * 10
wmf.models.retorno = model_set['retorno']
wmf.models.show_storage = model_set['show_storage']
wmf.models.separate_fluxes = model_set['separate_fluxes']
wmf.models.dt = model_set['dt']
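# For reference, a minimal model-settings JSON compatible with set_modelsettings only needs
# the keys read above (the values shown are placeholders, not calibrated settings):
# {
#     "retorno": 0,
#     "show_storage": 1,
#     "separate_fluxes": 0,
#     "dt": 300
# }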
def round_time(date = dt.datetime.now(),round_mins=5):
'''
Rounds datetime object to nearest 'round_time' minutes.
If 'dif' is < 'round_time'/2 takes minute behind, else takesminute ahead.
Parameters
----------
date : date to round
round_mins : round to this nearest minutes interval
Returns
----------
datetime object rounded, datetime object
'''
dif = date.minute % round_mins
if dif <= round_mins/2:
return dt.datetime(date.year, date.month, date.day, date.hour, date.minute - (date.minute % round_mins))
else:
return dt.datetime(date.year, date.month, date.day, date.hour, date.minute - (date.minute % round_mins)) + dt.timedelta(minutes=round_mins)
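# Quick check with illustrative timestamps (round_mins=5 by default):
# round_time(dt.datetime(2020, 1, 1, 10, 32)) -> 2020-01-01 10:30:00 (dif=2, rounds down)
# round_time(dt.datetime(2020, 1, 1, 10, 33)) -> 2020-01-01 10:35:00 (dif=3, rounds up)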
def get_credentials(ruta_credenciales):
credentials = json.load(open(ruta_credenciales))
#creds para consultas
mysqlServer = credentials['MySql_Siata']
for key in np.sort(list(credentials['MySql_Siata'].keys()))[::-1]: #1:hal, 2:sal
try:
connection = MySQLdb.connect(host=mysqlServer[key]['host'],
user=mysqlServer[key]['user'],
password=mysqlServer[key]['password'],
db=mysqlServer[key]['db'])
print('SERVER_CON: Succesful connection to %s'%(key))
host=mysqlServer[key]['host']
user=mysqlServer[key]['user']
password=mysqlServer[key]['password']
db=mysqlServer[key]['db']
break #si conecta bien a SAL para.
except:
print('SERVER_CON: No connection to %s'%(key))
pass
#creds para copiar a var
user2copy2var = credentials['cred_2copy2var']['user']; host2copy2var = credentials['cred_2copy2var']['host']
return host,user,password,db,user2copy2var,host2copy2var
def coord2hillID(ruta_nc, df_coordxy):
#lee simubasin pa asociar tramos, saca topologia basica
cu = wmf.SimuBasin(rute= ruta_nc)
cu.GetGeo_Cell_Basics()
cu.GetGeo_Parameters()
#saca coordenadas de todo el simubasin y las distancias entre ellas
coordsX = wmf.cu.basin_coordxy(cu.structure,cu.ncells)[0]
coordsY = wmf.cu.basin_coordxy(cu.structure,cu.ncells)[1]
disty = np.unique(np.diff(np.unique(np.sort(coordsY))))
distx = np.unique(np.diff(np.unique(np.sort(coordsX))))
df_ids = pd.DataFrame(index = df_coordxy.index,columns=['id'])
#identifica el id de la ladera donde caen los ptos
for index in df_coordxy.index:
df_ids.loc[index]=cu.hills_own[np.where((coordsY+disty[0]/2>df_coordxy.loc[index].values[1]) & (coordsY-disty[0]/2<df_coordxy.loc[index].values[1]) & (coordsX+distx[0]/2>df_coordxy.loc[index].values[0]) & (coordsX-distx[0]/2<df_coordxy.loc[index].values[0]))[0]].data
return df_ids
#-----------------------------------
#-----------------------------------
#Funciones de lectura del configfile
#-----------------------------------
#-----------------------------------
def get_ruta(RutesList, key):
    ''' Searches the list 'RutesList' for the line that starts with the given key and returns the path.
    Base function.
    #Arguments
    RutesList: list returned by get_rutesList() in this script.
    key: string, key used to find the line in the list that starts with it.
    '''
if any(i.startswith('- **'+key+'**') for i in RutesList):
for i in RutesList:
if i.startswith('- **'+key+'**'):
return i.split(' ')[-1][:-1]
else:
return 'Aviso: no existe linea con el key especificado'
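# Illustrative configfile line for the readers above (path is a placeholder): a line such as
# "- **ruta_proj** /home/user/project/" makes get_ruta(ConfigList, 'ruta_proj')
# return '/home/user/project/'.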
def get_line(RutesList, key):
    ''' Searches the list 'RutesList' for the line that starts with the given key and returns the line tokens.
    Base function.
    #Arguments
    RutesList: list returned by get_rutesList() in this script.
    key: string, key used to find the line in the list that starts with it.
    '''
if any(i.startswith('- **'+key+'**') for i in RutesList):
for i in RutesList:
if i.startswith('- **'+key+'**'):
return i[:-1].split(' ')[2:]
else:
return 'Aviso: no existe linea con el key especificado'
def get_modelPlot(RutesList, PlotType = 'Qsim_map'):
''' #Devuelve un diccionario con la informacion de la tabla Plot en el configfile.
#Funcion operacional.
#Argumentos:
- RutesList= lista, es el resultado de leer el configfile con al.get_ruteslist.
- PlotType= boolean, tipo del plot? . Default= 'Qsim_map'.
'''
for l in RutesList:
key = l.split('|')[1].rstrip().lstrip()
if key[3:] == PlotType:
EjecsList = [i.rstrip().lstrip() for i in l.split('|')[2].split(',')]
return EjecsList
return key
def get_modelPars(RutesList):
''' #Devuelve un diccionario con la informacion de la tabla Calib en el configfile.
#Funcion operacional.
#Argumentos:
- RutesList= lista, es el resultado de leer el configfile con al.get_ruteslist.
'''
DCalib = {}
for l in RutesList:
c = [float(i) for i in l.split('|')[3:-1]]
name = l.split('|')[2]
DCalib.update({name.rstrip().lstrip(): c})
return DCalib
def get_modelPaths(List):
''' #Devuelve un diccionario con la informacion de la tabla Calib en el configfile.
#Funcion operacional.
#Argumentos:
- RutesList= lista, es el resultado de leer el configfile con al.get_ruteslist.
'''
DCalib = {}
for l in List:
c = [i for i in l.split('|')[3:-1]]
name = l.split('|')[2]
DCalib.update({name.rstrip().lstrip(): c[0]})
return DCalib
def get_modelStore(RutesList):
''' #Devuelve un diccionario con la informacion de la tabla Store en el configfile.
#Funcion operacional.
#Argumentos:
- RutesList= lista, es el resultado de leer el configfile con al.get_ruteslist.
'''
DStore = {}
for l in RutesList:
l = l.split('|')
DStore.update({l[1].rstrip().lstrip():
{'Nombre': l[2].rstrip().lstrip(),
'Actualizar': l[3].rstrip().lstrip(),
'Tiempo': float(l[4].rstrip().lstrip()),
'Condition': l[5].rstrip().lstrip(),
'Calib': l[6].rstrip().lstrip(),
'BackSto': l[7].rstrip().lstrip(),
'Slides': l[8].rstrip().lstrip()}})
return DStore
def get_modelStoreLastUpdate(RutesList):
''' #Devuelve un diccionario con la informacion de la tabla Update en el configfile.
#Funcion operacional.
#Argumentos:
- RutesList= lista, es el resultado de leer el configfile con al.get_ruteslist.
'''
DStoreUpdate = {}
for l in RutesList:
l = l.split('|')
DStoreUpdate.update({l[1].rstrip().lstrip():
{'Nombre': l[2].rstrip().lstrip(),
'LastUpdate': l[3].rstrip().lstrip()}})
return DStoreUpdate
def get_ConfigLines(RutesList, key, keyTable = None, PlotType = None):
    ''' #Returns a dictionary with the information of the configfile tables: Calib, Store, Update, Plot.
    #Operational function.
    #Arguments:
    - RutesList= list, result of reading the configfile with al.get_rutesList.
    - key= string, keyword of the table to read. Can be: -s, -t.
    - keyTable= string, keyword of the table to read. Can be: Pars, Paths, Store, Update, Plot.
    - PlotType= string, type of plot. Default= None.
    '''
List = []
for i in RutesList:
if i.startswith('|'+key) or i.startswith('| '+key):
List.append(i)
if len(List)>0:
if keyTable == 'Pars':
return get_modelPars(List)
if keyTable == 'Paths':
return get_modelPaths(List)
if keyTable == 'Store':
return get_modelStore(List)
if keyTable == 'Update':
return get_modelStoreLastUpdate(List)
if keyTable == 'Plot':
return get_modelPlot(List, PlotType=PlotType)
return List
else:
return 'Aviso: no se encuentran lineas con el key de inicio especificado.'
#-----------------------------------
#-----------------------------------
#Funciones generacion de radar
#-----------------------------------
#-----------------------------------
def file_format(start,end):
'''
Returns the file format customized for siata for elements containing
starting and ending point
Parameters
----------
start : initial date
end : final date
Returns
----------
file format with datetimes like %Y%m%d%H%M
    Example
    ----------
    file_format('2018-05-01 06:00','2018-05-01 12:00') -> '201805010600-201805011200'
    '''
start,end = pd.to_datetime(start),pd.to_datetime(end)
format = '%Y%m%d%H%M'
return '%s-%s'%(start.strftime(format),end.strftime(format))
def hdr_to_series(path):
'''
Reads hdr rain files and converts it into pandas Series
Parameters
----------
path : path to .hdr file
Returns
----------
pandas time Series with mean radar rain
'''
s = pd.read_csv(path,skiprows=5,usecols=[2,3]).set_index(' Fecha ')[' Lluvia']
s.index = pd.to_datetime(list(map(lambda x:x.strip()[:10]+' '+x.strip()[11:],s.index)))
return s
def hdr_to_df(path):
'''
Reads hdr rain files and converts it into pandas DataFrame
Parameters
----------
path : path to .hdr file
Returns
----------
pandas DataFrame with mean radar rain
'''
if path.endswith('.hdr') != True:
path = path+'.hdr'
df = pd.read_csv(path,skiprows=5).set_index(' Fecha ')
df.index = pd.to_datetime(list(map(lambda x:x.strip()[:10]+' '+x.strip()[11:],df.index)))
df = df.drop('IDfecha',axis=1)
df.columns = ['record','mean_rain']
return df
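# e.g. hdr_to_df('/path/to/event.hdr') (placeholder path) returns a datetime-indexed
# DataFrame with columns ['record', 'mean_rain']; 'record' is the position of each
# rain field inside the companion .bin file.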
def bin_to_df(path,ncells,start=None,end=None,**kwargs):
'''
Reads rain fields (.bin) and converts it into pandas DataFrame
Parameters
----------
path : path to .hdr and .bin file
start : initial date
end : final date
Returns
----------
pandas DataFrame with mean radar rain
Note
----------
path without extension, ejm folder_path/file not folder_path/file.bin,
if start and end is None, the program process all the data
'''
    start,end = pd.to_datetime(start),pd.to_datetime(end)
    #read the companion .hdr first: 'df' was used below without being defined, and bin_to_df
    #needs its 'record' column and datetime index (see hdr_to_df above)
    df = hdr_to_df(path)
    records = df['record'].values
rain_field = []
for count,record in enumerate(records):
if record != 1:
rain_field.append(wmf.models.read_int_basin('%s.bin'%path,record,ncells)[0]/1000.0)
count = count+1
# format = (count*100.0/len(records),count,len(records))
else:
rain_field.append(np.zeros(ncells))
return pd.DataFrame(np.matrix(rain_field),index=df.index)
def get_radar_rain(start,end,Dt,cuenca,codigos,rutaNC,accum=False,path_tif=None,all_radextent=False,
mask=None,meanrain_ALL=True,path_masks_csv=None,complete_naninaccum=False,save_bin=False,
save_class = False,path_res=None,umbral=0.005,
verbose=True, zero_fill = None):
start,end = pd.to_datetime(start),pd.to_datetime(end)
#hora UTC
startUTC,endUTC = start + pd.Timedelta('5 hours'), end + pd.Timedelta('5 hours')
fechaI,fechaF,hora_1,hora_2 = startUTC.strftime('%Y-%m-%d'), endUTC.strftime('%Y-%m-%d'),startUTC.strftime('%H:%M'),endUTC.strftime('%H:%M')
#Obtiene las fechas por dias para listar archivos por dia
datesDias = pd.date_range(fechaI, fechaF,freq='D')
a = pd.Series(np.zeros(len(datesDias)),index=datesDias)
a = a.resample('A').sum()
Anos = [i.strftime('%Y') for i in a.index.to_pydatetime()]
datesDias = [d.strftime('%Y%m%d') for d in datesDias.to_pydatetime()]
#lista los .nc existentes de ese dia: rutas y fechas del nombre del archivo
ListDatesinNC = []
ListRutas = []
for d in datesDias:
try:
L = glob.glob(rutaNC + d + '*.nc')
ListRutas.extend(L)
ListDatesinNC.extend([i.split('/')[-1].split('_')[0] for i in L])
except:
print ('Sin archivos para la fecha %s'%d)
# Organiza las listas de rutas y la de las fechas a las que corresponde cada ruta.
ListRutas.sort()
ListDatesinNC.sort()#con estas fechas se asignaran los barridos a cada timestep.
#index con las fechas especificas de los .nc existentes de radar
datesinNC = [dt.datetime.strptime(d,'%Y%m%d%H%M') for d in ListDatesinNC]
datesinNC = pd.to_datetime(datesinNC)
#Obtiene el index con la resolucion deseada, en que se quiere buscar datos existentes de radar,
textdt = '%d' % Dt
#Agrega hora a la fecha inicial
if hora_1 != None:
inicio = fechaI+' '+hora_1
else:
inicio = fechaI
#agrega hora a la fecha final
if hora_2 != None:
final = fechaF+' '+hora_2
else:
final = fechaF
datesDt = pd.date_range(inicio,final,freq = textdt+'s')
#Obtiene las posiciones de acuerdo al dt para cada fecha, si no hay barrido en ese paso de tiempo se acumula
#elbarrido inmediatamente anterior.
#Saca una lista con las pos de los barridos por cada timestep, y las pega en PosDates
#Si el limite de completar faltantes con barrido anterior es de 10 min, solo se completa si dt=300s
#limite de autocompletar : 10m es decir, solo repito un barrido.
PosDates = []
pos1 = []
pos_completed = []
lim_completed = 3 #ultimos 3 barridos - 15min
for ind,d1,d2 in zip(np.arange(datesDt[:-1].size),datesDt[:-1],datesDt[1:]):
pos2 = np.where((datesinNC<d2) & (datesinNC>=d1))[0].tolist()
# si no hay barridos en el dt de inicio sellena con zero - lista vacia
#y no esta en los primero 3 pasos : 15min.
# si se puede completar
# y si en el los lim_completed pasos atras no hubo más de lim_completed-1 pos con pos_completed=2, lim_completed-1 para que deje correr sólo hasta el lim_completed.
#asi solo se pueded completar y pos_completed=2 una sola vez.
if len(pos2) == 0 and ind not in np.arange(lim_completed) and complete_naninaccum == True and Dt == 300. and np.where(np.array(pos_completed[ind-lim_completed:])==2)[0].size <= lim_completed-1 : #+1 porque coge los ultimos n-1 posiciones.
pos2 = pos1
pos_completed.append(2)
elif len(pos2) == 0:
pos2=[]
pos_completed.append(0)
else:
pos_completed.append(1)
#si se quiere completar y hay barridos en este dt, guarda esta pos para si es necesario completar las pos de dt en el sgte paso
if complete_naninaccum == True and len(pos2) != 0 and Dt == 300. and np.where(np.array(pos_completed[ind-lim_completed:])==2)[0].size <= lim_completed-1 :
pos1 = pos2
else:
pos1 = []
PosDates.append(pos2)
# si se asigna, se agregas dates y PosDates para barridos en cero al final.
if zero_fill is not None:
#se redefinen datesDt luego que los PosDates fueron asignados
final = (pd.to_datetime(final) + pd.Timedelta('%ss'%Dt*zero_fill)).strftime('%Y-%m-%d %H:%M')
datesDt = pd.date_range(inicio,final,freq = textdt+'s')
# se agrega a PosDates pasos del futuro con barridos en cero, y se cambia end.
end = end + pd.Timedelta('%ss'%Dt*zero_fill) #pasos de tiempo:steps, independiente del Dt
for steps in np.arange(zero_fill): PosDates.append([])
# paso a hora local
datesDt = datesDt - dt.timedelta(hours=5)
datesDt = datesDt.to_pydatetime()
#Index de salida en hora local
rng= pd.date_range(start.strftime('%Y-%m-%d %H:%M'),end.strftime('%Y-%m-%d %H:%M'), freq= textdt+'s')
df = pd.DataFrame(index = rng,columns=codigos)
#mascara con shp a parte de wmf
if mask is not None:
        #open one scan to extract the mask ('pos' was undefined at this point, so the
        #first available scan positions, PosDates[0], are used instead)
        g = netCDF4.Dataset(ListRutas[PosDates[0][0]])
        field = g.variables['Rain'][:].T/(((len(PosDates[0])*3600)/Dt)*1000.0)#g['Rain'][:]#
RadProp = [g.ncols, g.nrows, g.xll, g.yll, g.dx, g.dx]
g.close()
longs=np.array([RadProp[2]+0.5*RadProp[4]+i*RadProp[4] for i in range(RadProp[0])])
lats=np.array([RadProp[3]+0.5*RadProp[5]+i*RadProp[5] for i in range(RadProp[1])])
x,y = np.meshgrid(longs,lats)
#mask as a shp
if type(mask) == str:
#boundaries
shp = gpd.read_file(mask)
poly = shp.geometry.unary_union
shp_mask = np.zeros([len(lats),len(longs)])
for i in range(len(lats)):
for j in range(len(longs)):
if (poly.contains(Point(longs[j],lats[i])))==True:
shp_mask[i,j] = 1# Rain_mask es la mascara
l = x[shp_mask==1].min()
r = x[shp_mask==1].max()
d = y[shp_mask==1].min()
a = y[shp_mask==1].max()
#mask as a list with coordinates whithin the radar extent
elif type(mask) == list:
l = mask[0] ; r = mask[1] ; d = mask[2] ; a = mask[3]
x,y = x.T,y.T #aun tengo dudas con el recorte, si en nc queda en la misma pos que los lats,longs.
#boundaries position
x_wh,y_wh = np.where((x>l)&(x<r)&(y>d)&(y<a))
#se redefine sfield con size que corresponde
field = field[np.unique(x_wh)[0]:np.unique(x_wh)[-1],np.unique(y_wh)[0]:np.unique(y_wh)[-1]]
if save_bin and len(codigos)==1 and path_res is not None:
#open nc file
f = netCDF4.Dataset(path_res,'w', format='NETCDF4') #'w' stands for write
tempgrp = f.createGroup('rad_data') # as folder for saving files
lon = longs[np.unique(x_wh)[0]:np.unique(x_wh)[-1]]
lat = lats[np.unique(y_wh)[0]:np.unique(y_wh)[-1]]
#set name and leght of dimensions
tempgrp.createDimension('lon', len(lon))
tempgrp.createDimension('lat', len(lat))
tempgrp.createDimension('time', None)
#building variables
longitude = tempgrp.createVariable('longitude', 'f4', 'lon')
latitude = tempgrp.createVariable('latitude', 'f4', 'lat')
rain = tempgrp.createVariable('rain', 'f4', (('time', 'lat', 'lon')))
time = tempgrp.createVariable('time', 'i4', 'time')
#adding globalattributes
f.description = "Radar rainfall dataset containing one group"
f.history = "Created " + dt.datetime.now().strftime("%d/%m/%y")
#Add local attributes to variable instances
longitude.units = 'degrees east - wgs4'
latitude.units = 'degrees north - wgs4'
time.units = 'minutes since 2020-01-01 00:00'
rain.units = 'mm/h'
#passing data into variables
# use proper indexing when passing values into the variables - just like you would a numpy array.
longitude[:] = lon #The "[:]" at the end of the variable instance is necessary
latitude[:] = lat
else:
# acumular dentro de la cuenca.
cu = wmf.SimuBasin(rute= cuenca)
if save_class:
cuConv = wmf.SimuBasin(rute= cuenca)
cuStra = wmf.SimuBasin(rute= cuenca)
#accumulated in basin
if accum:
if mask is not None:
rvec_accum = np.zeros(field.shape)
dfaccum = pd.DataFrame(index = rng) #este producto no da con mask.
else:
rvec_accum = np.zeros(cu.ncells)
# rvec = np.zeros(cu.ncells)
dfaccum = pd.DataFrame(np.zeros((cu.ncells,rng.size)).T,index = rng)
else:
pass
#all extent
if all_radextent:
radmatrix = np.zeros((1728, 1728))
#ITERA SOBRE LOS BARRIDOS DEL PERIODO Y SE SACAN PRODUCTOS
# print ListRutas
for ind,dates,pos in zip(np.arange(len(datesDt[1:])),datesDt[1:],PosDates):
#escoge como definir el size de rvec
if mask is not None:
rvec = np.zeros(shape = field.shape)
else:
rvec = np.zeros(cu.ncells)
if save_class:
rConv = np.zeros(cu.ncells, dtype = int)
rStra = np.zeros(cu.ncells, dtype = int)
try:
#se lee y agrega lluvia de los nc en el intervalo.
for c,p in enumerate(pos):
#lista archivo leido
if verbose:
print (ListRutas[p])
#Lee la imagen de radar para esa fecha
g = netCDF4.Dataset(ListRutas[p])
rainfield = g.variables['Rain'][:].T/(((len(pos)*3600)/Dt)*1000.0)
RadProp = [g.ncols, g.nrows, g.xll, g.yll, g.dx, g.dx]
#if all extent
if all_radextent:
radmatrix += rainfield
#if mask
if mask is not None and type(mask) == str:
rvec += (rainfield*shp_mask)[np.unique(x_wh)[0]:np.unique(x_wh)[-1],np.unique(y_wh)[0]:np.unique(y_wh)[-1]]
elif mask is not None and type(mask) == list:
rvec += rainfield[np.unique(x_wh)[0]:np.unique(x_wh)[-1],np.unique(y_wh)[0]:np.unique(y_wh)[-1]]
# on WMF.
else:
#Agrega la lluvia en el intervalo
rvec += cu.Transform_Map2Basin(rainfield,RadProp)
if save_class:
ConvStra = cu.Transform_Map2Basin(g.variables['Conv_Strat'][:].T, RadProp)
# 1-stra, 2-conv
rConv = np.copy(ConvStra)
rConv[rConv == 1] = 0; rConv[rConv == 2] = 1
rStra = np.copy(ConvStra)
rStra[rStra == 2] = 0
rvec[(rConv == 0) & (rStra == 0)] = 0
                        rConv[rvec == 0] = 0
                        rStra[rvec == 0] = 0
#Cierra el netCDF
g.close()
#muletilla
path = 'bla'
except:
print ('error - no field found ')
path = ''
if accum:
if mask is not None:
rvec += np.zeros(shape = field.shape)
rvec = np.zeros(shape = field.shape)
else:
rvec_accum += np.zeros(cu.ncells)
rvec = np.zeros(cu.ncells)
else:
if mask is not None:
rvec = np.zeros(shape = field.shape)
else:
rvec = np.zeros(cu.ncells)
if save_class:
rConv = np.zeros(cu.ncells)
rStra = np.zeros(cu.ncells)
if all_radextent:
radmatrix += np.zeros((1728, 1728))
#acumula dentro del for que recorre las fechas
if accum:
rvec_accum += rvec
if mask is None: #esto para mask no sirve
dfaccum.loc[dates.strftime('%Y-%m-%d %H:%M:%S')]= rvec
else:
pass
# si se quiere sacar promedios de lluvia de radar en varias cuencas definidas en 'codigos'
#subbasins defined for WMF
if meanrain_ALL and mask is None:
mean = []
df_posmasks = pd.read_csv(path_masks_csv,index_col=0)
for codigo in codigos:
if path == '': # si no hay nc en ese paso de tiempo.
mean.append(np.nan)
else:
mean.append(np.sum(rvec*df_posmasks['%s'%codigo])/float(df_posmasks['%s'%codigo][df_posmasks['%s'%codigo]==1].size))
# se actualiza la media de todas las mascaras en el df.
df.loc[dates.strftime('%Y-%m-%d %H:%M:%S')]=mean
else:
pass
#guarda binario y df, si guardar binaria paso a paso no me interesa rvecaccum
if mask is None and save_bin == True and len(codigos)==1 and path_res is not None:
mean = []
#guarda en binario
dentro = cu.rain_radar2basin_from_array(vec = rvec,
ruta_out = path_res,
fecha = dates,
dt = Dt,
umbral = umbral)
#si guarda nc de ese timestep guarda clasificados
if dentro == 0:
hagalo = True
else:
hagalo = False
#mira si guarda o no los clasificados
if save_class:
#Escribe el binario convectivo
aa = cuConv.rain_radar2basin_from_array(vec = rConv,
ruta_out = path_res+'_conv',
fecha = dates,
dt = Dt,
doit = hagalo)
#Escribe el binario estratiforme
aa = cuStra.rain_radar2basin_from_array(vec = rStra,
ruta_out = path_res+'_stra',
fecha = dates,
dt = Dt,
doit = hagalo)
#guarda en df meanrainfall.
try:
mean.append(rvec.mean())
except:
mean.append(np.nan)
df.loc[dates.strftime('%Y-%m-%d %H:%M:%S')]=mean
elif mask is None and save_bin == True and len(codigos)==1 and path_res is None: #si es una cuenca pero no se quiere guardar binarios.
mean = []
#guarda en df meanrainfall.
try:
mean.append(rvec.mean())
except:
mean.append(np.nan)
df.loc[dates.strftime('%Y-%m-%d %H:%M:%S')]=mean
#guardar .nc con info de recorte de radar: mask.
if mask is not None and save_bin and len(codigos)==1 and path_res is not None:
mean = []
#https://pyhogs.github.io/intro_netcdf4.html
rain[ind,:,:] = rvec.T
time[ind] = int((dates - pd.to_datetime('2010-01-01 00:00')).total_seconds()/60) #min desde 2010
if ind == np.arange(len(datesDt[1:]))[-1]:
f.close()
print ('.nc saved')
#guarda en df meanrainfall.
if path == '': # si no hay nc en ese paso de tiempo.
mean.append(np.nan)
else:
mean.append(np.sum(rvec)/float(shp_mask[shp_mask==1].size))
#save
df.loc[dates.strftime('%Y-%m-%d %H:%M:%S')]=mean
if mask is None and save_bin == True and len(codigos)==1 and path_res is not None:
#Cierrra el binario y escribe encabezado
cu.rain_radar2basin_from_array(status = 'close',ruta_out = path_res)
print ('.bin & .hdr saved')
if save_class:
cuConv.rain_radar2basin_from_array(status = 'close',ruta_out = path_res+'_conv')
cuStra.rain_radar2basin_from_array(status = 'close',ruta_out = path_res+'_stra')
print ('.bin & .hdr escenarios saved')
else:
print ('.bin & .hdr NOT saved')
pass
#elige los retornos.
if accum == True and path_tif is not None:
cu.Transform_Basin2Map(rvec_accum,path_tif)
return df,rvec_accum,dfaccum
elif accum == True and mask is not None:
return df,rvec_accum
elif accum == True and mask is None:
return df,rvec_accum,dfaccum
elif all_radextent:
return df,radmatrix
else:
return df
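# Illustrative call (paths and station codes are placeholders): mean radar rainfall every
# 5 minutes for the masks listed in path_masks_csv over a 6 h window.
# df_rain = get_radar_rain('2021-05-10 00:00', '2021-05-10 06:00', 300,
#                          cuenca='/path/cuenca.nc', codigos=[140, 106],
#                          rutaNC='/path/radar/', path_masks_csv='/path/masks.csv')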
def get_radar_rain_OP(start,end,Dt,cuenca,codigos,rutaNC,accum=False,path_tif=None,all_radextent=False,
meanrain_ALL=True,complete_naninaccum=False, evs_hist=False,save_bin=False,save_class = False,
path_res=None,umbral=0.005,include_escenarios = None,
verbose=True):
'''
Read .nc's file forn rutaNC:101Radar_Class within assigned period and frequency.
Por ahora solo sirve con un barrido por timestep, operacional a 5 min, melo.
0. It divides by 1000.0 and converts from mm/5min to mm/h.
1. Get mean radar rainfall in basins assigned in 'codigos' for finding masks, if the mask exist.
2. Write binary files if is setted.
- Cannot do both 1 and 2.
- To saving binary files (2) set: meanrain_ALL=False, save_bin=True, path_res= path where to write results,
len('codigos')=1, nc_path aims to the one with dxp and simubasin props setted.
Parameters
----------
start: string, date&time format %Y-%m%-d %H:%M, local time.
end: string, date&time format %Y-%m%-d %H:%M, local time.
Dt: float, timedelta in seconds. For this function it should be lower than 3600s (1h).
cuenca: string, simubasin .nc path with dxp and format from WMF. It should be 260 path if whole catchment analysis is needed, or any other .nc path for saving the binary file.
codigos: list, with codes of stage stations. Needed for finding the mask associated to a basin.
rutaNC: string, path with .nc files from radar meteorology group. Default in amazonas: 101Radar_Class
Optional Parameters
----------
accum: boolean, default False. True for getting the accumulated matrix between start and end.
Change returns: df,rvec (accumulated)
path_tif: string, path of tif to write accumlated basin map. Default None.
all_radextent:boolean, default False. True for getting the accumulated matrix between start and end in the
whole radar extent. Change returns: df,radmatrix.
meanrain_ALL: boolean, defaul True. True for getting the mean radar rainfall within several basins which mask are defined in 'codigos'.
save_bin: boolean, default False. True for saving .bin and .hdr files with rainfall and if len('codigos')=1.
save_class: boolean,default False. True for saving .bin and .hdr for convective and stratiform classification. Applies if len('codigos')=1 and save_bin = True.
path_res: string with path where to write results if save_bin=True, default None.
umbral: float. Minimum umbral for writing rainfall, default = 0.005.
Returns
----------
    - df with mean rainfall of the codes assigned in 'codigos'.
- df,rvec if accum = True.
- df,radmatrix if all_radextent = True.
- save .bin and .hdr if save_bin = True, len('codigos')=1 and path_res=path.
'''
#### FECHAS Y ASIGNACIONES DE NC####
start,end = pd.to_datetime(start),pd.to_datetime(end)
#hora UTC
startUTC,endUTC = start + pd.Timedelta('5 hours'), end + pd.Timedelta('5 hours')
fechaI,fechaF,hora_1,hora_2 = startUTC.strftime('%Y-%m-%d'), endUTC.strftime('%Y-%m-%d'),startUTC.strftime('%H:%M'),endUTC.strftime('%H:%M')
#Obtiene las fechas por dias para listar archivos por dia
datesDias = pd.date_range(fechaI, fechaF,freq='D')
a = pd.Series(np.zeros(len(datesDias)),index=datesDias)
a = a.resample('A').sum()
Anos = [i.strftime('%Y') for i in a.index.to_pydatetime()]
datesDias = [d.strftime('%Y%m%d') for d in datesDias.to_pydatetime()]
#lista los .nc existentes de ese dia: rutas y fechas del nombre del archivo
ListDatesinNC = []
ListRutas = []
for d in datesDias:
try:
L = glob.glob(rutaNC + d + '*.nc')
ListRutas.extend(L)
ListDatesinNC.extend([i.split('/')[-1].split('_')[0] for i in L])
except:
print ('Sin archivos para la fecha %s'%d)
# Organiza las listas de dias y de rutas
ListDatesinNC.sort()
ListRutas.sort()
#index con las fechas especificas de los .nc existentes de radar
datesinNC = [dt.datetime.strptime(d,'%Y%m%d%H%M') for d in ListDatesinNC]
datesinNC = pd.to_datetime(datesinNC)
#Obtiene el index con la resolucion deseada, en que se quiere buscar datos existentes de radar,
textdt = '%d' % Dt
#Agrega hora a la fecha inicial
if hora_1 != None:
inicio = fechaI+' '+hora_1
else:
inicio = fechaI
#agrega hora a la fecha final
if hora_2 != None:
final = fechaF+' '+hora_2
else:
final = fechaF
datesDt = pd.date_range(inicio,final,freq = textdt+'s')
#Obtiene las posiciones de acuerdo al dt para cada fecha, si no hay barrido en ese paso de tiempo se acumula
#elbarrido inmediatamente anterior.
PosDates = []
pos1 = [0]
for d1,d2 in zip(datesDt[:-1],datesDt[1:]):
pos2 = np.where((datesinNC<d2) & (datesinNC>=d1))[0].tolist()
if len(pos2) == 0 and complete_naninaccum == True: # si no hay barridos en el dt de inicio ellena con cero
pos2 = pos1
elif complete_naninaccum == True: #si hay barridos en este dt guarda esta pos para si es necesario completar las pos de dt en el sgte paso
pos1 = pos2
elif len(pos2) == 0:
pos2=[]
PosDates.append(pos2)
paths_inperiod = [[ListRutas[p] for c,p in enumerate(pos)] for dates,pos in zip(datesDt[1:],PosDates)]
pospaths_inperiod = [[p for c,p in enumerate(pos)] for dates,pos in zip(datesDt[1:],PosDates)]
######### LISTA EN ORDEN CON ARCHIVOS OBSERVADOS Y ESCENARIOS#############3
##### buscar el ultimo campo de lluvia observado ######
datessss = []
nc010 = []
for date,l_step,lpos_step in zip(datesDt[1:],paths_inperiod,pospaths_inperiod):
for path,pospath in zip(l_step[::-1],lpos_step[::-1]): # que siempre el primer nc leido sea el observado si lo hay
#siempre intenta buscar en cada paso de tiempo el observado, solo si no puede, busca escenarios futuros.
if path.split('/')[-1].split('_')[-1].split('.')[0].endswith('120'):
nc010.append(path)
datessss.append(date)
######punto a partir del cual usar escenarios
#si dentro del periodo existe alguno len(date)>1, sino = 0 (todo el periodo corresponde a forecast)
#si no existe pos_lastradarfield = pos del primer paso de tiempo paraque se cojan todos los archivos
if len(datessss)>0:
pos_lastradarfield = np.where(datesDt[1:]==datessss[-1])[0][0]
else:
pos_lastradarfield = 0
list_paths= []
# escoge rutas y pos organizados para escenarios, por ahora solo sirve con 1 barrido por timestep.
for ind,date,l_step,lpos_step in zip(np.arange(datesDt[1:].size),datesDt[1:],paths_inperiod,pospaths_inperiod):
# pos_step = []; paths_step = []
if len(l_step) == 0:
list_paths.append('')
else:
# ordenar rutas de ncs
for path,pospath in zip(l_step[::-1],lpos_step[::-1]): # que siempre el primer nc leido sea el observado si lo hay
# print (ind,path,pospath)
#si es un evento viejo
if evs_hist:
#primero escanarios futuros.
if include_escenarios is not None and path.split('/')[-1].split('_')[-1].startswith(include_escenarios):
list_paths.append(path)
break
#despues observados.
elif path.split('/')[-1].split('_')[-1].split('.')[0].endswith('120'):
list_paths.append(path)
#si es rigth now
else:
#primero observados y para ahi si se lo encontro
if path.split('/')[-1].split('_')[-1].split('.')[0].endswith('120'):
list_paths.append(path)
break
#despues escenarios futuros, y solo despues que se acaban observados
elif include_escenarios is not None and path.split('/')[-1].split('_')[-1].startswith(include_escenarios) and ind > pos_lastradarfield:
list_paths.append(path)
######### LECTURA DE CUENCA, DATOS Y GUARDADO DE BIN.###########
# acumular dentro de la cuenca.
cu = wmf.SimuBasin(rute= cuenca)
if save_class:
cuConv = wmf.SimuBasin(rute= cuenca)
cuStra = wmf.SimuBasin(rute= cuenca)
# paso a hora local
datesDt = datesDt - dt.timedelta(hours=5)
datesDt = datesDt.to_pydatetime()
#Index de salida en hora local
rng= pd.date_range(start.strftime('%Y-%m-%d %H:%M'),end.strftime('%Y-%m-%d %H:%M'), freq= textdt+'s')
df = pd.DataFrame(index = rng,columns=codigos)
#accumulated in basin
if accum:
rvec_accum = np.zeros(cu.ncells)
rvec = np.zeros(cu.ncells)
dfaccum = pd.DataFrame(np.zeros((cu.ncells,rng.size)).T,index = rng)
else:
pass
#all extent
if all_radextent:
radmatrix = np.zeros((1728, 1728))
#itera sobre ncs abre y guarda ifnfo
for dates,path in zip(datesDt[1:],list_paths):
if verbose:
print (dates,path)
rvec = np.zeros(cu.ncells)
if path != '': #sino hay archivo pone cero.
try:
#Lee la imagen de radar para esa fecha
g = netCDF4.Dataset(path)
#if all extent
if all_radextent:
radmatrix += g.variables['Rain'][:].T/(((1*3600)/Dt)*1000.0)
#on basins --> wmf.
RadProp = [g.ncols, g.nrows, g.xll, g.yll, g.dx, g.dx]
#Agrega la lluvia en el intervalo
rvec += cu.Transform_Map2Basin(g.variables['Rain'][:].T/(((1*3600)/Dt)*1000.0),RadProp)
if save_class:
ConvStra = cu.Transform_Map2Basin(g.variables['Conv_Strat'][:].T, RadProp)
# 1-stra, 2-conv
rConv = np.copy(ConvStra)
rConv[rConv == 1] = 0; rConv[rConv == 2] = 1
rStra = np.copy(ConvStra)
rStra[rStra == 2] = 0
rvec[(rConv == 0) & (rStra == 0)] = 0
rConv[rvec == 0] = 0
rStra[rvec == 0] = 0
#Cierra el netCDF
g.close()
except:
print ('error - zero field ')
if accum:
rvec_accum += np.zeros(cu.ncells)
rvec = np.zeros(cu.ncells)
else:
rvec = np.zeros(cu.ncells)
if save_class:
rConv = np.zeros(cu.ncells)
rStra = np.zeros(cu.ncells)
if all_radextent:
radmatrix += np.zeros((1728, 1728))
else:
print ('error - zero field ')
if accum:
rvec_accum += np.zeros(cu.ncells)
rvec = np.zeros(cu.ncells)
else:
rvec = np.zeros(cu.ncells)
if save_class:
rConv = np.zeros(cu.ncells)
rStra = np.zeros(cu.ncells)
if all_radextent:
radmatrix += np.zeros((1728, 1728))
#acumula dentro del for que recorre las fechas
if accum:
rvec_accum += rvec
dfaccum.loc[dates.strftime('%Y-%m-%d %H:%M:%S')]= rvec
else:
pass
# si se quiere sacar promedios de lluvia de radar en varias cuencas definidas en 'codigos'
if meanrain_ALL:
mean = []
#para todas
for codigo in codigos:
if '%s.tif'%(codigo) in os.listdir('/media/nicolas/Home/nicolas/01_SIATA/info_operacional_cuencas_nivel/red_nivel/tif_mascaras/'):
mask_path = '/media/nicolas/Home/nicolas/01_SIATA/info_operacional_cuencas_nivel/red_nivel/tif_mascaras/%s.tif'%(codigo)
mask_map = wmf.read_map_raster(mask_path)
mask_vect = cu.Transform_Map2Basin(mask_map[0],mask_map[1])
else:
mask_vect = None
if mask_vect is not None:
if path == '': # si no hay nc en ese paso de tiempo.
mean.append(np.nan)
else:
try:
mean.append(np.sum(mask_vect*rvec)/float(mask_vect[mask_vect==1].size))
except: # para las que no hay mascara.
mean.append(np.nan)
# se actualiza la media de todas las mascaras en el df.
df.loc[dates.strftime('%Y-%m-%d %H:%M:%S')]=mean
else:
pass
#guarda binario y df, si guardar binaria paso a paso no me interesa rvecaccum
if save_bin == True and len(codigos)==1 and path_res is not None:
#guarda en binario
dentro = cu.rain_radar2basin_from_array(vec = rvec,
ruta_out = path_res,
fecha = dates,
dt = Dt,
umbral = umbral)
#guarda en df meanrainfall.
mean = []
if path != '':
mean.append(rvec.mean())
else:
mean.append(np.nan)
df.loc[dates.strftime('%Y-%m-%d %H:%M:%S')]=mean
if save_bin == True and len(codigos)==1 and path_res is not None:
# close the binary file and write the header
cu.rain_radar2basin_from_array(status = 'close',ruta_out = path_res)
print ('.bin & .hdr saved')
if save_class:
cuConv.rain_radar2basin_from_array(status = 'close',ruta_out = path_res+'_conv')
cuStra.rain_radar2basin_from_array(status = 'close',ruta_out = path_res+'_stra')
print ('.bin & .hdr escenarios saved')
else:
print ('.bin & .hdr NOT saved')
# choose the returns.
if accum == True and path_tif is not None:
cu.Transform_Basin2Map(rvec_accum,path_tif)
return df,rvec_accum,dfaccum
elif accum == True:
return df,rvec_accum,dfaccum
elif all_radextent:
return df,radmatrix
else:
return df
def get_radar_rain_OP_newmasks(start,end,Dt,cuenca,codigos,rutaNC,accum=False,path_tif=None,all_radextent=False,
meanrain_ALL=True,complete_naninaccum=False, evs_hist=False,save_bin=False,save_class = False,
path_res=None,umbral=0.005,include_escenarios = None,
path_masks_csv = None,verbose=True):
'''
Reads the .nc files in rutaNC (101Radar_Class) within the assigned period and frequency.
For now it only works with one radar sweep per timestep, operational at 5 min.
0. It divides by 1000.0 and converts from mm/5min to mm/h.
1. Gets the mean radar rainfall in the basins assigned in 'codigos' by finding their masks, if the mask exists.
2. Writes binary files if set.
- Cannot do both 1 and 2.
- To save binary files (2) set: meanrain_ALL=False, save_bin=True, path_res= path where to write results,
len('codigos')=1, nc_path aims to the one with dxp and simubasin props set.
Parameters
----------
start: string, date&time format %Y-%m-%d %H:%M, local time.
end: string, date&time format %Y-%m-%d %H:%M, local time.
Dt: float, timedelta in seconds. For this function it should be lower than 3600s (1h).
cuenca: string, simubasin .nc path with dxp and format from WMF. It should be the 260 path if a whole-catchment analysis is needed, or any other .nc path for saving the binary file.
codigos: list, with codes of stage stations. Needed for finding the mask associated to a basin.
rutaNC: string, path with .nc files from the radar meteorology group. Default in amazonas: 101Radar_Class
Optional Parameters
----------
accum: boolean, default False. True for getting the accumulated matrix between start and end.
Changes returns: df,rvec (accumulated)
path_tif: string, path of the tif to write the accumulated basin map. Default None.
all_radextent: boolean, default False. True for getting the accumulated matrix between start and end in the
whole radar extent. Changes returns: df,radmatrix.
meanrain_ALL: boolean, default True. True for getting the mean radar rainfall within the several basins whose masks are defined in 'codigos'.
save_bin: boolean, default False. True for saving .bin and .hdr files with rainfall and if len('codigos')=1.
save_class: boolean, default False. True for saving .bin and .hdr for convective and stratiform classification. Applies if len('codigos')=1 and save_bin = True.
path_res: string with the path where to write results if save_bin=True, default None.
umbral: float. Minimum threshold for writing rainfall, default = 0.005.
include_escenarios: string with the name of the scenarios to use for the future.
path_masks_csv: string with the path of the csv with the positions of the masks; positions are relative to the shape of the designated simubasin.
Returns
----------
- df with the mean rainfall of the codes assigned in 'codigos'.
- df,rvec if accum = True.
- df,radmatrix if all_radextent = True.
- saves .bin and .hdr if save_bin = True, len('codigos')=1 and path_res=path.
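Example
----------
Illustrative call only; the dates, paths and station codes below are placeholders, not real data:
>>> df = get_radar_rain_OP_newmasks('2020-05-01 00:00', '2020-05-01 06:00', 300,
... '/path/to/simubasin.nc', [99, 106], '/path/to/radar_nc/',
... path_masks_csv='/path/to/masks_positions.csv')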
'''
#### DATES AND NC ASSIGNMENTS ####
start,end = pd.to_datetime(start),pd.to_datetime(end)
# UTC time
startUTC,endUTC = start + pd.Timedelta('5 hours'), end + pd.Timedelta('5 hours')
fechaI,fechaF,hora_1,hora_2 = startUTC.strftime('%Y-%m-%d'), endUTC.strftime('%Y-%m-%d'),startUTC.strftime('%H:%M'),endUTC.strftime('%H:%M')
# get the dates by day, to list the files of each day
datesDias = pd.date_range(fechaI, fechaF,freq='D')
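# list of the years spanned by the period, obtained via an annual resample of a dummy series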
a = pd.Series(np.zeros(len(datesDias)),index=datesDias)
a = a.resample('A').sum()
Anos = [i.strftime('%Y') for i in a.index.to_pydatetime()]
datesDias = [d.strftime('%Y%m%d') for d in datesDias.to_pydatetime()]
# list the existing .nc files of each day: paths and dates taken from the file names
ListDatesinNC = []
ListRutas = []
for d in datesDias:
try:
L = glob.glob(rutaNC + d + '*.nc')
ListRutas.extend(L)
ListDatesinNC.extend([i.split('/')[-1].split('_')[0] for i in L])
except:
print ('No files for date %s'%d)
# sort the lists of dates and paths
ListDatesinNC.sort()
ListRutas.sort()
# index with the specific dates of the existing radar .nc files
datesinNC = [dt.datetime.strptime(d,'%Y%m%d%H%M') for d in ListDatesinNC]
datesinNC = pd.to_datetime(datesinNC)
# api: pandas.to_datetime
# pylint: disable-msg=W0612,E1101,W0141
import nose
from numpy.random import randn
import numpy as np
from pandas.core.index import Index, MultiIndex
from pandas import Panel, DataFrame, Series, notnull, isnull
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.core.common as com
import pandas.util.testing as tm
from pandas.compat import (range, lrange, StringIO, lzip, u, cPickle,
product as cart_product, zip)
import pandas as pd
import pandas.index as _index
class TestMultiLevel(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.frame = DataFrame(np.random.randn(10, 3), index=index,
columns=Index(['A', 'B', 'C'], name='exp'))
self.single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]],
names=['first'])
# create test series object
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
s[3] = np.NaN
self.series = s
tm.N = 100
self.tdf = tm.makeTimeDataFrame()
self.ymd = self.tdf.groupby([lambda x: x.year, lambda x: x.month,
lambda x: x.day]).sum()
# use Int64Index, to make sure things work
self.ymd.index.set_levels([lev.astype('i8')
for lev in self.ymd.index.levels],
inplace=True)
self.ymd.index.set_names(['year', 'month', 'day'],
inplace=True)
def test_append(self):
a, b = self.frame[:5], self.frame[5:]
result = a.append(b)
tm.assert_frame_equal(result, self.frame)
result = a['A'].append(b['A'])
tm.assert_series_equal(result, self.frame['A'])
def test_dataframe_constructor(self):
multi = DataFrame(np.random.randn(4, 4),
index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
self.assertNotIsInstance(multi.columns, MultiIndex)
multi = DataFrame(np.random.randn(4, 4),
columns=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.columns, MultiIndex)
def test_series_constructor(self):
multi = Series(1., index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(1., index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(lrange(4), index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
def test_reindex_level(self):
# axis=0
month_sums = self.ymd.sum(level='month')
result = month_sums.reindex(self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum)
assert_frame_equal(result, expected)
# Series
result = month_sums['A'].reindex(self.ymd.index, level=1)
expected = self.ymd['A'].groupby(level='month').transform(np.sum)
assert_series_equal(result, expected)
# axis=1
month_sums = self.ymd.T.sum(axis=1, level='month')
result = month_sums.reindex(columns=self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum).T
assert_frame_equal(result, expected)
def test_binops_level(self):
def _check_op(opname):
op = getattr(DataFrame, opname)
month_sums = self.ymd.sum(level='month')
result = op(self.ymd, month_sums, level='month')
broadcasted = self.ymd.groupby(level='month').transform(np.sum)
expected = op(self.ymd, broadcasted)
assert_frame_equal(result, expected)
# Series
op = getattr(Series, opname)
result = op(self.ymd['A'], month_sums['A'], level='month')
broadcasted = self.ymd['A'].groupby(
level='month').transform(np.sum)
expected = op(self.ymd['A'], broadcasted)
assert_series_equal(result, expected)
_check_op('sub')
_check_op('add')
_check_op('mul')
_check_op('div')
def test_pickle(self):
def _test_roundtrip(frame):
pickled = cPickle.dumps(frame)
unpickled = cPickle.loads(pickled)
assert_frame_equal(frame, unpickled)
_test_roundtrip(self.frame)
_test_roundtrip(self.frame.T)
_test_roundtrip(self.ymd)
_test_roundtrip(self.ymd.T)
def test_reindex(self):
reindexed = self.frame.ix[[('foo', 'one'), ('bar', 'one')]]
expected = self.frame.ix[[0, 3]]
assert_frame_equal(reindexed, expected)
def test_reindex_preserve_levels(self):
new_index = self.ymd.index[::10]
chunk = self.ymd.reindex(new_index)
self.assertIs(chunk.index, new_index)
chunk = self.ymd.ix[new_index]
self.assertIs(chunk.index, new_index)
ymdT = self.ymd.T
chunk = ymdT.reindex(columns=new_index)
self.assertIs(chunk.columns, new_index)
chunk = ymdT.ix[:, new_index]
self.assertIs(chunk.columns, new_index)
def test_sort_index_preserve_levels(self):
result = self.frame.sort_index()
self.assertEquals(result.index.names, self.frame.index.names)
def test_repr_to_string(self):
repr(self.frame)
repr(self.ymd)
repr(self.frame.T)
repr(self.ymd.T)
buf = StringIO()
self.frame.to_string(buf=buf)
self.ymd.to_string(buf=buf)
self.frame.T.to_string(buf=buf)
self.ymd.T.to_string(buf=buf)
def test_repr_name_coincide(self):
index = MultiIndex.from_tuples([('a', 0, 'foo'), ('b', 1, 'bar')],
names=['a', 'b', 'c'])
df = DataFrame({'value': [0, 1]}, index=index)
lines = repr(df).split('\n')
self.assert_(lines[2].startswith('a 0 foo'))
def test_getitem_simple(self):
df = self.frame.T
col = df['foo', 'one']
assert_almost_equal(col.values, df.values[:, 0])
self.assertRaises(KeyError, df.__getitem__, ('foo', 'four'))
self.assertRaises(KeyError, df.__getitem__, 'foobar')
def test_series_getitem(self):
s = self.ymd['A']
result = s[2000, 3]
result2 = s.ix[2000, 3]
expected = s.reindex(s.index[42:65])
expected.index = expected.index.droplevel(0).droplevel(0)
assert_series_equal(result, expected)
result = s[2000, 3, 10]
expected = s[49]
self.assertEquals(result, expected)
# fancy
result = s.ix[[(2000, 3, 10), (2000, 3, 13)]]
expected = s.reindex(s.index[49:51])
assert_series_equal(result, expected)
# key error
self.assertRaises(KeyError, s.__getitem__, (2000, 3, 4))
def test_series_getitem_corner(self):
s = self.ymd['A']
# don't segfault, GH #495
# out of bounds access
self.assertRaises(IndexError, s.__getitem__, len(self.ymd))
# generator
result = s[(x > 0 for x in s)]
expected = s[s > 0]
assert_series_equal(result, expected)
def test_series_setitem(self):
s = self.ymd['A']
s[2000, 3] = np.nan
self.assert_(isnull(s.values[42:65]).all())
self.assert_(notnull(s.values[:42]).all())
self.assert_(notnull(s.values[65:]).all())
s[2000, 3, 10] = np.nan
self.assert_(isnull(s[49]))
def test_series_slice_partial(self):
pass
def test_frame_getitem_setitem_boolean(self):
df = self.frame.T.copy()
values = df.values
result = df[df > 0]
expected = df.where(df > 0)
assert_frame_equal(result, expected)
df[df > 0] = 5
values[values > 0] = 5
assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
assert_almost_equal(df.values, values)
with assertRaisesRegexp(TypeError, 'boolean values only'):
df[df * 0] = 2
def test_frame_getitem_setitem_slice(self):
# getitem
result = self.frame.ix[:4]
expected = self.frame[:4]
assert_frame_equal(result, expected)
# setitem
cp = self.frame.copy()
cp.ix[:4] = 0
self.assert_((cp.values[:4] == 0).all())
self.assert_((cp.values[4:] != 0).all())
def test_frame_getitem_setitem_multislice(self):
levels = [['t1', 't2'], ['a', 'b', 'c']]
labels = [[0, 0, 0, 1, 1], [0, 1, 2, 0, 1]]
midx = MultiIndex(labels=labels, levels=levels, names=[None, 'id'])
df = DataFrame({'value': [1, 2, 3, 7, 8]}, index=midx)
result = df.ix[:, 'value']
assert_series_equal(df['value'], result)
result = df.ix[1:3, 'value']
assert_series_equal(df['value'][1:3], result)
result = df.ix[:, :]
assert_frame_equal(df, result)
result = df
df.ix[:, 'value'] = 10
result['value'] = 10
assert_frame_equal(df, result)
df.ix[:, :] = 10
assert_frame_equal(df, result)
def test_frame_getitem_multicolumn_empty_level(self):
f = DataFrame({'a': ['1', '2', '3'],
'b': ['2', '3', '4']})
f.columns = [['level1 item1', 'level1 item2'],
['', 'level2 item2'],
['level3 item1', 'level3 item2']]
result = f['level1 item1']
expected = DataFrame([['1'], ['2'], ['3']], index=f.index,
columns=['level3 item1'])
assert_frame_equal(result, expected)
def test_frame_setitem_multi_column(self):
df = DataFrame(randn(10, 4), columns=[['a', 'a', 'b', 'b'],
[0, 1, 0, 1]])
cp = df.copy()
cp['a'] = cp['b']
assert_frame_equal(cp['a'], cp['b'])
# set with ndarray
cp = df.copy()
cp['a'] = cp['b'].values
assert_frame_equal(cp['a'], cp['b'])
#----------------------------------------
# #1803
columns = MultiIndex.from_tuples([('A', '1'), ('A', '2'), ('B', '1')])
df = DataFrame(index=[1, 3, 5], columns=columns)
# Works, but adds a column instead of updating the two existing ones
df['A'] = 0.0 # Doesn't work
self.assertTrue((df['A'].values == 0).all())
# it broadcasts
df['B', '1'] = [1, 2, 3]
df['A'] = df['B', '1']
assert_series_equal(df['A', '1'], df['B', '1'])
assert_series_equal(df['A', '2'], df['B', '1'])
def test_getitem_tuple_plus_slice(self):
# GH #671
df = DataFrame({'a': lrange(10),
'b': lrange(10),
'c': np.random.randn(10),
'd': np.random.randn(10)})
idf = df.set_index(['a', 'b'])
result = idf.ix[(0, 0), :]
expected = idf.ix[0, 0]
expected2 = idf.xs((0, 0))
assert_series_equal(result, expected)
assert_series_equal(result, expected2)
def test_getitem_setitem_tuple_plus_columns(self):
# GH #1013
df = self.ymd[:5]
result = df.ix[(2000, 1, 6), ['A', 'B', 'C']]
expected = df.ix[2000, 1, 6][['A', 'B', 'C']]
assert_series_equal(result, expected)
def test_getitem_multilevel_index_tuple_unsorted(self):
index_columns = list("abc")
df = DataFrame([[0, 1, 0, "x"], [0, 0, 1, "y"]],
columns=index_columns + ["data"])
df = df.set_index(index_columns)
query_index = df.index[:1]
rs = df.ix[query_index, "data"]
xp = Series(['x'], index=MultiIndex.from_tuples([(0, 1, 0)]))
assert_series_equal(rs, xp)
def test_xs(self):
xs = self.frame.xs(('bar', 'two'))
xs2 = self.frame.ix[('bar', 'two')]
assert_series_equal(xs, xs2)
assert_almost_equal(xs.values, self.frame.values[4])
# GH 6574
# missing values in returned index should be preserved
acc = [
('a','abcde',1),
('b','bbcde',2),
('y','yzcde',25),
('z','xbcde',24),
('z',None,26),
('z','zbcde',25),
('z','ybcde',26),
]
df = DataFrame(acc, columns=['a1','a2','cnt']).set_index(['a1','a2'])
expected = DataFrame({ 'cnt' : [24,26,25,26] }, index=Index(['xbcde',np.nan,'zbcde','ybcde'],name='a2'))
result = df.xs('z',level='a1')
assert_frame_equal(result, expected)
def test_xs_partial(self):
result = self.frame.xs('foo')
result2 = self.frame.ix['foo']
expected = self.frame.T['foo'].T
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
result = self.ymd.xs((2000, 4))
expected = self.ymd.ix[2000, 4]
assert_frame_equal(result, expected)
# ex from #1796
index = MultiIndex(levels=[['foo', 'bar'], ['one', 'two'], [-1, 1]],
labels=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(8, 4), index=index,
columns=list('abcd'))
result = df.xs(['foo', 'one'])
expected = df.ix['foo', 'one']
assert_frame_equal(result, expected)
def test_xs_level(self):
result = self.frame.xs('two', level='second')
expected = self.frame[self.frame.index.get_level_values(1) == 'two']
expected.index = expected.index.droplevel(1)
assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([('x', 'y', 'z'), ('a', 'b', 'c'),
('p', 'q', 'r')])
df = DataFrame(np.random.randn(3, 5), index=index)
result = df.xs('c', level=2)
expected = df[1:2]
expected.index = expected.index.droplevel(2)
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = self.frame.xs('two', level='second')
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
def test_xs_level_multiple(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep='\s+', engine='python')
result = df.xs(('a', 4), level=['one', 'four'])
expected = df.xs('a').xs(4, level='four')
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = df.xs(('a', 4), level=['one', 'four'])
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
# GH2107
dates = lrange(20111201, 20111205)
ids = 'abcde'
idx = MultiIndex.from_tuples([x for x in cart_product(dates, ids)])
idx.names = ['date', 'secid']
df = DataFrame(np.random.randn(len(idx), 3), idx, ['X', 'Y', 'Z'])
rs = df.xs(20111201, level='date')
xp = df.ix[20111201, :]
assert_frame_equal(rs, xp)
def test_xs_level0(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep='\s+', engine='python')
result = df.xs('a', level=0)
expected = df.xs('a')
self.assertEqual(len(result), 2)
assert_frame_equal(result, expected)
def test_xs_level_series(self):
s = self.frame['A']
result = s[:, 'two']
expected = self.frame.xs('two', level=1)['A']
assert_series_equal(result, expected)
s = self.ymd['A']
result = s[2000, 5]
expected = self.ymd.ix[2000, 5]['A']
assert_series_equal(result, expected)
# not implementing this for now
self.assertRaises(TypeError, s.__getitem__, (2000, slice(3, 4)))
# result = s[2000, 3:4]
# lv =s.index.get_level_values(1)
# expected = s[(lv == 3) | (lv == 4)]
# expected.index = expected.index.droplevel(0)
# assert_series_equal(result, expected)
# can do this though
def test_get_loc_single_level(self):
s = Series(np.random.randn(len(self.single_level)),
index=self.single_level)
for k in self.single_level.values:
s[k]
def test_getitem_toplevel(self):
df = self.frame.T
result = df['foo']
expected = df.reindex(columns=df.columns[:3])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
result = df['bar']
result2 = df.ix[:, 'bar']
expected = df.reindex(columns=df.columns[3:5])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
def test_getitem_setitem_slice_integers(self):
index = MultiIndex(levels=[[0, 1, 2], [0, 2]],
labels=[[0, 0, 1, 1, 2, 2],
[0, 1, 0, 1, 0, 1]])
frame = DataFrame(np.random.randn(len(index), 4), index=index,
columns=['a', 'b', 'c', 'd'])
res = frame.ix[1:2]
exp = frame.reindex(frame.index[2:])
assert_frame_equal(res, exp)
frame.ix[1:2] = 7
self.assert_((frame.ix[1:2] == 7).values.all())
series = Series(np.random.randn(len(index)), index=index)
res = series.ix[1:2]
exp = series.reindex(series.index[2:])
assert_series_equal(res, exp)
series.ix[1:2] = 7
self.assert_((series.ix[1:2] == 7).values.all())
def test_getitem_int(self):
levels = [[0, 1], [0, 1, 2]]
labels = [[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]]
index = MultiIndex(levels=levels, labels=labels)
frame = DataFrame(np.random.randn(6, 2), index=index)
result = frame.ix[1]
expected = frame[-3:]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
# raises exception
self.assertRaises(KeyError, frame.ix.__getitem__, 3)
# however this will work
result = self.frame.ix[2]
expected = self.frame.xs(self.frame.index[2])
assert_series_equal(result, expected)
def test_getitem_partial(self):
ymd = self.ymd.T
result = ymd[2000, 2]
expected = ymd.reindex(columns=ymd.columns[ymd.columns.labels[1] == 1])
expected.columns = expected.columns.droplevel(0).droplevel(0)
assert_frame_equal(result, expected)
def test_getitem_slice_not_sorted(self):
df = self.frame.sortlevel(1).T
# buglet with int typechecking
result = df.ix[:, :np.int32(3)]
expected = df.reindex(columns=df.columns[:3])
assert_frame_equal(result, expected)
def test_setitem_change_dtype(self):
dft = self.frame.T
s = dft['foo', 'two']
dft['foo', 'two'] = s > s.median()
assert_series_equal(dft['foo', 'two'], s > s.median())
# tm.assert_isinstance(dft._data.blocks[1].items, MultiIndex)
reindexed = dft.reindex(columns=[('foo', 'two')])
assert_series_equal(reindexed['foo', 'two'], s > s.median())
def test_frame_setitem_ix(self):
self.frame.ix[('bar', 'two'), 'B'] = 5
self.assertEquals(self.frame.ix[('bar', 'two'), 'B'], 5)
# with integer labels
df = self.frame.copy()
df.columns = lrange(3)
df.ix[('bar', 'two'), 1] = 7
self.assertEquals(df.ix[('bar', 'two'), 1], 7)
def test_fancy_slice_partial(self):
result = self.frame.ix['bar':'baz']
expected = self.frame[3:7]
assert_frame_equal(result, expected)
result = self.ymd.ix[(2000, 2):(2000, 4)]
lev = self.ymd.index.labels[1]
expected = self.ymd[(lev >= 1) & (lev <= 3)]
assert_frame_equal(result, expected)
def test_getitem_partial_column_select(self):
idx = MultiIndex(labels=[[0, 0, 0], [0, 1, 1], [1, 0, 1]],
levels=[['a', 'b'], ['x', 'y'], ['p', 'q']])
df = DataFrame(np.random.rand(3, 2), index=idx)
result = df.ix[('a', 'y'), :]
expected = df.ix[('a', 'y')]
assert_frame_equal(result, expected)
result = df.ix[('a', 'y'), [1, 0]]
expected = df.ix[('a', 'y')][[1, 0]]
assert_frame_equal(result, expected)
self.assertRaises(KeyError, df.ix.__getitem__,
(('a', 'foo'), slice(None, None)))
def test_sortlevel(self):
df = self.frame.copy()
df.index = np.arange(len(df))
assertRaisesRegexp(TypeError, 'hierarchical index', df.sortlevel, 0)
# axis=1
# series
a_sorted = self.frame['A'].sortlevel(0)
with assertRaisesRegexp(TypeError, 'hierarchical index'):
self.frame.reset_index()['A'].sortlevel()
# preserve names
self.assertEquals(a_sorted.index.names, self.frame.index.names)
# inplace
rs = self.frame.copy()
rs.sortlevel(0, inplace=True)
assert_frame_equal(rs, self.frame.sortlevel(0))
def test_sortlevel_large_cardinality(self):
# #2684 (int64)
index = MultiIndex.from_arrays([np.arange(4000)]*3)
df = DataFrame(np.random.randn(4000), index=index, dtype = np.int64)
# it works!
result = df.sortlevel(0)
self.assertTrue(result.index.lexsort_depth == 3)
# #2684 (int32)
index = MultiIndex.from_arrays([np.arange(4000)]*3)
df = DataFrame(np.random.randn(4000), index=index, dtype = np.int32)
# it works!
result = df.sortlevel(0)
self.assert_((result.dtypes.values == df.dtypes.values).all() == True)
self.assertTrue(result.index.lexsort_depth == 3)
def test_delevel_infer_dtype(self):
tuples = [tuple for tuple in cart_product(['foo', 'bar'],
[10, 20], [1.0, 1.1])]
index = MultiIndex.from_tuples(tuples,
names=['prm0', 'prm1', 'prm2'])
df = DataFrame(np.random.randn(8, 3), columns=['A', 'B', 'C'],
index=index)
deleveled = df.reset_index()
self.assert_(com.is_integer_dtype(deleveled['prm1']))
self.assert_(com.is_float_dtype(deleveled['prm2']))
def test_reset_index_with_drop(self):
deleveled = self.ymd.reset_index(drop=True)
self.assertEquals(len(deleveled.columns), len(self.ymd.columns))
deleveled = self.series.reset_index()
tm.assert_isinstance(deleveled, DataFrame)
self.assertEqual(len(deleveled.columns),
len(self.series.index.levels) + 1)
deleveled = self.series.reset_index(drop=True)
tm.assert_isinstance(deleveled, Series)
def test_sortlevel_by_name(self):
self.frame.index.names = ['first', 'second']
result = self.frame.sortlevel(level='second')
expected = self.frame.sortlevel(level=1)
assert_frame_equal(result, expected)
def test_sortlevel_mixed(self):
sorted_before = self.frame.sortlevel(1)
df = self.frame.copy()
df['foo'] = 'bar'
sorted_after = df.sortlevel(1)
assert_frame_equal(sorted_before, sorted_after.drop(['foo'], axis=1))
dft = self.frame.T
sorted_before = dft.sortlevel(1, axis=1)
dft['foo', 'three'] = 'bar'
sorted_after = dft.sortlevel(1, axis=1)
assert_frame_equal(sorted_before.drop([('foo', 'three')], axis=1),
sorted_after.drop([('foo', 'three')], axis=1))
def test_count_level(self):
def _check_counts(frame, axis=0):
index = frame._get_axis(axis)
for i in range(index.nlevels):
result = frame.count(axis=axis, level=i)
expected = frame.groupby(axis=axis, level=i).count(axis=axis)
expected = expected.reindex_like(result).astype('i8')
assert_frame_equal(result, expected)
self.frame.ix[1, [1, 2]] = np.nan
self.frame.ix[7, [0, 1]] = np.nan
self.ymd.ix[1, [1, 2]] = np.nan
self.ymd.ix[7, [0, 1]] = np.nan
_check_counts(self.frame)
_check_counts(self.ymd)
_check_counts(self.frame.T, axis=1)
_check_counts(self.ymd.T, axis=1)
# can't call with level on regular DataFrame
df = tm.makeTimeDataFrame()
assertRaisesRegexp(TypeError, 'hierarchical', df.count, level=0)
self.frame['D'] = 'foo'
result = self.frame.count(level=0, numeric_only=True)
assert_almost_equal(result.columns, ['A', 'B', 'C'])
def test_count_level_series(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz'],
['one', 'two', 'three', 'four']],
labels=[[0, 0, 0, 2, 2],
[2, 0, 1, 1, 2]])
s = Series(np.random.randn(len(index)), index=index)
result = s.count(level=0)
expected = s.groupby(level=0).count()
assert_series_equal(result.astype('f8'),
expected.reindex(result.index).fillna(0))
result = s.count(level=1)
expected = s.groupby(level=1).count()
assert_series_equal(result.astype('f8'),
expected.reindex(result.index).fillna(0))
def test_count_level_corner(self):
s = self.frame['A'][:0]
result = s.count(level=0)
expected = Series(0, index=s.index.levels[0])
assert_series_equal(result, expected)
df = self.frame[:0]
result = df.count(level=0)
expected = DataFrame({}, index=s.index.levels[0],
columns=df.columns).fillna(0).astype(np.int64)
assert_frame_equal(result, expected)
def test_unstack(self):
# just check that it works for now
unstacked = self.ymd.unstack()
unstacked2 = unstacked.unstack()
# test that ints work
unstacked = self.ymd.astype(int).unstack()
# test that int32 work
unstacked = self.ymd.astype(np.int32).unstack()
def test_unstack_multiple_no_empty_columns(self):
index = MultiIndex.from_tuples([(0, 'foo', 0), (0, 'bar', 0),
(1, 'baz', 1), (1, 'qux', 1)])
s = Series(np.random.randn(4), index=index)
unstacked = s.unstack([1, 2])
expected = unstacked.dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected)
def test_stack(self):
# regular roundtrip
unstacked = self.ymd.unstack()
restacked = unstacked.stack()
assert_frame_equal(restacked, self.ymd)
unlexsorted = self.ymd.sortlevel(2)
unstacked = unlexsorted.unstack(2)
restacked = unstacked.stack()
assert_frame_equal(restacked.sortlevel(0), self.ymd)
unlexsorted = unlexsorted[::-1]
unstacked = unlexsorted.unstack(1)
restacked = unstacked.stack().swaplevel(1, 2)
assert_frame_equal(restacked.sortlevel(0), self.ymd)
unlexsorted = unlexsorted.swaplevel(0, 1)
unstacked = unlexsorted.unstack(0).swaplevel(0, 1, axis=1)
restacked = unstacked.stack(0).swaplevel(1, 2)
assert_frame_equal(restacked.sortlevel(0), self.ymd)
# columns unsorted
unstacked = self.ymd.unstack()
unstacked = unstacked.sort(axis=1, ascending=False)
restacked = unstacked.stack()
assert_frame_equal(restacked, self.ymd)
# more than 2 levels in the columns
unstacked = self.ymd.unstack(1).unstack(1)
result = unstacked.stack(1)
expected = self.ymd.unstack()
assert_frame_equal(result, expected)
result = unstacked.stack(2)
expected = self.ymd.unstack(1)
assert_frame_equal(result, expected)
result = unstacked.stack(0)
expected = self.ymd.stack().unstack(1).unstack(1)
assert_frame_equal(result, expected)
# not all levels present in each echelon
unstacked = self.ymd.unstack(2).ix[:, ::3]
stacked = unstacked.stack().stack()
ymd_stacked = self.ymd.stack()
assert_series_equal(stacked, ymd_stacked.reindex(stacked.index))
# stack with negative number
result = self.ymd.unstack(0).stack(-2)
expected = self.ymd.unstack(0).stack(0)
def test_unstack_odd_failure(self):
data = """day,time,smoker,sum,len
Fri,Dinner,No,8.25,3.
Fri,Dinner,Yes,27.03,9
Fri,Lunch,No,3.0,1
Fri,Lunch,Yes,13.68,6
Sat,Dinner,No,139.63,45
Sat,Dinner,Yes,120.77,42
Sun,Dinner,No,180.57,57
Sun,Dinner,Yes,66.82,19
Thur,Dinner,No,3.0,1
Thur,Lunch,No,117.32,44
Thur,Lunch,Yes,51.51,17"""
df = pd.read_csv(StringIO(data)).set_index(['day', 'time', 'smoker'])
# it works, #2100
result = df.unstack(2)
recons = result.stack()
assert_frame_equal(recons, df)
def test_stack_mixed_dtype(self):
df = self.frame.T
df['foo', 'four'] = 'foo'
df = df.sortlevel(1, axis=1)
stacked = df.stack()
assert_series_equal(stacked['foo'], df['foo'].stack())
self.assertEqual(stacked['bar'].dtype, np.float_)
def test_unstack_bug(self):
df = DataFrame({'state': ['naive', 'naive', 'naive',
'activ', 'activ', 'activ'],
'exp': ['a', 'b', 'b', 'b', 'a', 'a'],
'barcode': [1, 2, 3, 4, 1, 3],
'v': ['hi', 'hi', 'bye', 'bye', 'bye', 'peace'],
'extra': np.arange(6.)})
result = df.groupby(['state', 'exp', 'barcode', 'v']).apply(len)
unstacked = result.unstack()
restacked = unstacked.stack()
assert_series_equal(restacked,
result.reindex(restacked.index).astype(float))
def test_stack_unstack_preserve_names(self):
unstacked = self.frame.unstack()
self.assertEquals(unstacked.index.name, 'first')
self.assertEquals(unstacked.columns.names, ['exp', 'second'])
restacked = unstacked.stack()
self.assertEquals(restacked.index.names, self.frame.index.names)
def test_unstack_level_name(self):
result = self.frame.unstack('second')
expected = self.frame.unstack(level=1)
assert_frame_equal(result, expected)
def test_stack_level_name(self):
unstacked = self.frame.unstack('second')
result = unstacked.stack('exp')
expected = self.frame.unstack().stack(0)
assert_frame_equal(result, expected)
result = self.frame.stack('exp')
expected = self.frame.stack()
assert_series_equal(result, expected)
def test_stack_unstack_multiple(self):
unstacked = self.ymd.unstack(['year', 'month'])
expected = self.ymd.unstack('year').unstack('month')
assert_frame_equal(unstacked, expected)
self.assertEquals(unstacked.columns.names,
expected.columns.names)
# series
s = self.ymd['A']
s_unstacked = s.unstack(['year', 'month'])
assert_frame_equal(s_unstacked, expected['A'])
restacked = unstacked.stack(['year', 'month'])
restacked = restacked.swaplevel(0, 1).swaplevel(1, 2)
restacked = restacked.sortlevel(0)
assert_frame_equal(restacked, self.ymd)
self.assertEquals(restacked.index.names, self.ymd.index.names)
# GH #451
unstacked = self.ymd.unstack([1, 2])
expected = self.ymd.unstack(1).unstack(1).dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected)
unstacked = self.ymd.unstack([2, 1])
expected = self.ymd.unstack(2).unstack(1).dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected.ix[:, unstacked.columns])
def test_unstack_period_series(self):
# GH 4342
idx1 = pd.PeriodIndex(['2013-01', '2013-01', '2013-02', '2013-02',
'2013-03', '2013-03'], freq='M', name='period')
idx2 = Index(['A', 'B'] * 3, name='str')
value = [1, 2, 3, 4, 5, 6]
idx = MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(['2013-01', '2013-02', '2013-03'], freq='M', name='period')
expected = DataFrame({'A': [1, 3, 5], 'B': [2, 4, 6]}, index=e_idx,
columns=['A', 'B'])
expected.columns.name = 'str'
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(result3, expected.T)
idx1 = pd.PeriodIndex(['2013-01', '2013-01', '2013-02', '2013-02',
'2013-03', '2013-03'], freq='M', name='period1')
idx2 = pd.PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07'], freq='M', name='period2')
idx = pd.MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(['2013-01', '2013-02', '2013-03'], freq='M', name='period1')
e_cols = pd.PeriodIndex(['2013-07', '2013-08', '2013-09', '2013-10',
'2013-11', '2013-12'], freq='M', name='period2')
expected = DataFrame([[np.nan, np.nan, np.nan, np.nan, 2, 1],
[np.nan, np.nan, 4, 3, np.nan, np.nan],
[6, 5, np.nan, np.nan, np.nan, np.nan]],
index=e_idx, columns=e_cols)
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(result3, expected.T)
def test_unstack_period_frame(self):
# GH 4342
idx1 = pd.PeriodIndex(['2014-01', '2014-02', '2014-02', '2014-02', '2014-01', '2014-01'],
freq='M', name='period1')
idx2 = pd.PeriodIndex(['2013-12', '2013-12', '2014-02', '2013-10', '2013-10', '2014-02'],
freq='M', name='period2')
value = {'A': [1, 2, 3, 4, 5, 6], 'B': [6, 5, 4, 3, 2, 1]}
idx = pd.MultiIndex.from_arrays([idx1, idx2])
df = pd.DataFrame(value, index=idx)
result1 = df.unstack()
result2 = df.unstack(level=1)
result3 = df.unstack(level=0)
e_1 = pd.PeriodIndex(['2014-01', '2014-02'], freq='M', name='period1')
e_2 = pd.PeriodIndex(['2013-10', '2013-12', '2014-02', '2013-10',
'2013-12', '2014-02'], freq='M', name='period2')
e_cols = pd.MultiIndex.from_arrays(['A A A B B B'.split(), e_2])
expected = DataFrame([[5, 1, 6, 2, 6, 1], [4, 2, 3, 3, 5, 4]],
index=e_1, columns=e_cols)
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
e_1 = pd.PeriodIndex(['2014-01', '2014-02', '2014-01',
'2014-02'], freq='M', name='period1')
e_2 = pd.PeriodIndex(['2013-10', '2013-12', '2014-02'], freq='M', name='period2')
e_cols = pd.MultiIndex.from_arrays(['A A B B'.split(), e_1])
expected = DataFrame([[5, 4, 2, 3], [1, 2, 6, 5], [6, 3, 1, 4]],
index=e_2, columns=e_cols)
assert_frame_equal(result3, expected)
def test_stack_multiple_bug(self):
""" bug when some uniques are not present in the data #3170"""
id_col = ([1] * 3) + ([2] * 3)
name = (['a'] * 3) + (['b'] * 3)
date = pd.to_datetime(['2013-01-03', '2013-01-04', '2013-01-05'] * 2)
var1 = np.random.randint(0, 100, 6)
df = DataFrame(dict(ID=id_col, NAME=name, DATE=date, VAR1=var1))
multi = df.set_index(['DATE', 'ID'])
multi.columns.name = 'Params'
unst = multi.unstack('ID')
down = unst.resample('W-THU')
rs = down.stack('ID')
xp = unst.ix[:, ['VAR1']].resample('W-THU').stack('ID')
xp.columns.name = 'Params'
assert_frame_equal(rs, xp)
def test_stack_dropna(self):
# GH #3997
df = pd.DataFrame({'A': ['a1', 'a2'],
'B': ['b1', 'b2'],
'C': [1, 1]})
df = df.set_index(['A', 'B'])
stacked = df.unstack().stack(dropna=False)
self.assertTrue(len(stacked) > len(stacked.dropna()))
stacked = df.unstack().stack(dropna=True)
assert_frame_equal(stacked, stacked.dropna())
def test_unstack_multiple_hierarchical(self):
df = DataFrame(index=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1]],
columns=[[0, 0, 1, 1], [0, 1, 0, 1]])
df.index.names = ['a', 'b', 'c']
df.columns.names = ['d', 'e']
# it works!
df.unstack(['b', 'c'])
def test_groupby_transform(self):
s = self.frame['A']
grouper = s.index.get_level_values(0)
grouped = s.groupby(grouper)
applied = grouped.apply(lambda x: x * 2)
expected = grouped.transform(lambda x: x * 2)
assert_series_equal(applied.reindex(expected.index), expected)
def test_unstack_sparse_keyspace(self):
# memory problems with naive impl #2278
# Generate Long File & Test Pivot
NUM_ROWS = 1000
df = DataFrame({'A': np.random.randint(100, size=NUM_ROWS),
'B': np.random.randint(300, size=NUM_ROWS),
'C': np.random.randint(-7, 7, size=NUM_ROWS),
'D': np.random.randint(-19, 19, size=NUM_ROWS),
'E': np.random.randint(3000, size=NUM_ROWS),
'F': np.random.randn(NUM_ROWS)})
idf = df.set_index(['A', 'B', 'C', 'D', 'E'])
# it works! is sufficient
idf.unstack('E')
def test_unstack_unobserved_keys(self):
# related to #2278 refactoring
levels = [[0, 1], [0, 1, 2, 3]]
labels = [[0, 0, 1, 1], [0, 2, 0, 2]]
index = MultiIndex(levels, labels)
df = DataFrame(np.random.randn(4, 2), index=index)
result = df.unstack()
self.assertEquals(len(result.columns), 4)
recons = result.stack()
assert_frame_equal(recons, df)
def test_groupby_corner(self):
midx = MultiIndex(levels=[['foo'], ['bar'], ['baz']],
labels=[[0], [0], [0]], names=['one', 'two', 'three'])
df = DataFrame([np.random.rand(4)], columns=['a', 'b', 'c', 'd'],
index=midx)
# should work
df.groupby(level='three')
def test_groupby_level_no_obs(self):
# #1697
midx = MultiIndex.from_tuples([('f1', 's1'), ('f1', 's2'),
('f2', 's1'), ('f2', 's2'),
('f3', 's1'), ('f3', 's2')])
df = DataFrame(
[[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]], columns=midx)
df1 = df.select(lambda u: u[0] in ['f2', 'f3'], axis=1)
grouped = df1.groupby(axis=1, level=0)
result = grouped.sum()
self.assert_((result.columns == ['f2', 'f3']).all())
def test_join(self):
a = self.frame.ix[:5, ['A']]
b = self.frame.ix[2:, ['B', 'C']]
joined = a.join(b, how='outer').reindex(self.frame.index)
expected = self.frame.copy()
expected.values[np.isnan(joined.values)] = np.nan
self.assert_(not np.isnan(joined.values).all())
assert_frame_equal(joined, expected, check_names=False) # TODO what should join do with names ?
def test_swaplevel(self):
swapped = self.frame['A'].swaplevel(0, 1)
swapped2 = self.frame['A'].swaplevel('first', 'second')
self.assert_(not swapped.index.equals(self.frame.index))
assert_series_equal(swapped, swapped2)
back = swapped.swaplevel(0, 1)
back2 = swapped.swaplevel('second', 'first')
self.assert_(back.index.equals(self.frame.index))
assert_series_equal(back, back2)
ft = self.frame.T
swapped = ft.swaplevel('first', 'second', axis=1)
exp = self.frame.swaplevel('first', 'second').T
assert_frame_equal(swapped, exp)
def test_swaplevel_panel(self):
panel = Panel({'ItemA': self.frame,
'ItemB': self.frame * 2})
result = panel.swaplevel(0, 1, axis='major')
expected = panel.copy()
expected.major_axis = expected.major_axis.swaplevel(0, 1)
tm.assert_panel_equal(result, expected)
def test_reorder_levels(self):
result = self.ymd.reorder_levels(['month', 'day', 'year'])
expected = self.ymd.swaplevel(0, 1).swaplevel(1, 2)
assert_frame_equal(result, expected)
result = self.ymd['A'].reorder_levels(['month', 'day', 'year'])
expected = self.ymd['A'].swaplevel(0, 1).swaplevel(1, 2)
assert_series_equal(result, expected)
result = self.ymd.T.reorder_levels(['month', 'day', 'year'], axis=1)
expected = self.ymd.T.swaplevel(0, 1, axis=1).swaplevel(1, 2, axis=1)
assert_frame_equal(result, expected)
with assertRaisesRegexp(TypeError, 'hierarchical axis'):
self.ymd.reorder_levels([1, 2], axis=1)
with assertRaisesRegexp(IndexError, 'Too many levels'):
self.ymd.index.reorder_levels([1, 2, 3])
def test_insert_index(self):
df = self.ymd[:5].T
df[2000, 1, 10] = df[2000, 1, 7]
tm.assert_isinstance(df.columns, MultiIndex)
self.assert_((df[2000, 1, 10] == df[2000, 1, 7]).all())
def test_alignment(self):
x = Series(data=[1, 2, 3],
index=MultiIndex.from_tuples([("A", 1), ("A", 2), ("B", 3)]))
y = Series(data=[4, 5, 6],
index=MultiIndex.from_tuples([("Z", 1), ("Z", 2), ("B", 3)]))
res = x - y
exp_index = x.index.union(y.index)
exp = x.reindex(exp_index) - y.reindex(exp_index)
assert_series_equal(res, exp)
# hit non-monotonic code path
res = x[::-1] - y[::-1]
exp_index = x.index.union(y.index)
exp = x.reindex(exp_index) - y.reindex(exp_index)
assert_series_equal(res, exp)
def test_is_lexsorted(self):
levels = [[0, 1], [0, 1, 2]]
index = MultiIndex(levels=levels,
labels=[[0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 1, 2]])
self.assert_(index.is_lexsorted())
index = MultiIndex(levels=levels,
labels=[[0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 2, 1]])
self.assert_(not index.is_lexsorted())
index = MultiIndex(levels=levels,
labels=[[0, 0, 1, 0, 1, 1],
[0, 1, 0, 2, 2, 1]])
self.assert_(not index.is_lexsorted())
self.assertEqual(index.lexsort_depth, 0)
def test_frame_getitem_view(self):
df = self.frame.T.copy()
# this works because we are modifying the underlying array
# really a no-no
df['foo'].values[:] = 0
self.assert_((df['foo'].values == 0).all())
# but not if it's mixed-type
df['foo', 'four'] = 'foo'
df = df.sortlevel(0, axis=1)
# this will work, but will raise/warn as its chained assignment
def f():
df['foo']['one'] = 2
return df
self.assertRaises(com.SettingWithCopyError, f)
try:
df = f()
except:
pass
self.assert_((df['foo', 'one'] == 0).all())
def test_frame_getitem_not_sorted(self):
df = self.frame.T
df['foo', 'four'] = 'foo'
arrays = [np.array(x) for x in zip(*df.columns._tuple_index)]
result = df['foo']
result2 = df.ix[:, 'foo']
expected = df.reindex(columns=df.columns[arrays[0] == 'foo'])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
# api: pandas.util.testing.assert_frame_equal
from io import StringIO
import pandas as pd
import numpy as np
import pytest
import bioframe
import bioframe.core.checks as checks
# import pyranges as pr
# def bioframe_to_pyranges(df):
# pydf = df.copy()
# pydf.rename(
# {"chrom": "Chromosome", "start": "Start", "end": "End"},
# axis="columns",
# inplace=True,
# )
# return pr.PyRanges(pydf)
# def pyranges_to_bioframe(pydf):
# df = pydf.df
# df.rename(
# {"Chromosome": "chrom", "Start": "start", "End": "end", "Count": "n_intervals"},
# axis="columns",
# inplace=True,
# )
# return df
# def pyranges_overlap_to_bioframe(pydf):
# ## convert the df output by pyranges join into a bioframe-compatible format
# df = pydf.df.copy()
# df.rename(
# {
# "Chromosome": "chrom_1",
# "Start": "start_1",
# "End": "end_1",
# "Start_b": "start_2",
# "End_b": "end_2",
# },
# axis="columns",
# inplace=True,
# )
# df["chrom_1"] = df["chrom_1"].values.astype("object") # to remove categories
# df["chrom_2"] = df["chrom_1"].values
# return df
chroms = ["chr12", "chrX"]
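# chromosome names used by the mock_bioframe() interval generator below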
def mock_bioframe(num_entries=100):
pos = np.random.randint(1, 1e7, size=(num_entries, 2))
df = pd.DataFrame()
df["chrom"] = np.random.choice(chroms, num_entries)
df["start"] = np.min(pos, axis=1)
df["end"] = np.max(pos, axis=1)
df.sort_values(["chrom", "start"], inplace=True)
return df
############# tests #####################
def test_select():
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 5], ["chrX", 1, 5]],
columns=["chrom", "start", "end"],
)
region1 = "chr1:4-10"
df_result = pd.DataFrame([["chr1", 4, 5]], columns=["chrom", "start", "end"])
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1).reset_index(drop=True)
)
region1 = "chrX"
df_result = pd.DataFrame(
[["chrX", 3, 8], ["chrX", 1, 5]], columns=["chrom", "start", "end"]
)
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1).reset_index(drop=True)
)
region1 = "chrX:4-6"
df_result = pd.DataFrame(
[["chrX", 3, 8], ["chrX", 1, 5]], columns=["chrom", "start", "end"]
)
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1).reset_index(drop=True)
)
### select with non-standard column names
region1 = "chrX:4-6"
new_names = ["chr", "chrstart", "chrend"]
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 5], ["chrX", 1, 5]],
columns=new_names,
)
df_result = pd.DataFrame(
[["chrX", 3, 8], ["chrX", 1, 5]],
columns=new_names,
)
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1, cols=new_names).reset_index(drop=True)
)
region1 = "chrX"
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1, cols=new_names).reset_index(drop=True)
)
### select from a DataFrame with NaNs
colnames = ["chrom", "start", "end", "view_region"]
df = pd.DataFrame(
[
["chr1", -6, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=colnames,
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df_result = pd.DataFrame(
[["chr1", -6, 12, "chr1p"]],
columns=colnames,
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
region1 = "chr1:0-1"
pd.testing.assert_frame_equal(
df_result, bioframe.select(df, region1).reset_index(drop=True)
)
def test_trim():
### trim with view_df
view_df = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 13, 26, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=["chrom", "start", "end", "name"],
)
df = pd.DataFrame(
[
["chr1", -6, 12, "chr1p"],
["chr1", 0, 12, "chr1p"],
["chr1", 32, 36, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=["chrom", "start", "end", "view_region"],
)
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 0, 12, "chr1p"],
["chr1", 26, 26, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=["chrom", "start", "end", "view_region"],
)
with pytest.raises(ValueError):
bioframe.trim(df, view_df=view_df)
# df_view_col already exists, so need to specify it:
pd.testing.assert_frame_equal(
df_trimmed, bioframe.trim(df, view_df=view_df, df_view_col="view_region")
)
### trim with view_df interpreted from dictionary for chromsizes
chromsizes = {"chr1": 20, "chrX_0": 5}
df = pd.DataFrame(
[
["chr1", 0, 12],
["chr1", 13, 26],
["chrX_0", 1, 8],
],
columns=["chrom", "startFunky", "end"],
)
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12],
["chr1", 13, 20],
["chrX_0", 1, 5],
],
columns=["chrom", "startFunky", "end"],
).astype({"startFunky": pd.Int64Dtype(), "end": pd.Int64Dtype()})
pd.testing.assert_frame_equal(
df_trimmed,
bioframe.trim(
df,
view_df=chromsizes,
cols=["chrom", "startFunky", "end"],
return_view_columns=False,
),
)
### trim with default limits=None and negative values
df = pd.DataFrame(
[
["chr1", -4, 12],
["chr1", 13, 26],
["chrX", -5, -1],
],
columns=["chrom", "start", "end"],
)
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12],
["chr1", 13, 26],
["chrX", 0, 0],
],
columns=["chrom", "start", "end"],
)
pd.testing.assert_frame_equal(df_trimmed, bioframe.trim(df))
### trim when there are NaN intervals
df = pd.DataFrame(
[
["chr1", -4, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, "chr1q"],
["chrX", -5, -1, "chrX_0"],
],
columns=["chrom", "start", "end", "region"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, "chr1q"],
["chrX", 0, 0, "chrX_0"],
],
columns=["chrom", "start", "end", "region"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
pd.testing.assert_frame_equal(df_trimmed, bioframe.trim(df))
### trim with view_df and NA intervals
view_df = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 13, 26, "chr1q"],
["chrX", 1, 12, "chrX_0"],
],
columns=["chrom", "start", "end", "name"],
)
df = pd.DataFrame(
[
["chr1", -6, 12],
["chr1", 0, 12],
[pd.NA, pd.NA, pd.NA],
["chrX", 1, 20],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 0, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, pd.NA],
["chrX", 1, 12, "chrX_0"],
],
columns=["chrom", "start", "end", "view_region"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
# infer df_view_col with assign_view and ignore NAs
pd.testing.assert_frame_equal(
df_trimmed,
bioframe.trim(df, view_df=view_df, df_view_col=None, return_view_columns=True)[
["chrom", "start", "end", "view_region"]
],
)
def test_expand():
d = """chrom start end
0 chr1 1 5
1 chr1 50 55
2 chr2 100 200"""
fake_bioframe = pd.read_csv(StringIO(d), sep=r"\s+")
expand_bp = 10
fake_expanded = bioframe.expand(fake_bioframe, expand_bp)
d = """chrom start end
0 chr1 -9 15
1 chr1 40 65
2 chr2 90 210"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
# expand with negative pad
expand_bp = -10
fake_expanded = bioframe.expand(fake_bioframe, expand_bp)
d = """chrom start end
0 chr1 3 3
1 chr1 52 52
2 chr2 110 190"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
expand_bp = -10
fake_expanded = bioframe.expand(fake_bioframe, expand_bp, side="left")
d = """chrom start end
0 chr1 3 5
1 chr1 52 55
2 chr2 110 200"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
# expand with multiplicative pad
mult = 0
fake_expanded = bioframe.expand(fake_bioframe, pad=None, scale=mult)
d = """chrom start end
0 chr1 3 3
1 chr1 52 52
2 chr2 150 150"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
mult = 2.0
fake_expanded = bioframe.expand(fake_bioframe, pad=None, scale=mult)
d = """chrom start end
0 chr1 -1 7
1 chr1 48 58
2 chr2 50 250"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
# expand with NA and non-integer multiplicative pad
d = """chrom start end
0 chr1 1 5
1 NA NA NA
2 chr2 100 200"""
fake_bioframe = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}
)
mult = 1.10
fake_expanded = bioframe.expand(fake_bioframe, pad=None, scale=mult)
d = """chrom start end
0 chr1 1 5
1 NA NA NA
2 chr2 95 205"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}
)
pd.testing.assert_frame_equal(df, fake_expanded)
def test_overlap():
### test consistency of overlap(how='inner') with pyranges.join ###
### note does not test overlap_start or overlap_end columns of bioframe.overlap
df1 = mock_bioframe()
df2 = mock_bioframe()
assert df1.equals(df2) == False
# p1 = bioframe_to_pyranges(df1)
# p2 = bioframe_to_pyranges(df2)
# pp = pyranges_overlap_to_bioframe(p1.join(p2, how=None))[
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"]
# ]
# bb = bioframe.overlap(df1, df2, how="inner")[
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"]
# ]
# pp = pp.sort_values(
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"],
# ignore_index=True,
# )
# bb = bb.sort_values(
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"],
# ignore_index=True,
# )
# pd.testing.assert_frame_equal(bb, pp, check_dtype=False, check_exact=False)
# print("overlap elements agree")
### test overlap on= [] ###
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+", "cat"],
["chr1", 8, 12, "-", "cat"],
["chrX", 1, 8, "+", "cat"],
],
columns=["chrom1", "start", "end", "strand", "animal"],
)
df2 = pd.DataFrame(
[["chr1", 6, 10, "+", "dog"], ["chrX", 7, 10, "-", "dog"]],
columns=["chrom2", "start2", "end2", "strand", "animal"],
)
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_index=True,
return_input=False,
)
assert np.sum(pd.isna(b["index_"].values)) == 3
b = bioframe.overlap(
df1,
df2,
on=["strand"],
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_index=True,
return_input=False,
)
assert np.sum(pd.isna(b["index_"].values)) == 2
b = bioframe.overlap(
df1,
df2,
on=None,
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_index=True,
return_input=False,
)
assert np.sum(pd.isna(b["index_"].values)) == 0
### test overlap 'left', 'outer', and 'right'
b = bioframe.overlap(
df1,
df2,
on=None,
how="outer",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 3
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="outer",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 5
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="inner",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 0
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="right",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 2
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 3
### test keep_order and NA handling
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+"],
[pd.NA, pd.NA, pd.NA, "-"],
["chrX", 1, 8, "+"],
],
columns=["chrom", "start", "end", "strand"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df2 = pd.DataFrame(
[["chr1", 6, 10, "+"], [pd.NA, pd.NA, pd.NA, "-"], ["chrX", 7, 10, "-"]],
columns=["chrom2", "start2", "end2", "strand"],
).astype({"start2": pd.Int64Dtype(), "end2": pd.Int64Dtype()})
assert df1.equals(
bioframe.overlap(
df1, df2, how="left", keep_order=True, cols2=["chrom2", "start2", "end2"]
)[["chrom", "start", "end", "strand"]]
)
assert ~df1.equals(
bioframe.overlap(
df1, df2, how="left", keep_order=False, cols2=["chrom2", "start2", "end2"]
)[["chrom", "start", "end", "strand"]]
)
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+", pd.NA],
[pd.NA, pd.NA, pd.NA, "-", pd.NA],
["chrX", 1, 8, pd.NA, pd.NA],
],
columns=["chrom", "start", "end", "strand", "animal"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df2 = pd.DataFrame(
[["chr1", 6, 10, pd.NA, "tiger"]],
columns=["chrom2", "start2", "end2", "strand", "animal"],
).astype({"start2": pd.Int64Dtype(), "end2": pd.Int64Dtype()})
assert (
bioframe.overlap(
df1,
df2,
how="outer",
cols2=["chrom2", "start2", "end2"],
return_index=True,
keep_order=False,
).shape
== (3, 12)
)
### result of overlap should still have bedframe-like properties
overlap_df = bioframe.overlap(
df1,
df2,
how="outer",
cols2=["chrom2", "start2", "end2"],
return_index=True,
suffixes=("", ""),
)
assert checks.is_bedframe(
overlap_df[df1.columns],
)
assert checks.is_bedframe(
overlap_df[df2.columns], cols=["chrom2", "start2", "end2"]
)
overlap_df = bioframe.overlap(
df1,
df2,
how="innter",
cols2=["chrom2", "start2", "end2"],
return_index=True,
suffixes=("", ""),
)
assert checks.is_bedframe(
overlap_df[df1.columns],
)
assert checks.is_bedframe(
overlap_df[df2.columns], cols=["chrom2", "start2", "end2"]
)
# test keep_order incompatible if how!= 'left'
with pytest.raises(ValueError):
bioframe.overlap(
df1,
df2,
how="outer",
on=["animal"],
cols2=["chrom2", "start2", "end2"],
keep_order=True,
)
def test_cluster():
df1 = pd.DataFrame(
[
["chr1", 1, 5],
["chr1", 3, 8],
["chr1", 8, 10],
["chr1", 12, 14],
],
columns=["chrom", "start", "end"],
)
df_annotated = bioframe.cluster(df1)
assert (
df_annotated["cluster"].values == np.array([0, 0, 0, 1])
).all() # the last interval does not overlap the first three
df_annotated = bioframe.cluster(df1, min_dist=2)
assert (
df_annotated["cluster"].values == np.array([0, 0, 0, 0])
).all() # all intervals part of the same cluster
df_annotated = bioframe.cluster(df1, min_dist=None)
assert (
df_annotated["cluster"].values == np.array([0, 0, 1, 2])
).all() # adjacent intervals not clustered
df1.iloc[0, 0] = "chrX"
df_annotated = bioframe.cluster(df1)
assert (
df_annotated["cluster"].values == np.array([2, 0, 0, 1])
).all() # do not cluster intervals across chromosomes
# test consistency with pyranges (which automatically sorts df upon creation and uses 1-based indexing for clusters)
# assert (
# (bioframe_to_pyranges(df1).cluster(count=True).df["Cluster"].values - 1)
# == bioframe.cluster(df1.sort_values(["chrom", "start"]))["cluster"].values
# ).all()
# test on=[] argument
df1 = pd.DataFrame(
[
["chr1", 3, 8, "+", "cat", 5.5],
["chr1", 3, 8, "-", "dog", 6.5],
["chr1", 6, 10, "-", "cat", 6.5],
["chrX", 6, 10, "-", "cat", 6.5],
],
columns=["chrom", "start", "end", "strand", "animal", "location"],
)
assert (
bioframe.cluster(df1, on=["animal"])["cluster"].values == np.array([0, 1, 0, 2])
).all()
assert (
bioframe.cluster(df1, on=["strand"])["cluster"].values == np.array([0, 1, 1, 2])
).all()
assert (
bioframe.cluster(df1, on=["location", "animal"])["cluster"].values
== np.array([0, 2, 1, 3])
).all()
### test cluster with NAs
df1 = pd.DataFrame(
[
["chrX", 1, 8, pd.NA, pd.NA],
[pd.NA, pd.NA, pd.NA, "-", pd.NA],
["chr1", 8, 12, "+", pd.NA],
["chr1", 1, 8, np.nan, pd.NA],
[pd.NA, np.nan, pd.NA, "-", pd.NA],
],
columns=["chrom", "start", "end", "strand", "animal"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
assert bioframe.cluster(df1)["cluster"].max() == 3
assert bioframe.cluster(df1, on=["strand"])["cluster"].max() == 4
pd.testing.assert_frame_equal(df1, bioframe.cluster(df1)[df1.columns])
assert checks.is_bedframe(
bioframe.cluster(df1, on=["strand"]),
cols=["chrom", "cluster_start", "cluster_end"],
)
assert checks.is_bedframe(
bioframe.cluster(df1), cols=["chrom", "cluster_start", "cluster_end"]
)
assert checks.is_bedframe(bioframe.cluster(df1))
def test_merge():
df1 = pd.DataFrame(
[
["chr1", 1, 5],
["chr1", 3, 8],
["chr1", 8, 10],
["chr1", 12, 14],
],
columns=["chrom", "start", "end"],
)
# the last interval does not overlap the first three with default min_dist=0
assert (bioframe.merge(df1)["n_intervals"].values == np.array([3, 1])).all()
    # adjacent intervals are not clustered with min_dist=None
assert (
bioframe.merge(df1, min_dist=None)["n_intervals"].values == np.array([2, 1, 1])
).all()
# all intervals part of one cluster
assert (
bioframe.merge(df1, min_dist=2)["n_intervals"].values == np.array([4])
).all()
df1.iloc[0, 0] = "chrX"
assert (
bioframe.merge(df1, min_dist=None)["n_intervals"].values
== np.array([1, 1, 1, 1])
).all()
assert (
bioframe.merge(df1, min_dist=0)["n_intervals"].values == np.array([2, 1, 1])
).all()
# total number of intervals should equal length of original dataframe
mock_df = mock_bioframe()
assert np.sum(bioframe.merge(mock_df, min_dist=0)["n_intervals"].values) == len(
mock_df
)
# # test consistency with pyranges
# pd.testing.assert_frame_equal(
# pyranges_to_bioframe(bioframe_to_pyranges(df1).merge(count=True)),
# bioframe.merge(df1),
# check_dtype=False,
# check_exact=False,
# )
# test on=['chrom',...] argument
df1 = pd.DataFrame(
[
["chr1", 3, 8, "+", "cat", 5.5],
["chr1", 3, 8, "-", "dog", 6.5],
["chr1", 6, 10, "-", "cat", 6.5],
["chrX", 6, 10, "-", "cat", 6.5],
],
columns=["chrom", "start", "end", "strand", "animal", "location"],
)
assert len(bioframe.merge(df1, on=None)) == 2
assert len(bioframe.merge(df1, on=["strand"])) == 3
assert len(bioframe.merge(df1, on=["strand", "location"])) == 3
assert len(bioframe.merge(df1, on=["strand", "location", "animal"])) == 4
d = """ chrom start end animal n_intervals
0 chr1 3 10 cat 2
1 chr1 3 8 dog 1
2 chrX 6 10 cat 1"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(
df,
bioframe.merge(df1, on=["animal"]),
check_dtype=False,
)
# merge with repeated indices
df = pd.DataFrame(
{"chrom": ["chr1", "chr2"], "start": [100, 400], "end": [110, 410]}
)
df.index = [0, 0]
pd.testing.assert_frame_equal(
df.reset_index(drop=True), bioframe.merge(df)[["chrom", "start", "end"]]
)
# test merge with NAs
df1 = pd.DataFrame(
[
["chrX", 1, 8, pd.NA, pd.NA],
[pd.NA, pd.NA, pd.NA, "-", pd.NA],
["chr1", 8, 12, "+", pd.NA],
["chr1", 1, 8, np.nan, pd.NA],
[pd.NA, np.nan, pd.NA, "-", pd.NA],
],
columns=["chrom", "start", "end", "strand", "animal"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
assert bioframe.merge(df1).shape[0] == 4
assert bioframe.merge(df1)["start"].iloc[0] == 1
assert bioframe.merge(df1)["end"].iloc[0] == 12
assert bioframe.merge(df1, on=["strand"]).shape[0] == df1.shape[0]
assert bioframe.merge(df1, on=["animal"]).shape[0] == df1.shape[0]
assert bioframe.merge(df1, on=["animal"]).shape[1] == df1.shape[1] + 1
assert checks.is_bedframe(bioframe.merge(df1, on=["strand", "animal"]))
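# Hedged usage sketch (illustrative only; the helper name is hypothetical):
# bioframe.merge collapses overlapping intervals and reports how many input rows were
# merged into each output interval via the "n_intervals" column.
def _merge_usage_sketch():
    demo = pd.DataFrame(
        [["chr1", 1, 5], ["chr1", 3, 8]], columns=["chrom", "start", "end"]
    )
    merged = bioframe.merge(demo)
    # the two overlapping intervals collapse into a single merged interval
    assert len(merged) == 1
    assert merged["n_intervals"].tolist() == [2]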
def test_complement():
### complementing a df with no intervals in chrX by a view with chrX should return entire chrX region
df1 = pd.DataFrame(
[["chr1", 1, 5], ["chr1", 3, 8], ["chr1", 8, 10], ["chr1", 12, 14]],
columns=["chrom", "start", "end"],
)
df1_chromsizes = {"chr1": 100, "chrX": 100}
df1_complement = pd.DataFrame(
[
["chr1", 0, 1, "chr1:0-100"],
["chr1", 10, 12, "chr1:0-100"],
["chr1", 14, 100, "chr1:0-100"],
["chrX", 0, 100, "chrX:0-100"],
],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(
bioframe.complement(df1, view_df=df1_chromsizes), df1_complement
)
### test complement with two chromosomes ###
df1.iloc[0, 0] = "chrX"
df1_complement = pd.DataFrame(
[
["chr1", 0, 3, "chr1:0-100"],
["chr1", 10, 12, "chr1:0-100"],
["chr1", 14, 100, "chr1:0-100"],
["chrX", 0, 1, "chrX:0-100"],
["chrX", 5, 100, "chrX:0-100"],
],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(
bioframe.complement(df1, view_df=df1_chromsizes), df1_complement
)
### test complement with no view_df and a negative interval
df1 = pd.DataFrame(
[["chr1", -5, 5], ["chr1", 10, 20]], columns=["chrom", "start", "end"]
)
df1_complement = pd.DataFrame(
[
["chr1", 5, 10, "chr1:0-9223372036854775807"],
["chr1", 20, np.iinfo(np.int64).max, "chr1:0-9223372036854775807"],
],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(bioframe.complement(df1), df1_complement)
### test complement with an overhanging interval
df1 = pd.DataFrame(
[["chr1", -5, 5], ["chr1", 10, 20]], columns=["chrom", "start", "end"]
)
chromsizes = {"chr1": 15}
df1_complement = pd.DataFrame(
[
["chr1", 5, 10, "chr1:0-15"],
],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(
bioframe.complement(df1, view_df=chromsizes, view_name_col="VR"), df1_complement
)
### test complement where an interval from df overlaps two different regions from view
### test complement with no view_df and a negative interval
df1 = pd.DataFrame([["chr1", 5, 15]], columns=["chrom", "start", "end"])
chromsizes = [("chr1", 0, 9, "chr1p"), ("chr1", 11, 20, "chr1q")]
df1_complement = pd.DataFrame(
[["chr1", 0, 5, "chr1p"], ["chr1", 15, 20, "chr1q"]],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(bioframe.complement(df1, chromsizes), df1_complement)
### test complement with NAs
df1 = pd.DataFrame(
[[pd.NA, pd.NA, pd.NA], ["chr1", 5, 15], [pd.NA, pd.NA, pd.NA]],
columns=["chrom", "start", "end"],
).astype(
{
"start": pd.Int64Dtype(),
"end": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(bioframe.complement(df1, chromsizes), df1_complement)
with pytest.raises(ValueError): # no NAs allowed in chromsizes
bioframe.complement(
df1, [("chr1", pd.NA, 9, "chr1p"), ("chr1", 11, 20, "chr1q")]
)
assert checks.is_bedframe(bioframe.complement(df1, chromsizes))
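# Hedged usage sketch (illustrative only; the helper name is hypothetical):
# bioframe.complement returns the gaps between intervals within each view region.
def _complement_usage_sketch():
    demo = pd.DataFrame([["chr1", 3, 7]], columns=["chrom", "start", "end"])
    gaps = bioframe.complement(demo, view_df={"chr1": 10})
    # gaps on either side of the single interval: [0, 3) and [7, 10)
    assert gaps["start"].tolist() == [0, 7]
    assert gaps["end"].tolist() == [3, 10]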
def test_closest():
df1 = pd.DataFrame(
[
["chr1", 1, 5],
],
columns=["chrom", "start", "end"],
)
df2 = pd.DataFrame(
[["chr1", 4, 8], ["chr1", 10, 11]], columns=["chrom", "start", "end"]
)
### closest(df1,df2,k=1) ###
d = """chrom start end chrom_ start_ end_ distance
0 chr1 1 5 chr1 4 8 0"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_": pd.Int64Dtype(),
"end_": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(df, bioframe.closest(df1, df2, k=1))
### closest(df1,df2, ignore_overlaps=True)) ###
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 1 5 chr1 10 11 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_2": pd.Int64Dtype(),
"end_2": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(
df, bioframe.closest(df1, df2, suffixes=("_1", "_2"), ignore_overlaps=True)
)
### closest(df1,df2,k=2) ###
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 1 5 chr1 4 8 0
1 chr1 1 5 chr1 10 11 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_2": pd.Int64Dtype(),
"end_2": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(
df, bioframe.closest(df1, df2, suffixes=("_1", "_2"), k=2)
)
### closest(df2,df1) ###
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 4 8 chr1 1 5 0
1 chr1 10 11 chr1 1 5 5 """
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_2": pd.Int64Dtype(),
"end_2": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(df, bioframe.closest(df2, df1, suffixes=("_1", "_2")))
### change first interval to new chrom ###
df2.iloc[0, 0] = "chrA"
d = """chrom start end chrom_ start_ end_ distance
0 chr1 1 5 chr1 10 11 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_": pd.Int64Dtype(),
"end_": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(df, bioframe.closest(df1, df2, k=1))
### test other return arguments ###
df2.iloc[0, 0] = "chr1"
d = """
index index_ have_overlap overlap_start overlap_end distance
0 0 0 True 4 5 0
1 0 1 False <NA> <NA> 5
"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(
df,
bioframe.closest(
df1,
df2,
k=2,
return_overlap=True,
return_index=True,
return_input=False,
return_distance=True,
),
check_dtype=False,
)
# closest should ignore empty groups (e.g. from categorical chrom)
df = pd.DataFrame(
[
["chrX", 1, 8],
["chrX", 2, 10],
],
columns=["chrom", "start", "end"],
)
d = """ chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chrX 1 8 chrX 2 10 0
1 chrX 2 10 chrX 1 8 0"""
df_closest = pd.read_csv(StringIO(d), sep=r"\s+")
df_cat = pd.CategoricalDtype(categories=["chrX", "chr1"], ordered=True)
df = df.astype({"chrom": df_cat})
pd.testing.assert_frame_equal(
df_closest,
bioframe.closest(df, suffixes=("_1", "_2")),
check_dtype=False,
check_categorical=False,
)
# closest should ignore null rows: code will need to be modified
# as for overlap if an on=[] option is added
df1 = pd.DataFrame(
[
[pd.NA, pd.NA, pd.NA],
["chr1", 1, 5],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df2 = pd.DataFrame(
[
[pd.NA, pd.NA, pd.NA],
["chr1", 4, 8],
[pd.NA, pd.NA, pd.NA],
["chr1", 10, 11],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 1 5 chr1 10 11 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_1": pd.Int64Dtype(),
"end_1": pd.Int64Dtype(),
"start_2": pd.Int64Dtype(),
"end_2": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(
df, bioframe.closest(df1, df2, suffixes=("_1", "_2"), ignore_overlaps=True, k=5)
)
with pytest.raises(ValueError): # inputs must be valid bedFrames
df1.iloc[0, 0] = "chr10"
bioframe.closest(df1, df2)
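# Hedged usage sketch (illustrative only; the helper name is hypothetical):
# bioframe.closest reports, for each interval of the first frame, its nearest interval
# in the second frame together with the separating "distance".
def _closest_usage_sketch():
    a = pd.DataFrame([["chr1", 1, 5]], columns=["chrom", "start", "end"])
    b = pd.DataFrame([["chr1", 10, 11]], columns=["chrom", "start", "end"])
    # the gap between end=5 and start=10 is 5
    assert bioframe.closest(a, b)["distance"].tolist() == [5]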
def test_coverage():
    ### coverage does not exceed length of original interval
df1 = pd.DataFrame([["chr1", 3, 8]], columns=["chrom", "start", "end"])
df2 = pd.DataFrame([["chr1", 2, 10]], columns=["chrom", "start", "end"])
d = """chrom start end coverage
0 chr1 3 8 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, bioframe.coverage(df1, df2))
### coverage of interval on different chrom returns zero for coverage and n_overlaps
df1 = pd.DataFrame([["chr1", 3, 8]], columns=["chrom", "start", "end"])
df2 = pd.DataFrame([["chrX", 3, 8]], columns=["chrom", "start", "end"])
d = """chrom start end coverage
0 chr1 3 8 0 """
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, bioframe.coverage(df1, df2))
### when a second overlap starts within the first
df1 = pd.DataFrame([["chr1", 3, 8]], columns=["chrom", "start", "end"])
df2 = pd.DataFrame(
[["chr1", 3, 6], ["chr1", 5, 8]], columns=["chrom", "start", "end"]
)
d = """chrom start end coverage
0 chr1 3 8 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, bioframe.coverage(df1, df2))
### coverage of NA interval returns zero for coverage
df1 = pd.DataFrame(
[
["chr1", 10, 20],
[pd.NA, pd.NA, pd.NA],
["chr1", 3, 8],
[pd.NA, pd.NA, pd.NA],
],
columns=["chrom", "start", "end"],
)
df2 = pd.DataFrame(
[["chr1", 3, 6], ["chr1", 5, 8], [pd.NA, pd.NA, pd.NA]],
columns=["chrom", "start", "end"],
)
df1 = bioframe.sanitize_bedframe(df1)
df2 = bioframe.sanitize_bedframe(df2)
df_coverage = pd.DataFrame(
[
["chr1", 10, 20, 0],
[pd.NA, pd.NA, pd.NA, 0],
["chr1", 3, 8, 5],
[pd.NA, pd.NA, pd.NA, 0],
],
columns=["chrom", "start", "end", "coverage"],
).astype(
{"start": pd.Int64Dtype(), "end": pd.Int64Dtype(), "coverage": pd.Int64Dtype()}
)
pd.testing.assert_frame_equal(df_coverage, bioframe.coverage(df1, df2))
    ### coverage without return_input returns a single-column DataFrame
assert (
bioframe.coverage(df1, df2, return_input=False)["coverage"].values
== np.array([0, 0, 5, 0])
).all()
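# Hedged usage sketch (illustrative only; the helper name is hypothetical):
# bioframe.coverage reports, per interval of the first frame, how many of its base
# pairs are covered by intervals of the second frame.
def _coverage_usage_sketch():
    a = pd.DataFrame([["chr1", 3, 8]], columns=["chrom", "start", "end"])
    b = pd.DataFrame([["chr1", 2, 10]], columns=["chrom", "start", "end"])
    # [3, 8) lies entirely inside [2, 10), so all 5 positions are covered
    assert bioframe.coverage(a, b)["coverage"].tolist() == [5]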
def test_subtract():
### no intervals should be left after self-subtraction
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 7], ["chrX", 1, 5]],
columns=["chrom", "start", "end"],
)
assert len(bioframe.subtract(df1, df1)) == 0
### no intervals on chrX should remain after subtracting a longer interval
### interval on chr1 should be split.
### additional column should be propagated to children.
df2 = pd.DataFrame(
[
["chrX", 0, 18],
["chr1", 5, 6],
],
columns=["chrom", "start", "end"],
)
df1["animal"] = "sea-creature"
df_result = pd.DataFrame(
[["chr1", 4, 5, "sea-creature"], ["chr1", 6, 7, "sea-creature"]],
columns=["chrom", "start", "end", "animal"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
pd.testing.assert_frame_equal(
df_result,
bioframe.subtract(df1, df2)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True),
)
### no intervals on chrX should remain after subtracting a longer interval
df2 = pd.DataFrame(
[["chrX", 0, 4], ["chr1", 6, 6], ["chrX", 4, 9]],
columns=["chrom", "start", "end"],
)
df1["animal"] = "sea-creature"
df_result = pd.DataFrame(
[["chr1", 4, 6, "sea-creature"], ["chr1", 6, 7, "sea-creature"]],
columns=["chrom", "start", "end", "animal"],
)
pd.testing.assert_frame_equal(
df_result.astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}),
bioframe.subtract(df1, df2)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True),
)
    ### subtracting dataframes with funny column names
funny_cols = ["C", "chromStart", "chromStop"]
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 7], ["chrX", 1, 5]],
columns=funny_cols,
)
df1["strand"] = "+"
assert len(bioframe.subtract(df1, df1, cols1=funny_cols, cols2=funny_cols)) == 0
funny_cols2 = ["chr", "st", "e"]
df2 = pd.DataFrame(
[
["chrX", 0, 18],
["chr1", 5, 6],
],
columns=funny_cols2,
)
df_result = pd.DataFrame(
[["chr1", 4, 5, "+"], ["chr1", 6, 7, "+"]],
columns=funny_cols + ["strand"],
)
df_result = df_result.astype(
{funny_cols[1]: pd.Int64Dtype(), funny_cols[2]: pd.Int64Dtype()}
)
pd.testing.assert_frame_equal(
df_result,
bioframe.subtract(df1, df2, cols1=funny_cols, cols2=funny_cols2)
.sort_values(funny_cols)
.reset_index(drop=True),
)
# subtract should ignore empty groups
df1 = pd.DataFrame(
[
["chrX", 1, 8],
["chrX", 2, 10],
],
columns=["chrom", "start", "end"],
)
df2 = pd.DataFrame(
[
["chrX", 1, 8],
],
columns=["chrom", "start", "end"],
)
df_cat = pd.CategoricalDtype(categories=["chrX", "chr1"], ordered=True)
df1 = df1.astype({"chrom": df_cat})
df_subtracted = pd.DataFrame(
[
["chrX", 8, 10],
],
columns=["chrom", "start", "end"],
)
assert bioframe.subtract(df1, df1).empty
pd.testing.assert_frame_equal(
df_subtracted.astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}),
bioframe.subtract(df1, df2),
check_dtype=False,
check_categorical=False,
)
## test transferred from deprecated bioframe.split
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 7], ["chrX", 1, 5]],
columns=["chrom", "start", "end"],
)
df2 = pd.DataFrame(
[
["chrX", 4],
["chr1", 5],
],
columns=["chrom", "pos"],
)
df2["start"] = df2["pos"]
df2["end"] = df2["pos"]
df_result = (
pd.DataFrame(
[
["chrX", 1, 4],
["chrX", 3, 4],
["chrX", 4, 5],
["chrX", 4, 8],
["chr1", 5, 7],
["chr1", 4, 5],
],
columns=["chrom", "start", "end"],
)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True)
.astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
)
pd.testing.assert_frame_equal(
df_result,
bioframe.subtract(df1, df2)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True),
)
# Test the case when a chromosome should not be split (now implemented with subtract)
df1 = pd.DataFrame(
[
["chrX", 3, 8],
["chr1", 4, 7],
],
columns=["chrom", "start", "end"],
)
df2 = pd.DataFrame([["chrX", 4]], columns=["chrom", "pos"])
df2["start"] = df2["pos"].values
df2["end"] = df2["pos"].values
df_result = (
pd.DataFrame(
[
["chrX", 3, 4],
["chrX", 4, 8],
["chr1", 4, 7],
],
columns=["chrom", "start", "end"],
)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True)
)
pd.testing.assert_frame_equal(
df_result.astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}),
bioframe.subtract(df1, df2)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True),
)
# subtract should ignore null rows
df1 = pd.DataFrame(
[[pd.NA, pd.NA, pd.NA], ["chr1", 1, 5]],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df2 = pd.DataFrame(
[
["chrX", 1, 5],
[pd.NA, pd.NA, pd.NA],
["chr1", 4, 8],
[pd.NA, pd.NA, pd.NA],
["chr1", 10, 11],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df_subtracted = pd.DataFrame(
[
["chr1", 1, 4],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
pd.testing.assert_frame_equal(df_subtracted, bioframe.subtract(df1, df2))
df1 = pd.DataFrame(
[
[pd.NA, pd.NA, pd.NA],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
assert len(bioframe.subtract(df1, df2)) == 0 # empty df1 but valid chroms in df2
with pytest.raises(ValueError): # no non-null chromosomes
bioframe.subtract(df1, df1)
df2 = pd.DataFrame(
[
[pd.NA, pd.NA, pd.NA],
[pd.NA, pd.NA, pd.NA],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
with pytest.raises(ValueError): # no non-null chromosomes
bioframe.subtract(df1, df2)
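# Hedged usage sketch (illustrative only; the helper name is hypothetical):
# bioframe.subtract removes the second frame's intervals from the first, splitting
# intervals where necessary.
def _subtract_usage_sketch():
    a = pd.DataFrame([["chr1", 1, 10]], columns=["chrom", "start", "end"])
    b = pd.DataFrame([["chr1", 4, 6]], columns=["chrom", "start", "end"])
    out = bioframe.subtract(a, b).sort_values(["start"]).reset_index(drop=True)
    # the single interval is split into the two flanks [1, 4) and [6, 10)
    assert out["start"].tolist() == [1, 6]
    assert out["end"].tolist() == [4, 10]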
def test_setdiff():
cols1 = ["chrom1", "start", "end"]
cols2 = ["chrom2", "start", "end"]
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+", "cat"],
["chr1", 8, 12, "-", "cat"],
["chrX", 1, 8, "+", "cat"],
],
columns=cols1 + ["strand", "animal"],
)
df2 = pd.DataFrame(
[
["chrX", 7, 10, "-", "dog"],
["chr1", 6, 10, "-", "cat"],
["chr1", 6, 10, "-", "cat"],
],
columns=cols2 + ["strand", "animal"],
)
assert (
len(
bioframe.setdiff(
df1,
df2,
cols1=cols1,
cols2=cols2,
on=None,
)
)
== 0
) # everything overlaps
assert (
len(
bioframe.setdiff(
df1,
df2,
cols1=cols1,
cols2=cols2,
on=["animal"],
)
)
== 1
) # two overlap, one remains
assert (
len(
bioframe.setdiff(
df1,
df2,
cols1=cols1,
cols2=cols2,
on=["strand"],
)
)
== 2
) # one overlaps, two remain
# setdiff should ignore nan rows
df1 = pd.concat([pd.DataFrame([pd.NA]), df1, pd.DataFrame([pd.NA])])[
["chrom1", "start", "end", "strand", "animal"]
]
df1 = df1.astype(
{
"start": pd.Int64Dtype(),
"end": pd.Int64Dtype(),
}
)
df2 = pd.concat([pd.DataFrame([pd.NA]), df2, pd.DataFrame([pd.NA])])[
["chrom2", "start", "end", "strand", "animal"]
]
df2 = df2.astype(
{
"start": pd.Int64Dtype(),
"end": pd.Int64Dtype(),
}
)
assert (2, 5) == np.shape(bioframe.setdiff(df1, df1, cols1=cols1, cols2=cols1))
assert (2, 5) == np.shape(bioframe.setdiff(df1, df2, cols1=cols1, cols2=cols2))
assert (4, 5) == np.shape(
bioframe.setdiff(df1, df2, on=["strand"], cols1=cols1, cols2=cols2)
)
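# Hedged usage sketch (illustrative only; the helper name is hypothetical):
# bioframe.setdiff keeps only the rows of the first frame with no overlap in the second.
def _setdiff_usage_sketch():
    a = pd.DataFrame(
        [["chr1", 1, 5], ["chr1", 20, 25]], columns=["chrom", "start", "end"]
    )
    b = pd.DataFrame([["chr1", 4, 8]], columns=["chrom", "start", "end"])
    # only the non-overlapping interval [20, 25) survives
    assert len(bioframe.setdiff(a, b)) == 1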
def test_count_overlaps():
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+", "cat"],
["chr1", 8, 12, "-", "cat"],
["chrX", 1, 8, "+", "cat"],
],
columns=["chrom1", "start", "end", "strand", "animal"],
)
df2 = pd.DataFrame(
[
["chr1", 6, 10, "+", "dog"],
["chr1", 6, 10, "+", "dog"],
["chrX", 7, 10, "+", "dog"],
["chrX", 7, 10, "+", "dog"],
],
columns=["chrom2", "start2", "end2", "strand", "animal"],
)
assert (
bioframe.count_overlaps(
df1,
df2,
on=None,
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)["count"].values
== np.array([2, 2, 2])
).all()
assert (
bioframe.count_overlaps(
df1,
df2,
on=["strand"],
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)["count"].values
== np.array([2, 0, 2])
).all()
assert (
bioframe.count_overlaps(
df1,
df2,
on=["strand", "animal"],
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)["count"].values
== np.array([0, 0, 0])
).all()
# overlaps with pd.NA
counts_no_nans = bioframe.count_overlaps(
df1,
df2,
on=None,
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
df1_na = (pd.concat([pd.DataFrame([pd.NA]), df1, pd.DataFrame([pd.NA])])).astype(
{
"start": pd.Int64Dtype(),
"end": pd.Int64Dtype(),
}
)[["chrom1", "start", "end", "strand", "animal"]]
df2_na = (pd.concat([pd.DataFrame([pd.NA]), df2, pd.DataFrame([pd.NA])])).astype(
{
"start2": pd.Int64Dtype(),
"end2": pd.Int64Dtype(),
}
)[["chrom2", "start2", "end2", "strand", "animal"]]
counts_nans_inserted_after = (
pd.concat([pd.DataFrame([pd.NA]), counts_no_nans, pd.DataFrame([pd.NA])])
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype(),})[
["chrom1", "start", "end", "strand", "animal", "count"]
]
counts_nans = bioframe.count_overlaps(
df1_na,
df2_na,
on=None,
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
pd.testing.assert_frame_equal(
counts_nans,
bioframe.count_overlaps(
df1_na,
df2,
on=None,
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
),
)
assert (
counts_nans["count"].values
== counts_nans_inserted_after["count"].fillna(0).values
).all()
    ### count_overlaps without return_input returns a single-column DataFrame
pd.testing.assert_frame_equal(
bioframe.count_overlaps(
df1_na,
df2_na,
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_input=False,
),
pd.DataFrame(counts_nans["count"]),
)
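# Hedged usage sketch (illustrative only; the helper name is hypothetical):
# bioframe.count_overlaps annotates each row of the first frame with the number of
# overlapping intervals found in the second frame.
def _count_overlaps_usage_sketch():
    a = pd.DataFrame([["chr1", 1, 5]], columns=["chrom", "start", "end"])
    b = pd.DataFrame(
        [["chr1", 4, 8], ["chr1", 10, 11]], columns=["chrom", "start", "end"]
    )
    # only the first interval of `b` intersects [1, 5)
    assert bioframe.count_overlaps(a, b)["count"].tolist() == [1]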
def test_assign_view():
## default assignment case
view_df = pd.DataFrame(
[
["chr11", 1, 8, "chr11p"],
],
columns=["chrom", "start", "end", "name"],
)
df = pd.DataFrame(
[
["chr11", 0, 10, "+"],
],
columns=["chrom", "start", "end", "strand"],
)
df_assigned = pd.DataFrame(
[
["chr11", 0, 10, "+", "chr11p"],
],
columns=["chrom", "start", "end", "strand", "view_region"],
)
df_assigned = df_assigned.astype(
{"chrom": str, "start": pd.Int64Dtype(), "end": pd.Int64Dtype()}
)
pd.testing.assert_frame_equal(df_assigned, bioframe.assign_view(df, view_df))
# assignment with funny view_name_col and an interval on chr2 not cataloged in the view_df
view_df = pd.DataFrame(
[
["chrX", 1, 8, "oranges"],
["chrX", 8, 20, "grapefruit"],
["chr1", 0, 10, "apples"],
],
columns=["chrom", "start", "end", "fruit"],
)
df = pd.DataFrame(
[
["chr1", 0, 10, "+"],
["chrX", 5, 10, "+"],
["chrX", 0, 5, "+"],
["chr2", 5, 10, "+"],
],
columns=["chrom", "start", "end", "strand"],
)
df_assigned = pd.DataFrame(
[
["chr1", 0, 10, "+", "apples"],
["chrX", 5, 10, "+", "oranges"],
["chrX", 0, 5, "+", "oranges"],
],
columns=["chrom", "start", "end", "strand", "funny_view_region"],
)
df_assigned = df_assigned.astype(
{"chrom": str, "start": pd.Int64Dtype(), "end": pd.Int64Dtype()}
)
pd.testing.assert_frame_equal(
df_assigned,
bioframe.assign_view(
df,
view_df,
view_name_col="fruit",
df_view_col="funny_view_region",
drop_unassigned=True,
),
)
### keep the interval with NA as its region if drop_unassigned is False
df_assigned = pd.DataFrame(
[
["chr1", 0, 10, "+", "apples"],
["chrX", 5, 10, "+", "oranges"],
["chrX", 0, 5, "+", "oranges"],
["chr2", 5, 10, "+", pd.NA],
],
columns=["chrom", "start", "end", "strand", "funny_view_region"],
)
df_assigned = df_assigned.astype(
{"chrom": str, "start": pd.Int64Dtype(), "end": pd.Int64Dtype()}
)
pd.testing.assert_frame_equal(
df_assigned,
bioframe.assign_view(
df,
view_df,
view_name_col="fruit",
df_view_col="funny_view_region",
drop_unassigned=False,
),
)
    ### assign_view with NA values assigns a view of None
df = pd.DataFrame(
[
["chr1", 0, 10, "+"],
["chrX", 5, 10, "+"],
[pd.NA, pd.NA, pd.NA, "+"],
["chrX", 0, 5, "+"],
["chr2", 5, 10, "+"],
],
columns=["chrom", "start", "end", "strand"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
pd.testing.assert_frame_equal(
df, bioframe.assign_view(df, view_df, view_name_col="fruit").iloc[:, :-1]
)
assert (
bioframe.assign_view(df, view_df, view_name_col="fruit")["view_region"].values
== np.array(["apples", "oranges", None, "oranges", None], dtype=object)
).all()
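# Hedged usage sketch (illustrative only; the helper name is hypothetical):
# bioframe.assign_view labels each interval with the view region (the "name" column of
# the view by default) that it falls into.
def _assign_view_usage_sketch():
    view = pd.DataFrame(
        [["chr1", 0, 10, "chr1p"]], columns=["chrom", "start", "end", "name"]
    )
    demo = pd.DataFrame([["chr1", 2, 5]], columns=["chrom", "start", "end"])
    assert bioframe.assign_view(demo, view)["view_region"].tolist() == ["chr1p"]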
def test_sort_bedframe():
view_df = pd.DataFrame(
[
["chrX", 1, 8, "oranges"],
["chrX", 8, 20, "grapefruit"],
["chr1", 0, 10, "apples"],
],
columns=["chrom", "start", "end", "fruit"],
)
df = pd.DataFrame(
[
["chr2", 5, 10, "+"],
["chr1", 0, 10, "+"],
["chrX", 5, 10, "+"],
["chrX", 0, 5, "+"],
],
columns=["chrom", "start", "end", "strand"],
)
# sorting just by chrom,start,end
df_sorted = pd.DataFrame(
[
["chr1", 0, 10, "+"],
["chr2", 5, 10, "+"],
["chrX", 0, 5, "+"],
["chrX", 5, 10, "+"],
],
columns=["chrom", "start", "end", "strand"],
)
pd.testing.assert_frame_equal(df_sorted, bioframe.sort_bedframe(df))
# when a view_df is provided, regions without assigned views
# are placed last and view_region is returned as a categorical
df_sorted = pd.DataFrame(
[
["chrX", 0, 5, "+"],
["chrX", 5, 10, "+"],
["chr1", 0, 10, "+"],
["chr2", 5, 10, "+"],
],
columns=["chrom", "start", "end", "strand"],
)
pd.testing.assert_frame_equal(
df_sorted, bioframe.sort_bedframe(df, view_df, view_name_col="fruit")
)
    # also test that sorting after assigning a view to df de novo works,
# which is triggered by df_view_col = None:
pd.testing.assert_frame_equal(
df_sorted, bioframe.sort_bedframe(df, view_df)
)
    # also test that sorting after assigning a view to df from a chromsizes-like dictionary works:
pd.testing.assert_frame_equal(
df_sorted, bioframe.sort_bedframe(df, view_df={"chrX":20, "chr1":10})
)
### 'df' has no column 'view_region', so this should raise a ValueError
assert pytest.raises(
ValueError,
bioframe.sort_bedframe,
df,
view_df,
view_name_col="fruit",
df_view_col="view_region",
)
### sort_bedframe with NA entries:
df = pd.DataFrame(
[
["chr1", 0, 10, "+"],
["chrX", 5, 10, "+"],
[pd.NA, pd.NA, pd.NA, "+"],
["chrX", 0, 5, "+"],
["chr2", 5, 10, "+"],
],
columns=["chrom", "start", "end", "strand"],
).astype({"start":
|
pd.Int64Dtype()
|
pandas.Int64Dtype
|
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
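# Hedged usage sketch (illustrative only, not part of the original suite; the helper
# name is hypothetical and not collected by pytest): invalid scalar comparisons against
# datetime64 values are elementwise-False for ==, elementwise-True for !=, and raise
# TypeError for ordering operators, as exercised by the class above.
def _dt64_invalid_comparison_sketch():
    dti = date_range("2000-01-01", periods=2)
    assert not (dti == "foo").any()
    assert (dti != "foo").all()
    with pytest.raises(TypeError):
        dti < "foo"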
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = tm.box_expected([False, True, True], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[2]
expected = tm.box_expected([True, True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, True, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[0]
expected = tm.box_expected([True, False, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[2]
expected = tm.box_expected([False, False, False], xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"datetimelike",
[
Timestamp("20130101"),
datetime(2013, 1, 1),
np.datetime64("2013-01-01T00:00", "ns"),
],
)
@pytest.mark.parametrize(
"op,expected",
[
(operator.lt, [True, False, False, False]),
(operator.le, [True, True, False, False]),
(operator.eq, [False, True, False, False]),
(operator.gt, [False, False, False, True]),
],
)
def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
# GH#17965, test for ability to compare datetime64[ns] columns
# to datetimelike
ser = Series(
[
Timestamp("20120101"),
Timestamp("20130101"),
np.nan,
Timestamp("20130103"),
],
name="A",
)
result = op(ser, datetimelike)
expected = Series(expected, name="A")
tm.assert_series_equal(result, expected)
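# Hedged usage sketch (illustrative only; the helper name is hypothetical): comparisons
# against NaT are elementwise and never raise; only "!=" evaluates to True.
def _dt64_nat_comparison_sketch():
    ser = Series(date_range("2016-01-01", periods=2))
    assert (ser == NaT).tolist() == [False, False]
    assert (ser != NaT).tolist() == [True, True]
    assert (ser < NaT).tolist() == [False, False]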
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = comparison_op(arr, element)
index_result = comparison_op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
xbox = get_upcast_box(left, right, True)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == NaT, expected)
tm.assert_equal(NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != NaT, expected)
tm.assert_equal(NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < NaT, expected)
tm.assert_equal(NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
didx2 = DatetimeIndex(
["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np.datetime64("2014-02-01 00:00"),
np.datetime64("2014-03-01 00:00"),
np.datetime64("nat"),
np.datetime64("nat"),
np.datetime64("2014-06-01 00:00"),
np.datetime64("2014-07-01 00:00"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
box = box_with_array
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box)
dz = tm.box_expected(dz, box)
if box is pd.DataFrame:
tolist = lambda x: x.astype(object).values.tolist()[0]
else:
tolist = list
if op not in [operator.eq, operator.ne]:
msg = (
r"Invalid comparison between dtype=datetime64\[ns.*\] "
"and (Timestamp|DatetimeArray|list|ndarray)"
)
with pytest.raises(TypeError, match=msg):
op(dr, dz)
with pytest.raises(TypeError, match=msg):
op(dr, tolist(dz))
with pytest.raises(TypeError, match=msg):
op(dr, np.array(tolist(dz), dtype=object))
with pytest.raises(TypeError, match=msg):
op(dz, dr)
with pytest.raises(TypeError, match=msg):
op(dz, tolist(dr))
with pytest.raises(TypeError, match=msg):
op(dz, np.array(tolist(dr), dtype=object))
# The aware==aware and naive==naive comparisons should *not* raise
assert np.all(dr == dr)
assert np.all(dr == tolist(dr))
assert np.all(tolist(dr) == dr)
assert np.all(np.array(tolist(dr), dtype=object) == dr)
assert np.all(dr == np.array(tolist(dr), dtype=object))
assert np.all(dz == dz)
assert np.all(dz == tolist(dz))
assert np.all(tolist(dz) == dz)
assert np.all(np.array(tolist(dz), dtype=object) == dz)
assert np.all(dz == np.array(tolist(dz), dtype=object))
def test_comparison_tzawareness_compat_scalars(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box_with_array)
dz = tm.box_expected(dz, box_with_array)
# Check comparisons against scalar Timestamps
ts = Timestamp("2000-03-14 01:59")
ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
assert np.all(dr > ts)
msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
assert np.all(dz > ts_tz)
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dz, ts)
if op not in [operator.eq, operator.ne]:
# GH#12601: Check comparison against Timestamps and DatetimeIndex
with pytest.raises(TypeError, match=msg):
op(ts, dz)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
# Bug in NumPy? https://github.com/numpy/numpy/issues/13841
# Raising in __eq__ will fallback to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
self, comparison_op, other, tz_aware_fixture, box_with_array
):
op = comparison_op
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
xbox = get_upcast_box(dtarr, other, True)
if op in [operator.eq, operator.ne]:
exbool = op is operator.ne
expected = np.array([exbool, exbool], dtype=bool)
expected = tm.box_expected(expected, xbox)
result = op(dtarr, other)
tm.assert_equal(result, expected)
result = op(other, dtarr)
tm.assert_equal(result, expected)
else:
msg = (
r"Invalid comparison between dtype=datetime64\[ns, .*\] "
f"and {type(other).__name__}"
)
with pytest.raises(TypeError, match=msg):
op(dtarr, other)
with pytest.raises(TypeError, match=msg):
op(other, dtarr)
def test_nat_comparison_tzawareness(self, comparison_op):
# GH#19276
# tzaware DatetimeIndex should not raise when compared to NaT
op = comparison_op
dti = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
expected = np.array([op == operator.ne] * len(dti))
result = op(dti, NaT)
tm.assert_numpy_array_equal(result, expected)
result = op(dti.tz_localize("US/Pacific"), NaT)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
# GH#22074
# regardless of tz, we expect these comparisons are valid
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
pd.timedelta_range("1D", periods=10),
pd.timedelta_range("1D", periods=10).to_series(),
pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
],
ids=lambda x: type(x).__name__,
)
def test_dti_cmp_tdi_tzawareness(self, other):
# GH#22074
        # regression test that we _don't_ call _assert_tzawareness_compat
# when comparing against TimedeltaIndex
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
result = dti == other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = dti != other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
msg = "Invalid comparison between"
with pytest.raises(TypeError, match=msg):
dti < other
with pytest.raises(TypeError, match=msg):
dti <= other
with pytest.raises(TypeError, match=msg):
dti > other
with pytest.raises(TypeError, match=msg):
dti >= other
def test_dti_cmp_object_dtype(self):
# GH#22074
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
other = dti.astype("O")
result = dti == other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
other = dti.tz_localize(None)
result = dti != other
tm.assert_numpy_array_equal(result, expected)
other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
result = dti == other
expected = np.array([True] * 5 + [False] * 5)
tm.assert_numpy_array_equal(result, expected)
        msg = "'>=' not supported between instances of 'Timestamp' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
dti >= other
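# Hedged usage sketch (illustrative only; the helper name is hypothetical): ordering
# comparisons between tz-naive and tz-aware datetime64 values raise TypeError, as
# exercised above in test_comparison_tzawareness_compat.
def _dt64_tzawareness_sketch():
    naive = date_range("2016-01-01", periods=2)
    aware = naive.tz_localize("US/Pacific")
    with pytest.raises(TypeError):
        naive < aware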
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
# This class is intended for "finished" tests that are fully parametrized
# over DataFrame/Series/Index/DatetimeArray
# -------------------------------------------------------------
# Addition/Subtraction of timedelta-like
@pytest.mark.arm_slow
def test_dt64arr_add_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
# GH#22005, GH#22163 check DataFrame doesn't raise TypeError
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng + two_hours
tm.assert_equal(result, expected)
rng += two_hours
tm.assert_equal(rng, expected)
def test_dt64arr_sub_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng - two_hours
tm.assert_equal(result, expected)
rng -= two_hours
tm.assert_equal(rng, expected)
# TODO: redundant with test_dt64arr_add_timedeltalike_scalar
def test_dt64arr_add_td64_scalar(self, box_with_array):
# scalar timedeltas/np.timedelta64 objects
# operate with np.timedelta64 correctly
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
)
dtarr = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(1, "s")
tm.assert_equal(result, expected)
result = np.timedelta64(1, "s") + dtarr
tm.assert_equal(result, expected)
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(5, "ms")
tm.assert_equal(result, expected)
result = np.timedelta64(5, "ms") + dtarr
tm.assert_equal(result, expected)
def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
# GH#23320 special handling for timedelta64("NaT")
tz = tz_naive_fixture
dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS")
other = np.timedelta64("NaT")
expected = DatetimeIndex(["NaT"] * 9, tz=tz)
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
other - obj
def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + tdarr
tm.assert_equal(result, expected)
result = tdarr + dtarr
tm.assert_equal(result, expected)
expected = date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)
expected = tm.box_expected(expected, box_with_array)
result = dtarr - tdarr
tm.assert_equal(result, expected)
msg = "cannot subtract|(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
tdarr - dtarr
# -----------------------------------------------------------------
# Subtraction of datetime-like scalars
@pytest.mark.parametrize(
"ts",
[
Timestamp("2013-01-01"),
Timestamp("2013-01-01").to_pydatetime(),
Timestamp("2013-01-01").to_datetime64(),
],
)
def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
# GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
idx = date_range("2013-01-01", periods=3)._with_freq(None)
idx = tm.box_expected(idx, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = idx - ts
tm.assert_equal(result, expected)
def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
# GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
# for DataFrame operation
dt64 = np.datetime64("2013-01-01")
assert dt64.dtype == "datetime64[D]"
dti = date_range("20130101", periods=3)._with_freq(None)
dtarr = tm.box_expected(dti, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = dtarr - dt64
tm.assert_equal(result, expected)
result = dt64 - dtarr
tm.assert_equal(result, -expected)
def test_dt64arr_sub_timestamp(self, box_with_array):
ser = date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
ser = ser._with_freq(None)
ts = ser[0]
ser = tm.box_expected(ser, box_with_array)
delta_series = Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
expected = tm.box_expected(delta_series, box_with_array)
tm.assert_equal(ser - ts, expected)
tm.assert_equal(ts - ser, -expected)
def test_dt64arr_sub_NaT(self, box_with_array):
# GH#18808
dti = DatetimeIndex([NaT, Timestamp("19900315")])
ser = tm.box_expected(dti, box_with_array)
result = ser - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
dti_tz = dti.tz_localize("Asia/Tokyo")
ser_tz = tm.box_expected(dti_tz, box_with_array)
result = ser_tz - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
# -------------------------------------------------------------
# Subtraction of datetime-like array-like
def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
dti = date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
expected = dti - dti
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
result = obj - obj.astype(object)
tm.assert_equal(result, expected)
def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
dti = date_range("2016-01-01", periods=3, tz=None)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
expected = dtarr - dtarr
result = dtarr - dt64vals
tm.assert_equal(result, expected)
result = dt64vals - dtarr
tm.assert_equal(result, expected)
def test_dt64arr_aware_sub_dt64ndarray_raises(
self, tz_aware_fixture, box_with_array
):
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
msg = "subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dtarr - dt64vals
with pytest.raises(TypeError, match=msg):
dt64vals - dtarr
# -------------------------------------------------------------
# Addition of datetime-like others (invalid)
def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
assert_cannot_add(dtarr, dt64vals)
def test_dt64arr_add_timestamp_raises(self, box_with_array):
# GH#22163 ensure DataFrame doesn't cast Timestamp to i8
idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
ts = idx[0]
idx = tm.box_expected(idx, box_with_array)
assert_cannot_add(idx, ts)
# -------------------------------------------------------------
# Other Invalid Addition/Subtraction
@pytest.mark.parametrize(
"other",
[
3.14,
np.array([2.0, 3.0]),
# GH#13078 datetime +/- Period is invalid
Period("2011-01-01", freq="D"),
# https://github.com/pandas-dev/pandas/issues/10329
time(1, 2, 3),
],
)
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
dtarr = tm.box_expected(dti, box_with_array)
msg = "|".join(
[
"unsupported operand type",
"cannot (add|subtract)",
"cannot use operands with types",
"ufunc '?(add|subtract)'? cannot use operands with types",
"Concatenation operation is not implemented for NumPy arrays",
]
)
assert_invalid_addsub_type(dtarr, other, msg)
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_parr(
self, dti_freq, pi_freq, box_with_array, box_with_array2
):
# GH#20049 subtracting PeriodIndex should raise TypeError
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
pi = dti.to_period(pi_freq)
dtarr = tm.box_expected(dti, box_with_array)
parr = tm.box_expected(pi, box_with_array2)
msg = "|".join(
[
"cannot (add|subtract)",
"unsupported operand",
"descriptor.*requires",
"ufunc.*cannot use operands",
]
)
assert_invalid_addsub_type(dtarr, parr, msg)
def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):
# https://github.com/pandas-dev/pandas/issues/10329
tz = tz_naive_fixture
obj1 = date_range("2012-01-01", periods=3, tz=tz)
obj2 = [time(i, i, i) for i in range(3)]
obj1 = tm.box_expected(obj1, box_with_array)
obj2 = tm.box_expected(obj2, box_with_array)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
# If `x + y` raises, then `y + x` should raise here as well
msg = (
r"unsupported operand type\(s\) for -: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 - obj2
msg = "|".join(
[
"cannot subtract DatetimeArray from ndarray",
"ufunc (subtract|'subtract') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 - obj1
msg = (
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 + obj2
msg = "|".join(
[
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'",
"ufunc (add|'add') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 + obj1
class TestDatetime64DateOffsetArithmetic:
# -------------------------------------------------------------
# Tick DateOffsets
# TODO: parametrize over timezone?
def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser + pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser - pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = -pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
pd.offsets.Second(5) - ser
@pytest.mark.parametrize(
"cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
)
def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):
# GH#4532
# smoke tests for valid DateOffsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
ser = tm.box_expected(ser, box_with_array)
offset_cls = getattr(pd.offsets, cls_name)
ser + offset_cls(5)
offset_cls(5) + ser
ser - offset_cls(5)
def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
# GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
tz = tz_aware_fixture
if tz == "US/Pacific":
dates = date_range("2012-11-01", periods=3, tz=tz)
offset = dates + pd.offsets.Hour(5)
assert dates[0] + pd.offsets.Hour(5) == offset[0]
dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="H")
expected = DatetimeIndex(
["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],
freq="H",
tz=tz,
)
dates = tm.box_expected(dates, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# TODO: sub?
for scalar in [pd.offsets.Hour(5), np.timedelta64(5, "h"), timedelta(hours=5)]:
offset = dates + scalar
tm.assert_equal(offset, expected)
offset = scalar + dates
tm.assert_equal(offset, expected)
# -------------------------------------------------------------
# RelativeDelta DateOffsets
def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
# GH#10699
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
# DateOffset relativedelta fastpath
relative_kwargs = [
("years", 2),
("months", 5),
("days", 3),
("hours", 5),
("minutes", 10),
("seconds", 2),
("microseconds", 5),
]
for i, (unit, value) in enumerate(relative_kwargs):
off = DateOffset(**{unit: value})
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
off = DateOffset(**dict(relative_kwargs[: i + 1]))
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
off - vec
# -------------------------------------------------------------
# Non-Tick, Non-RelativeDelta DateOffsets
# TODO: redundant with test_dt64arr_add_sub_DateOffset? that includes
# tz-aware cases which this does not
@pytest.mark.parametrize(
"cls_and_kwargs",
[
"YearBegin",
("YearBegin", {"month": 5}),
"YearEnd",
("YearEnd", {"month": 5}),
"MonthBegin",
"MonthEnd",
"SemiMonthEnd",
"SemiMonthBegin",
"Week",
("Week", {"weekday": 3}),
"Week",
("Week", {"weekday": 6}),
"BusinessDay",
"BDay",
"QuarterEnd",
"QuarterBegin",
"CustomBusinessDay",
"CDay",
"CBMonthEnd",
"CBMonthBegin",
"BMonthBegin",
"BMonthEnd",
"BusinessHour",
"BYearBegin",
"BYearEnd",
"BQuarterBegin",
("LastWeekOfMonth", {"weekday": 2}),
(
"FY5253Quarter",
{
"qtr_with_extra_week": 1,
"startingMonth": 1,
"weekday": 2,
"variation": "nearest",
},
),
("FY5253", {"weekday": 0, "startingMonth": 2, "variation": "nearest"}),
("WeekOfMonth", {"weekday": 2, "week": 2}),
"Easter",
("DateOffset", {"day": 4}),
("DateOffset", {"month": 5}),
],
)
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("n", [0, 5])
def test_dt64arr_add_sub_DateOffsets(
self, box_with_array, n, normalize, cls_and_kwargs
):
# GH#10699
# assert vectorized operation matches pointwise operations
if isinstance(cls_and_kwargs, tuple):
# If cls_name param is a tuple, then 2nd entry is kwargs for
# the offset constructor
cls_name, kwargs = cls_and_kwargs
else:
cls_name = cls_and_kwargs
kwargs = {}
if n == 0 and cls_name in [
"WeekOfMonth",
"LastWeekOfMonth",
"FY5253Quarter",
"FY5253",
]:
# passing n = 0 is invalid for these offset classes
return
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
offset_cls = getattr(pd.offsets, cls_name)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
offset = offset_cls(n, normalize=normalize, **kwargs)
expected = DatetimeIndex([x + offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + offset)
expected = DatetimeIndex([x - offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - offset)
expected = DatetimeIndex([offset + x for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, offset + vec)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
offset - vec
def test_dt64arr_add_sub_DateOffset(self, box_with_array):
# GH#10699
s = date_range("2000-01-01", "2000-01-31", name="a")
s = tm.box_expected(s, box_with_array)
result = s + DateOffset(years=1)
result2 = DateOffset(years=1) + s
exp = date_range("2001-01-01", "2001-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
result = s - DateOffset(years=1)
exp = date_range("1999-01-01", "1999-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.Day()
result2 = pd.offsets.Day() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-16 00:15:00", tz="US/Central"),
Timestamp("2000-02-16", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.MonthEnd()
result2 = pd.offsets.MonthEnd() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-31 00:15:00", tz="US/Central"),
Timestamp("2000-02-29", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
@pytest.mark.parametrize(
"other",
[
np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]),
np.array([pd.offsets.DateOffset(years=1), pd.offsets.MonthEnd()]),
np.array( # matching offsets
[pd.offsets.DateOffset(years=1), pd.offsets.DateOffset(years=1)]
),
],
)
@pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])
@pytest.mark.parametrize("box_other", [True, False])
def test_dt64arr_add_sub_offset_array(
self, tz_naive_fixture, box_with_array, box_other, op, other
):
# GH#18849
# GH#10699 array of offsets
tz = tz_naive_fixture
dti = date_range("2017-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
expected = DatetimeIndex([op(dti[n], other[n]) for n in range(len(dti))])
expected = tm.box_expected(expected, box_with_array)
if box_other:
other = tm.box_expected(other, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
res = op(dtarr, other)
tm.assert_equal(res, expected)
@pytest.mark.parametrize(
"op, offset, exp, exp_freq",
[
(
"__add__",
DateOffset(months=3, days=10),
[
Timestamp("2014-04-11"),
Timestamp("2015-04-11"),
Timestamp("2016-04-11"),
Timestamp("2017-04-11"),
],
None,
),
(
"__add__",
DateOffset(months=3),
[
Timestamp("2014-04-01"),
Timestamp("2015-04-01"),
Timestamp("2016-04-01"),
Timestamp("2017-04-01"),
],
"AS-APR",
),
(
"__sub__",
DateOffset(months=3, days=10),
[
Timestamp("2013-09-21"),
Timestamp("2014-09-21"),
Timestamp("2015-09-21"),
Timestamp("2016-09-21"),
],
None,
),
(
"__sub__",
DateOffset(months=3),
[
Timestamp("2013-10-01"),
Timestamp("2014-10-01"),
Timestamp("2015-10-01"),
Timestamp("2016-10-01"),
],
"AS-OCT",
),
],
)
def test_dti_add_sub_nonzero_mth_offset(
self, op, offset, exp, exp_freq, tz_aware_fixture, box_with_array
):
# GH 26258
tz = tz_aware_fixture
date = date_range(start="01 Jan 2014", end="01 Jan 2017", freq="AS", tz=tz)
date = tm.box_expected(date, box_with_array, False)
mth = getattr(date, op)
result = mth(offset)
expected = DatetimeIndex(exp, tz=tz)
expected = tm.box_expected(expected, box_with_array, False)
tm.assert_equal(result, expected)
class TestDatetime64OverflowHandling:
# TODO: box + de-duplicate
def test_dt64_overflow_masking(self, box_with_array):
# GH#25317
left = Series([Timestamp("1969-12-31")])
right = Series([NaT])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
expected = TimedeltaIndex([NaT])
expected = tm.box_expected(expected, box_with_array)
result = left - right
tm.assert_equal(result, expected)
def test_dt64_series_arith_overflow(self):
# GH#12534, fixed by GH#19024
dt = Timestamp("1700-01-31")
td = Timedelta("20000 Days")
dti = date_range("1949-09-30", freq="100Y", periods=4)
ser = Series(dti)
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
ser - dt
with pytest.raises(OverflowError, match=msg):
dt - ser
with pytest.raises(OverflowError, match=msg):
ser + td
with pytest.raises(OverflowError, match=msg):
td + ser
ser.iloc[-1] = NaT
expected = Series(
["2004-10-03", "2104-10-04", "2204-10-04", "NaT"], dtype="datetime64[ns]"
)
res = ser + td
tm.assert_series_equal(res, expected)
res = td + ser
tm.assert_series_equal(res, expected)
ser.iloc[1:] = NaT
expected = Series(["91279 Days", "NaT", "NaT", "NaT"], dtype="timedelta64[ns]")
res = ser - dt
tm.assert_series_equal(res, expected)
res = dt - ser
tm.assert_series_equal(res, -expected)
def test_datetimeindex_sub_timestamp_overflow(self):
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
tsneg = Timestamp("1950-01-01")
ts_neg_variants = [
tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype("datetime64[ns]"),
tsneg.to_datetime64().astype("datetime64[D]"),
]
tspos = Timestamp("1980-01-01")
ts_pos_variants = [
tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype("datetime64[ns]"),
tspos.to_datetime64().astype("datetime64[D]"),
]
msg = "Overflow in int64 addition"
for variant in ts_neg_variants:
with pytest.raises(OverflowError, match=msg):
dtimax - variant
expected = Timestamp.max.value - tspos.value
for variant in ts_pos_variants:
res = dtimax - variant
assert res[1].value == expected
expected = Timestamp.min.value - tsneg.value
for variant in ts_neg_variants:
res = dtimin - variant
assert res[1].value == expected
for variant in ts_pos_variants:
with pytest.raises(OverflowError, match=msg):
dtimin - variant
def test_datetimeindex_sub_datetimeindex_overflow(self):
# GH#22492, GH#22508
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
ts_neg = pd.to_datetime(["1950-01-01", "1950-01-01"])
ts_pos = pd.to_datetime(["1980-01-01", "1980-01-01"])
# General tests
expected = Timestamp.max.value - ts_pos[1].value
result = dtimax - ts_pos
assert result[1].value == expected
expected = Timestamp.min.value - ts_neg[1].value
result = dtimin - ts_neg
assert result[1].value == expected
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
dtimax - ts_neg
with pytest.raises(OverflowError, match=msg):
dtimin - ts_pos
# Edge cases
tmin = pd.to_datetime([Timestamp.min])
t1 = tmin + Timedelta.max + Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
t1 - tmin
tmax = pd.to_datetime([Timestamp.max])
t2 = tmax + Timedelta.min - Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
tmax - t2
class TestTimestampSeriesArithmetic:
def test_empty_series_add_sub(self):
# GH#13844
a = Series(dtype="M8[ns]")
b = Series(dtype="m8[ns]")
tm.assert_series_equal(a, a + b)
tm.assert_series_equal(a, a - b)
tm.assert_series_equal(a, b + a)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
b - a
def test_operators_datetimelike(self):
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[
Timestamp("20111230"),
Timestamp("20120101"),
Timestamp("20120103"),
]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[
Timestamp("20111231"),
Timestamp("20120102"),
Timestamp("20120104"),
]
)
dt1 - dt2
dt2 - dt1
# datetime64 with timedelta
dt1 + td1
td1 + dt1
dt1 - td1
# timedelta with datetime64
td1 + dt1
dt1 + td1
def test_dt64ser_sub_datetime_dtype(self):
ts = Timestamp(datetime(1993, 1, 7, 13, 30, 00))
dt = datetime(1993, 6, 22, 13, 30)
ser = Series([ts])
result = pd.to_timedelta(np.abs(ser - dt))
assert result.dtype == "timedelta64[ns]"
# -------------------------------------------------------------
# TODO: This next block of tests came from tests.series.test_operators,
# needs to be de-duplicated and parametrized over `box` classes
def test_operators_datetimelike_invalid(self, all_arithmetic_operators):
# these are all TypeError ops
op_str = all_arithmetic_operators
def check(get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
op = getattr(get_ser, op_str, None)
# Previously, _validate_for_numeric_binop in core/indexes/base.py
# did this for us.
with pytest.raises(
TypeError, match="operate|[cC]annot|unsupported operand"
):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[Timestamp("20111230"), Timestamp("20120101"), Timestamp("20120103")]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[Timestamp("20111231"), Timestamp("20120102"), Timestamp("20120104")]
)
if op_str not in ["__sub__", "__rsub__"]:
check(dt1, dt2)
# ## datetime64 with timedelta ###
# TODO(jreback) __rsub__ should raise?
if op_str not in ["__add__", "__radd__", "__sub__"]:
check(dt1, td1)
# 8260, 10763
# datetime64 with tz
tz = "US/Eastern"
dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
td2 = td1.copy()
td2.iloc[1] = np.nan
if op_str not in ["__add__", "__radd__", "__sub__", "__rsub__"]:
check(dt2, td2)
def test_sub_single_tz(self):
# GH#12290
s1 = Series([Timestamp("2016-02-10", tz="America/Sao_Paulo")])
s2 = Series([Timestamp("2016-02-08", tz="America/Sao_Paulo")])
result = s1 - s2
expected = Series([Timedelta("2days")])
tm.assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta("-2days")])
tm.assert_series_equal(result, expected)
def test_dt64tz_series_sub_dtitz(self):
# GH#19071 subtracting tzaware DatetimeIndex from tzaware Series
# (with same tz) raises, fixed by #19024
dti = date_range("1999-09-30", periods=10, tz="US/Pacific")
ser = Series(dti)
expected = Series(TimedeltaIndex(["0days"] * 10))
res = dti - ser
tm.assert_series_equal(res, expected)
res = ser - dti
tm.assert_series_equal(res, expected)
def test_sub_datetime_compat(self):
# see GH#14088
s = Series([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), NaT])
dt = datetime(2016, 8, 22, 12, tzinfo=pytz.utc)
exp = Series([Timedelta("1 days"), NaT])
tm.assert_series_equal(s - dt, exp)
tm.assert_series_equal(s - Timestamp(dt), exp)
def test_dt64_series_add_mixed_tick_DateOffset(self):
# GH#4532
# operate with pd.offsets
s = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series(
[Timestamp("20130101 9:06:00.005"), Timestamp("20130101 9:07:00.005")]
)
tm.assert_series_equal(result, expected)
def test_datetime64_ops_nat(self):
# GH#11349
datetime_series = Series([NaT, Timestamp("19900315")])
nat_series_dtype_timestamp = Series([NaT, NaT], dtype="datetime64[ns]")
single_nat_dtype_datetime = Series([NaT], dtype="datetime64[ns]")
# subtraction
tm.assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + datetime_series
tm.assert_series_equal(
-NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + nat_series_dtype_timestamp
# addition
tm.assert_series_equal(
nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
)
tm.assert_series_equal(
NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
tm.assert_series_equal(
nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
)
tm.assert_series_equal(
NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
# -------------------------------------------------------------
# Invalid Operations
# TODO: this block also needs to be de-duplicated and parametrized
@pytest.mark.parametrize(
"dt64_series",
[
Series([Timestamp("19900315"),
|
Timestamp("19900315")
|
pandas.Timestamp
|
import pytest
import pandas as pd
import numpy as np
@pytest.fixture(scope="function")
def set_helpers(request):
rand = np.random.RandomState(1337)
request.cls.ser_length = 120
request.cls.window = 12
request.cls.returns = pd.Series(
rand.randn(1, 120)[0] / 100.0,
index=pd.date_range("2000-1-30", periods=120, freq="M"),
)
request.cls.factor_returns = pd.Series(
rand.randn(1, 120)[0] / 100.0,
index=pd.date_range("2000-1-30", periods=120, freq="M"),
)
@pytest.fixture(scope="session")
def input_data():
simple_benchmark = pd.Series(
np.array([0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0]) / 100,
index=pd.date_range("2000-1-30", periods=9, freq="D"),
)
rand = np.random.RandomState(1337)
noise = pd.Series(
rand.normal(0, 0.001, 1000),
index=pd.date_range("2000-1-30", periods=1000, freq="D", tz="UTC"),
)
inv_noise = noise.multiply(-1)
noise_uniform = pd.Series(
rand.uniform(-0.01, 0.01, 1000),
index=pd.date_range("2000-1-30", periods=1000, freq="D", tz="UTC"),
)
random_100k = pd.Series(rand.randn(100_000))
mixed_returns = pd.Series(
np.array([np.nan, 1.0, 10.0, -4.0, 2.0, 3.0, 2.0, 1.0, -10.0]) / 100,
index=pd.date_range("2000-1-30", periods=9, freq="D"),
)
one = [
-0.00171614,
0.01322056,
0.03063862,
-0.01422057,
-0.00489779,
0.01268925,
-0.03357711,
0.01797036,
]
two = [
0.01846232,
0.00793951,
-0.01448395,
0.00422537,
-0.00339611,
0.03756813,
0.0151531,
0.03549769,
]
# Sparse noise, same as noise but with np.nan sprinkled in
replace_nan = rand.choice(noise.index.tolist(), rand.randint(1, 10))
sparse_noise = noise.replace(replace_nan, np.nan)
# Flat line tz
flat_line_1_tz = pd.Series(
np.linspace(0.01, 0.01, num=1000),
index=pd.date_range("2000-1-30", periods=1000, freq="D", tz="UTC"),
)
# Sparse flat line at 0.01
# replace_nan = rand.choice(noise.index.tolist(), rand.randint(1, 10))
sparse_flat_line_1_tz = flat_line_1_tz.replace(replace_nan, np.nan)
df_index_simple = pd.date_range("2000-1-30", periods=8, freq="D")
df_index_week = pd.date_range("2000-1-30", periods=8, freq="W")
df_index_month = pd.date_range("2000-1-30", periods=8, freq="M")
df_week = pd.DataFrame(
{
"one": pd.Series(one, index=df_index_week),
"two": pd.Series(two, index=df_index_week),
}
)
df_month = pd.DataFrame(
{
"one": pd.Series(one, index=df_index_month),
"two": pd.Series(two, index=df_index_month),
}
)
df_simple = pd.DataFrame(
{
"one": pd.Series(one, index=df_index_simple),
"two": pd.Series(two, index=df_index_simple),
}
)
input_one = [
np.nan,
0.01322056,
0.03063862,
-0.01422057,
-0.00489779,
0.01268925,
-0.03357711,
0.01797036,
]
input_two = [
0.01846232,
0.00793951,
-0.01448395,
0.00422537,
-0.00339611,
0.03756813,
0.0151531,
np.nan,
]
df_index = pd.date_range("2000-1-30", periods=8, freq="D")
return {
# Simple benchmark, no drawdown
"simple_benchmark": simple_benchmark,
"simple_benchmark_w_noise": simple_benchmark
+ rand.normal(0, 0.001, len(simple_benchmark)),
"simple_benchmark_df": simple_benchmark.rename("returns").to_frame(),
# All positive returns, small variance
"positive_returns": pd.Series(
np.array([1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]) / 100,
index=pd.date_range("2000-1-30", periods=9, freq="D"),
),
# All negative returns
"negative_returns": pd.Series(
np.array([0.0, -6.0, -7.0, -1.0, -9.0, -2.0, -6.0, -8.0, -5.0])
/ 100,
index=pd.date_range("2000-1-30", periods=9, freq="D"),
),
# All negative returns
"all_negative_returns": pd.Series(
np.array([-2.0, -6.0, -7.0, -1.0, -9.0, -2.0, -6.0, -8.0, -5.0])
/ 100,
index=pd.date_range("2000-1-30", periods=9, freq="D"),
),
# Positive and negative returns with max drawdown
"mixed_returns": mixed_returns,
# Weekly returns
"weekly_returns": pd.Series(
np.array([0.0, 1.0, 10.0, -4.0, 2.0, 3.0, 2.0, 1.0, -10.0]) / 100,
index=pd.date_range("2000-1-30", periods=9, freq="W"),
),
# Monthly returns
"monthly_returns": pd.Series(
np.array([0.0, 1.0, 10.0, -4.0, 2.0, 3.0, 2.0, 1.0, -10.0]) / 100,
index=pd.date_range("2000-1-30", periods=9, freq="M"),
),
# Series of length 1
"one_return": pd.Series(
np.array([1.0]) / 100,
index=pd.date_range("2000-1-30", periods=1, freq="D"),
),
"udu_returns": pd.Series(
np.array([10, -10, 10]) / 100,
index=pd.date_range("2000-1-30", periods=3, freq="D"),
),
# Empty series
"empty_returns": pd.Series(
np.array([]) / 100,
index=pd.date_range("2000-1-30", periods=0, freq="D"),
),
# Random noise
"noise": noise,
"noise_uniform": noise_uniform,
"random_100k": random_100k,
# Random noise inv
"inv_noise": inv_noise,
# Flat line
"flat_line_0": pd.Series(
np.linspace(0, 0, num=1000),
index=pd.date_range("2000-1-30", periods=1000, freq="D", tz="UTC"),
),
"flat_line_1": pd.Series(
np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]) / 100,
index=pd.date_range("2000-1-30", periods=9, freq="D"),
),
# Flat line with tz
"flat_line_1_tz": pd.Series(
np.linspace(0.01, 0.01, num=1000),
index=pd.date_range("2000-1-30", periods=1000, freq="D", tz="UTC"),
),
"flat_line_yearly": pd.Series(
np.array([3.0, 3.0, 3.0]) / 100,
index=pd.date_range("2000-1-30", periods=3, freq="A"),
),
# Positive line
"pos_line": pd.Series(
np.linspace(0, 1, num=1000),
index=pd.date_range("2000-1-30", periods=1000, freq="D", tz="UTC"),
),
# Negative line
"neg_line": pd.Series(
np.linspace(0, -1, num=1000),
index=pd.date_range("2000-1-30", periods=1000, freq="D", tz="UTC"),
),
# Sparse noise, same as noise but with np.nan sprinkled in
"sparse_noise": sparse_noise,
# Sparse flat line at 0.01
"sparse_flat_line_1_tz": sparse_flat_line_1_tz,
"one": one,
"two": two,
"df_index_simple": df_index_simple,
"df_index_week": df_index_week,
"df_index_month": df_index_month,
"df_simple": df_simple,
"df_week": df_week,
"df_month": df_month,
"df_empty":
|
pd.DataFrame()
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 15 2020
@author: simonfilhol
Toolbox to compute, analyze and work with spatial observations of snow
"""
import pandas as pd
import numpy as np
import gdal
import scipy.interpolate as interp
###############################################################################################################
# SNOW from a Drone
###############################################################################################################
###############################################################################################################
# SNOW from a dGPS track
###############################################################################################################
# Function to aggregate non uniform GPS points to a regular grid
def aggregate_SD_track(df, extent=[419347, 420736, 6716038, 6717426], spatialResolution=0.2):
'''
Function to resample/aggregate snow depth derived from a dGPS track (inherently not homogeneous in space) at a given spatial resolution.
:param df: dataframe containing at least the following columns ['East', 'Elev', 'North', 'SD']
:param extent: extent to consider; drop all measurements outside this range. [xmin, xmax, ymin, ymax]
:param spatialResolution: spatial resolution in meter
:return: a resampled version of the original dataframe
'''
E_bin = np.arange(extent[0], extent[1], spatialResolution)
N_bin = np.arange(extent[2], extent[3], spatialResolution)
dates = np.unique(df.Date)
resampled = pd.DataFrame()
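# For each survey date, keep only the points that fall inside the extent, then bin the
# East/North coordinates onto the regular grid (pd.cut) and aggregate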
for i, date in enumerate(dates):
# keep only this date's points that fall inside the extent
tmp = df.loc[(df.Date == date) & (df.East > extent[0]) & (df.East < extent[1]) & (df.North > extent[2]) & (df.North < extent[3])]
if tmp.shape[0]>0:
E_cuts =
|
pd.cut(tmp.East, E_bin, labels=False)
|
pandas.cut
|
# coding: utf-8
# # Python for Padawans
#
# This tutorial will go throughthe basic data wrangling workflow I'm sure you all love to hate, in Python!
# FYI: I come from a R background (aka I'm not a proper programmer) so if you see any formatting issues please cut me a bit of slack.
#
# **The aim for this post is to show people how to easily move their R workflows to Python (especially pandas/scikit)**
#
# One thing I especially like is how consistent all the functions are. You don't need to switch up style like you have to when you move from base R to dplyr etc.
# |
# And also, it's apparently much easier to push code to production using Python than R. So there's that.
#
# ### 1. Reading in libraries
# In[ ]:
get_ipython().run_line_magic('matplotlib', 'inline')
import os
import pandas as pd
from matplotlib import pyplot as plt
import numpy as np
import math
# #### Don't forget that %matplotlib function. Otherwise your graphs will pop up in separate windows and stop the execution of further cells. And nobody got time for that.
#
# ### 2. Reading in data
# In[ ]:
data = pd.read_csv('../input/loan.csv', low_memory=False)
data.drop(['id', 'member_id', 'emp_title'], axis=1, inplace=True)
data.replace('n/a', np.nan,inplace=True)
data.emp_length.fillna(value=0,inplace=True)
data['emp_length'].replace(to_replace='[^0-9]+', value='', inplace=True, regex=True)
data['emp_length'] = data['emp_length'].astype(int)
data['term'] = data['term'].apply(lambda x: x.lstrip())
# ### 3. Basic plotting using Seaborn
#
# Now let's make some pretty graphs. Coming from R I definitely prefer ggplot2 but the more I use Seaborn, the more I like it. If you kinda forget about adding "+" to your graphs and instead use the dot operator, it does essentially the same stuff.
#
# **And I've just found out that you can create your own style sheets to make life easier. Wahoo!**
#
# But anyway, below I'll show you how to format a decent looking Seaborn graph, as well as how to summarise a given dataframe.
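# #### Quick teaser: a minimal sketch (assuming the `data` frame loaded above) of how a styled Seaborn bar chart of the `emp_length` counts might look. Treat it as illustrative only; the real walkthrough continues below.
# In[ ]:
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_style("whitegrid")
emp_counts = data['emp_length'].value_counts().sort_index()
ax = sns.barplot(x=emp_counts.index, y=emp_counts.values, color="steelblue")
ax.set_xlabel("Employment length (years)")
ax.set_ylabel("Number of loans")
plt.tight_layout()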
# In[ ]:
import seaborn as sns
import matplotlib
s =
|
pd.value_counts(data['emp_length'])
|
pandas.value_counts
|
# util.py
from __future__ import print_function
from collections import Mapping, OrderedDict
import datetime
import itertools
import random
import warnings
import pandas as pd
np = pd.np
from scipy import integrate
from matplotlib import pyplot as plt
import seaborn
from scipy.optimize import minimize
from scipy.signal import correlate
from titlecase import titlecase
from pug.nlp.util import listify, fuzzy_get, make_timestamp
def dropna(x):
"""Delete all NaNs and and infinities in a sequence of real values
Returns:
list: Array of all values in x that are between -inf and +inf, exclusive
"""
return [x_i for x_i in listify(x) if float('-inf') < x_i < float('inf')]
def rms(x):
""""Root Mean Square"
Arguments:
x (seq of float): A sequence of numerical values
Returns:
The square root of the average of the squares of the values
math.sqrt(sum(x_i**2 for x_i in x) / len(x))
or
return (np.array(x) ** 2).mean() ** 0.5
>>> rms([0, 2, 4, 4])
3.0
"""
try:
return (np.array(x) ** 2).mean() ** 0.5
except:
x = np.array(dropna(x))
invN = 1.0 / len(x)
return (sum(invN * (x_i ** 2) for x_i in x)) ** .5
def rmse(target, prediction, relative=False, percent=False):
"""Root Mean Square Error
This seems like a simple formula that you'd never need to create a function for.
But my mistakes on coding challenges have convinced me that I do need it,
as a reminder of important tweaks, if nothing else.
>>> rmse([0, 1, 4, 3], [2, 1, 0, -1])
3.0
>>> rmse([0, 1, 4, 3], [2, 1, 0, -1], relative=True) # doctest: +ELLIPSIS
1.2247...
>>> rmse([0, 1, 4, 3], [2, 1, 0, -1], percent=True) # doctest: +ELLIPSIS
122.47...
"""
relative = relative or percent
prediction =
|
pd.np.array(prediction)
|
pandas.np.array
|
from libs.grammar.q_learner import QValues
from test_base import Test
import pandas as pd
import numpy as np
class TestQValues(Test):
def __init__(self):
self.q_csv_path = 'needed_for_testing/q_values.csv'
def test_load_unload(self):
qvals = QValues()
qvals.load_q_values(self.q_csv_path)
qvals.save_to_csv(self.q_csv_path + '.test')
qcsv_before =
|
pd.read_csv(self.q_csv_path)
|
pandas.read_csv
|
#!/usr/bin/python2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
print("[*] Program reading and analyzing data")
#Import data and store each line as list
data = open("stockData.csv", "r")
lines = data.readlines()
#split into 2d array
for item in range(len(lines)):
lines[item] = lines[item].split(",")
#remove extraneous commas and endlines
for item in lines:
for i in range(len(item)):
item[i] = item[i].replace("\n","")
item[i] = item[i].replace(",","")
item[i] = item[i].replace("\"","")
#creating data frame
stockData = pd.DataFrame(lines[2:],columns = lines[0])
priceData = stockData["close"]
#creating 10 point forward-looking averages (despite the 'mediansTEN' name, this is a mean, not a median) and adding to end of dataframe
mediansTEN = []
for point in range(len(priceData)):
median = 0.0
total = 10.0
for i in range(10):
try:
median += float(priceData[point+i])
except:
total -= 1
mediansTEN.append(median/total)
stockData["mediansTEN"] = pd.Series(mediansTEN, index=stockData.index)
#creating 20 point forward-looking averages (a mean, stored in 'mediansTWENTY') and adding to end of dataframe
mediansTWENTY = []
for point in range(len(priceData)):
median = 0.0
total = 20.0
for i in range(20):
try:
median += float(priceData[point+i])
except:
total -= 1
mediansTWENTY.append(median/total)
stockData["mediansTWENTY"] = pd.Series(mediansTWENTY, index=stockData.index)
#creating 50 point forward-looking averages (a mean, stored in 'mediansFIFTY') and adding to end of dataframe
mediansFIFTY = []
for point in range(len(priceData)):
median = 0.0
total = 50.0
for i in range(50):
try:
median += float(priceData[point+i])
except:
total -= 1
mediansFIFTY.append(median/total)
stockData["mediansFIFTY"] = pd.Series(mediansFIFTY, index=stockData.index)
#creating 100 point forward-looking averages (a mean, stored in 'mediansHUNDRED') and adding to end of dataframe
mediansHUNDRED = []
for point in range(len(priceData)):
median = 0.0
total = 100.0
for i in range(100):
try:
median += float(priceData[point+i])
except:
total -= 1
mediansHUNDRED.append(median/total)
stockData["mediansHUNDRED"] =
|
pd.Series(mediansHUNDRED, index=stockData.index)
|
pandas.Series
|
import json
from elasticsearch import Elasticsearch
from elasticsearch import logger as es_logger
from collections import defaultdict, Counter
import re
import os
from pathlib import Path
from datetime import datetime, date
# Preprocess terms for TF-IDF
import numpy as np
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from num2words import num2words
# end of preprocess
# LDA
from gensim import corpora, models
import pyLDAvis.gensim
# print in color
from termcolor import colored
# end LDA
import pandas as pd
import geopandas
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from nltk.corpus import wordnet
# SPARQL
import sparql
# progress bar
from tqdm import tqdm
# ploting
import matplotlib.pyplot as plt
from matplotlib_venn_wordcloud import venn3_wordcloud
# multiprocessing
# BERT
from transformers import pipeline
# LOG
import logging
from logging.handlers import RotatingFileHandler
def biotexInputBuilder(tweetsofcity):
"""
Build and save a file formatted for Biotex analysis
:param tweetsofcity: dictionary of { tweets, created_at }
:return: none
"""
biotexcorpus = []
for city in tweetsofcity:
# Get all tweets for a city :
listOfTweetsByCity = [tweets['tweet'] for tweets in tweetsofcity[city]]
# convert this list in a big string of tweets by city
document = '\n'.join(listOfTweetsByCity)
biotexcorpus.append(document)
biotexcorpus.append('\n')
biotexcorpus.append("##########END##########")
biotexcorpus.append('\n')
textToSave = "".join(biotexcorpus)
corpusfilename = "elastic-UK"
biotexcopruspath = Path('elasticsearch/analyse')
biotexCorpusPath = str(biotexcopruspath) + '/' + corpusfilename
print("\t saving file : " + str(biotexCorpusPath))
f = open(biotexCorpusPath, 'w')
f.write(textToSave)
f.close()
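# The resulting corpus file therefore alternates one concatenated document per city with an
# end-of-document marker, e.g.:
# <all tweets of city 1>
# ##########END##########
# <all tweets of city 2>
# ##########END##########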
def preprocessTerms(document):
"""
Pre-process terms according to
https://towardsdatascience.com/tf-idf-for-document-ranking-from-scratch-in-python-on-real-world-dataset-796d339a4089
/!\ Be careful: it has a long execution time
:param:
:return:
"""
def lowercase(t):
return np.char.lower(t)
def removesinglechar(t):
words = word_tokenize(str(t))
new_text = ""
for w in words:
if len(w) > 1:
new_text = new_text + " " + w
return new_text
def removestopwords(t):
stop_words = stopwords.words('english')
words = word_tokenize(str(t))
new_text = ""
for w in words:
if w not in stop_words:
new_text = new_text + " " + w
return new_text
def removeapostrophe(t):
return np.char.replace(t, "'", "")
def removepunctuation(t):
symbols = "!\"#$%&()*+-./:;<=>?@[\]^_`{|}~\n"
for i in range(len(symbols)):
data = np.char.replace(t, symbols[i], ' ')
data = np.char.replace(t, " ", " ")
data = np.char.replace(t, ',', '')
return data
def convertnumbers(t):
tokens = word_tokenize(str(t))
new_text = ""
for w in tokens:
try:
w = num2words(int(w))
except:
a = 0
new_text = new_text + " " + w
new_text = np.char.replace(new_text, "-", " ")
return new_text
doc = lowercase(document)
doc = removesinglechar(doc)
doc = removestopwords(doc)
doc = removeapostrophe(doc)
doc = removepunctuation(doc)
doc = removesinglechar(doc) # apostrophe create new single char
return doc
def biotexAdaptativeBuilderAdaptative(listOfcities='all', spatialLevel='city', period='all', temporalLevel='day'):
"""
Build a Biotex input file, well formatted at the requested level, by concatenating cities' tweets
:param listOfcities:
:param spatialLevel:
:param period:
:param temporalLevel:
:return:
"""
matrixAggDay = pd.read_csv("elasticsearch/analyse/matrixAggDay.csv")
# concat date with city
matrixAggDay['city'] = matrixAggDay[['city', 'day']].agg('_'.join, axis=1)
del matrixAggDay['day']
## change index
matrixAggDay.set_index('city', inplace=True)
matrixFiltred = spatiotemporelFilter(matrix=matrixAggDay, listOfcities=listOfcities,
spatialLevel='state', period=period)
## Pre-process :Create 4 new columns : city, State, Country and date
def splitindex(row):
return row.split("_")
matrixFiltred["city"], matrixFiltred["state"], matrixFiltred["country"], matrixFiltred["date"] = \
zip(*matrixFiltred.index.map(splitindex))
# Agregate by level
if spatialLevel == 'city':
# do nothing
pass
elif spatialLevel == 'state':
matrixFiltred = matrixFiltred.groupby('state')['tweetsList'].apply('.\n'.join).reset_index()
elif spatialLevel == 'country':
matrixFiltred = matrixFiltred.groupby('country')['tweetsList'].apply('.\n'.join).reset_index()
# Format biotex input file
biotexcorpus = []
for index, row in matrixFiltred.iterrows():
document = row['tweetsList']
biotexcorpus.append(document)
biotexcorpus.append('\n')
biotexcorpus.append("##########END##########")
biotexcorpus.append('\n')
textToSave = "".join(biotexcorpus)
corpusfilename = "elastic-UK-adaptativebiotex"
biotexcopruspath = Path('elasticsearch/analyse')
biotexCorpusPath = str(biotexcopruspath) + '/' + corpusfilename
print("\t saving file : " + str(biotexCorpusPath))
f = open(biotexCorpusPath, 'w')
f.write(textToSave)
f.close()
def ldHHTFIDF(listOfcities):
""" /!\ for testing only !!!!
Only works if the number of states equals the number of cities,
i.e. for the UK, working on 4 states with their capitals...
"""
print(colored("------------------------------------------------------------------------------------------", 'red'))
print(colored(" - UNDER DEV !!! - ", 'red'))
print(colored("------------------------------------------------------------------------------------------", 'red'))
tfidfwords = pd.read_csv("elasticsearch/analyse/TFIDFadaptativeBiggestScore.csv", index_col=0)
texts = pd.read_csv("elasticsearch/analyse/matrixAggDay.csv", index_col=1)
listOfStatesTopics = []
for i, citystate in enumerate(listOfcities):
city = str(listOfcities[i].split("_")[0])
state = str(listOfcities[i].split("_")[1])
# print(str(i) + ": " + str(state) + " - " + city)
# tfidfwords = [tfidfwords.iloc[0]]
dictionary = corpora.Dictionary([tfidfwords.loc[state]])
textfilter = texts.loc[texts.index.str.startswith(city + "_")]
corpus = [dictionary.doc2bow(text.split()) for text in textfilter.tweetsList]
# Find the better nb of topics :
## Coherence measure C_v : Normalised Pointwise Mutual Information (NPMI : co-occurrence probability)
## i.e. degree of semantic similarity between high-scoring words in the topic
## and cosine similarity
nbtopics = range(2, 35)
coherenceScore = pd.Series(index=nbtopics, dtype=float)
for n in nbtopics:
lda = models.ldamodel.LdaModel(corpus=corpus, id2word=dictionary, num_topics=n)
# Compute coherence score
## Split each row values
textssplit = textfilter.tweetsList.apply(lambda x: x.split()).values
coherence = models.CoherenceModel(model=lda, texts=textssplit, dictionary=dictionary, coherence='c_v')
coherence_result = coherence.get_coherence()
coherenceScore[n] = coherence_result
# print("level: " + str(state) + " - NB: " + str(n) + " - coherence LDA: " + str(coherenceScore[n]))
# Relaunch LDA with the best nbtopic
nbTopicOptimal = coherenceScore.idxmax()
lda = models.ldamodel.LdaModel(corpus=corpus, id2word=dictionary, num_topics=nbTopicOptimal)
# save and visualisation
## save
for topic, listwords in enumerate(lda.show_topics()):
stateTopic = {'state': state}
ldaOuput = str(listwords).split(" + ")[1:]
for i, word in enumerate(ldaOuput):
# reformat lda output for each word of topics
stateTopic[i] = ''.join(x for x in word if x.isalpha())
listOfStatesTopics.append(stateTopic)
## Visualisation
try:
vis = pyLDAvis.gensim.prepare(lda, corpus, dictionary)
pyLDAvis.save_html(vis, "elasticsearch/analyse/lda/lda-tfidf_" + str(state) + ".html")
except:
print("saving pyLDAvis failed. Nb of topics for " + state + ": " + nbTopicOptimal)
# Save file
listOfStatesTopicsCSV = pd.DataFrame(listOfStatesTopics)
listOfStatesTopicsCSV.to_csv("elasticsearch/analyse/lda/topicBySate.csv")
def wordnetCoverage(pdterms):
"""
Add an additional boolean column indicating whether the term is in WordNet
:param pdterms: pd.DataFrame of terms. Must have a column named "terms"
:return: pdterms with an additional boolean column indicating whether the term is in WordNet
"""
# Add a wordnet column boolean type : True if word is in wordnet, False otherwise
pdterms['wordnet'] = False
# Loop on terms and check if there are in wordnet
for index, row in pdterms.iterrows():
if len(wordnet.synsets(row['terms'])) != 0:
pdterms.at[index, 'wordnet'] = True
return pdterms
def sparqlquery(thesaurus, term):
"""
SPARQL query. This method has been factored out so it can also be used with multiprocessing
:param thesaurus: which thesaurus to query ? agrovoc or mesh
:param term: term to align with thesaurus
:return: sparql query result
"""
# Define MeSH sparql endpoint and query
endpointmesh = 'http://id.nlm.nih.gov/mesh/sparql'
qmesh = (
'PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>'
'PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>'
'PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>'
'PREFIX owl: <http://www.w3.org/2002/07/owl#>'
'PREFIX meshv: <http://id.nlm.nih.gov/mesh/vocab#>'
'PREFIX mesh: <http://id.nlm.nih.gov/mesh/>'
'PREFIX mesh2020: <http://id.nlm.nih.gov/mesh/2020/>'
'PREFIX mesh2019: <http://id.nlm.nih.gov/mesh/2019/>'
'PREFIX mesh2018: <http://id.nlm.nih.gov/mesh/2018/>'
''
'ask '
'FROM <http://id.nlm.nih.gov/mesh> '
'WHERE { '
' ?meshTerms a meshv:Term .'
' ?meshTerms meshv:prefLabel ?label .'
' FILTER(lang(?label) = "en").'
' filter(REGEX(?label, "^' + str(term) + '$", "i"))'
''
'}'
)
# Define agrovoc sparql endpoint and query
endpointagrovoc = 'http://agrovoc.uniroma2.it/sparql'
qagrovoc = ('PREFIX skos: <http://www.w3.org/2004/02/skos/core#> '
'PREFIX skosxl: <http://www.w3.org/2008/05/skos-xl#> '
'ask WHERE {'
'?myterm skosxl:literalForm ?labelAgro.'
'FILTER(lang(?labelAgro) = "en").'
'filter(REGEX(?labelAgro, "^' + str(term) + '(s)*$", "i"))'
'}')
# query mesh
if thesaurus == "agrovoc":
q = qagrovoc
endpoint = endpointagrovoc
elif thesaurus == "mesh":
q = qmesh
endpoint = endpointmesh
else:
raise Exception('Wrong thesaurus given')
try:
result = sparql.query(endpoint, q, timeout=30)
# Sometimes the endpoint fails on a request.
# SparqlException is raised by sparql-client if the timeout is reached;
# other exceptions (not yet identified) occur when the endpoint returns a badly formatted answer
except:
result = "endpoint error"
return result
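# Illustrative usage (the term is hypothetical): sparqlquery('mesh', 'influenza') returns a
# result object whose hasresult() is True when the thesaurus contains the term, or the
# string "endpoint error" if the endpoint failed (see agrovocCoverage/meshCoverage below)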
def agrovocCoverage(pdterms):
"""
Add an additional boolean column indicating whether the term is in Agrovoc
:param pdterms: same as wordnetCoverage
:return: same as wordnetCoverage
"""
# Log number of error raised by sparql endpoint
endpointerror = 0
# Add a agrovoc column boolean type : True if terms is in Agrovoc
pdterms['agrovoc'] = False
# Loop on term
for index, row in tqdm(pdterms.iterrows(), total=pdterms.shape[0], desc="agrovoc"):
# Build SPARQL query
term = row['terms']
result = sparqlquery('agrovoc', term)
if result == "endpoint error":
endpointerror += 1
pdterms.at[index, 'agrovoc'] = "Error"
elif result.hasresult():
pdterms.at[index, 'agrovoc'] = True
print("Agrovoc number of error: " + str(endpointerror))
return pdterms
def meshCoverage(pdterms):
"""
Add an additional boolean column indicating whether the term is in MeSH
:param pdterms: same as wordnetCoverage
:return: same as wordnetCoverage
"""
# Log number of error raised by sparql endpoint
endpointerror = 0
# Add a MeSH column boolean type : True if terms is in Mesh
pdterms['mesh'] = False
# Loop on term with multiprocessing
for index, row in tqdm(pdterms.iterrows(), total=pdterms.shape[0], desc="mesh"):
# Build SPARQL query
term = row['terms']
result = sparqlquery('mesh', term)
if result == "endpoint error":
endpointerror += 1
pdterms.at[index, 'mesh'] = "Error"
elif result.hasresult():
pdterms.at[index, 'mesh'] = True
print("Mesh number of error: " + str(endpointerror))
return pdterms
def compareWithHTFIDF(number_of_term, dfToCompare, repToSave):
"""
Only used for ECIR2020 not for NLDB2021
:param number_of_term:
:param dfToCompare:
:param repToSave:
:return:
"""
# Stack / concatenate all terms from all states in one column
HTFIDFUniquedf = concatenateHTFIDFBiggestscore()[:number_of_term]
# select N first terms
dfToCompare = dfToCompare[:number_of_term]
common = pd.merge(dfToCompare, HTFIDFUniquedf, left_on='terms', right_on='terms', how='inner')
# del common['score']
common = common.terms.drop_duplicates()
common = common.reset_index()
del common['index']
common.to_csv("elasticsearch/analyse/" + repToSave + "/common.csv")
# Get what terms are specific to Adapt-TF-IDF
print(dfToCompare)
HTFIDFUniquedf['terms'][~HTFIDFUniquedf['terms'].isin(dfToCompare['terms'])].dropna()
condition = HTFIDFUniquedf['terms'].isin(dfToCompare['terms'])
specificHTFIDF = HTFIDFUniquedf.drop(HTFIDFUniquedf[condition].index)
specificHTFIDF = specificHTFIDF.reset_index()
del specificHTFIDF['index']
specificHTFIDF.to_csv("elasticsearch/analyse/" + repToSave + "/specific-H-TFIDF.csv")
# Get what terms are specific to dfToCompare
dfToCompare['terms'][~dfToCompare['terms'].isin(HTFIDFUniquedf['terms'])].dropna()
condition = dfToCompare['terms'].isin(HTFIDFUniquedf['terms'])
specificdfToCompare = dfToCompare.drop(dfToCompare[condition].index)
specificdfToCompare = specificdfToCompare.reset_index()
del specificdfToCompare['index']
specificdfToCompare.to_csv("elasticsearch/analyse/" + repToSave + "/specific-reference.csv")
# Print stats
percentIncommon = len(common) / len(HTFIDFUniquedf) * 100
percentOfSpecificHTFIDF = len(specificHTFIDF) / len(HTFIDFUniquedf) * 100
print("Percent in common " + str(percentIncommon))
print("Percent of specific at H-TFIDF : " + str(percentOfSpecificHTFIDF))
def HTFIDF_comparewith_TFIDF_TF():
"""
Only used for ECIR2020 not for NLDB2021
.. warnings:: /!\ under dev !!!. See TODO below
.. todo::
- Remove filter and pass it as args :
- period
- list of Cities
- Pass files path in args
- Pass number of term to extract for TF-IDF and TF
Gives commons and specifics terms between H-TFIDF and TF & TF-IDF classics
Creates 6 csv files : 3 for each classical measures :
- Common.csv : list of common terms
- specific-htfidf : terms only in H-TF-IDF
- specific-reference : terms only in one classical measurs
"""
tfidfStartDate = date(2020, 1, 23)
tfidfEndDate = date(2020, 1, 30)
tfidfPeriod = pd.date_range(tfidfStartDate, tfidfEndDate)
listOfCity = ['London', 'Glasgow', 'Belfast', 'Cardiff']
# Query Elasticsearch to get all tweets from UK
tweets = elasticsearchQuery()
# reorganize tweets (dict : tweets by city) into a dataframe (city and date)
col = ['tweets', 'created_at']
matrixAllTweets = pd.DataFrame(columns=col)
for tweetByCity in tweets.keys():
# pprint(tweets[tweetByCity])
# Filter cities :
if str(tweetByCity).split("_")[0] in listOfCity:
matrix = pd.DataFrame(tweets[tweetByCity])
matrixAllTweets = matrixAllTweets.append(matrix, ignore_index=True)
# NB : 28354 results instead of 44841 (from ES) because we work only on tweets with a city found
# Split datetime into date and time
matrixAllTweets["date"] = [d.date() for d in matrixAllTweets['created_at']]
matrixAllTweets["time"] = [d.time() for d in matrixAllTweets['created_at']]
# Filter by a period
mask = ((matrixAllTweets["date"] >= tfidfPeriod.min()) & (matrixAllTweets["date"] <= tfidfPeriod.max()))
matrixAllTweets = matrixAllTweets.loc[mask]
# Compute TF-IDF
vectorizer = TfidfVectorizer()
vectors = vectorizer.fit_transform(matrixAllTweets['tweet'])
feature_names = vectorizer.get_feature_names()
dense = vectors.todense()
denselist = dense.tolist()
## matrixTFIDF
TFIDFClassical = pd.DataFrame(denselist, columns=feature_names)
### Remove stopword
for term in TFIDFClassical.keys():
if term in stopwords.words('english'):
del TFIDFClassical[term]
# TFIDFClassical.to_csv("elasticsearch/analyse/TFIDFClassical/tfidfclassical.csv")
## Extract N TOP ranking score
top_n = 500
extractBiggest = TFIDFClassical.stack().nlargest(top_n)
### Reset index because stack creates a multi-index (2 levels : old index + terms)
extractBiggest = extractBiggest.reset_index(level=[0, 1])
extractBiggest.columns = ['old-index', 'terms', 'score']
del extractBiggest['old-index']
extractBiggest = extractBiggest.drop_duplicates(subset='terms', keep="first")
extractBiggest.to_csv("elasticsearch/analyse/TFIDFClassical/TFIDFclassicalBiggestScore.csv")
# Compare with H-TFIDF
repToSave = "TFIDFClassical"
compareWithHTFIDF(200, extractBiggest, repToSave)
# Compute TF
tf = CountVectorizer()
tf.fit(matrixAllTweets['tweet'])
tf_res = tf.transform(matrixAllTweets['tweet'])
listOfTermsTF = tf.get_feature_names()
countTerms = tf_res.todense()
## matrixTF
TFClassical = pd.DataFrame(countTerms.tolist(), columns=listOfTermsTF)
### Remove stopword
for term in TFClassical.keys():
if term in stopwords.words('english'):
del TFClassical[term]
### save in file
# TFClassical.to_csv("elasticsearch/analyse/TFClassical/tfclassical.csv")
## Extract N TOP ranking score
top_n = 500
extractBiggestTF = TFClassical.stack().nlargest(top_n)
### Reset index because stack creates a multi-index (2 levels : old index + terms)
extractBiggestTF = extractBiggestTF.reset_index(level=[0, 1])
extractBiggestTF.columns = ['old-index', 'terms', 'score']
del extractBiggestTF['old-index']
extractBiggestTF = extractBiggestTF.drop_duplicates(subset='terms', keep="first")
extractBiggestTF.to_csv("elasticsearch/analyse/TFClassical/TFclassicalBiggestScore.csv")
# Compare with H-TFIDF
repToSave = "TFClassical"
compareWithHTFIDF(200, extractBiggestTF, repToSave)
def concatenateHTFIDFBiggestscore():
"""
This function returns a dataframe with one column containing all terms, i.e. it regroups all terms
:param:
:return: dataframe of 1 column with all terms from states stacked
"""
HTFIDF = pd.read_csv('elasticsearch/analyse/TFIDFadaptativeBiggestScore.csv', index_col=0)
# Transpose A-TF-IDF (inverse rows and columns)
HTFIDF = HTFIDF.transpose()
# group together all states' terms
HTFIDFUnique = pd.Series(dtype='string')
## loop over rows to append states' terms so that their rank is taken into account
## e.g. with 4 states, each iteration appends one state's ranked terms
for index, row in HTFIDF.iterrows():
HTFIDFUnique = HTFIDFUnique.append(row.transpose(), ignore_index=True)
## drop duplicate
HTFIDFUnique = HTFIDFUnique.drop_duplicates()
# merge to see what terms have in common
## convert series into dataframe before merge
HTFIDFUniquedf = HTFIDFUnique.to_frame().rename(columns={0: 'terms'})
HTFIDFUniquedf['terms'] = HTFIDFUnique
return HTFIDFUniquedf
def spatiotemporelFilter(matrix, listOfcities='all', spatialLevel='city', period='all', temporalLevel='day'):
"""
Filter matrix with list of cities and a period
:param matrix:
:param listOfcities:
:param spatialLevel:
:param period:
:param temporalLevel:
:return: filtered matrix
"""
if spatialLevel not in spatialLevels or temporalLevel not in temporalLevels:
print("wrong level, please double check")
return 1
# Extract cities and period
## cities
if listOfcities != 'all': ### we need to filter
### Initiate a numpy array of False
filter = np.zeros((1, len(matrix.index)), dtype=bool)[0]
for city in listOfcities:
### edit filter if index contains the city (for each city of the list)
filter += matrix.index.str.startswith(str(city) + "_")
matrix = matrix.loc[filter]
## period
if str(period) != 'all': ### we need a filter on date
datefilter = np.zeros((1, len(matrix.index)), dtype=bool)[0]
for date in period:
datefilter += matrix.index.str.contains(date.strftime('%Y-%m-%d'))
matrix = matrix.loc[datefilter]
return matrix
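# Illustrative call (values hypothetical), assuming the matrix index is formatted
# "city_state_country_date" as elsewhere in this module:
# week = pd.date_range(date(2020, 1, 23), date(2020, 1, 30))
# filtered = spatiotemporelFilter(matrix=matrixAggDay, listOfcities=['London'], spatialLevel='state', period=week)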
def compute_occurence_word_by_state():
"""
Count words for tweets aggregated by state.
For each state, we concatenate all related tweets.
Then we build a table :
- columns : all words (our vocabulary)
- rows : the 4 states of the UK
- cells : occurrence of the word in that state
:return: pd.DataFrame of word occurrences by state
"""
listOfCity = ['London', 'Glasgow', 'Belfast', 'Cardiff']
tfidfStartDate = date(2020, 1, 23)
tfidfEndDate = date(2020, 1, 30)
tfidfPeriod = pd.date_range(tfidfStartDate, tfidfEndDate)
## Compute a table : (row : state; column: occurence of each terms present in state's tweets)
es_tweets_results = pd.read_csv('elasticsearch/analyse/matrixOccurence.csv', index_col=0)
es_tweets_results_filtred = spatiotemporelFilter(es_tweets_results, listOfcities=listOfCity, spatialLevel='state',
period=tfidfPeriod)
## Aggregate by state
### Create 4 new columns : city, State, Country and date
def splitindex(row):
return row.split("_")
es_tweets_results_filtred["city"], es_tweets_results_filtred["state"], es_tweets_results_filtred["country"], \
es_tweets_results_filtred["date"] = zip(*es_tweets_results_filtred.index.map(splitindex))
es_tweets_results_filtred_aggstate = es_tweets_results_filtred.groupby("state").sum()
return es_tweets_results_filtred_aggstate
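# The returned table looks like (terms and counts are hypothetical):
# covid flood snow
# England 12 3 0
# Northern Ireland 1 2 0
# Scotland 5 0 7
# Wales 4 1 1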
def get_tweets_by_terms(term):
"""
Return tweet contents containing the term, for Eval 11
Warning: Only works on
- the spatial window : capitals of the UK
- the temporal window : 2020-01-22 to 2020-01-30
Todo:
- if you want to generalize this method to other spatial & temporal windows, you have to customize the
Elasticsearch query.
:param term: term for retrieving tweets
:return: list of tweets (dicts with "full_text" and "state") for the term
"""
list_of_tweets = []
client = Elasticsearch("http://localhost:9200")
index = "twitter"
# Define a Query : Here get only city from UK
query = {"query": {
"bool": {
"must": [],
"filter": [
{
"bool": {
"filter": [
{
"bool": {
"should": [
{
"bool": {
"should": [
{
"match_phrase": {
"rest.features.properties.city.keyword": "London"
}
}
],
"minimum_should_match": 1
}
},
{
"bool": {
"should": [
{
"bool": {
"should": [
{
"match_phrase": {
"rest.features.properties.city.keyword": "Glasgow"
}
}
],
"minimum_should_match": 1
}
},
{
"bool": {
"should": [
{
"bool": {
"should": [
{
"match_phrase": {
"rest.features.properties.city.keyword": "Belfast"
}
}
],
"minimum_should_match": 1
}
},
{
"bool": {
"should": [
{
"match": {
"rest.features.properties.city.keyword": "Cardiff"
}
}
],
"minimum_should_match": 1
}
}
],
"minimum_should_match": 1
}
}
],
"minimum_should_match": 1
}
}
],
"minimum_should_match": 1
}
},
{
"bool": {
"should": [
{
"match": {
"full_text": term
}
}
],
"minimum_should_match": 1
}
}
]
}
},
{
"range": {
"created_at": {
"gte": "2020-01-22T23:00:00.000Z",
"lte": "2020-01-30T23:00:00.000Z",
"format": "strict_date_optional_time"
}
}
}
],
}
}
}
try:
result = Elasticsearch.search(client, index=index, body=query, size=10000)
except Exception as e:
print("Elasticsearch deamon may not be launched for term: " + term)
print(e)
result = ""
for hit in result['hits']['hits']:
content = hit["_source"]["full_text"]
state = hit["_source"]["rest"]["features"][0]["properties"]["state"]
tweet = {
"full_text": content,
"state": state
}
list_of_tweets.append(tweet)
return list_of_tweets
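# Each element of the returned list is a dict such as
# {"full_text": "<tweet text>", "state": "Scotland"} (values hypothetical)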
def get_nb_of_tweets_with_spatio_temporal_filter():
"""
Return the number of tweets by state, for Eval 11
Warning: Only works on
- the spatial window : capitals of the UK
- the temporal window : 2020-01-22 to 2020-01-30
Todo:
- if you want to generalize this method to other spatial & temporal windows, you have to customize the
Elasticsearch query.
:return: pd.DataFrame with the number of tweets by state
"""
list_of_tweets = []
client = Elasticsearch("http://localhost:9200")
index = "twitter"
# Define a query: keep only tweets geolocated in the four UK capital cities
query = {"query": {
"bool": {
"must": [],
"filter": [
{
"bool": {
"filter": [
{
"bool": {
"should": [
{
"bool": {
"should": [
{
"match_phrase": {
"rest.features.properties.city.keyword": "London"
}
}
],
"minimum_should_match": 1
}
},
{
"bool": {
"should": [
{
"bool": {
"should": [
{
"match_phrase": {
"rest.features.properties.city.keyword": "Glasgow"
}
}
],
"minimum_should_match": 1
}
},
{
"bool": {
"should": [
{
"bool": {
"should": [
{
"match_phrase": {
"rest.features.properties.city.keyword": "Belfast"
}
}
],
"minimum_should_match": 1
}
},
{
"bool": {
"should": [
{
"match": {
"rest.features.properties.city.keyword": "Cardiff"
}
}
],
"minimum_should_match": 1
}
}
],
"minimum_should_match": 1
}
}
],
"minimum_should_match": 1
}
}
],
"minimum_should_match": 1
}
},
]
}
},
{
"range": {
"created_at": {
"gte": "2020-01-22T23:00:00.000Z",
"lte": "2020-01-30T23:00:00.000Z",
"format": "strict_date_optional_time"
}
}
}
],
}
}
}
try:
result = Elasticsearch.search(client, index=index, body=query, size=10000)
except Exception as e:
print("Elasticsearch deamon may not be launched")
print(e)
result = ""
nb_tweets_by_state = pd.DataFrame(index=["nb_tweets"], columns=('England', 'Northern Ireland', 'Scotland', 'Wales'))
nb_tweets_by_state.iloc[0] = (0, 0, 0, 0)
list_of_unboundaries_state = []
for hit in result['hits']['hits']:
try:
state = hit["_source"]["rest"]["features"][0]["properties"]["state"]
nb_tweets_by_state[state].iloc[0] += 1
except:
    # the location is not one of the four UK states (or has no "state" property)
    props = hit["_source"]["rest"]["features"][0]["properties"]
    state_no_uk = str(props.get("city", "")) + " " + str(props.get("state", ""))
    list_of_unboundaries_state.append(state_no_uk)
print("get_nb_of_tweets_with_spatio_temporal_filter(): List of unique location outside of UK: " + str(
set(list_of_unboundaries_state)))
return nb_tweets_by_state
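# --- Illustrative sketch (not part of the original module; the helper name is
# hypothetical). The deeply nested bool/should blocks used in the two functions
# above can be generated programmatically; this builds an equivalent city filter
# plus the same created_at range in a few lines.
def _example_build_city_query(cities, gte="2020-01-22T23:00:00.000Z",
                              lte="2020-01-30T23:00:00.000Z"):
    # one match_phrase clause per city, any of which may match
    city_clause = {"bool": {
        "should": [{"match_phrase": {"rest.features.properties.city.keyword": c}}
                   for c in cities],
        "minimum_should_match": 1}}
    # same temporal window as the hand-written queries above
    date_clause = {"range": {"created_at": {
        "gte": gte, "lte": lte, "format": "strict_date_optional_time"}}}
    return {"query": {"bool": {"must": [], "filter": [city_clause, date_clause]}}}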
def ECIR20():
# matrixOccurence = pd.read_csv('elasticsearch/analyse/matrixOccurence.csv', index_col=0)
"""
### Filter city and period
"""
listOfCity = ['London', 'Glasgow', 'Belfast', 'Cardiff']
tfidfStartDate = date(2020, 1, 23)
tfidfEndDate = date(2020, 1, 30)
tfidfPeriod = pd.date_range(tfidfStartDate, tfidfEndDate)
# LDA clustering on TF-IDF adaptative vocabulary
listOfCityState = ['London_England', 'Glasgow_Scotland', 'Belfast_Northern Ireland', 'Cardiff_Wales']
ldHHTFIDF(listOfCityState)
"""
"""
## Build biotex input for adaptative level state
biotexAdaptativeBuilderAdaptative(listOfcities=listOfCity, spatialLevel='state',
period=tfidfPeriod, temporalLevel='day')
"""
# Compare Biotex with H-TFIDF
"""
biotex = pd.read_csv('elasticsearch/analyse/biotexonhiccs/biotexUKbyStates.csv',
names=['terms', 'UMLS', 'score'], sep=';')
repToSave = "biotexonhiccs"
compareWithHTFIDF(200, biotex, repToSave)
"""
# declare path for comparison H-TFIDF with TF-IDF and TF (scikit measures)
"""
tfidfpath = "elasticsearch/analyse/TFIDFClassical/TFIDFclassicalBiggestScore.csv"
tfpath = "elasticsearch/analyse/TFClassical/TFclassicalBiggestScore.csv"
"""
"""
# Compare classical TF-IDF with H-TFIDF
## HTFIDF_comparewith_TFIDF_TF() gives common and specific terms between H-TFIDF and the classical TF-IDF & TF measures
HTFIDF_comparewith_TFIDF_TF()
"""
# Thesaurus coverage : Are the terms in Wordnet / Agrovoc / MeSH
## open measures results and add a column for each thesaurus
### TF-IDF
"""
tfidf = pd.read_csv(tfidfpath)
# -*- coding: utf-8 -*-
"""Supports F10.7 index values. Downloads data from LASP and the SWPC.
Properties
----------
platform
'sw'
name
'f107'
tag
- 'historic' LASP F10.7 data (downloads by month, loads by day)
- 'prelim' Preliminary SWPC daily solar indices
- 'daily' Daily SWPC solar indices (contains last 30 days)
- 'forecast' Grab forecast data from SWPC (next 3 days)
- '45day' 45-Day Forecast data from the Air Force
Example
-------
Download and load all of the historic F10.7 data. Note that it will not
stop on the current date, but a point in the past when post-processing has
been successfully completed.
::
f107 = pysat.Instrument('sw', 'f107', tag='historic')
f107.download(start=f107.lasp_stime, stop=f107.today(), freq='MS')
f107.load(date=f107.lasp_stime, end_date=f107.today())
Note
----
The forecast data is stored by generation date, where each file contains the
forecast for the next three days. Forecast data downloads are only supported
for the current day. When loading forecast data, the date specified with the
load command is the date the forecast was generated. The data loaded will span
three days. To always ensure you are loading the most recent data, load
the data with tomorrow's date.
::
f107 = pysat.Instrument('sw', 'f107', tag='forecast')
f107.download()
f107.load(date=f107.tomorrow())
Warnings
--------
The 'forecast' F10.7 data loads three days at a time. Loading multiple files,
loading multiple days, the data padding feature, and multi_file_day feature
available from the pysat.Instrument object is not appropriate for 'forecast'
data.
Like 'forecast', the '45day' forecast loads a specific period of time (45 days)
and subsequent files contain overlapping data. Thus, loading multiple files,
loading multiple days, the data padding feature, and multi_file_day feature
available from the pysat.Instrument object is not appropriate for '45day' data.
"""
import datetime as dt
import ftplib
import json
import numpy as np
import os
import requests
import sys
import warnings
import pandas as pds
import pysat
from pysatSpaceWeather.instruments.methods import f107 as mm_f107
from pysatSpaceWeather.instruments.methods.ace import load_csv_data
from pysatSpaceWeather.instruments.methods import general
logger = pysat.logger
# ----------------------------------------------------------------------------
# Instrument attributes
platform = 'sw'
name = 'f107'
tags = {'historic': 'Daily LASP value of F10.7',
'prelim': 'Preliminary SWPC daily solar indices',
'daily': 'Daily SWPC solar indices (contains last 30 days)',
'forecast': 'SWPC Forecast F107 data next (3 days)',
'45day': 'Air Force 45-day Forecast'}
# Dict keyed by inst_id that lists supported tags for each inst_id
inst_ids = {'': [tag for tag in tags.keys()]}
# Dict keyed by inst_id that lists supported tags and a good day of test data
# generate todays date to support loading forecast data
now = dt.datetime.utcnow()
today = dt.datetime(now.year, now.month, now.day)
tomorrow = today + pds.DateOffset(days=1)
# The LASP archive start day is also important
lasp_stime = dt.datetime(1947, 2, 14)
# ----------------------------------------------------------------------------
# Instrument test attributes
_test_dates = {'': {'historic': dt.datetime(2009, 1, 1),
'prelim': dt.datetime(2009, 1, 1),
'daily': tomorrow,
'forecast': tomorrow,
'45day': tomorrow}}
# Other tags assumed to be True
_test_download_travis = {'': {'prelim': False}}
# ----------------------------------------------------------------------------
# Instrument methods
preprocess = general.preprocess
def init(self):
"""Initializes the Instrument object with instrument specific values.
Runs once upon instantiation.
"""
self.acknowledgements = mm_f107.acknowledgements(self.name, self.tag)
self.references = mm_f107.references(self.name, self.tag)
logger.info(self.acknowledgements)
# Define the historic F10.7 starting time
if self.tag == 'historic':
self.lasp_stime = lasp_stime
return
def clean(self):
""" Cleaning function for Space Weather indices
Note
----
F10.7 doesn't require cleaning
"""
return
# ----------------------------------------------------------------------------
# Instrument functions
def load(fnames, tag=None, inst_id=None):
"""Load F10.7 index files
Parameters
----------
fnames : pandas.Series
Series of filenames
tag : str or NoneType
tag or None (default=None)
inst_id : str or NoneType
satellite id or None (default=None)
Returns
-------
data : pandas.DataFrame
Object containing satellite data
meta : pysat.Meta
Object containing metadata such as column names and units
Note
----
Called by pysat. Not intended for direct use by user.
"""
# Get the desired file dates and file names from the daily indexed list
file_dates = list()
if tag in ['historic', 'prelim']:
unique_files = list()
for fname in fnames:
file_dates.append(dt.datetime.strptime(fname[-10:], '%Y-%m-%d'))
if fname[0:-11] not in unique_files:
unique_files.append(fname[0:-11])
fnames = unique_files
# Load the CSV data files
data = load_csv_data(fnames, read_csv_kwargs={"index_col": 0,
"parse_dates": True})
# If there is a date range, downselect here
if len(file_dates) > 0:
idx, = np.where((data.index >= min(file_dates))
& (data.index < max(file_dates) + dt.timedelta(days=1)))
data = data.iloc[idx, :]
# Initialize the metadata
meta = pysat.Meta()
meta['f107'] = {meta.labels.units: 'SFU',
meta.labels.name: 'F10.7 cm solar index',
meta.labels.notes: '',
meta.labels.desc:
'F10.7 cm radio flux in Solar Flux Units (SFU)',
meta.labels.fill_val: np.nan,
meta.labels.min_val: 0,
meta.labels.max_val: np.inf}
if tag == '45day':
meta['ap'] = {meta.labels.units: '',
meta.labels.name: 'Daily Ap index',
meta.labels.notes: '',
meta.labels.desc: 'Daily average of 3-h ap indices',
meta.labels.fill_val: np.nan,
meta.labels.min_val: 0,
meta.labels.max_val: 400}
elif tag == 'daily' or tag == 'prelim':
meta['ssn'] = {meta.labels.units: '',
meta.labels.name: 'Sunspot Number',
meta.labels.notes: '',
meta.labels.desc: 'SESC Sunspot Number',
meta.labels.fill_val: -999,
meta.labels.min_val: 0,
meta.labels.max_val: np.inf}
meta['ss_area'] = {meta.labels.units: '10$^-6$ Solar Hemisphere',
meta.labels.name: 'Sunspot Area',
meta.labels.notes: '',
meta.labels.desc:
''.join(['Sunspot Area in Millionths of the ',
'Visible Hemisphere']),
meta.labels.fill_val: -999,
meta.labels.min_val: 0,
meta.labels.max_val: 1.0e6}
meta['new_reg'] = {meta.labels.units: '',
meta.labels.name: 'New Regions',
meta.labels.notes: '',
meta.labels.desc: 'New active solar regions',
meta.labels.fill_val: -999,
meta.labels.min_val: 0,
meta.labels.max_val: np.inf}
meta['smf'] = {meta.labels.units: 'G',
meta.labels.name: 'Solar Mean Field',
meta.labels.notes: '',
meta.labels.desc: 'Stanford Solar Mean Field',
meta.labels.fill_val: -999,
meta.labels.min_val: 0,
meta.labels.max_val: np.inf}
meta['goes_bgd_flux'] = {meta.labels.units: 'W/m^2',
meta.labels.name: 'X-ray Background Flux',
meta.labels.notes: '',
meta.labels.desc:
'GOES15 X-ray Background Flux',
meta.labels.fill_val: '*',
meta.labels.min_val: -np.inf,
meta.labels.max_val: np.inf}
meta['c_flare'] = {meta.labels.units: '',
meta.labels.name: 'C X-Ray Flares',
meta.labels.notes: '',
meta.labels.desc: 'C-class X-Ray Flares',
meta.labels.fill_val: -1,
meta.labels.min_val: 0,
meta.labels.max_val: 9}
meta['m_flare'] = {meta.labels.units: '',
meta.labels.name: 'M X-Ray Flares',
meta.labels.notes: '',
meta.labels.desc: 'M-class X-Ray Flares',
meta.labels.fill_val: -1,
meta.labels.min_val: 0,
meta.labels.max_val: 9}
meta['x_flare'] = {meta.labels.units: '',
meta.labels.name: 'X X-Ray Flares',
meta.labels.notes: '',
meta.labels.desc: 'X-class X-Ray Flares',
meta.labels.fill_val: -1,
meta.labels.min_val: 0,
meta.labels.max_val: 9}
meta['o1_flare'] = {meta.labels.units: '',
meta.labels.name: '1 Optical Flares',
meta.labels.notes: '',
meta.labels.desc: '1-class Optical Flares',
meta.labels.fill_val: -1,
meta.labels.min_val: 0,
meta.labels.max_val: 9}
meta['o2_flare'] = {meta.labels.units: '',
meta.labels.name: '2 Optical Flares',
meta.labels.notes: '',
meta.labels.desc: '2-class Optical Flares',
meta.labels.fill_val: -1,
meta.labels.min_val: 0,
meta.labels.max_val: 9}
meta['o3_flare'] = {meta.labels.units: '',
meta.labels.name: '3 Optical Flares',
meta.labels.notes: '',
meta.labels.desc: '3-class Optical Flares',
meta.labels.fill_val: -1,
meta.labels.min_val: 0,
meta.labels.max_val: 9}
return data, meta
def list_files(tag=None, inst_id=None, data_path=None, format_str=None):
"""Return a Pandas Series of every file for F10.7 data
Parameters
----------
tag : string or NoneType
Denotes type of file to load.
(default=None)
inst_id : string or NoneType
Specifies the satellite ID for a constellation. Not used.
(default=None)
data_path : string or NoneType
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
format_str : string or NoneType
User specified file format. If None is specified, the default
formats associated with the supplied tags are used. (default=None)
Returns
-------
out_files : pysat._files.Files
A class containing the verified available files
Note
----
Called by pysat. Not intended for direct use by user.
"""
if data_path is not None:
if tag == 'historic':
# Files are by month, going to add date to monthly filename for
# each day of the month. The load routine will load a month of
# data and use the appended date to select out appropriate data.
if format_str is None:
format_str = 'f107_monthly_{year:04d}-{month:02d}.txt'
out_files = pysat.Files.from_os(data_path=data_path,
format_str=format_str)
if not out_files.empty:
out_files.loc[out_files.index[-1] + pds.DateOffset(months=1)
- pds.DateOffset(days=1)] = out_files.iloc[-1]
out_files = out_files.asfreq('D', 'pad')
out_files = out_files + '_' + out_files.index.strftime(
'%Y-%m-%d')
elif tag == 'prelim':
# Files are by year (and quarter)
if format_str is None:
format_str = ''.join(['f107_prelim_{year:04d}_{month:02d}',
'_v{version:01d}.txt'])
out_files = pysat.Files.from_os(data_path=data_path,
format_str=format_str)
if not out_files.empty:
# Set each file's valid length at a 1-day resolution
orig_files = out_files.sort_index().copy()
new_files = list()
for orig in orig_files.iteritems():
# Version determines each file's valid length
version = int(orig[1].split("_v")[1][0])
doff = pds.DateOffset(years=1) if version == 2 \
else pds.DateOffset(months=3)
istart = orig[0]
iend = istart + doff - pds.DateOffset(days=1)
# Ensure the end time does not extend past the number of
# possible days included based on the file's download time
fname = os.path.join(data_path, orig[1])
dend = dt.datetime.utcfromtimestamp(os.path.getctime(fname))
dend = dend - pds.DateOffset(days=1)
if dend < iend:
iend = dend
# Pad the original file index
out_files.loc[iend] = orig[1]
out_files = out_files.sort_index()
# Save the files at a daily cadence over the desired period
new_files.append(out_files.loc[istart:
iend].asfreq('D', 'pad'))
# Add the newly indexed files to the file output
out_files = pds.concat(new_files, sort=True)
out_files = out_files.dropna()
out_files = out_files.sort_index()
out_files = out_files + '_' + out_files.index.strftime(
'%Y-%m-%d')
elif tag in ['daily', 'forecast', '45day']:
format_str = ''.join(['f107_', tag,
'_{year:04d}-{month:02d}-{day:02d}.txt'])
out_files = pysat.Files.from_os(data_path=data_path,
format_str=format_str)
# Pad list of files data to include most recent file under tomorrow
if not out_files.empty:
pds_off = pds.DateOffset(days=1)
out_files.loc[out_files.index[-1]
+ pds_off] = out_files.values[-1]
out_files.loc[out_files.index[-1]
+ pds_off] = out_files.values[-1]
else:
raise ValueError(' '.join(('Unrecognized tag name for Space',
'Weather Index F107:', tag)))
else:
raise ValueError(' '.join(('A data_path must be passed to the loading',
'routine for F107')))
return out_files
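# Illustrative sketch (not part of pysatSpaceWeather; the file names below are
# made up). It isolates the monthly-to-daily padding used for the 'historic' tag
# in list_files above: pad the series to the end of the last month, forward-fill
# to a daily cadence, then append the date to every file name.
def _example_expand_monthly_files():
    files = pds.Series(['f107_monthly_2009-01.txt', 'f107_monthly_2009-02.txt'],
                       index=pds.to_datetime(['2009-01-01', '2009-02-01']))
    # extend the last monthly entry to the end of its month
    files.loc[files.index[-1] + pds.DateOffset(months=1)
              - pds.DateOffset(days=1)] = files.iloc[-1]
    # forward-fill to one entry per day
    files = files.asfreq('D', 'pad')
    # append the day so each daily entry is distinguishable
    return files + '_' + files.index.strftime('%Y-%m-%d')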
def download(date_array, tag, inst_id, data_path, update_files=False):
"""Routine to download F107 index data
Parameters
-----------
date_array : list-like
Sequence of dates to download date for.
tag : string or NoneType
Denotes type of file to load.
inst_id : string or NoneType
Specifies the satellite ID for a constellation.
data_path : string or NoneType
Path to data directory.
update_files : bool
Re-download data for files that already exist if True (default=False)
Note
----
Called by pysat. Not intended for direct use by user.
Warnings
--------
Only able to download current forecast data, not archived forecasts.
"""
# download standard F107 data
if tag == 'historic':
# Test the date array, updating it if necessary
if date_array.freq != 'MS':
warnings.warn(''.join(['Historic F10.7 downloads should be invoked',
" with the `freq='MS'` option."]))
date_array = pysat.utils.time.create_date_range(
dt.datetime(date_array[0].year, date_array[0].month, 1),
date_array[-1], freq='MS')
# Download from LASP, by month
for dl_date in date_array:
# Create the name to which the local file will be saved
str_date = dl_date.strftime('%Y-%m')
data_file = os.path.join(data_path,
'f107_monthly_{:s}.txt'.format(str_date))
if update_files or not os.path.isfile(data_file):
# Set the download webpage
dstr = ''.join(['http://lasp.colorado.edu/lisird/latis/dap/',
'noaa_radio_flux.json?time%3E=',
dl_date.strftime('%Y-%m-%d'),
'T00:00:00.000Z&time%3C=',
(dl_date + pds.DateOffset(months=1)
- pds.DateOffset(days=1)).strftime('%Y-%m-%d'),
'T00:00:00.000Z'])
# The data is returned as a JSON file
req = requests.get(dstr)
# Process the JSON file
raw_dict = json.loads(req.text)['noaa_radio_flux']
data = pds.DataFrame.from_dict(raw_dict['samples'])
if data.empty:
warnings.warn("no data for {:}".format(dl_date),
UserWarning)
else:
# The file format changed over time
try:
# This is the new data format
times = [dt.datetime.strptime(time, '%Y%m%d')
for time in data.pop('time')]
except ValueError:
# Accepts old file formats
times = [dt.datetime.strptime(time, '%Y %m %d')
for time in data.pop('time')]
data.index = times
# Replace fill value with NaNs
idx, = np.where(data['f107'] == -99999.0)
data.iloc[idx, :] = np.nan
# Create a local CSV file
data.to_csv(data_file, header=True)
elif tag == 'prelim':
ftp = ftplib.FTP('ftp.swpc.noaa.gov') # connect to host, default port
ftp.login() # user anonymous, passwd <PASSWORD>@
ftp.cwd('/pub/indices/old_indices')
bad_fname = list()
# Get the local files, to ensure that the version 1 files are
# downloaded again if more data has been added
local_files = list_files(tag, inst_id, data_path)
# To avoid downloading multiple files, cycle dates based on file length
dl_date = date_array[0]
while dl_date <= date_array[-1]:
# The file name changes, depending on how recent the requested
# data is
qnum = (dl_date.month - 1) // 3 + 1 # Integer floor division
qmonth = (qnum - 1) * 3 + 1
quar = 'Q{:d}_'.format(qnum)
fnames = ['{:04d}{:s}DSD.txt'.format(dl_date.year, ss)
for ss in ['_', quar]]
versions = ["01_v2", "{:02d}_v1".format(qmonth)]
vend = [dt.datetime(dl_date.year, 12, 31),
dt.datetime(dl_date.year, qmonth, 1)
+ pds.DateOffset(months=3) - pds.DateOffset(days=1)]
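            # e.g. a request in May: qnum = 2, qmonth = 4, so the candidate files are
            # '<year>_DSD.txt' (annual, v2) and '<year>Q2_DSD.txt' (quarterly, v1),
            # valid until Dec 31 and Jun 30 of that year respectively.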
downloaded = False
rewritten = False
# Attempt the download(s)
for iname, fname in enumerate(fnames):
# Test to see if we already tried this filename
if fname in bad_fname:
continue
local_fname = fname
saved_fname = os.path.join(data_path, local_fname)
ofile = '_'.join(['f107', 'prelim',
'{:04d}'.format(dl_date.year),
'{:s}.txt'.format(versions[iname])])
outfile = os.path.join(data_path, ofile)
if os.path.isfile(outfile):
downloaded = True
# Check the date to see if this should be rewritten
checkfile = os.path.split(outfile)[-1]
has_file = local_files == checkfile
if np.any(has_file):
if has_file[has_file].index[-1] < vend[iname]:
# This file will be updated again, but only attempt
# to do so if enough time has passed from the
# last time it was downloaded
yesterday = today - pds.DateOffset(days=1)
if has_file[has_file].index[-1] < yesterday:
rewritten = True
else:
# The file does not exist, if it can be downloaded, it
# should be 'rewritten'
rewritten = True
# Attempt to download if the file does not exist or if the
# file has been updated
if rewritten or not downloaded:
try:
sys.stdout.flush()
ftp.retrbinary('RETR ' + fname,
open(saved_fname, 'wb').write)
downloaded = True
logger.info(' '.join(('Downloaded file for ',
dl_date.strftime('%x'))))
except ftplib.error_perm as exception:
# Could not fetch, so cannot rewrite
rewritten = False
# Test for an error
if str(exception.args[0]).split(" ", 1)[0] != '550':
raise RuntimeError(exception)
else:
# file isn't actually there, try the next name
os.remove(saved_fname)
# Save this so we don't try again
# Because there are two possible filenames for
# each time, it's ok if one isn't there. We just
# don't want to keep looking for it.
bad_fname.append(fname)
# If the first file worked, don't try again
if downloaded:
break
if not downloaded:
logger.info(' '.join(('File not available for',
dl_date.strftime('%x'))))
elif rewritten:
with open(saved_fname, 'r') as fprelim:
lines = fprelim.read()
mm_f107.rewrite_daily_file(dl_date.year, outfile, lines)
os.remove(saved_fname)
# Cycle to the next date
dl_date = vend[iname] + pds.DateOffset(days=1)
# Close connection after downloading all dates
ftp.close()
elif tag == 'daily':
logger.info('This routine can only download the latest 30 day file')
# Set the download webpage
furl = 'https://services.swpc.noaa.gov/text/daily-solar-indices.txt'
req = requests.get(furl)
# Save the output
data_file = 'f107_daily_{:s}.txt'.format(today.strftime('%Y-%m-%d'))
outfile = os.path.join(data_path, data_file)
mm_f107.rewrite_daily_file(today.year, outfile, req.text)
elif tag == 'forecast':
logger.info(' '.join(('This routine can only download the current',
'forecast, not archived forecasts')))
# Set the download webpage
furl = ''.join(('https://services.swpc.noaa.gov/text/',
'3-day-solar-geomag-predictions.txt'))
req = requests.get(furl)
# Parse text to get the date the prediction was generated
date_str = req.text.split(':Issued: ')[-1].split(' UTC')[0]
dl_date = dt.datetime.strptime(date_str, '%Y %b %d %H%M')
# Get starting date of the forecasts
raw_data = req.text.split(':Prediction_dates:')[-1]
forecast_date = dt.datetime.strptime(raw_data[3:14], '%Y %b %d')
# Set the times for output data
times = pds.date_range(forecast_date, periods=3, freq='1D')
# String data is the forecast value for the next three days
raw_data = req.text.split('10cm_flux:')[-1]
raw_data = raw_data.split('\n')[1]
val1 = int(raw_data[24:27])
val2 = int(raw_data[38:41])
val3 = int(raw_data[52:])
# Put data into nicer DataFrame
data = pds.DataFrame([val1, val2, val3], index=times, columns=['f107'])
import natsort
import numpy as np
import pandas as pd
import plotly.io as pio
import plotly.express as px
import plotly.graph_objects as go
import plotly.figure_factory as ff
import re
import traceback
from io import BytesIO
from sklearn.decomposition import PCA
from sklearn.metrics import pairwise as pw
import json
import statistics
import matplotlib.pyplot as plt
import matplotlib_venn as venn
from matplotlib_venn import venn2, venn3, venn3_circles
from PIL import Image
from upsetplot import from_memberships
from upsetplot import plot as upplot
import pkg_resources
def natsort_index_keys(x):
order = natsort.natsorted(np.unique(x.values))
return pd.Index([order.index(el) for el in x], name=x.name)
def natsort_list_keys(x):
order = natsort.natsorted(np.unique(x))
return [order.index(el) for el in x]
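# Illustrative note (added comment): these helpers map labels to their natural-sort
# rank so pandas sorts fractions numerically rather than lexicographically, e.g.
#   natsort_list_keys(["3K", "12K", "01K"]) -> [1, 2, 0]
# i.e. natural order "01K" < "3K" < "12K", whereas plain string order puts "12K" < "3K".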
class SpatialDataSet:
regex = {
"imported_columns": "^[Rr]atio H/L (?!normalized|type|is.*|variability|count)[^ ]+|^Ratio H/L variability.... .+|^Ratio H/L count .+|id$|[Mm][Ss].*[cC]ount.+$|[Ll][Ff][Qq].*|.*[nN]ames.*|.*[Pp][rR]otein.[Ii][Dd]s.*|[Pp]otential.[cC]ontaminant|[Oo]nly.[iI]dentified.[bB]y.[sS]ite|[Rr]everse|[Ss]core|[Qq]-[Vv]alue|R.Condition|PG.Genes|PG.ProteinGroups|PG.Cscore|PG.Qvalue|PG.RunEvidenceCount|PG.Quantity|^Proteins$|^Sequence$"
}
acquisition_set_dict = {
"LFQ6 - Spectronaut" : ["LFQ intensity", "MS/MS count"],
"LFQ5 - Spectronaut" : ["LFQ intensity", "MS/MS count"],
"LFQ5 - MQ" : ["[Ll][Ff][Qq].[Ii]ntensity", "[Mm][Ss]/[Mm][Ss].[cC]ount", "[Ii]ntensity"],
"LFQ6 - MQ" : ["[Ll][Ff][Qq].[Ii]ntensity", "[Mm][Ss]/[Mm][Ss].[cC]ount", "[Ii]ntensity"],
"SILAC - MQ" : [ "[Rr]atio.[Hh]/[Ll](?!.[Vv]aria|.[Cc]ount)","[Rr]atio.[Hh]/[Ll].[Vv]ariability.\[%\]", "[Rr]atio.[Hh]/[Ll].[cC]ount"],
"Custom": ["(?!Protein IDs|Gene names)"]
}
Spectronaut_columnRenaming = {
"R.Condition": "Map", "PG.Genes" : "Gene names", "PG.Qvalue": "Q-value", "PG.Cscore":"C-Score",
"PG.ProteinGroups" : "Protein IDs", "PG.RunEvidenceCount" : "MS/MS count", "PG.Quantity" : "LFQ intensity"
}
css_color = ["#b2df8a", "#6a3d9a", "#e31a1c", "#b15928", "#fdbf6f", "#ff7f00", "#cab2d6", "#fb9a99", "#1f78b4", "#ffff99", "#a6cee3",
"#33a02c", "blue", "orange", "goldenrod", "lightcoral", "magenta", "brown", "lightpink", "red", "turquoise",
"khaki", "darkgoldenrod","darkturquoise", "darkviolet", "greenyellow", "darksalmon", "hotpink", "indianred", "indigo","darkolivegreen",
"coral", "aqua", "beige", "bisque", "black", "blanchedalmond", "blueviolet", "burlywood", "cadetblue", "yellowgreen", "chartreuse",
"chocolate", "cornflowerblue", "cornsilk", "darkblue", "darkcyan", "darkgray", "darkgrey", "darkgreen", "darkkhaki", "darkmagenta",
"darkorange", "darkorchid", "darkred", "darkseagreen", "darkslateblue", "snow", "springgreen", "darkslategrey", "mediumpurple", "oldlace",
"olive", "lightseagreen", "deeppink", "deepskyblue", "dimgray", "dimgrey", "dodgerblue", "firebrick", "floralwhite", "forestgreen",
"fuchsia", "gainsboro", "ghostwhite", "gold", "gray", "ivory", "lavenderblush", "lawngreen", "lemonchiffon", "lightblue", "lightcyan",
"fuchsia", "gainsboro", "ghostwhite", "gold", "gray", "ivory", "lavenderblush", "lawngreen", "lemonchiffon", "lightblue", "lightcyan",
"lightgoldenrodyellow", "lightgray", "lightgrey", "lightgreen", "lightsalmon", "lightskyblue", "lightslategray", "lightslategrey",
"lightsteelblue", "lightyellow", "lime", "limegreen", "linen", "maroon", "mediumaquamarine", "mediumblue", "mediumseagreen",
"mediumslateblue", "mediumspringgreen", "mediumturquoise", "mediumvioletred", "midnightblue", "mintcream", "mistyrose", "moccasin",
"olivedrab", "orangered", "orchid", "palegoldenrod", "palegreen", "paleturquoise", "palevioletred", "papayawhip", "peachpuff", "peru",
"pink", "plum", "powderblue", "rosybrown", "royalblue", "saddlebrown", "salmon", "sandybrown", "seagreen", "seashell", "sienna", "silver",
"skyblue", "slateblue", "steelblue", "teal", "thistle", "tomato", "violet", "wheat", "white", "whitesmoke", "slategray", "slategrey",
"aquamarine", "azure","crimson", "cyan", "darkslategray", "grey","mediumorchid","navajowhite", "navy"]
analysed_datasets_dict = {}
df_organellarMarkerSet = pd.read_csv(pkg_resources.resource_stream(__name__, 'annotations/organellemarkers/{}.csv'.format("Homo sapiens - Uniprot")),
usecols=lambda x: bool(re.match("Gene name|Compartment", x)))
df_organellarMarkerSet = df_organellarMarkerSet.rename(columns={"Gene name":"Gene names"})
df_organellarMarkerSet = df_organellarMarkerSet.astype({"Gene names": "str"})
def __init__(self, filename, expname, acquisition, comment, name_pattern="e.g.:.* (?P<cond>.*)_(?P<rep>.*)_(?P<frac>.*)", reannotate_genes=False, **kwargs):
self.filename = filename
self.expname = expname
self.acquisition = acquisition
self.name_pattern = name_pattern
self.comment = comment
self.imported_columns = self.regex["imported_columns"]
self.fractions, self.map_names = [], []
self.df_01_stacked, self.df_log_stacked = pd.DataFrame(), pd.DataFrame()
if acquisition == "SILAC - MQ":
if "RatioHLcount" not in kwargs.keys():
self.RatioHLcount = 2
else:
self.RatioHLcount = kwargs["RatioHLcount"]
del kwargs["RatioHLcount"]
if "RatioVariability" not in kwargs.keys():
self.RatioVariability = 30
else:
self.RatioVariability = kwargs["RatioVariability"]
del kwargs["RatioVariability"]
elif acquisition == "Custom":
self.custom_columns = kwargs["custom_columns"]
self.custom_normalized = kwargs["custom_normalized"]
self.imported_columns = "^"+"$|^".join(["$|^".join(el) if type(el) == list else el for el in self.custom_columns.values() if el not in [[], None, ""]])+"$"
#elif acquisition == "LFQ5 - MQ" or acquisition == "LFQ6 - MQ" or acquisition == "LFQ6 - Spectronaut" or acquisition == "LFQ5 - Spectronaut":
else:
if "summed_MSMS_counts" not in kwargs.keys():
self.summed_MSMS_counts = 2
else:
self.summed_MSMS_counts = kwargs["summed_MSMS_counts"]
del kwargs["summed_MSMS_counts"]
if "consecutiveLFQi" not in kwargs.keys():
self.consecutiveLFQi = 4
else:
self.consecutiveLFQi = kwargs["consecutiveLFQi"]
del kwargs["consecutiveLFQi"]
#self.markerset_or_cluster = False if "markerset_or_cluster" not in kwargs.keys() else kwargs["markerset_or_cluster"]
if "organism" not in kwargs.keys():
marker_table = pd.read_csv(pkg_resources.resource_stream(__name__, 'annotations/complexes/{}.csv'.format("Homo sapiens - Uniprot")))
self.markerproteins = {k: v.replace(" ", "").split(",") for k,v in zip(marker_table["Cluster"], marker_table["Members - Gene names"])}
else:
assert kwargs["organism"]+".csv" in pkg_resources.resource_listdir(__name__, "annotations/complexes")
marker_table = pd.read_csv(pkg_resources.resource_stream(__name__, 'annotations/complexes/{}.csv'.format(kwargs["organism"])))
self.markerproteins = {k: v.replace(" ", "").split(",") for k,v in zip(marker_table["Cluster"], marker_table["Members - Gene names"])}
self.organism = kwargs["organism"]
del kwargs["organism"]
self.analysed_datasets_dict = {}
self.analysis_summary_dict = {}
def data_reading(self, filename=None, content=None):
"""
Data import. Can read the df_original from a file or buffer.
df_original contains all information of the raw file; tab separated file is imported,
Args:
self:
filename: string
imported_columns : dictionary; columns matching this regular expression will be imported
filename: default None, to use the class attribute. Otherwise overwrites the class attribute upon success.
content: default None, to use the filename. Any valid input to pd.read_csv can be provided, e.g. a StringIO buffer.
Returns:
self.df_original: raw, unprocessed dataframe, single level column index
"""
# use instance attribute if no filename is provided
if filename is None:
filename = self.filename
# if no buffer is provided for the content read straight from the file
if content is None:
content = filename
if filename.endswith("xls") or filename.endswith("txt"):
self.df_original = pd.read_csv(content, sep="\t", comment="#", usecols=lambda x: bool(re.match(self.imported_columns, x)), low_memory = True)
else: #assuming csv file
self.df_original = pd.read_csv(content, sep=",", comment="#", usecols=lambda x: bool(re.match(self.imported_columns, x)), low_memory = True)
assert self.df_original.shape[0]>10 and self.df_original.shape[1]>5
self.filename = filename
return self.df_original
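    # Illustrative usage (hypothetical call, not part of the class): data_reading()
    # is typically invoked once, before processingdf(), either on the stored
    # filename or on any buffer accepted by pd.read_csv, e.g.
    #   ds = SpatialDataSet("proteinGroups.txt", "Exp1", "LFQ6 - MQ", comment="")
    #   df_raw = ds.data_reading()                 # read from ds.filename
    #   df_raw = ds.data_reading(content=buffer)   # or from an in-memory buffer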
def processingdf(self, name_pattern=None, summed_MSMS_counts=None, consecutiveLFQi=None, RatioHLcount=None, RatioVariability=None, custom_columns=None, custom_normalized=None):
"""
Analysis of the SILAC/LFQ-MQ/LFQ-Spectronaut data will be performed. The dataframe will be filtered, normalized, and converted into a dataframe,
characterized by a flat column index. These tasks are performed by the following functions:
indexingdf(df_original, acquisition_set_dict, acquisition, fraction_dict, name_pattern)
spectronaut_LFQ_indexingdf(df_original, Spectronaut_columnRenaming, acquisition_set_dict, acquisition, fraction_dict, name_pattern)
stringency_silac(df_index)
normalization_01_silac(df_stringency_mapfracstacked):
logarithmization_silac(df_stringency_mapfracstacked):
stringency_lfq(df_index):
normalization_01_lfq(df_stringency_mapfracstacked):
logarithmization_lfq(df_stringency_mapfracstacked):
Args:
self.acquisition: string, "LFQ6 - Spectronaut", "LFQ5 - Spectronaut", "LFQ5 - MQ", "LFQ6 - MQ", "SILAC - MQ"
additional arguments can be used to override the value set by the class init function
Returns:
self:
map_names: list of Map names
df_01_stacked: df; 0-1 normalized data with "normalized profile" as column name
df_log_stacked: df; log transformed data
analysis_summary_dict["0/1 normalized data - mean"] : 0/1 normalized data across all maps by calculating the mean
["changes in shape after filtering"]
["Unique Proteins"] : unique proteins, derived from the first entry of Protein IDs, seperated by a ";"
["Analysis parameters"] : {"acquisition" : ...,
"filename" : ...,
#SILAC#
"Ratio H/L count 1 (>=X)" : ...,
"Ratio H/L count 2 (>=Y, var<Z)" : ...,
"Ratio variability (<Z, count>=Y)" : ...
#LFQ#
"consecutive data points" : ...,
"summed MS/MS counts" : ...
}
"""
if name_pattern is None:
name_pattern = self.name_pattern
if self.acquisition == "SILAC - MQ":
if RatioHLcount is None:
RatioHLcount = self.RatioHLcount
if RatioVariability is None:
RatioVariability = self.RatioVariability
elif self.acquisition == "Custom":
if custom_columns is None:
custom_columns = self.custom_columns
if custom_normalized is None:
custom_normalized = self.custom_normalized
else:
if summed_MSMS_counts is None:
summed_MSMS_counts = self.summed_MSMS_counts
if consecutiveLFQi is None:
consecutiveLFQi = self.consecutiveLFQi
shape_dict = {}
def indexingdf():
"""
For data output from MaxQuant, all columns - except of "MS/MS count" and "LFQ intensity" (LFQ) | "Ratio H/L count", "Ratio H/L variability [%]"
(SILAC) - will be set as index. A multiindex will be generated, containing "Set" ("MS/MS count", "LFQ intensity"| "Ratio H/L count", "Ratio H/L
variability [%]"), "Fraction" (= defined via "name_pattern") and "Map" (= defined via "name_pattern") as level names, allowing the stacking and
unstacking of the dataframe. The dataframe will be filtered by removing matches to the reverse database, matches only identified by site, and
potential contaminants.
Args:
self:
df_original: dataframe, columns defined through self.imported_columns
acquisition_set_dict: dictionary, all columns will be set as index, except of those that are listed in acquisition_set_dict
acquisition: string, one of "LFQ6 - Spectronaut", "LFQ5 - Spectronaut", "LFQ5 - MQ", "LFQ6 - MQ", "SILAC - MQ"
fraction_dict: "Fraction" is part of the multiindex; fraction_dict allows the renaming of the fractions e.g. 3K -> 03K
name_pattern: regular expression, to identify Map-Fraction-(Replicate)
Returns:
self:
df_index: multiindex dataframe, which contains 3 level labels: Map, Fraction, Type
shape_dict["Original size"] of df_original
shape_dict["Shape after categorical filtering"] of df_index
fractions: list of fractions e.g. ["01K", "03K", ...]
"""
df_original = self.df_original.copy()
df_original.rename({"Proteins": "Protein IDs"}, axis=1, inplace=True)
df_original = df_original.set_index([col for col in df_original.columns
if any([re.match(s, col) for s in self.acquisition_set_dict[self.acquisition]]) == False])
# a multiindex will be generated by extracting the information about the Map, Fraction and Type from each individual column name
multiindex = pd.MultiIndex.from_arrays(
arrays=[
[[re.findall(s, col)[0] for s in self.acquisition_set_dict[self.acquisition] if re.match(s,col)][0]
for col in df_original.columns],
[re.match(self.name_pattern, col).group("rep") for col in df_original.columns] if not "<cond>" in self.name_pattern
else ["_".join(re.match(self.name_pattern, col).group("cond", "rep")) for col in df_original.columns],
[re.match(self.name_pattern, col).group("frac") for col in df_original.columns],
],
names=["Set", "Map", "Fraction"]
)
df_original.columns = multiindex
df_original.sort_index(1, inplace=True)
shape_dict["Original size"] = df_original.shape
try:
df_index = df_original.xs(
np.nan, 0, "Reverse")
except:
pass
try:
df_index = df_index.xs(
np.nan, 0, "Potential contaminant")
except:
pass
try:
df_index = df_index.xs(
np.nan, 0, "Only identified by site")
except:
pass
df_index.replace(0, np.nan, inplace=True)
shape_dict["Shape after categorical filtering"] = df_index.shape
df_index.rename(columns={"MS/MS Count":"MS/MS count"}, inplace=True)
fraction_wCyt = list(df_index.columns.get_level_values("Fraction").unique())
# Cyt should only be removed if it is not an NMC split
if "Cyt" in fraction_wCyt and len(fraction_wCyt) >= 4:
df_index.drop("Cyt", axis=1, level="Fraction", inplace=True)
try:
if self.acquisition == "LFQ5 - MQ":
df_index.drop("01K", axis=1, level="Fraction", inplace=True)
except:
pass
self.fractions = natsort.natsorted(list(df_index.columns.get_level_values("Fraction").unique()))
self.df_index = df_index
return df_index
def custom_indexing_and_normalization():
df_original = self.df_original.copy()
df_original.rename({custom_columns["ids"]: "Protein IDs", custom_columns["genes"]: "Gene names"}, axis=1, inplace=True)
df_original = df_original.set_index([col for col in df_original.columns
if any([re.match(s, col) for s in self.acquisition_set_dict[self.acquisition]]) == False])
# a multiindex will be generated by extracting the information about the Map, Fraction and Type from each individual column name
multiindex = pd.MultiIndex.from_arrays(
arrays=[
["normalized profile" for col in df_original.columns],
[re.match(self.name_pattern, col).group("rep") for col in df_original.columns] if not "<cond>" in self.name_pattern
else ["_".join(re.match(self.name_pattern, col).group("cond", "rep")) for col in df_original.columns],
[re.match(self.name_pattern, col).group("frac") for col in df_original.columns],
],
names=["Set", "Map", "Fraction"]
)
df_original.columns = multiindex
df_original.sort_index(1, inplace=True)
shape_dict["Original size"] = df_original.shape
# for custom upload assume full normalization for now. this should be extended to valid value filtering and 0-1 normalization later
df_index = df_original.copy()
self.fractions = natsort.natsorted(list(df_index.columns.get_level_values("Fraction").unique()))
self.df_index = df_index
return df_index
def spectronaut_LFQ_indexingdf():
"""
For data generated from the Spectronaut software, columns will be renamed, such it fits in the scheme of MaxQuant output data. Subsequently, all
columns - except of "MS/MS count" and "LFQ intensity" will be set as index. A multiindex will be generated, containing "Set" ("MS/MS count" and
"LFQ intensity"), Fraction" and "Map" (= defined via "name_pattern"; both based on the column name R.condition - equivalent to the column name "Map"
in df_renamed["Map"]) as level labels.
!!!
!!!It is very important to define R.Fraction, R.condition already during the setup of Spectronaut!!!
!!!
Args:
self:
df_original: dataframe, columns defined through self.imported_columns
Spectronaut_columnRenaming
acquisition_set_dict: dictionary, all columns will be set as index, except of those that are listed in acquisition_set_dict
acquisition: string, "LFQ6 - Spectronaut", "LFQ5 - Spectronaut"
fraction_dict: "Fraction" is part of the multiindex; fraction_dict allows the renaming of the fractions e.g. 3K -> 03K
name_pattern: regular expression, to identify Map-Fraction-(Replicate)
Returns:
self:
df_index: multiindex dataframe, which contains 3 level labels: Map, Fraction, Type
shape_dict["Original size"] of df_index
fractions: list of fractions e.g. ["01K", "03K", ...]
"""
df_original = self.df_original.copy()
df_renamed = df_original.rename(columns=self.Spectronaut_columnRenaming)
df_renamed["Fraction"] = [re.match(self.name_pattern, i).group("frac") for i in df_renamed["Map"]]
df_renamed["Map"] = [re.match(self.name_pattern, i).group("rep") for i in df_renamed["Map"]] if not "<cond>" in self.name_pattern else ["_".join(
re.match(self.name_pattern, i).group("cond", "rep")) for i in df_renamed["Map"]]
df_index = df_renamed.set_index([col for col in df_renamed.columns if any([re.match(s, col) for s in self.acquisition_set_dict[self.acquisition]])==False])
df_index.columns.names = ["Set"]
# In case fractionated data was used, this needs to be caught and aggregated
try:
df_index = df_index.unstack(["Map", "Fraction"])
except ValueError:
df_index = df_index.groupby(by=df_index.index.names).agg(np.nansum, axis=0)
df_index = df_index.unstack(["Map", "Fraction"])
df_index.replace(0, np.nan, inplace=True)
shape_dict["Original size"]=df_index.shape
fraction_wCyt = list(df_index.columns.get_level_values("Fraction").unique())
#Cyt is removed only if it is not an NMC split
if "Cyt" in fraction_wCyt and len(fraction_wCyt) >= 4:
df_index.drop("Cyt", axis=1, level="Fraction", inplace=True)
try:
if self.acquisition == "LFQ5 - Spectronaut":
df_index.drop("01K", axis=1, level="Fraction", inplace=True)
except:
pass
self.fractions = natsort.natsorted(list(df_index.columns.get_level_values("Fraction").unique()))
self.df_index = df_index
return df_index
def stringency_silac(df_index):
"""
The multiindex dataframe is subjected to stringency filtering. Only Proteins with complete profiles are considered (a set of f.e. 5 SILAC ratios
in case you have 5 fractions / any proteins with missing values were rejected). Proteins were retained with 3 or more quantifications in each
subfraction (=count). Furthermore, proteins with only 2 quantification events in one or more subfraction were retained, if their ratio variability for
ratios obtained with 2 quantification events was below 30% (=var). SILAC ratios were linearly normalized by dividing by the fraction median.
Subsequently, normalization to SILAC loading was performed. Data is annotated based on the specified marker set, e.g. eLife.
Args:
df_index: multiindex dataframe, which contains 3 level labels: MAP, Fraction, Type
RatioHLcount: int, 2
RatioVariability: int, 30
df_organellarMarkerSet: df, columns: "Gene names", "Compartment", no index
fractions: list of fractions e.g. ["01K", "03K", ...]
Returns:
df_stringency_mapfracstacked: dataframe, in which "MAP" and "Fraction" are stacked;
columns "Ratio H/L count", "Ratio H/L variability [%]", and "Ratio H/L" stored as single level indices
shape_dict["Shape after Ratio H/L count (>=3)/var (count>=2, var<30) filtering"] of df_countvarfiltered_stacked
shape_dict["Shape after filtering for complete profiles"] of df_stringency_mapfracstacked
"""
# Fraction and Map will be stacked
df_stack = df_index.stack(["Fraction", "Map"])
# filtering for sufficient number of quantifications (count in "Ratio H/L count"), taken variability (var in Ratio H/L variability [%]) into account
# zip: allows direct comparison of count and var
# only if the filtering parameters are fulfilled the data will be introduced into df_countvarfiltered_stacked
#default setting: RatioHLcount = 2 ; RatioVariability = 30
df_countvarfiltered_stacked = df_stack.loc[[count>RatioHLcount or (count==RatioHLcount and var<RatioVariability)
for var, count in zip(df_stack["Ratio H/L variability [%]"], df_stack["Ratio H/L count"])]]
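        # Illustration with the defaults (RatioHLcount=2, RatioVariability=30):
        #   count=3, var=45  -> kept    (count > 2, variability is ignored)
        #   count=2, var=25  -> kept    (count == 2 and var < 30)
        #   count=2, var=35  -> dropped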
shape_dict["Shape after Ratio H/L count (>=3)/var (count==2, var<30) filtering"] = df_countvarfiltered_stacked.unstack(["Fraction", "Map"]).shape
# "Ratio H/L":normalization to SILAC loading, each individual experiment (FractionXMap) will be divided by its median
# np.median([...]): only entries, that are not NANs are considered
df_normsilac_stacked = df_countvarfiltered_stacked["Ratio H/L"]\
.unstack(["Fraction", "Map"])\
.apply(lambda x: x/np.nanmedian(x), axis=0)\
.stack(["Map", "Fraction"])
df_stringency_mapfracstacked = df_countvarfiltered_stacked[["Ratio H/L count", "Ratio H/L variability [%]"]].join(
pd.DataFrame(df_normsilac_stacked, columns=["Ratio H/L"]))
# dataframe is grouped (Map, id), that allows the filtering for complete profiles
df_stringency_mapfracstacked = df_stringency_mapfracstacked.groupby(["Map", "id"]).filter(lambda x: len(x)>=len(self.fractions))
shape_dict["Shape after filtering for complete profiles"]=df_stringency_mapfracstacked.unstack(["Fraction", "Map"]).shape
# Ratio H/L is converted into Ratio L/H
df_stringency_mapfracstacked["Ratio H/L"] = df_stringency_mapfracstacked["Ratio H/L"].transform(lambda x: 1/x)
#Annotation with marker genes
df_organellarMarkerSet = self.df_organellarMarkerSet
df_stringency_mapfracstacked.reset_index(inplace=True)
df_stringency_mapfracstacked = df_stringency_mapfracstacked.merge(df_organellarMarkerSet, how="left", on="Gene names")
df_stringency_mapfracstacked.set_index([c for c in df_stringency_mapfracstacked.columns
if c not in ["Ratio H/L count","Ratio H/L variability [%]","Ratio H/L"]], inplace=True)
df_stringency_mapfracstacked.rename(index={np.nan:"undefined"}, level="Compartment", inplace=True)
return df_stringency_mapfracstacked
def normalization_01_silac(df_stringency_mapfracstacked):
"""
The multiindex dataframe, that was subjected to stringency filtering, is 0-1 normalized ("Ratio H/L").
Args:
df_stringency_mapfracstacked: dataframe, in which "Map" and "Fraction" are stacked;
columns "Ratio H/L count", "Ratio H/L variability [%]", and "Ratio H/L" stored as single level indices
self:
fractions: list of fractions e.g. ["01K", "03K", ...]
data_completeness: series, for each individual map, as well as combined maps: 1 - (percentage of NANs)
Returns:
df_01_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "Ratio H/L" is 0-1 normalized and renamed to "normalized
profile"; the columns "Ratio H/L count", "Ratio H/L variability [%]", and "normalized profile" stored as single level indices;
plotting is possible now
self:
analysis_summary_dict["Data/Profile Completeness"] : df, with information about Data/Profile Completeness
column: "Experiment", "Map", "Data completeness", "Profile completeness"
no row index
"""
df_01norm_unstacked = df_stringency_mapfracstacked["Ratio H/L"].unstack("Fraction")
# 0:1 normalization of Ratio L/H
df_01norm_unstacked = df_01norm_unstacked.div(df_01norm_unstacked.sum(axis=1), axis=0)
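            # e.g. a profile of (2.0, 3.0, 5.0) across three fractions becomes
            # (0.2, 0.3, 0.5), so every normalized profile sums to 1 per map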
df_01_stacked = df_stringency_mapfracstacked[["Ratio H/L count", "Ratio H/L variability [%]"]].join(pd.DataFrame
(df_01norm_unstacked.stack("Fraction"),columns=["Ratio H/L"]))
# "Ratio H/L" will be renamed to "normalized profile"
df_01_stacked.columns = [col if col!="Ratio H/L" else "normalized profile" for col in df_01_stacked.columns]
return df_01_stacked
def logarithmization_silac(df_stringency_mapfracstacked):
"""
The multiindex dataframe, that was subjected to stringency filtering, is logarithmized ("Ratio H/L").
Args:
df_stringency_mapfracstacked: dataframe, in which "MAP" and "Fraction" are stacked; the columns "Ratio H/L count", "Ratio H/L variability [%]",
and "Ratio H/L" stored as single level indices
Returns:
df_log_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "log profile" originates from logarithmized "Ratio H/L"
data; the columns "Ratio H/L count", "Ratio H/L variability [%]" and "log profile" are stored as single level indices;
PCA is possible now
"""
# logarithmizing, basis of 2
df_lognorm_ratio_stacked = df_stringency_mapfracstacked["Ratio H/L"].transform(np.log2)
df_log_stacked = df_stringency_mapfracstacked[["Ratio H/L count", "Ratio H/L variability [%]"]].join(
pd.DataFrame(df_lognorm_ratio_stacked, columns=["Ratio H/L"]))
# "Ratio H/L" will be renamed to "log profile"
df_log_stacked.columns = [col if col !="Ratio H/L" else "log profile" for col in df_log_stacked.columns]
return df_log_stacked
def stringency_lfq(df_index):
"""
The multiindex dataframe is subjected to stringency filtering. Only Proteins which were identified with
at least [4] consecutive data points regarding the "LFQ intensity", and if summed MS/MS counts >= n(fractions)*[2]
(LFQ5: min 10 and LFQ6: min 12, respectively; coverage filtering) were included.
Data is annotated based on specified marker set e.g. eLife.
Args:
df_index: multiindex dataframe, which contains 3 level labels: MAP, Fraction, Type
self:
df_organellarMarkerSet: df, columns: "Gene names", "Compartment", no index
fractions: list of fractions e.g. ["01K", "03K", ...]
summed_MSMS_counts: int, 2
consecutiveLFQi: int, 4
Returns:
df_stringency_mapfracstacked: dataframe, in which "Map" and "Fraction" is stacked; "LFQ intensity" and "MS/MS count" define a
single-level column index
self:
shape_dict["Shape after MS/MS value filtering"] of df_mscount_mapstacked
shape_dict["Shape after consecutive value filtering"] of df_stringency_mapfracstacked
"""
df_index = df_index.stack("Map")
# sorting the level 0, in order to have LFQ intensity - MS/MS count instead of continuous alternation
df_index.sort_index(axis=1, level=0, inplace=True)
# "MS/MS count"-column: take the sum over the fractions; if the sum is larger than n[fraction]*2, it will be stored in the new dataframe
minms = (len(self.fractions) * self.summed_MSMS_counts)
if minms > 0:
df_mscount_mapstacked = df_index.loc[df_index[("MS/MS count")].apply(np.sum, axis=1) >= minms]
shape_dict["Shape after MS/MS value filtering"]=df_mscount_mapstacked.unstack("Map").shape
df_stringency_mapfracstacked = df_mscount_mapstacked.copy()
else:
df_stringency_mapfracstacked = df_index.copy()
# a series (not a dataframe) is generated; rows with at least e.g. 4 consecutive non-NaN values are retained
df_stringency_mapfracstacked.sort_index(level="Fraction", axis=1, key=natsort_index_keys, inplace=True)
df_stringency_mapfracstacked = df_stringency_mapfracstacked.loc[
df_stringency_mapfracstacked[("LFQ intensity")]\
.apply(lambda x: np.isfinite(x), axis=0)\
.apply(lambda x: sum(x) >= self.consecutiveLFQi and any(x.rolling(window=self.consecutiveLFQi).sum() >= self.consecutiveLFQi), axis=1)]
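            # Illustration with consecutiveLFQi=4: a profile [x, NaN, x, x, x, x]
            # passes (four finite values in a row), while [x, NaN, x, x, NaN, x]
            # does not, even though both contain four finite values in total.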
shape_dict["Shape after consecutive value filtering"]=df_stringency_mapfracstacked.unstack("Map").shape
df_stringency_mapfracstacked = df_stringency_mapfracstacked.copy().stack("Fraction")
#Annotation with marker genes
df_organellarMarkerSet = self.df_organellarMarkerSet
df_stringency_mapfracstacked.reset_index(inplace=True)
df_stringency_mapfracstacked = df_stringency_mapfracstacked.merge(df_organellarMarkerSet, how="left", on="Gene names")
df_stringency_mapfracstacked.set_index([c for c in df_stringency_mapfracstacked.columns
if c!="MS/MS count" and c!="LFQ intensity"], inplace=True)
df_stringency_mapfracstacked.rename(index={np.nan : "undefined"}, level="Compartment", inplace=True)
return df_stringency_mapfracstacked
def normalization_01_lfq(df_stringency_mapfracstacked):
"""
The multiindex dataframe, that was subjected to stringency filtering, is 0-1 normalized ("LFQ intensity").
Args:
df_stringency_mapfracstacked: dataframe, in which "Map" and "Fraction" is stacked, "LFQ intensity" and "MS/MS count" define a
single-level column index
self:
fractions: list of fractions e.g. ["01K", "03K", ...]
Returns:
df_01_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "LFQ intensity" is 0-1 normalized and renamed to
"normalized profile"; the columns "normalized profile" and "MS/MS count" are stored as single level indices; plotting is possible now
"""
df_01norm_mapstacked = df_stringency_mapfracstacked["LFQ intensity"].unstack("Fraction")
# 0-1 normalization of LFQ intensity
df_01norm_unstacked = df_01norm_mapstacked.div(df_01norm_mapstacked.sum(axis=1), axis=0)
df_rest = df_stringency_mapfracstacked.drop("LFQ intensity", axis=1)
df_01_stacked = df_rest.join(pd.DataFrame(df_01norm_unstacked.stack(
"Fraction"),columns=["LFQ intensity"]))
# rename columns: "LFQ intensity" into "normalized profile"
df_01_stacked.columns = [col if col!="LFQ intensity" else "normalized profile" for col in
df_01_stacked.columns]
#imputation
df_01_stacked = df_01_stacked.unstack("Fraction").replace(np.NaN, 0).stack("Fraction")
df_01_stacked = df_01_stacked.sort_index()
return df_01_stacked
def logarithmization_lfq(df_stringency_mapfracstacked):
"""The multiindex dataframe, that was subjected to stringency filtering, is logarithmized ("LFQ intensity").
Args:
df_stringency_mapfracstacked: dataframe, in which "Map" and "Fraction" is stacked; "LFQ intensity" and "MS/MS count" define a
single-level column index
Returns:
df_log_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "log profile" originates from logarithmized
"LFQ intensity"; the columns "log profile" and "MS/MS count" are stored as single level indices; PCA is possible now
"""
df_lognorm_ratio_stacked = df_stringency_mapfracstacked["LFQ intensity"].transform(np.log2)
df_rest = df_stringency_mapfracstacked.drop("LFQ intensity", axis=1)
df_log_stacked = df_rest.join(pd.DataFrame(df_lognorm_ratio_stacked, columns=["LFQ intensity"]))
# "LFQ intensity" will be renamed to "log profile"
df_log_stacked.columns = [col if col!="LFQ intensity" else "log profile" for col in df_log_stacked.columns]
return df_log_stacked
def split_ids_uniprot(el):
"""
This finds the primary canonical protein ID in the protein group. If no canonical ID is present it selects the first isoform ID.
"""
p1 = el.split(";")[0]
if "-" not in p1:
return p1
else:
p = p1.split("-")[0]
if p in el.split(";"):
return p
else:
return p1
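        # Illustrative behaviour (example IDs):
        #   "P63151-2;P63151"   -> "P63151"    (canonical ID present in the group)
        #   "Q8N4C6-2;Q8N4C6-3" -> "Q8N4C6-2"  (no canonical ID, keep first isoform)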
if self.acquisition == "SILAC - MQ":
# Index data
df_index = indexingdf()
map_names = df_index.columns.get_level_values("Map").unique()
self.map_names = map_names
# Run stringency filtering and normalization
df_stringency_mapfracstacked = stringency_silac(df_index)
self.df_stringencyFiltered = df_stringency_mapfracstacked
self.df_01_stacked = normalization_01_silac(df_stringency_mapfracstacked)
self.df_log_stacked = logarithmization_silac(df_stringency_mapfracstacked)
# format and reduce 0-1 normalized data for comparison with other experiments
df_01_comparison = self.df_01_stacked.copy()
comp_ids = pd.Series([split_ids_uniprot(el) for el in df_01_comparison.index.get_level_values("Protein IDs")], name="Protein IDs")
df_01_comparison.index = df_01_comparison.index.droplevel("Protein IDs")
df_01_comparison.set_index(comp_ids, append=True, inplace=True)
df_01_comparison.drop(["Ratio H/L count", "Ratio H/L variability [%]"], inplace=True, axis=1)
df_01_comparison = df_01_comparison.unstack(["Map", "Fraction"])
df_01_comparison.columns = ["?".join(el) for el in df_01_comparison.columns.values]
df_01_comparison = df_01_comparison.copy().reset_index().drop(["C-Score", "Q-value", "Score", "Majority protein IDs", "Protein names", "id"], axis=1, errors="ignore")
# populate analysis summary dictionary with (meta)data
unique_proteins = [split_ids_uniprot(i) for i in set(self.df_01_stacked.index.get_level_values("Protein IDs"))]
unique_proteins.sort()
self.analysis_summary_dict["0/1 normalized data"] = df_01_comparison.to_json()
self.analysis_summary_dict["Unique Proteins"] = unique_proteins
self.analysis_summary_dict["changes in shape after filtering"] = shape_dict.copy()
analysis_parameters = {"acquisition" : self.acquisition,
"filename" : self.filename,
"comment" : self.comment,
"Ratio H/L count" : self.RatioHLcount,
"Ratio variability" : self.RatioVariability,
"organism" : self.organism,
}
self.analysis_summary_dict["Analysis parameters"] = analysis_parameters.copy()
# TODO this line needs to be removed.
self.analysed_datasets_dict[self.expname] = self.analysis_summary_dict.copy()
elif self.acquisition == "LFQ5 - MQ" or self.acquisition == "LFQ6 - MQ" or self.acquisition == "LFQ5 - Spectronaut" or self.acquisition == "LFQ6 - Spectronaut":
#if not summed_MS_counts:
# summed_MS_counts = self.summed_MS_counts
#if not consecutiveLFQi:
# consecutiveLFQi = self.consecutiveLFQi
if self.acquisition == "LFQ5 - MQ" or self.acquisition == "LFQ6 - MQ":
df_index = indexingdf()
elif self.acquisition == "LFQ5 - Spectronaut" or self.acquisition == "LFQ6 - Spectronaut":
df_index = spectronaut_LFQ_indexingdf()
map_names = df_index.columns.get_level_values("Map").unique()
self.map_names = map_names
df_stringency_mapfracstacked = stringency_lfq(df_index)
self.df_stringencyFiltered = df_stringency_mapfracstacked
self.df_log_stacked = logarithmization_lfq(df_stringency_mapfracstacked)
self.df_01_stacked = normalization_01_lfq(df_stringency_mapfracstacked)
df_01_comparison = self.df_01_stacked.copy()
comp_ids = pd.Series([split_ids_uniprot(el) for el in df_01_comparison.index.get_level_values("Protein IDs")], name="Protein IDs")
df_01_comparison.index = df_01_comparison.index.droplevel("Protein IDs")
df_01_comparison.set_index(comp_ids, append=True, inplace=True)
df_01_comparison.drop("MS/MS count", inplace=True, axis=1, errors="ignore")
df_01_comparison = df_01_comparison.unstack(["Map", "Fraction"])
df_01_comparison.columns = ["?".join(el) for el in df_01_comparison.columns.values]
df_01_comparison = df_01_comparison.copy().reset_index().drop(["C-Score", "Q-value", "Score", "Majority protein IDs", "Protein names", "id"], axis=1, errors="ignore")
self.analysis_summary_dict["0/1 normalized data"] = df_01_comparison.to_json()#double_precision=4) #.reset_index()
unique_proteins = [split_ids_uniprot(i) for i in set(self.df_01_stacked.index.get_level_values("Protein IDs"))]
unique_proteins.sort()
self.analysis_summary_dict["Unique Proteins"] = unique_proteins
self.analysis_summary_dict["changes in shape after filtering"] = shape_dict.copy()
analysis_parameters = {"acquisition" : self.acquisition,
"filename" : self.filename,
"comment" : self.comment,
"consecutive data points" : self.consecutiveLFQi,
"summed MS/MS counts" : self.summed_MSMS_counts,
"organism" : self.organism,
}
self.analysis_summary_dict["Analysis parameters"] = analysis_parameters.copy()
self.analysed_datasets_dict[self.expname] = self.analysis_summary_dict.copy()
#return self.df_01_stacked
elif self.acquisition == "Custom":
df_index = custom_indexing_and_normalization()
map_names = df_index.columns.get_level_values("Map").unique()
self.map_names = map_names
df_01_stacked = df_index.stack(["Map", "Fraction"])
df_01_stacked = df_01_stacked.reset_index().merge(self.df_organellarMarkerSet, how="left", on="Gene names")
df_01_stacked.set_index([c for c in df_01_stacked.columns if c not in ["normalized profile"]], inplace=True)
df_01_stacked.rename(index={np.nan:"undefined"}, level="Compartment", inplace=True)
self.df_01_stacked = df_01_stacked
df_01_comparison = self.df_01_stacked.copy()
comp_ids = pd.Series([split_ids_uniprot(el) for el in df_01_comparison.index.get_level_values("Protein IDs")], name="Protein IDs")
df_01_comparison.index = df_01_comparison.index.droplevel("Protein IDs")
df_01_comparison.set_index(comp_ids, append=True, inplace=True)
df_01_comparison.drop("MS/MS count", inplace=True, axis=1, errors="ignore")
df_01_comparison = df_01_comparison.unstack(["Map", "Fraction"])
df_01_comparison.columns = ["?".join(el) for el in df_01_comparison.columns.values]
df_01_comparison = df_01_comparison.copy().reset_index().drop(["C-Score", "Q-value", "Score", "Majority protein IDs", "Protein names", "id"], axis=1, errors="ignore")
self.analysis_summary_dict["0/1 normalized data"] = df_01_comparison.to_json()#double_precision=4) #.reset_index()
unique_proteins = [split_ids_uniprot(i) for i in set(self.df_01_stacked.index.get_level_values("Protein IDs"))]
unique_proteins.sort()
self.analysis_summary_dict["Unique Proteins"] = unique_proteins
self.analysis_summary_dict["changes in shape after filtering"] = shape_dict.copy()
analysis_parameters = {"acquisition" : self.acquisition,
"filename" : self.filename,
"comment" : self.comment,
"organism" : self.organism,
}
self.analysis_summary_dict["Analysis parameters"] = analysis_parameters.copy()
self.analysed_datasets_dict[self.expname] = self.analysis_summary_dict.copy()
else:
return "I do not know this"
def plot_log_data(self):
"""
Args:
self.df_log_stacked
Returns:
log_histogram: Histogram of log transformed data
"""
log_histogram = px.histogram(self.df_log_stacked.reset_index().sort_values(["Map", "Fraction"], key=natsort_list_keys),
x="log profile",
facet_col="Fraction",
facet_row="Map",
template="simple_white",
labels={"log profile": "log tranformed data ({})".format("LFQ intenisty" if self.acquisition != "SILAC - MQ" else "Ratio H/L")}
)
log_histogram.for_each_xaxis(lambda axis: axis.update(title={"text":""}))
log_histogram.for_each_yaxis(lambda axis: axis.update(title={"text":""}))
log_histogram.add_annotation(x=0.5, y=0, yshift=-50, xref="paper",showarrow=False, yref="paper",
text="log2(LFQ intensity)")
log_histogram.add_annotation(x=0, y=0.5, textangle=270, xref="paper",showarrow=False, yref="paper", xshift=-50,
text="count")
log_histogram.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
return log_histogram
def quantity_profiles_proteinGroups(self):
"""
Number of profiles, protein groups per experiment, and the data completeness of profiles (total quantity, intersection) are calculated.
Args:
self:
acquisition: string, "LFQ6 - Spectronaut", "LFQ5 - Spectronaut", "LFQ5 - MQ", "LFQ6 - MQ", "SILAC - MQ"
df_index: multiindex dataframe, which contains 3 level labels: MAP, Fraction, Typ
df_01_stacked: df; 0-1 normalized data with "normalized profile" as column name
Returns:
self:
df_quantity_pr_pg: df; no index, columns: "filtering", "type", "npg", "npr", "npr_dc"; containing the following information:
npg_t: protein groups per experiment total quantity
npgf_t = groups with valid profiles per experiment total quantity
npr_t: profiles with any valid values
nprf_t = total number of valid profiles
npg_i: protein groups per experiment intersection
npgf_i = groups with valid profiles per experiment intersection
npr_i: profiles with any valid values in the intersection
nprf_i = total number of valid profiles in the intersection
npr_t_dc: profiles, % values != nan
nprf_t_dc = profiles, total, filtered, % values != nan
npr_i_dc: profiles, intersection, % values != nan
nprf_i_dc = profiles, intersection, filtered, % values != nan
df_npg | df_npgf: index: maps e.g. "Map1", "Map2",..., columns: fractions e.g. "03K", "06K", ...
npg_f = protein groups, per fraction
or npgf_f = protein groups, filtered, per fraction
df_npg_dc | df_npgf_dc: index: maps e.g. "Map1", "Map2",..., columns: fractions e.g. "03K", "06K", ...
npg_f_dc = protein groups, per fraction, % values != nan
or npgf_f_dc = protein groups, filtered, per fraction, % values != nan
"""
if self.acquisition == "SILAC - MQ":
df_index = self.df_index["Ratio H/L"]
df_01_stacked = self.df_01_stacked["normalized profile"]
elif self.acquisition.startswith("LFQ"):
df_index = self.df_index["LFQ intensity"]
df_01_stacked = self.df_01_stacked["normalized profile"].replace(0, np.nan)
elif self.acquisition == "Custom":
df_index = self.df_index["normalized profile"]
df_01_stacked = self.df_01_stacked["normalized profile"].replace(0, np.nan)
#unfiltered
npg_t = df_index.shape[0]
df_index_MapStacked = df_index.stack("Map")
npr_t = df_index_MapStacked.shape[0]/len(self.map_names)
npr_t_dc = 1-df_index_MapStacked.isna().sum().sum()/np.prod(df_index_MapStacked.shape)
#filtered
npgf_t = df_01_stacked.unstack(["Map", "Fraction"]).shape[0]
df_01_MapStacked = df_01_stacked.unstack("Fraction")
nprf_t = df_01_MapStacked.shape[0]/len(self.map_names)
nprf_t_dc = 1-df_01_MapStacked.isna().sum().sum()/np.prod(df_01_MapStacked.shape)
#unfiltered intersection
try:
df_index_intersection = df_index_MapStacked.groupby(level="Sequence").filter(lambda x : len(x)==len(self.map_names))
except:
df_index_intersection = df_index_MapStacked.groupby(level="Protein IDs").filter(lambda x : len(x)==len(self.map_names))
npr_i = df_index_intersection.shape[0]/len(self.map_names)
npr_i_dc = 1-df_index_intersection.isna().sum().sum()/np.prod(df_index_intersection.shape)
npg_i = df_index_intersection.unstack("Map").shape[0]
#filtered intersection
try:
df_01_intersection = df_01_MapStacked.groupby(level = "Sequence").filter(lambda x : len(x)==len(self.map_names))
except:
df_01_intersection = df_01_MapStacked.groupby(level = "Protein IDs").filter(lambda x : len(x)==len(self.map_names))
nprf_i = df_01_intersection.shape[0]/len(self.map_names)
nprf_i_dc = 1-df_01_intersection.isna().sum().sum()/np.prod(df_01_intersection.shape)
npgf_i = df_01_intersection.unstack("Map").shape[0]
# summarize in dataframe and save to attribute
df_quantity_pr_pg = pd.DataFrame(
{
"filtering": pd.Series(["before filtering", "before filtering", "after filtering", "after filtering"], dtype=np.dtype("O")),
"type": pd.Series(["total", "intersection", "total", "intersection"], dtype=np.dtype("O")),
"number of protein groups": pd.Series([npg_t, npg_i, npgf_t, npgf_i], dtype=np.dtype("float")),
"number of profiles": pd.Series([npr_t, npr_i, nprf_t, nprf_i], dtype=np.dtype("float")),
"data completeness of profiles": pd.Series([npr_t_dc, npr_i_dc, nprf_t_dc, nprf_i_dc], dtype=np.dtype("float"))})
self.df_quantity_pr_pg = df_quantity_pr_pg.reset_index()
self.analysis_summary_dict["quantity: profiles/protein groups"] = self.df_quantity_pr_pg.to_json()
#additional depth assessment per fraction
dict_npgf = {}
dict_npg = {}
list_npg_dc = []
list_npgf_dc = []
for df_intersection in [df_index_intersection, df_01_intersection]:
for fraction in self.fractions:
df_intersection_frac = df_intersection[fraction]
npgF_f_dc = 1-df_intersection_frac.isna().sum()/len(df_intersection_frac)
npgF_f = df_intersection_frac.unstack("Map").isnull().sum(axis=1).value_counts()
if fraction not in dict_npg.keys():
dict_npg[fraction] = npgF_f
list_npg_dc.append(npgF_f_dc)
else:
dict_npgf[fraction] = npgF_f
list_npgf_dc.append(npgF_f_dc)
df_npg = pd.DataFrame(dict_npg)
df_npg.index.name = "Protein Groups present in:"
df_npg.rename_axis("Fraction", axis=1, inplace=True)
df_npg = df_npg.stack("Fraction").reset_index()
df_npg = df_npg.rename({0: "Protein Groups"}, axis=1)
df_npg.sort_values(["Fraction", "Protein Groups present in:"], inplace=True, key=natsort_list_keys)
df_npgf = pd.DataFrame(dict_npgf)
df_npgf.index.name = "Protein Groups present in:"
df_npgf.rename_axis("Fraction", axis=1, inplace=True)
df_npgf = df_npgf.stack("Fraction").reset_index()
df_npgf = df_npgf.rename({0: "Protein Groups"}, axis=1)
df_npgf.sort_values(["Fraction", "Protein Groups present in:"], inplace=True, key=natsort_list_keys)
max_df_npg = df_npg["Protein Groups present in:"].max()
min_df_npg = df_npg["Protein Groups present in:"].min()
rename_numOFnans = {}
for x, y in zip(range(max_df_npg,min_df_npg-1, -1), range(max_df_npg+1)):
if y == 1:
rename_numOFnans[x] = "{} Map".format(y)
elif y == 0:
rename_numOFnans[x] = "PG not identified".format(y)
else:
rename_numOFnans[x] = "{} Maps".format(y)
for keys in rename_numOFnans.keys():
df_npg.loc[df_npg["Protein Groups present in:"] ==keys, "Protein Groups present in:"] = rename_numOFnans[keys]
df_npgf.loc[df_npgf["Protein Groups present in:"] ==keys, "Protein Groups present in:"] = rename_numOFnans[keys]
# summarize in dataframe and save to attributes
self.df_npg_dc = pd.DataFrame(
{
"Fraction" : pd.Series(self.fractions),
"Data completeness before filtering": pd.Series(list_npg_dc),
"Data completeness after filtering": pd.Series(list_npgf_dc),
})
self.df_npg = df_npg
self.df_npgf = df_npgf
def plot_quantity_profiles_proteinGroups(self):
"""
Args:
self:
df_quantity_pr_pg: df; no index, columns: "filtering", "type", "npg", "npr", "npr_dc"; further information: see above
Returns:
"""
df_quantity_pr_pg = self.df_quantity_pr_pg
layout = go.Layout(barmode="overlay",
xaxis_tickangle=90,
autosize=False,
width=300,
height=500,
xaxis=go.layout.XAxis(linecolor="black",
linewidth=1,
#title="Map",
mirror=True),
yaxis=go.layout.YAxis(linecolor="black",
linewidth=1,
mirror=True),
template="simple_white")
fig_npg = go.Figure()
for t in df_quantity_pr_pg["type"].unique():
plot_df = df_quantity_pr_pg[df_quantity_pr_pg["type"] == t]
fig_npg.add_trace(go.Bar(
x=plot_df["filtering"],
y=plot_df["number of protein groups"],
name=t))
fig_npg.update_layout(layout, title="Number of Protein Groups", yaxis=go.layout.YAxis(title="Protein Groups"))
fig_npr = go.Figure()
for t in df_quantity_pr_pg["type"].unique():
plot_df = df_quantity_pr_pg[df_quantity_pr_pg["type"] == t]
fig_npr.add_trace(go.Bar(
x=plot_df["filtering"],
y=plot_df["number of profiles"],
name=t))
fig_npr.update_layout(layout, title="Number of Profiles")
df_quantity_pr_pg = df_quantity_pr_pg.sort_values("filtering")
fig_npr_dc = go.Figure()
for t in df_quantity_pr_pg["filtering"].unique():
plot_df = df_quantity_pr_pg[df_quantity_pr_pg["filtering"] == t]
fig_npr_dc.add_trace(go.Bar(
x=plot_df["type"],
y=plot_df["data completeness of profiles"],
name=t))
fig_npr_dc.update_layout(layout, title="Coverage", yaxis=go.layout.YAxis(title="Data completness"))
#fig_npr_dc.update_xaxes(tickangle=30)
fig_npg_F = px.bar(self.df_npg,
x="Fraction",
y="Protein Groups",
color="Protein Groups present in:",
template="simple_white",
title = "Protein groups per fraction - before filtering",
width=500)
fig_npgf_F = px.bar(self.df_npgf,
x="Fraction",
y="Protein Groups",
color="Protein Groups present in:",
template="simple_white",
title = "Protein groups per fraction - after filtering",
width=500)
fig_npg_F_dc = go.Figure()
for data_type in ["Data completeness after filtering", "Data completeness before filtering"]:
fig_npg_F_dc.add_trace(go.Bar(
x=self.df_npg_dc["Fraction"],
y=self.df_npg_dc[data_type],
name=data_type))
fig_npg_F_dc.update_layout(layout, barmode="overlay", title="Data completeness per fraction", yaxis=go.layout.YAxis(title=""), height=450, width=600)
return fig_npg, fig_npr, fig_npr_dc, fig_npg_F, fig_npgf_F, fig_npg_F_dc
def perform_pca(self):
"""
PCA will be performed, using logarithmized data.
Args:
self:
markerproteins: dictionary, key: cluster name, value: list of gene names (e.g. {"Proteasome" : ["PSMA1", "PSMA2",...], "V-type proton ATPase" : [...], ...})
df_log_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "log profile" originates from logarithmized "LFQ intensity"
and "Ratio H/L", respectively; additionally the columns "MS/MS count" and "Ratio H/L count|Ratio H/L variability [%]" are stored
as single level indices
df_01_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "LFQ intensity" is 0-1 normalized and renamed to "normalized
profile"; the columns "normalized profile"" and "MS/MS count" are stored as single level indices; plotting is possible now
Returns:
self:
df_pca: df, PCA was performed, while keeping the information of the Maps
columns: "PC1", "PC2", "PC3"
index: "Protein IDs", "Majority protein IDs", "Protein names", "Gene names", "Q-value", "Score", "id", "Map" "Compartment"
df_pca_combined: df, PCA was performed across the Maps
columns: "PC1", "PC2", "PC3"
index: "Protein IDs", "Majority protein IDs", "Protein names", "Gene names", "Q-value", "Score", "id", "Compartment"
df_pca_all_marker_cluster_maps: PCA processed dataframe, containing the columns "PC1", "PC2", "PC3", filtered for marker genes, that are consistent
throughout all maps / coverage filtering.
"""
markerproteins = self.markerproteins
if self.acquisition == "SILAC - MQ":
df_01orlog_fracunstacked = self.df_log_stacked["log profile"].unstack("Fraction").dropna()
df_01orlog_MapFracUnstacked = self.df_log_stacked["log profile"].unstack(["Fraction", "Map"]).dropna()
elif self.acquisition.startswith("LFQ") or self.acquisition == "Custom":
df_01orlog_fracunstacked = self.df_01_stacked["normalized profile"].unstack("Fraction").dropna()
df_01orlog_MapFracUnstacked = self.df_01_stacked["normalized profile"].unstack(["Fraction", "Map"]).dropna()
pca = PCA(n_components=3)
# df_pca: PCA processed dataframe, containing the columns "PC1", "PC2", "PC3"
df_pca = pd.DataFrame(pca.fit_transform(df_01orlog_fracunstacked))
df_pca.columns = ["PC1", "PC2", "PC3"]
df_pca.index = df_01orlog_fracunstacked.index
self.df_pca = df_pca.sort_index(level=["Gene names", "Compartment"])
# df_pca: PCA processed dataframe, containing the columns "PC1", "PC2", "PC3"
df_pca_combined = pd.DataFrame(pca.fit_transform(df_01orlog_MapFracUnstacked))
df_pca_combined.columns = ["PC1", "PC2", "PC3"]
df_pca_combined.index = df_01orlog_MapFracUnstacked.index
self.df_pca_combined = df_pca_combined.sort_index(level=["Gene names", "Compartment"])
map_names = self.map_names
df_pca_all_marker_cluster_maps = pd.DataFrame()
df_pca_filtered = df_pca.unstack("Map").dropna()
for clusters in markerproteins:
for marker in markerproteins[clusters]:
try:
plot_try_pca = df_pca_filtered.xs(marker, level="Gene names", drop_level=False)
except KeyError:
continue
df_pca_all_marker_cluster_maps = df_pca_all_marker_cluster_maps.append(
plot_try_pca)
if len(df_pca_all_marker_cluster_maps) == 0:
df_pca_all_marker_cluster_maps = df_pca_filtered.stack("Map")
else:
df_pca_all_marker_cluster_maps = df_pca_all_marker_cluster_maps.stack("Map")
self.df_pca_all_marker_cluster_maps = df_pca_all_marker_cluster_maps.sort_index(level=["Gene names", "Compartment"])
def plot_global_pca(self, map_of_interest="Map1", cluster_of_interest="Proteasome", x_PCA="PC1", y_PCA="PC3", collapse_maps=False):
""""
PCA plot will be generated
Args:
self:
df_organellarMarkerSet: df, columns: "Gene names", "Compartment", no index
df_pca: PCA processed dataframe, containing the columns "PC1", "PC2", "PC3",
index: "Gene names", "Protein IDs", "C-Score", "Q-value", "Map", "Compartment",
Returns:
pca_figure: global PCA plot
"""
if collapse_maps == False:
df_global_pca = self.df_pca.unstack("Map").swaplevel(0,1, axis=1)[map_of_interest].reset_index()
else:
df_global_pca = self.df_pca_combined.reset_index()
for i in self.markerproteins[cluster_of_interest]:
df_global_pca.loc[df_global_pca["Gene names"] == i, "Compartment"] = "Selection"
compartments = self.df_organellarMarkerSet["Compartment"].unique()
compartment_color = dict(zip(compartments, self.css_color))
compartment_color["Selection"] = "black"
compartment_color["undefined"] = "lightgrey"
fig_global_pca = px.scatter(data_frame=df_global_pca,
x=x_PCA,
y=y_PCA,
color="Compartment",
color_discrete_map=compartment_color,
title= "Protein subcellular localization by PCA for {}".format(map_of_interest)
if collapse_maps == False else "Protein subcellular localization by PCA of combined maps",
hover_data=["Protein IDs", "Gene names", "Compartment"],
template="simple_white",
opacity=0.9
)
return fig_global_pca
def plot_cluster_pca(self, cluster_of_interest="Proteasome"):
"""
PCA plot will be generated
Args:
self:
markerproteins: dictionary, key: cluster name, value: gene names (e.g. {"Proteasome" : ["PSMA1", "PSMA2",...], ...}
df_pca_all_marker_cluster_maps: PCA processed dataframe, containing the columns "PC1", "PC2", "PC3", filtered for marker genes, that are
consistent throughout all maps / coverage filtering.
Returns:
pca_figure: PCA plot, for one protein cluster all maps are plotted
"""
df_pca_all_marker_cluster_maps = self.df_pca_all_marker_cluster_maps
map_names = self.map_names
markerproteins = self.markerproteins
try:
for maps in map_names:
df_setofproteins_PCA = pd.DataFrame()
for marker in markerproteins[cluster_of_interest]:
try:
plot_try_pca = df_pca_all_marker_cluster_maps.xs((marker, maps), level=["Gene names", "Map"],
drop_level=False)
except KeyError:
continue
df_setofproteins_PCA = df_setofproteins_PCA.append(plot_try_pca)
df_setofproteins_PCA.reset_index(inplace=True)
if maps == map_names[0]:
pca_figure = go.Figure(
data=[go.Scatter3d(x=df_setofproteins_PCA.PC1,
y=df_setofproteins_PCA.PC2,
z=df_setofproteins_PCA.PC3,
hovertext=df_setofproteins_PCA["Gene names"],
mode="markers",
name=maps
)])
else:
pca_figure.add_trace(go.Scatter3d(x=df_setofproteins_PCA.PC1,
y=df_setofproteins_PCA.PC2,
z=df_setofproteins_PCA.PC3,
hovertext=df_setofproteins_PCA["Gene names"],
mode="markers",
name=maps
))
pca_figure.update_layout(autosize=False, width=500, height=500,
title="PCA plot for <br>the protein cluster: {}".format(cluster_of_interest),
template="simple_white")
return pca_figure
except:
return "This protein cluster was not quantified"
def calc_biological_precision(self):
"""
This function calculates the biological precision of all quantified protein clusters. It provides access to the data slice for all marker proteins, the distance profiles and the aggregated distances. It repeatedly applies the methods get_marker_proteins_unfiltered and calc_cluster_distances.
TODO: integrate optional arguments for calc_cluster_distances: complex_profile, distance_measure.
TODO: replace compatibility attributes with function return values and adjust attribute usage in downstream plotting functions.
Args:
self attributes:
markerproteins: dict, contains marker protein assignments
df_01_stacked: df, contains 0-1 normalized data, required for execution of get_marker_proteins_unfiltered
Returns:
df_alldistances_individual_mapfracunstacked: df, distance profiles, fully unstacked
df_alldistances_aggregated_mapunstacked: df, profile distances (manhattan distance by default), fully unstacked
df_allclusters_01_unfiltered_mapfracunstacked: df, collected marker protein data
self attributes:
df_distance_noindex: compatibility version of df_alldistances_aggregated_mapunstacked
df_allclusters_01_unfiltered_mapfracunstacked
df_allclusters_clusterdist_fracunstacked_unfiltered: compatibility version of df_allclusters_01_unfiltered_mapfracunstacked (only used by quantification_overview)
df_allclusters_clusterdist_fracunstacked: compatibility version of df_alldistances_individual_mapfracunstacked
genenames_sortedout_list = list of gene names with incomplete coverage
analysis_summary_dict entries:
"Manhattan distances" = df_distance_noindex
"Distances to the median profile": df_allclusters_clusterdist_fracunstacked, sorted and melted
"""
df_alldistances_individual_mapfracunstacked = pd.DataFrame()
df_alldistances_aggregated_mapunstacked = pd.DataFrame()
df_allclusters_01_unfiltered_mapfracunstacked = pd.DataFrame()
for cluster in self.markerproteins.keys():
# collect data irrespective of coverage
df_cluster_unfiltered = self.get_marker_proteins_unfiltered(cluster)
df_allclusters_01_unfiltered_mapfracunstacked = df_allclusters_01_unfiltered_mapfracunstacked.append(df_cluster_unfiltered)
# filter for coverage and calculate distances
df_cluster = df_cluster_unfiltered.dropna()
if len(df_cluster) == 0:
continue
df_distances_aggregated, df_distances_individual = self.calc_cluster_distances(df_cluster)
df_alldistances_individual_mapfracunstacked = df_alldistances_individual_mapfracunstacked.append(df_distances_individual)
df_alldistances_aggregated_mapunstacked = df_alldistances_aggregated_mapunstacked.append(df_distances_aggregated)
if len(df_alldistances_individual_mapfracunstacked) == 0:
self.df_distance_noindex = pd.DataFrame(columns = ["Gene names", "Map", "Cluster", "distance"])
self.df_allclusters_01_unfiltered_mapfracunstacked = pd.DataFrame(columns = ["Gene names", "Map", "Cluster", "distance"])
self.df_allclusters_clusterdist_fracunstacked_unfiltered = pd.DataFrame(columns = ["Fraction"])
self.df_allclusters_clusterdist_fracunstacked = pd.DataFrame(columns = ["Fraction"])
self.genenames_sortedout_list = "No clusters found"
return pd.DataFrame(), pd.DataFrame(), pd.DataFrame()
else:
df_alldistances_aggregated_mapunstacked.columns.name = "Map"
## Get compatibility with plotting functions, by mimicking assignment of old functions:
# old output of distance_calculation
self.df_distance_noindex = df_alldistances_aggregated_mapunstacked.stack("Map").reset_index().rename({0: "distance"}, axis=1)
self.analysis_summary_dict["Manhattan distances"] = self.df_distance_noindex.to_json()
# old output of multiple_iterations
# self.df_allclusters_clusterdist_fracunstacked_unfiltered --> this won't exist anymore, replaced by:
self.df_allclusters_01_unfiltered_mapfracunstacked = df_allclusters_01_unfiltered_mapfracunstacked
# kept for testing of quantification table:
self.df_allclusters_clusterdist_fracunstacked_unfiltered = df_allclusters_01_unfiltered_mapfracunstacked.stack("Map")
# same as before, but now already abs
self.df_allclusters_clusterdist_fracunstacked = df_alldistances_individual_mapfracunstacked.stack("Map")
df_dist_to_median = self.df_allclusters_clusterdist_fracunstacked.stack("Fraction")
df_dist_to_median.name = "distance"
df_dist_to_median = df_dist_to_median.reindex(index=natsort.natsorted(df_dist_to_median.index))
self.analysis_summary_dict["Distances to the median profile"] = df_dist_to_median.reset_index().to_json()
self.genenames_sortedout_list = [el for el in df_allclusters_01_unfiltered_mapfracunstacked.index.get_level_values("Gene names")
if el not in df_alldistances_individual_mapfracunstacked.index.get_level_values("Gene names")]
return df_alldistances_individual_mapfracunstacked, df_alldistances_aggregated_mapunstacked, df_allclusters_01_unfiltered_mapfracunstacked
def get_marker_proteins_unfiltered(self, cluster):
"""
This function retrieves the 0-1 normalized data for any given protein cluster, unfiltered for coverage.
Args:
cluster: str, cluster name, should be one of self.markerproteins.keys()
self attributes:
df_01_stacked: df, contains the fully stacked 0-1 normalized data
markerproteins: dict, contains marker protein assignments
Returns:
df_cluster_unfiltered: df, unfiltered data for the selected cluster, maps and fractions are unstacked.
self attributes:
None
"""
df_in = self.df_01_stacked["normalized profile"].unstack("Fraction")
markers = self.markerproteins[cluster]
# retrieve marker proteins
df_cluster_unfiltered = pd.DataFrame()
for marker in markers:
try:
df_p = df_in.xs(marker, level="Gene names", axis=0, drop_level=False)
except:
continue
df_cluster_unfiltered = df_cluster_unfiltered.append(df_p)
if len(df_cluster_unfiltered) == 0:
return df_cluster_unfiltered
# Unstack maps and add Cluster to index
df_cluster_unfiltered = df_cluster_unfiltered.unstack("Map")
df_cluster_unfiltered.set_index(pd.Index(np.repeat(cluster, len(df_cluster_unfiltered)), name="Cluster"), append=True, inplace=True)
return df_cluster_unfiltered
def calc_cluster_distances(self, df_cluster, complex_profile=np.median, distance_measure="manhattan"):
"""
Calculates the absolute differences in each fraction and the profile distances relative to the center of a cluster.
Per default this is the manhattan distance to the median profile.
Args:
df_cluster: df, 0-1 normalized profiles of cluster members, should already be filtered for full coverage and be in full wide format.
complex_profile: fun, function provided to apply for calculating the reference profile, default: np.median.
distance_measure: str, selected distance measure to calculate. Currently only 'manhattan' is supported, everything else raises a ValueError.
self attributes:
None
Returns:
df_distances_aggregated: df, proteins x maps, if stacked distance column is currently named 0 but contains manhattan distances.
df_distances_individual: df, same shape as df_cluster, but now with absolute differences to the reference.
self attributes:
None
"""
df_distances_aggregated = pd.DataFrame()
ref_profile = pd.DataFrame(df_cluster.apply(complex_profile, axis=0, result_type="expand")).T
df_distances_individual = df_cluster.apply(lambda x: np.abs(x-ref_profile.iloc[0,:]), axis=1)
# loop over maps
maps = set(df_cluster.columns.get_level_values("Map"))
for m in maps:
if distance_measure == "manhattan":
d_m = pw.manhattan_distances(df_cluster.xs(m, level="Map", axis=1), ref_profile.xs(m, level="Map", axis=1))
else:
raise ValueError(distance_measure)
d_m = pd.DataFrame(d_m, columns=[m], index=df_cluster.index)
import json
import os
import string
from tempfile import TemporaryDirectory
from unittest import TestCase
import pandas as pd
from hypothesis import (
given,
settings,
HealthCheck,
)
import hypothesis.strategies as st
from hypothesis.strategies import (
just,
integers,
)
from oasislmf.preparation.summaries import write_exposure_summary
from oasislmf.preparation.summaries import get_exposure_summary
from oasislmf.preparation.gul_inputs import get_gul_input_items
from oasislmf.utils.coverages import SUPPORTED_COVERAGE_TYPES
from oasislmf.utils.defaults import get_default_exposure_profile
from oasislmf.utils.data import get_location_df, get_ids
from oasislmf.utils.status import OASIS_KEYS_STATUS_MODELLED
from tests.data import (
keys,
min_source_exposure,
write_source_files,
write_keys_files,
)
# https://towardsdatascience.com/automating-unit-tests-in-python-with-hypothesis-d53affdc1eba
class TestSummaries(TestCase):
def assertDictAlmostEqual(self, d1, d2, msg=None, places=7):
# check if both inputs are dicts
self.assertIsInstance(d1, dict, 'First argument is not a dictionary')
self.assertIsInstance(d2, dict, 'Second argument is not a dictionary')
# check if both inputs have the same keys
self.assertEqual(d1.keys(), d2.keys())
# check each key
for key, value in d1.items():
if isinstance(value, dict):
self.assertDictAlmostEqual(d1[key], d2[key], msg=msg)
else:
self.assertAlmostEqual(d1[key], d2[key], places=places, msg=msg)
def assertSummaryIsValid(self, loc_df, gul_inputs, exp_summary, perils_expected=None):
cov_types = ['buildings', 'other', 'bi', 'contents']
lookup_status = ['success', 'fail', 'nomatch', 'fail_ap', 'fail_v', 'notatrisk']
loc_rename_cols = {
'bitiv': 'bi',
'buildingtiv': 'buildings',
'contentstiv': 'contents',
'othertiv': 'other'
}
# Check each returned peril
for peril in perils_expected:
peril_summary = exp_summary[peril]
# Check the 'All' section
supported_tivs = loc_df[['buildingtiv', 'othertiv', 'bitiv', 'contentstiv']].sum(0).rename(loc_rename_cols)
self.assertAlmostEqual(supported_tivs.sum(0), peril_summary['all']['tiv'])
for cov in cov_types:
self.assertAlmostEqual(supported_tivs[cov], peril_summary['all']['tiv_by_coverage'][cov])
# Check each lookup status
peril_expected = gul_inputs[gul_inputs.peril_id == peril]
for status in lookup_status:
peril_status = peril_expected[peril_expected.status == status]
self.assertAlmostEqual(peril_status.tiv.sum(), peril_summary[status]['tiv'])
self.assertEqual(len(peril_status.loc_id.unique()), peril_summary[status]['number_of_locations'])
for cov in cov_types:
cov_type_id = SUPPORTED_COVERAGE_TYPES[cov]['id']
cov_type_tiv = peril_status[peril_status.coverage_type_id == cov_type_id].tiv.sum()
self.assertAlmostEqual(cov_type_tiv, peril_summary[status]['tiv_by_coverage'][cov])
# Check 'noreturn' status
tiv_returned = sum([s[1]['tiv'] for s in peril_summary.items() if s[0] in lookup_status])
self.assertAlmostEqual(peril_summary['all']['tiv'] - tiv_returned, peril_summary['noreturn']['tiv'])
for cov in cov_types:
cov_tiv_returned = sum(
[s[1]['tiv_by_coverage'][cov] for s in peril_summary.items() if s[0] in lookup_status])
self.assertAlmostEqual(peril_summary['all']['tiv_by_coverage'][cov] - cov_tiv_returned, peril_summary['noreturn']['tiv_by_coverage'][cov])
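# Note on the helper above (descriptive comment): it reconciles the exposure summary with the
# GUL inputs per peril in three ways - the 'all' totals, every lookup-status bucket broken down
# by coverage type, and the residual 'noreturn' bucket defined as 'all' minus the sum of the
# status buckets.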
@given(st.data())
@settings(max_examples=20, deadline=None)
def test_single_peril__totals_correct(self, data):
# Shared Values between Loc / keys
loc_size = data.draw(integers(10, 20))
supported_cov = data.draw(st.lists(integers(1,4), unique=True, min_size=1, max_size=4))
perils = 'WTC'
# Create Mock keys_df
keys_data = list()
for i in supported_cov:
keys_data += data.draw(keys(
size=loc_size,
from_peril_ids=just(perils),
from_coverage_type_ids=just(i),
from_area_peril_ids=just(1),
from_vulnerability_ids=just(1),
from_messages=just('str')))
keys_df = pd.DataFrame.from_dict(keys_data)
# ----------------------------------------------------------------------------
# Copyright (c) 2022, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import pandas as pd
import numpy as np
from skbio.stats.distance import DistanceMatrix
from qiime2.plugin.testing import TestPluginBase
from qiime2.plugin import ValidationError
from qiime2 import Metadata
from q2_fmt._engraftment import group_timepoints
from q2_fmt._stats import wilcoxon_srt, mann_whitney_u
from q2_fmt._examples import (faithpd_timedist_factory,
faithpd_refdist_factory)
from q2_fmt._validator import (validate_all_dist_columns_present,
validate_unique_subjects_within_group)
class TestBase(TestPluginBase):
package = 'q2_fmt.tests'
def setUp(self):
super().setUp()
self.md_beta = Metadata.load(self.get_data_path(
'sample_metadata_donors.tsv'))
self.md_alpha = Metadata.load(self.get_data_path(
'sample_metadata_alpha_div.tsv'))
self.dm = DistanceMatrix.read(self.get_data_path(
'dist_matrix_donors.tsv')).to_series()
self.alpha = pd.read_csv(self.get_data_path('alpha_div.tsv'),
sep='\t', index_col=0, squeeze=True)
self.faithpd_timedist = faithpd_timedist_factory().view(pd.DataFrame)
self.faithpd_refdist = faithpd_refdist_factory().view(pd.DataFrame)
class ErrorMixins:
def test_with_time_column_input_not_in_metadata(self):
with self.assertRaisesRegex(ValueError,
'time_column.*foo.*metadata'):
group_timepoints(diversity_measure=self.div,
metadata=self.md,
time_column='foo',
reference_column='relevant_donor',
control_column='control')
def test_with_reference_column_input_not_in_metadata(self):
with self.assertRaisesRegex(ValueError,
'reference_column.*foo.*metadata'):
group_timepoints(diversity_measure=self.div,
metadata=self.md,
time_column='days_post_transplant',
reference_column='foo',
control_column='control')
def test_with_control_column_input_not_in_metadata(self):
with self.assertRaisesRegex(ValueError,
'control_column.*foo.*metadata'):
group_timepoints(diversity_measure=self.div,
metadata=self.md,
time_column='days_post_transplant',
reference_column='relevant_donor',
control_column='foo')
def test_with_non_numeric_time_column(self):
with self.assertRaisesRegex(ValueError,
'time_column.*categorical.*numeric'):
group_timepoints(diversity_measure=self.div,
metadata=self.md,
time_column='non_numeric_time_column',
reference_column='relevant_donor',
control_column='control')
class TestAlphaErrors(TestBase, ErrorMixins):
def setUp(self):
super().setUp()
self.div = self.alpha
self.md = self.md_alpha
class TestBetaErrors(TestBase, ErrorMixins):
def setUp(self):
super().setUp()
self.div = self.dm
self.md = self.md_beta
class TestGroupTimepoints(TestBase):
# Beta Diversity (Distance Matrix) Test Cases
def test_beta_dists_with_donors_and_controls(self):
exp_time_df = pd.DataFrame({
'id': ['sampleA', 'sampleB', 'sampleC', 'sampleD', 'sampleE'],
'measure': [0.45, 0.40, 0.28, 0.78, 0.66],
'group': [7.0, 7.0, 9.0, 11.0, 11.0]
})
exp_ref_df = pd.DataFrame({
'id': ['donor1..donor2', 'donor1..donor3', 'donor2..donor3',
'sampleB..sampleC', 'sampleB..sampleD', 'sampleC..sampleD'],
'measure': [0.24, 0.41, 0.74, 0.37, 0.44, 0.31],
'group': ['reference', 'reference', 'reference',
'control1', 'control1', 'control1'],
'A': ['donor1', 'donor1', 'donor2',
'sampleB', 'sampleB', 'sampleC'],
'B': ['donor2', 'donor3', 'donor3',
'sampleC', 'sampleD', 'sampleD']
})
time_df, ref_df = group_timepoints(diversity_measure=self.dm,
metadata=self.md_beta,
time_column='days_post_transplant',
reference_column='relevant_donor',
control_column='control')
pd.testing.assert_frame_equal(time_df, exp_time_df)
pd.testing.assert_frame_equal(ref_df, exp_ref_df)
def test_beta_dists_with_donors_controls_and_subjects(self):
exp_time_df = pd.DataFrame({
'id': ['sampleA', 'sampleB', 'sampleC', 'sampleD', 'sampleE'],
'measure': [0.45, 0.40, 0.28, 0.78, 0.66],
'group': [7.0, 7.0, 9.0, 11.0, 11.0],
'subject': ['subject1', 'subject2',
'subject1', 'subject1', 'subject2']
})
exp_ref_df = pd.DataFrame({
'id': ['donor1..donor2', 'donor1..donor3', 'donor2..donor3',
'sampleB..sampleC', 'sampleB..sampleD', 'sampleC..sampleD'],
'measure': [0.24, 0.41, 0.74, 0.37, 0.44, 0.31],
'group': ['reference', 'reference', 'reference',
'control1', 'control1', 'control1'],
'A': ['donor1', 'donor1', 'donor2',
'sampleB', 'sampleB', 'sampleC'],
'B': ['donor2', 'donor3', 'donor3',
'sampleC', 'sampleD', 'sampleD']
})
time_df, ref_df = group_timepoints(diversity_measure=self.dm,
metadata=self.md_beta,
time_column='days_post_transplant',
reference_column='relevant_donor',
control_column='control',
subject_column='subject')
pd.testing.assert_frame_equal(time_df, exp_time_df)
pd.testing.assert_frame_equal(ref_df, exp_ref_df)
"""
This RNN is used for predicting trends of the Google stock price.
@Editor: <NAME>
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
# Part 1 - Data Preprocessing
# Only the training set is used here, not the test set.
# Once training is done, then the test set will be introduced.
dataset_train = pd.read_csv('Google_Stock_Price_Train.csv')
# This creates a numpy array rather than a simple vector
# This grabs the open column and makes it into a numpy array
training_set = dataset_train.iloc[:,1:2].values
#This works because the stock prices will be normalized between 0 & 1
sc = MinMaxScaler(feature_range = (0,1), copy=True)
#This method fits the data (finds min & max) to apply normalization
# The transform method then applies that scaling to standardize (normalize) the values
training_set_scaled = sc.fit_transform(training_set)
# Creating a data structure with 60 time steps and 1 output
# the 60 time steps is a tested value, that has to be iterated over the model to find.
# This means that each day looks at the three previous months to predict the price
x_train = []
y_train = []
# the 60 refers to the three months, 1258 is total # of prices
for i in range(60, 1258):
# This gets the range of all of the 60 last stock prices
x_train.append(training_set_scaled[i-60:i,0])
#uses the next stock price as the output prediction
y_train.append(training_set_scaled[i,0])
# This converts the data to user arrays for tensorflow
x_train, y_train = np.array(x_train), np.array(y_train)
# Reshaping, only done to inputs to add dimensions
# Use the keras library for this
# input dimensions can refer to the same stock stats, or comparing stocks
# the shape function gets the size of the axis specified, can use more than 2 dimensions
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
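# Quick sanity check (illustrative only, toy numbers that are not part of the dataset): the same
# windowing logic on a series of 10 points with a window of 3 yields 7 samples, so the shapes
# below mirror x_train's (samples, timesteps, 1) layout.
_demo_series = np.arange(10, dtype=float)
_demo_x = np.array([_demo_series[i - 3:i] for i in range(3, 10)])        # shape (7, 3)
_demo_y = np.array([_demo_series[i] for i in range(3, 10)])              # shape (7,)
_demo_x = np.reshape(_demo_x, (_demo_x.shape[0], _demo_x.shape[1], 1))   # shape (7, 3, 1)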
# Part 2 - Building the RNN
# This is a stacked LSTM with dropout regularization
# This is called regressor due to its continuous output
# Regression is for predicting a continuous output, classification is for predicting a finite set of outputs
regressor = Sequential()
# Adding the first LSTM Layer and some dropout regularization
# Units -> The number of LSTM cells or modules in the layer
# Return Sequences -> True, because it will have several LSTM layers. False when you're on the last layer
# Input Shape -> Shape of the input containing x_train
# High dimensionality and lots of neurons in each will help the accuracy
regressor.add(LSTM(units=50, return_sequences=True, input_shape=(x_train.shape[1], 1)))
# second step of first LSTM Layer is to add dropout regularization
# Classic # is 20%, aka 20% of neurons will be ignored in forward and backward propagation
regressor.add(Dropout(rate=0.20))
# Add a second LSTM layer with dropout regularization
# because this is the second layer, input layer is not required
# 50 neurons in previous layer is assumed
regressor.add(LSTM(units=50, return_sequences=True))
regressor.add(Dropout(rate=0.20))
# Add a third LSTM layer with dropout regularization
# Same as second LSTM layer -> both are middle, hidden layers
regressor.add(LSTM(units=50, return_sequences=True))
regressor.add(Dropout(rate=0.20))
# Add a fourth LSTM Layer with dropout regularization
# 50 units stays the same because this is not the final layer.
# Output layer to follow for the one continuous output of the regression
# Return sequences should be false because no more LSTM modules present
regressor.add(LSTM(units=50))
regressor.add(Dropout(rate=0.20))
# Add a classic fully connected, output layer
# 1 output unit corresponds to the single predicted stock price
regressor.add(Dense(units=1))
# Compile the RNN
# RMSprop is recommended for RNNs, but Adam is used here
# Loss function is mean squared error
regressor.compile(optimizer='adam', loss='mean_squared_error')
# Fit the RNN
# X & Y are the input numpy arrays and output numpy arrays
# Batch_size of 32, trained for 100 epochs
regressor.fit(x=x_train, y=y_train, batch_size=32, epochs=100)
# Part 3 - Making the predictions and visualizing the results
# Avoid overfitting to the training set, otherwise the model won't generalize to unseen test data
# Getting the real stock price open of 2017
actual_results = pd.read_csv('Google_Stock_Price_Test.csv')
real_stock_price = actual_results.iloc[:, 1:2].values
# Getting the predicted stock price of 2017
# Need the 60 previous days, so concatenate the training and test data
dataset_total = pd.concat((dataset_train['Open'], actual_results['Open']), axis=0)
import os
import re
import json
import abc
import warnings
from typing import MutableMapping, List, Union
from functools import reduce
from enum import Enum
import pandas as pd
import numpy as np
from scipy import sparse
import loompy as lp
from loomxpy import __DEBUG__
from loomxpy._specifications import (
ProjectionMethod,
LoomXMetadataEmbedding,
LoomXMetadataClustering,
LoomXMetadataCluster,
LoomXMetadataClusterMarkerMetric,
)
from loomxpy._s7 import S7
from loomxpy._errors import BadDTypeException
from loomxpy._hooks import WithInitHook
from loomxpy._matrix import DataMatrix
from loomxpy.utils import df_to_named_matrix, compress_encode
def custom_formatwarning(msg, *args, **kwargs):
# ignore everything except the message
return str(msg) + "\n"
warnings.formatwarning = custom_formatwarning
##########################################
# MODES #
##########################################
class ModeType(Enum):
NONE = "_"
RNA = "rna"
class Mode(S7):
def __init__(self, mode_type: ModeType, data_matrix: DataMatrix):
"""
constructor for Mode
"""
self._mode_type = mode_type
# Data Matrix
self._data_matrix = data_matrix
# Global
self._global_attrs = GlobalAttributes(mode=self)
# Features
self._feature_attrs = FeatureAttributes(mode=self)
self._fa_annotations = FeatureAnnotationAttributes(mode=self)
self._fa_metrics = FeatureMetricAttributes(mode=self)
# Observations
self._observation_attrs = ObservationAttributes(mode=self)
self._oa_annotations = ObservationAnnotationAttributes(mode=self)
self._oa_metrics = ObservationMetricAttributes(mode=self)
self._oa_embeddings = ObservationEmbeddingAttributes(mode=self)
self._oa_clusterings = ObservationClusteringAttributes(mode=self)
@property
def X(self):
return self._data_matrix
@property
def g(self):
return self._global_attrs
@property
def f(self):
return self._feature_attrs
@property
def o(self):
return self._observation_attrs
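# Usage sketch (hypothetical variable name, layout assumption): given a constructed mode `m`,
# m.X exposes the DataMatrix while m.g, m.f and m.o expose the global, feature (row) and
# observation (column) attributes, mirroring the attribute layout of a Loom file.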
def export(
self,
filename: str,
output_format: str,
title: str = None,
genome: str = None,
compress_metadata: bool = False,
cluster_marker_metrics: List[LoomXMetadataClusterMarkerMetric] = [
{
"accessor": "avg_logFC",
"name": "Avg. logFC",
"description": f"Average log fold change from Wilcoxon test",
"threshold": 0,
"threshold_method": "lte_or_gte", # lte, lt, gte, gt, lte_or_gte, lte_and_gte
},
{
"accessor": "pval",
"name": "Adjusted P-Value",
"description": f"Adjusted P-Value from Wilcoxon test",
"threshold": 0.05,
"threshold_method": "lte", # lte, lt, gte, gt, lte_or_gte, lte_and_gte
},
],
):
"""
Export this LoomX object to Loom file
Parameters
---------
cluster_marker_metrics: dict, optional
List of dict (ClusterMarkerMetric) containing metadata of each metric available for the cluster markers.
Expects each metric to be of type float.
Return
------
None
"""
if output_format == "scope_v1":
#
_feature_names = self._data_matrix._feature_names
# Init
_row_attrs: MutableMapping = {}
_col_attrs: MutableMapping = {}
_global_attrs: MutableMapping = {
"title": os.path.splitext(os.path.basename(filename))[0]
if title is None
else title,
"MetaData": {
"annotations": [],
"metrics": [],
"embeddings": [],
"clusterings": [],
},
"Genome": genome,
}
# Add row attributes (in Loom specifications)
for _attr_key, _attr in self._feature_attrs:
_row_attrs[_attr_key] = _attr.values
# Add columns attributes (in Loom specifications)
_default_embedding = None
_embeddings_X = pd.DataFrame(index=self._data_matrix._observation_names)
_embeddings_Y = pd.DataFrame(index=self._data_matrix._observation_names)
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import copy
import math
from shapely.geometry.polygon import Polygon
# A shared random state will ensure that data is split in the same way in both the train and test functions
RANDOM_STATE = 42
def load_tabular_features_hadoop(distribution='all', matched=False, scale='all', minus_one=False):
tabular_path = 'data/join_results/train/join_cardinality_data_points_sara.csv'
print(tabular_path)
tabular_features_df = pd.read_csv(tabular_path, delimiter='\\s*,\\s*', header=0)
if distribution != 'all':
tabular_features_df = tabular_features_df[tabular_features_df['label'].str.contains('_{}'.format(distribution))]
if matched:
tabular_features_df = tabular_features_df[tabular_features_df['label'].str.contains('_Match')]
if scale != 'all':
tabular_features_df = tabular_features_df[tabular_features_df['label'].str.contains(scale)]
if minus_one:
tabular_features_df['join_sel'] = 1 - tabular_features_df['join_sel']
tabular_features_df = tabular_features_df.drop(columns=['label', 'coll1', 'D1', 'coll2', 'D2'])
tabular_features_df = tabular_features_df.rename(columns={x: y for x, y in zip(tabular_features_df.columns, range(0, len(tabular_features_df.columns)))})
# Get train and test data
train_data, test_data = train_test_split(tabular_features_df, test_size=0.20, random_state=RANDOM_STATE)
num_features = len(tabular_features_df.columns) - 1
X_train = pd.DataFrame.to_numpy(train_data[[i for i in range(num_features)]])
y_train = train_data[num_features]
X_test = pd.DataFrame.to_numpy(test_data[[i for i in range(num_features)]])
y_test = test_data[num_features]
return X_train, y_train, X_test, y_test
def load_tabular_features(join_result_path, tabular_path, normalize=False, minus_one=False, target='join_selectivity'):
tabular_features_df = pd.read_csv(tabular_path, delimiter='\\s*,\\s*', header=0)
cols = ['dataset1', 'dataset2', 'result_size', 'mbr_tests', 'duration']
join_df = pd.read_csv(join_result_path, delimiter=',', header=None, names=cols)
join_df = join_df[join_df.result_size != 0]
join_df = pd.merge(join_df, tabular_features_df, left_on='dataset1', right_on='dataset_name')
join_df = pd.merge(join_df, tabular_features_df, left_on='dataset2', right_on='dataset_name')
cardinality_x = join_df['cardinality_x']
cardinality_y = join_df['cardinality_y']
result_size = join_df['result_size']
mbr_tests = join_df['mbr_tests']
# x1_x, y1_x, x2_x, y2_x, x1_y, y1_y, x2_y, y2_y = join_df['x1_x'], join_df['y1_x'], join_df['x2_x'], join_df['y2_x'], join_df['x1_y'], join_df['y1_y'], join_df['x2_y'], join_df['y2_y']
# # Compute intersection area 1, intersection area 2 and area similarity
# intersect_x1 = pd.concat([x1_x, x1_y]).max(level=0)
# intersect_y1 = max(y1_x, y1_y)
# intersect_x2 = min(x2_x, x2_y)
# intersect_y2 = min(y2_x, y2_y)
# print(intersect_x1)
if minus_one:
join_selectivity = 1 - result_size / (cardinality_x * cardinality_y)
mbr_tests_selectivity = 1 - mbr_tests / (cardinality_x * cardinality_y)
else:
join_selectivity = result_size / (cardinality_x * cardinality_y)
mbr_tests_selectivity = mbr_tests / (cardinality_x * cardinality_y)
join_df = join_df.drop(
columns=['result_size', 'dataset1', 'dataset2', 'dataset_name_x', 'dataset_name_y', 'mbr_tests', 'duration'])
if normalize:
column_groups = [
['AVG area_x', 'AVG area_y'],
['AVG x_x', 'AVG y_x', 'AVG x_y', 'AVG y_y'],
['E0_x', 'E2_x', 'E0_y', 'E2_y'],
['cardinality_x', 'cardinality_y'],
]
for column_group in column_groups:
input_data = join_df[column_group].to_numpy()
original_shape = input_data.shape
reshaped = input_data.reshape(input_data.size, 1)
reshaped = preprocessing.minmax_scale(reshaped)
join_df[column_group] = reshaped.reshape(original_shape)
# Rename the column's names to numbers for easier access
join_df = join_df.rename(columns={x: y for x, y in zip(join_df.columns, range(0, len(join_df.columns)))})
# Save the number of features in order to extract (X, y) correctly
num_features = len(join_df.columns)
# Append the target to the right of data frame
join_df.insert(len(join_df.columns), 'join_selectivity', join_selectivity, True)
join_df.insert(len(join_df.columns), 'mbr_tests_selectivity', mbr_tests_selectivity, True)
# TODO: delete this dumping action. This is just for debugging
join_df.to_csv('data/temp/join_df.csv')
# Split join data to train and test data
# target = 'join_selectivity'
train_data, test_data = train_test_split(join_df, test_size=0.20, random_state=RANDOM_STATE)
X_train = pd.DataFrame.to_numpy(train_data[[i for i in range(num_features)]])
y_train = train_data[target]
X_test = pd.DataFrame.to_numpy(test_data[[i for i in range(num_features)]])
y_test = test_data[target]
return X_train, y_train, X_test, y_test
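# Worked example (hypothetical numbers, not taken from any dataset above): a join returning
# 50 pairs between inputs of 1,000 and 2,000 records has selectivity 50 / (1000 * 2000) = 2.5e-05;
# with minus_one=True the stored target becomes 1 - 2.5e-05, i.e. close to 1 instead of close to 0.
def _demo_join_selectivity(result_size=50, card_x=1000, card_y=2000, minus_one=False):
    # illustrative helper mirroring the selectivity formula used in load_tabular_features above
    sel = result_size / (card_x * card_y)
    return 1 - sel if minus_one else sel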
def generate_tabular_features(join_result_path, tabular_path, output, normalize=False, minus_one=False):
tabular_features_df = pd.read_csv(tabular_path, delimiter='\\s*,\\s*', header=0)
cols = ['dataset1', 'dataset2', 'result_size', 'mbr_tests', 'duration', 'best_algorithm']
join_df = pd.read_csv(join_result_path, delimiter=',', header=None, names=cols)
best_algorithm = join_df['best_algorithm']
join_df = join_df[join_df.result_size != 0]
join_df = pd.merge(join_df, tabular_features_df, left_on='dataset1', right_on='dataset_name')
join_df = pd.merge(join_df, tabular_features_df, left_on='dataset2', right_on='dataset_name')
cardinality_x = join_df['cardinality_x']
cardinality_y = join_df['cardinality_y']
result_size = join_df['result_size']
mbr_tests = join_df['mbr_tests']
if minus_one:
join_selectivity = 1 - result_size / (cardinality_x * cardinality_y)
mbr_tests_selectivity = 1 - mbr_tests / (cardinality_x * cardinality_y)
else:
join_selectivity = result_size / (cardinality_x * cardinality_y)
mbr_tests_selectivity = mbr_tests / (cardinality_x * cardinality_y)
join_df = join_df.drop(
columns=['result_size', 'dataset_name_x', 'dataset_name_y', 'mbr_tests', 'duration', 'best_algorithm'])
if normalize:
column_groups = [
['AVG area_x', 'AVG area_y'],
['AVG x_x', 'AVG y_x', 'AVG x_y', 'AVG y_y'],
['E0_x', 'E2_x', 'E0_y', 'E2_y'],
['cardinality_x', 'cardinality_y'],
]
for column_group in column_groups:
input_data = join_df[column_group].to_numpy()
original_shape = input_data.shape
reshaped = input_data.reshape(input_data.size, 1)
reshaped = preprocessing.minmax_scale(reshaped)
join_df[column_group] = reshaped.reshape(original_shape)
# Append the target to the right of data frame
join_df.insert(len(join_df.columns), 'join_selectivity', join_selectivity, True)
join_df.insert(len(join_df.columns), 'mbr_tests_selectivity', mbr_tests_selectivity, True)
join_df.insert(len(join_df.columns), 'best_algorithm', best_algorithm, True)
join_df.to_csv(output, index=False)
def load_histogram_features(join_result_path, tabular_path, histograms_path, num_rows, num_columns):
tabular_features_df = pd.read_csv(tabular_path, delimiter='\\s*,\\s*', header=0)
cols = ['dataset1', 'dataset2', 'result_size', 'mbr_tests', 'duration']
join_df = pd.read_csv(join_result_path, delimiter=',', header=None, names=cols)
join_df = join_df[join_df.result_size != 0]
join_df = pd.merge(join_df, tabular_features_df, left_on='dataset1', right_on='dataset_name')
join_df = pd.merge(join_df, tabular_features_df, left_on='dataset2', right_on='dataset_name')
cardinality_x = join_df['cardinality_x']
cardinality_y = join_df['cardinality_y']
result_size = join_df['result_size']
join_selectivity = result_size / (cardinality_x * cardinality_y)
join_df.insert(len(join_df.columns), 'join_selectivity', join_selectivity, True)
ds1_histograms, ds2_histograms, ds1_original_histograms, ds2_original_histograms, ds_all_histogram, ds_bops_histogram = load_histograms(
join_df, histograms_path, num_rows, num_columns)
return join_df['join_selectivity'], ds1_histograms, ds2_histograms, ds_bops_histogram
def load_datasets_feature(filename):
features_df = pd.read_csv(filename, delimiter=',', header=0)
return features_df
def load_join_data(features_df, result_file, histograms_path, num_rows, num_columns):
cols = ['dataset1', 'dataset2', 'result_size', 'mbr_tests', 'duration']
result_df = pd.read_csv(result_file, delimiter=',', header=None, names=cols)
result_df = result_df[result_df.result_size != 0]
# result_df = result_df.sample(frac=1)
result_df = pd.merge(result_df, features_df, left_on='dataset1', right_on='dataset_name')
result_df = pd.merge(result_df, features_df, left_on='dataset2', right_on='dataset_name')
# Load histograms
ds1_histograms, ds2_histograms, ds1_original_histograms, ds2_original_histograms, ds_all_histogram, ds_bops_histogram = load_histograms(
result_df, histograms_path, num_rows, num_columns)
# Compute BOPS
bops = np.multiply(ds1_original_histograms, ds2_original_histograms)
# print (bops)
bops = bops.reshape((bops.shape[0], num_rows * num_columns))
bops_values = np.sum(bops, axis=1)
bops_values = bops_values.reshape((bops_values.shape[0], 1))
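# Descriptive note: "BOPS" here is the element-wise product of the two rasterized histograms,
# summed over all num_rows * num_columns cells, i.e. a rough count of the box-pair comparisons
# the join would perform. E.g. (hypothetical numbers) for 2x2 histograms [[1, 0], [2, 1]] and
# [[3, 1], [0, 2]] the product is [[3, 0], [0, 2]] and the resulting BOPS value is 5.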
# result_df['bops'] = bops_values
cardinality_x = result_df[' cardinality_x']
cardinality_y = result_df[' cardinality_y']
result_size = result_df['result_size']
mbr_tests = result_df['mbr_tests']
join_selectivity = 1 - result_size / (cardinality_x * cardinality_y)
# join_selectivity = join_selectivity * math.pow(10, 9)
join_selectivity_log = copy.deepcopy(join_selectivity)
join_selectivity_log = join_selectivity_log.apply(lambda x: (-1) * math.log10(x))
# print(join_selectivity)
# join_selectivity = -math.log10(join_selectivity)
mbr_tests_selectivity = mbr_tests / (cardinality_x * cardinality_y)
mbr_tests_selectivity = mbr_tests_selectivity * math.pow(10, 9)
duration = result_df['duration']
dataset1 = result_df['dataset1']
dataset2 = result_df['dataset2']
# result_df = result_df.drop(columns=['result_size', 'dataset1', 'dataset2', 'dataset_name_x', 'dataset_name_y', ' cardinality_x', ' cardinality_y'])
# result_df = result_df.drop(
# columns=['result_size', 'dataset1', 'dataset2', 'dataset_name_x', 'dataset_name_y'])
result_df = result_df.drop(
columns=['result_size', 'dataset1', 'dataset2', 'dataset_name_x', 'dataset_name_y', ' cardinality_x',
' cardinality_y', 'mbr_tests', 'duration'])
x = result_df.values
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
result_df = pd.DataFrame(x_scaled)
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2022, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
import warnings
import biom
import qiime2
import pandas as pd
import numpy as np
import numpy.testing as npt
from q2_feature_table import _rename
class TestRename(unittest.TestCase):
def setUp(self):
self.old_ids = ['S1', 'S2', 'S3']
self.name_map = pd.Series({'S1': 'S1_new',
'S2': 'S2_new',
'S4': 'S4_name'})
self.known = {'S1': 'S1_new', 'S2': 'S2_new', 'S3': 'S3'}
def test_generate_new_names_non_unique(self):
name_map = pd.Series({'S1': 'S2_new', 'S2': 'S2_new'})
with self.assertRaises(ValueError) as cm:
_rename._generate_new_names(self.old_ids,
name_map,
strict=True,
verbose=False)
self.assertEqual(
str(cm.exception),
('All new ids must be unique.\n'
'Try the group method in this plugin if you want '
'to combine multiple samples in the same table.')
)
def test_generate_new_names_old_disjoint_strict(self):
with self.assertRaises(ValueError) as cm:
_rename._generate_new_names(self.old_ids,
self.name_map,
strict=True,
verbose=False)
self.assertEqual(
str(cm.exception),
("There are ids in the table which do not have new names.\n"
"Either turn off strict mode or provide a remapping for "
"all ids.\nThe following ids are not mapped:\n S3")
)
def test_generate_new_names_verbose_warnings(self):
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
new_names = \
_rename._generate_new_names(self.old_ids,
self.name_map,
strict=False,
verbose=True)
self.assertEqual(len(w), 2)
self.assertTrue(isinstance(w[0].message, UserWarning))
self.assertEqual(str(w[0].message),
'There are ids in the original table which do not '
'have new names.\nThe following ids will not be '
'included:\n S3')
self.assertTrue(isinstance(w[1].message, UserWarning))
self.assertEqual(str(w[1].message),
'There are ids supplied for renaming that are not in'
' the table.\nThe following ids will not be mapped:'
'\n S4'
)
self.assertEqual(new_names.keys(), self.known.keys())
for k, v in new_names.items():
self.assertEqual(v, self.known[k])
def test_generate_new_names_no_verbse(self):
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
new_names = \
_rename._generate_new_names(self.old_ids,
self.name_map,
strict=False,
verbose=False)
self.assertEqual(len(w), 0)
self.assertEqual(new_names.keys(), self.known.keys())
for k, v in new_names.items():
self.assertEqual(v, self.known[k])
def test_rename_samples(self):
table = biom.Table(np.array([[0, 1, 2], [3, 4, 5]]),
observation_ids=['01', '02'],
sample_ids=['S1', 'S2', 'S3'])
meta1 = qiime2.Metadata(pd.DataFrame(
data=np.array([['cat'], ['rat'], ['dog']]),
index=pd.Index(['S1', 'S2', 'S3'], name='sample-id')
import datetime
from collections import OrderedDict
import warnings
import numpy as np
from numpy import array, nan
import pandas as pd
import pytest
from numpy.testing import assert_almost_equal, assert_allclose
from conftest import assert_frame_equal, assert_series_equal
from pvlib import irradiance
from conftest import requires_ephem, requires_numba
# fixtures create realistic test input data
# test input data generated at Location(32.2, -111, 'US/Arizona', 700)
# test input data is hard coded to avoid dependencies on other parts of pvlib
@pytest.fixture
def times():
# must include night values
return pd.date_range(start='20140624', freq='6H', periods=4,
tz='US/Arizona')
@pytest.fixture
def irrad_data(times):
return pd.DataFrame(np.array(
[[ 0. , 0. , 0. ],
[ 79.73860422, 316.1949056 , 40.46149818],
[1042.48031487, 939.95469881, 118.45831879],
[ 257.20751138, 646.22886049, 62.03376265]]),
columns=['ghi', 'dni', 'dhi'], index=times)
@pytest.fixture
def ephem_data(times):
return pd.DataFrame(np.array(
[[124.0390863 , 124.0390863 , -34.0390863 , -34.0390863 ,
352.69550699, -2.36677158],
[ 82.85457044, 82.97705621, 7.14542956, 7.02294379,
66.71410338, -2.42072165],
[ 10.56413562, 10.56725766, 79.43586438, 79.43274234,
144.76567754, -2.47457321],
[ 72.41687122, 72.46903556, 17.58312878, 17.53096444,
287.04104128, -2.52831909]]),
columns=['apparent_zenith', 'zenith', 'apparent_elevation',
'elevation', 'azimuth', 'equation_of_time'],
index=times)
@pytest.fixture
def dni_et(times):
return np.array(
[1321.1655834833093, 1321.1655834833093, 1321.1655834833093,
1321.1655834833093])
@pytest.fixture
def relative_airmass(times):
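    # first value corresponds to a night-time timestamp, hence the NaN airmass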
return pd.Series([np.nan, 7.58831596, 1.01688136, 3.27930443], times)
# setup for et rad test. put it here for readability
timestamp = pd.Timestamp('20161026')
dt_index = pd.DatetimeIndex([timestamp])
doy = timestamp.dayofyear
dt_date = timestamp.date()
dt_datetime = datetime.datetime.combine(dt_date, datetime.time(0))
dt_np64 = np.datetime64(dt_datetime)
value = 1383.636203
@pytest.mark.parametrize('testval, expected', [
(doy, value),
(np.float64(doy), value),
(dt_date, value),
(dt_datetime, value),
(dt_np64, value),
(np.array([doy]), np.array([value])),
(pd.Series([doy]), np.array([value])),
(dt_index, pd.Series([value], index=dt_index)),
(timestamp, value)
])
@pytest.mark.parametrize('method', [
'asce', 'spencer', 'nrel', pytest.param('pyephem', marks=requires_ephem)])
def test_get_extra_radiation(testval, expected, method):
out = irradiance.get_extra_radiation(testval, method=method)
assert_allclose(out, expected, atol=10)
def test_get_extra_radiation_epoch_year():
out = irradiance.get_extra_radiation(doy, method='nrel', epoch_year=2012)
assert_allclose(out, 1382.4926804890767, atol=0.1)
@requires_numba
def test_get_extra_radiation_nrel_numba(times):
with warnings.catch_warnings():
# don't warn on method reload or num threads
warnings.simplefilter("ignore")
result = irradiance.get_extra_radiation(
times, method='nrel', how='numba', numthreads=4)
# and reset to no-numba state
irradiance.get_extra_radiation(times, method='nrel')
assert_allclose(result,
[1322.332316, 1322.296282, 1322.261205, 1322.227091])
def test_get_extra_radiation_invalid():
with pytest.raises(ValueError):
irradiance.get_extra_radiation(300, method='invalid')
def test_grounddiffuse_simple_float():
result = irradiance.get_ground_diffuse(40, 900)
assert_allclose(result, 26.32000014911496)
def test_grounddiffuse_simple_series(irrad_data):
ground_irrad = irradiance.get_ground_diffuse(40, irrad_data['ghi'])
assert ground_irrad.name == 'diffuse_ground'
def test_grounddiffuse_albedo_0(irrad_data):
ground_irrad = irradiance.get_ground_diffuse(
40, irrad_data['ghi'], albedo=0)
assert 0 == ground_irrad.all()
def test_grounddiffuse_albedo_invalid_surface(irrad_data):
with pytest.raises(KeyError):
irradiance.get_ground_diffuse(
40, irrad_data['ghi'], surface_type='invalid')
def test_grounddiffuse_albedo_surface(irrad_data):
result = irradiance.get_ground_diffuse(40, irrad_data['ghi'],
surface_type='sand')
assert_allclose(result, [0, 3.731058, 48.778813, 12.035025], atol=1e-4)
def test_isotropic_float():
result = irradiance.isotropic(40, 100)
assert_allclose(result, 88.30222215594891)
def test_isotropic_series(irrad_data):
result = irradiance.isotropic(40, irrad_data['dhi'])
assert_allclose(result, [0, 35.728402, 104.601328, 54.777191], atol=1e-4)
def test_klucher_series_float():
# klucher inputs
surface_tilt, surface_azimuth = 40.0, 180.0
dhi, ghi = 100.0, 900.0
solar_zenith, solar_azimuth = 20.0, 180.0
# expect same result for floats and pd.Series
expected = irradiance.klucher(
surface_tilt, surface_azimuth,
pd.Series(dhi), pd.Series(ghi),
pd.Series(solar_zenith), pd.Series(solar_azimuth)
) # 94.99429931664851
result = irradiance.klucher(
surface_tilt, surface_azimuth, dhi, ghi, solar_zenith, solar_azimuth
)
assert_allclose(result, expected[0])
def test_klucher_series(irrad_data, ephem_data):
result = irradiance.klucher(40, 180, irrad_data['dhi'], irrad_data['ghi'],
ephem_data['apparent_zenith'],
ephem_data['azimuth'])
# pvlib matlab 1.4 does not contain the max(cos_tt, 0) correction
# so, these values are different
assert_allclose(result, [0., 36.789794, 109.209347, 56.965916], atol=1e-4)
# expect same result for np.array and pd.Series
expected = irradiance.klucher(
40, 180, irrad_data['dhi'].values, irrad_data['ghi'].values,
ephem_data['apparent_zenith'].values, ephem_data['azimuth'].values
)
assert_allclose(result, expected, atol=1e-4)
def test_haydavies(irrad_data, ephem_data, dni_et):
result = irradiance.haydavies(
40, 180, irrad_data['dhi'], irrad_data['dni'], dni_et,
ephem_data['apparent_zenith'], ephem_data['azimuth'])
# values from matlab 1.4 code
assert_allclose(result, [0, 27.1775, 102.9949, 33.1909], atol=1e-4)
def test_reindl(irrad_data, ephem_data, dni_et):
result = irradiance.reindl(
40, 180, irrad_data['dhi'], irrad_data['dni'], irrad_data['ghi'],
dni_et, ephem_data['apparent_zenith'], ephem_data['azimuth'])
# values from matlab 1.4 code
assert_allclose(result, [np.nan, 27.9412, 104.1317, 34.1663], atol=1e-4)
def test_king(irrad_data, ephem_data):
result = irradiance.king(40, irrad_data['dhi'], irrad_data['ghi'],
ephem_data['apparent_zenith'])
assert_allclose(result, [0, 44.629352, 115.182626, 79.719855], atol=1e-4)
def test_perez(irrad_data, ephem_data, dni_et, relative_airmass):
dni = irrad_data['dni'].copy()
dni.iloc[2] = np.nan
out = irradiance.perez(40, 180, irrad_data['dhi'], dni,
dni_et, ephem_data['apparent_zenith'],
ephem_data['azimuth'], relative_airmass)
expected = pd.Series(np.array(
[ 0. , 31.46046871, np.nan, 45.45539877]),
index=irrad_data.index)
assert_series_equal(out, expected, check_less_precise=2)
def test_perez_components(irrad_data, ephem_data, dni_et, relative_airmass):
dni = irrad_data['dni'].copy()
dni.iloc[2] = np.nan
out = irradiance.perez(40, 180, irrad_data['dhi'], dni,
dni_et, ephem_data['apparent_zenith'],
ephem_data['azimuth'], relative_airmass,
return_components=True)
expected = pd.DataFrame(np.array(
[[ 0. , 31.46046871, np.nan, 45.45539877],
[ 0. , 26.84138589, np.nan, 31.72696071],
[ 0. , 0. , np.nan, 4.47966439],
[ 0. , 4.62212181, np.nan, 9.25316454]]).T,
columns=['sky_diffuse', 'isotropic', 'circumsolar', 'horizon'],
index=irrad_data.index
)
expected_for_sum = expected['sky_diffuse'].copy()
expected_for_sum.iloc[2] = 0
sum_components = out.iloc[:, 1:].sum(axis=1)
sum_components.name = 'sky_diffuse'
assert_frame_equal(out, expected, check_less_precise=2)
assert_series_equal(sum_components, expected_for_sum, check_less_precise=2)
def test_perez_arrays(irrad_data, ephem_data, dni_et, relative_airmass):
dni = irrad_data['dni'].copy()
dni.iloc[2] = np.nan
out = irradiance.perez(40, 180, irrad_data['dhi'].values, dni.values,
dni_et, ephem_data['apparent_zenith'].values,
ephem_data['azimuth'].values,
relative_airmass.values)
expected = np.array(
[ 0. , 31.46046871, np.nan, 45.45539877])
assert_allclose(out, expected, atol=1e-2)
assert isinstance(out, np.ndarray)
def test_perez_scalar():
# copied values from fixtures
out = irradiance.perez(40, 180, 118.45831879, 939.95469881,
1321.1655834833093, 10.56413562, 144.76567754,
1.01688136)
    # this will fail. out is an ndarray with ndim == 0. fix in future version.
# assert np.isscalar(out)
assert_allclose(out, 109.084332)
@pytest.mark.parametrize('model', ['isotropic', 'klucher', 'haydavies',
'reindl', 'king', 'perez'])
def test_sky_diffuse_zenith_close_to_90(model):
# GH 432
sky_diffuse = irradiance.get_sky_diffuse(
30, 180, 89.999, 230,
dni=10, ghi=51, dhi=50, dni_extra=1360, airmass=12, model=model)
assert sky_diffuse < 100
def test_get_sky_diffuse_invalid():
with pytest.raises(ValueError):
irradiance.get_sky_diffuse(
30, 180, 0, 180, 1000, 1100, 100, dni_extra=1360, airmass=1,
model='invalid')
def test_liujordan():
expected = pd.DataFrame(np.array(
[[863.859736967, 653.123094076, 220.65905025]]),
columns=['ghi', 'dni', 'dhi'],
index=[0])
out = irradiance.liujordan(
pd.Series([10]), pd.Series([0.5]), pd.Series([1.1]), dni_extra=1400)
assert_frame_equal(out, expected)
def test_get_total_irradiance(irrad_data, ephem_data, dni_et, relative_airmass):
models = ['isotropic', 'klucher',
'haydavies', 'reindl', 'king', 'perez']
for model in models:
total = irradiance.get_total_irradiance(
32, 180,
ephem_data['apparent_zenith'], ephem_data['azimuth'],
dni=irrad_data['dni'], ghi=irrad_data['ghi'],
dhi=irrad_data['dhi'],
dni_extra=dni_et, airmass=relative_airmass,
model=model,
surface_type='urban')
assert total.columns.tolist() == ['poa_global', 'poa_direct',
'poa_diffuse', 'poa_sky_diffuse',
'poa_ground_diffuse']
@pytest.mark.parametrize('model', ['isotropic', 'klucher',
'haydavies', 'reindl', 'king', 'perez'])
def test_get_total_irradiance_scalars(model):
total = irradiance.get_total_irradiance(
32, 180,
10, 180,
dni=1000, ghi=1100,
dhi=100,
dni_extra=1400, airmass=1,
model=model,
surface_type='urban')
assert list(total.keys()) == ['poa_global', 'poa_direct',
'poa_diffuse', 'poa_sky_diffuse',
'poa_ground_diffuse']
# test that none of the values are nan
assert np.isnan(np.array(list(total.values()))).sum() == 0
def test_poa_components(irrad_data, ephem_data, dni_et, relative_airmass):
aoi = irradiance.aoi(40, 180, ephem_data['apparent_zenith'],
ephem_data['azimuth'])
gr_sand = irradiance.get_ground_diffuse(40, irrad_data['ghi'],
surface_type='sand')
diff_perez = irradiance.perez(
40, 180, irrad_data['dhi'], irrad_data['dni'], dni_et,
ephem_data['apparent_zenith'], ephem_data['azimuth'], relative_airmass)
out = irradiance.poa_components(
aoi, irrad_data['dni'], diff_perez, gr_sand)
expected = pd.DataFrame(np.array(
[[ 0. , -0. , 0. , 0. ,
0. ],
[ 35.19456561, 0. , 35.19456561, 31.4635077 ,
3.73105791],
[956.18253696, 798.31939281, 157.86314414, 109.08433162,
48.77881252],
[ 90.99624896, 33.50143401, 57.49481495, 45.45978964,
12.03502531]]),
columns=['poa_global', 'poa_direct', 'poa_diffuse', 'poa_sky_diffuse',
'poa_ground_diffuse'],
index=irrad_data.index)
assert_frame_equal(out, expected)
@pytest.mark.parametrize('pressure,expected', [
(93193, [[830.46567, 0.79742, 0.93505],
[676.09497, 0.63776, 3.02102]]),
(None, [[868.72425, 0.79742, 1.01664],
[680.66679, 0.63776, 3.28463]]),
(101325, [[868.72425, 0.79742, 1.01664],
[680.66679, 0.63776, 3.28463]])
])
def test_disc_value(pressure, expected):
# see GH 449 for pressure=None vs. 101325.
columns = ['dni', 'kt', 'airmass']
times = pd.DatetimeIndex(['2014-06-24T1200', '2014-06-24T1800'],
tz='America/Phoenix')
ghi = pd.Series([1038.62, 254.53], index=times)
zenith = pd.Series([10.567, 72.469], index=times)
out = irradiance.disc(ghi, zenith, times, pressure=pressure)
expected_values = np.array(expected)
expected = pd.DataFrame(expected_values, columns=columns, index=times)
# check the pandas dataframe. check_less_precise is weird
assert_frame_equal(out, expected, check_less_precise=True)
# use np.assert_allclose to check values more clearly
assert_allclose(out.values, expected_values, atol=1e-5)
def test_disc_overirradiance():
columns = ['dni', 'kt', 'airmass']
ghi = np.array([3000])
solar_zenith = np.full_like(ghi, 0)
times = pd.date_range(start='2016-07-19 12:00:00', freq='1s',
periods=len(ghi), tz='America/Phoenix')
out = irradiance.disc(ghi=ghi, solar_zenith=solar_zenith,
datetime_or_doy=times)
expected = pd.DataFrame(np.array(
[[8.72544336e+02, 1.00000000e+00, 9.99493933e-01]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
def test_disc_min_cos_zenith_max_zenith():
# map out behavior under difficult conditions with various
# limiting kwargs settings
columns = ['dni', 'kt', 'airmass']
times = pd.DatetimeIndex(['2016-07-19 06:11:00'], tz='America/Phoenix')
out = irradiance.disc(ghi=1.0, solar_zenith=89.99, datetime_or_doy=times)
expected = pd.DataFrame(np.array(
[[0.00000000e+00, 1.16046346e-02, 12.0]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# max_zenith and/or max_airmass keep these results reasonable
out = irradiance.disc(ghi=1.0, solar_zenith=89.99, datetime_or_doy=times,
min_cos_zenith=0)
expected = pd.DataFrame(np.array(
[[0.00000000e+00, 1.0, 12.0]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# still get reasonable values because of max_airmass=12 limit
out = irradiance.disc(ghi=1.0, solar_zenith=89.99, datetime_or_doy=times,
max_zenith=100)
expected = pd.DataFrame(np.array(
[[0., 1.16046346e-02, 12.0]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# still get reasonable values because of max_airmass=12 limit
out = irradiance.disc(ghi=1.0, solar_zenith=89.99, datetime_or_doy=times,
min_cos_zenith=0, max_zenith=100)
expected = pd.DataFrame(np.array(
[[277.50185968, 1.0, 12.0]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# max_zenith keeps this result reasonable
out = irradiance.disc(ghi=1.0, solar_zenith=89.99, datetime_or_doy=times,
min_cos_zenith=0, max_airmass=100)
expected = pd.DataFrame(np.array(
[[0.00000000e+00, 1.0, 36.39544757]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# allow zenith to be close to 90 and airmass to be infinite
# and we get crazy values
out = irradiance.disc(ghi=1.0, solar_zenith=89.99, datetime_or_doy=times,
max_zenith=100, max_airmass=100)
expected = pd.DataFrame(np.array(
[[6.68577449e+03, 1.16046346e-02, 3.63954476e+01]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# allow min cos zenith to be 0, zenith to be close to 90,
# and airmass to be very big and we get even higher DNI values
out = irradiance.disc(ghi=1.0, solar_zenith=89.99, datetime_or_doy=times,
min_cos_zenith=0, max_zenith=100, max_airmass=100)
expected = pd.DataFrame(np.array(
[[7.21238390e+03, 1., 3.63954476e+01]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
def test_dirint_value():
times = pd.DatetimeIndex(['2014-06-24T12-0700', '2014-06-24T18-0700'])
ghi = pd.Series([1038.62, 254.53], index=times)
zenith = pd.Series([10.567, 72.469], index=times)
pressure = 93193.
dirint_data = irradiance.dirint(ghi, zenith, times, pressure=pressure)
assert_almost_equal(dirint_data.values,
np.array([868.8, 699.7]), 1)
def test_dirint_nans():
times = pd.date_range(start='2014-06-24T12-0700', periods=5, freq='6H')
ghi = pd.Series([np.nan, 1038.62, 1038.62, 1038.62, 1038.62], index=times)
zenith = pd.Series([10.567, np.nan, 10.567, 10.567, 10.567], index=times)
pressure = pd.Series([93193., 93193., np.nan, 93193., 93193.], index=times)
temp_dew = pd.Series([10, 10, 10, np.nan, 10], index=times)
dirint_data = irradiance.dirint(ghi, zenith, times, pressure=pressure,
temp_dew=temp_dew)
assert_almost_equal(dirint_data.values,
np.array([np.nan, np.nan, np.nan, np.nan, 893.1]), 1)
def test_dirint_tdew():
times = pd.DatetimeIndex(['2014-06-24T12-0700', '2014-06-24T18-0700'])
ghi = pd.Series([1038.62, 254.53], index=times)
zenith = pd.Series([10.567, 72.469], index=times)
pressure = 93193.
dirint_data = irradiance.dirint(ghi, zenith, times, pressure=pressure,
temp_dew=10)
assert_almost_equal(dirint_data.values,
np.array([882.1, 672.6]), 1)
def test_dirint_no_delta_kt():
times =
|
pd.DatetimeIndex(['2014-06-24T12-0700', '2014-06-24T18-0700'])
|
pandas.DatetimeIndex
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 11 13:59:57 2018
@author: Apoorb
"""
import os
import pandas as pd
from io import StringIO # Needed to read the data
import math #To use math.log
import numpy as np
from scipy.stats import norm
from matplotlib import pyplot as plt
from sklearn import linear_model
import statistics
from statsmodels.formula.api import ols
from statsmodels.stats.anova import anova_lm
from statsmodels.graphics.factorplots import interaction_plot
from scipy import stats
import seaborn as sns
print('Current working directory ',os.getcwd())
#os.chdir('/Users/Apoorb/Documents/GitHub/Python-Code-Compilation')
os.chdir("C:\\Users\\a-bibeka\\Documents\\GitHub\\Python-Code-Compilation")
print('Current working directory ',os.getcwd())
# Function to plot the Half-Normal Plot
def HalfPlt_V1(DatTemp,Theta,Var_,PltName):
'''
DatTemp : Dataset with the effecs {"FactEff":[#,#,....],"Var1":["A","B"....]}
Theta : column name for effects; "FactEff"
Var_ : column name for list of variables; "Var1"
PltName : Name of the Half plot
'''
#Get the # of effects
len1 =len(DatTemp[Var_])
DatTemp['absTheta']=DatTemp[Theta].apply(abs)
DatTemp=DatTemp.sort_values(by=['absTheta'])
#Need to reset index after sort orderwise ploting will have error
DatTemp = DatTemp.reset_index(drop=True)
#Get the index of each sorted effect
DatTemp['i']= np.linspace(1,len1,len1).tolist()
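    # half-normal quantiles: rank i maps to Phi^-1(0.5 + 0.5*(i-0.5)/n), so |effects| are plotted against positive normal quantiles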
DatTemp['NorQuant']=DatTemp['i'].apply(lambda x:norm.ppf(0.5+0.5*(x-0.5)/len1))
fig1, ax1 =plt.subplots()
ax1.scatter(DatTemp['NorQuant'], DatTemp['absTheta'], marker='x', color='red')
#Name all the points using Var1, enumerate gives index and value
for j,type in enumerate(DatTemp[Var_]):
x = DatTemp['NorQuant'][j]
y = DatTemp['absTheta'][j]
ax1.text(x+0.05, y+0.05, type, fontsize=9)
ax1.set_title("Half-Normal Plot")
ax1.set_xlabel("Normal Quantile")
ax1.set_ylabel("effects")
fig1.savefig(PltName)
# Function to perform Lenth test
#Lenth's Method for testing signficance for experiments without
# variance estimate
def LenthsTest(dat,fef,fileNm,IER_Alpha=2.30):
'''
dat: Dataset with the effecs {"FactEff":[#,#,....],"Var1":["A","B"....]}
fef = column name for effects; "FactEff"
IER_Alpha = IER for n effects and alpha
'''
#Get the # of effects
len1=len(dat[fef])
dat['absEff']=dat[fef].apply(abs)
s0=1.5*statistics.median(map(float,dat['absEff']))
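    # s0 is an initial robust scale estimate; effects above the 2.5*s0 trimming threshold are excluded before computing the PSE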
#Filter the effects
tpLst=[i for i in dat['absEff'] if i<2.5*s0]
#Get PSE
PSE =1.5 * statistics.median(tpLst)
#Lenth's t stat
dat['t_PSE'] = (round(dat[fef]/PSE,2))
dat['IER_Alpha']=[IER_Alpha]*len1
dat['Significant'] = dat.apply(lambda x : 'Significant' if abs(x['t_PSE']) > x['IER_Alpha'] else "Not Significant", axis=1)
dat=dat[["Var1","FactEff","t_PSE","IER_Alpha","Significant"]]
dat.to_csv(fileNm)
return(dat)
TempIO= StringIO('''
Run A C E B D F S1 S2
1 0 0 0 0 0 0 18.25 17.25
2 0 0 1 1 1 1 4.75 7.5
3 0 0 2 2 2 2 11.75 11.25
4 0 1 0 1 1 1 13.0 8.75
5 0 1 1 2 2 2 12.5 11.0
6 0 1 2 0 0 0 9.25 13.0
7 0 2 0 2 2 2 21.0 15.0
8 0 2 1 0 0 0 3.5 5.25
9 0 2 2 1 1 1 4.0 8.5
10 1 0 0 0 1 2 6.75 15.75
11 1 0 1 1 2 0 5.0 13.75
12 1 0 2 2 0 1 17.25 13.5
13 1 1 0 1 2 0 13.5 21.25
14 1 1 1 2 0 1 9.0 10.25
15 1 1 2 0 1 2 15.0 9.75
16 1 2 0 2 0 1 10.5 8.25
17 1 2 1 0 1 2 11.0 11.5
18 1 2 2 1 2 0 19.75 14.25
19 2 0 0 0 2 1 17.0 20.0
20 2 0 1 1 0 2 17.75 17.5
21 2 0 2 2 1 0 13.0 12.0
22 2 1 0 1 0 2 8.75 12.25
23 2 1 1 2 1 0 12.25 9.0
24 2 1 2 0 2 1 13.0 11.25
25 2 2 0 2 1 0 10.0 10.0
26 2 2 1 0 2 1 14.5 17.75
27 2 2 2 1 0 2 8.0 11.0
''')
Df=pd.read_csv(TempIO,delimiter=r"\s+",header=0)
Df.head()
Df.A=Df.A.apply(int)
DesMat=Df[["A","B","C","D","E","F"]]
DesMat.loc[:,"AC"]=(DesMat.A+DesMat.C)%3
DesMat.loc[:,"AC2"]=(DesMat.A+2*DesMat.C)%3
DesMat.loc[:,"AE"]=(DesMat.A+DesMat.E)%3
DesMat.loc[:,"AE2"]=(DesMat.A+2*DesMat.E)%3
Df.loc[:,"Sbar"]=Df[["S1","S2"]].apply(statistics.mean,axis=1)
Df.loc[:,"S_lns2"]=Df[["S1","S2"]].apply(statistics.variance,axis=1).apply(lambda x : math.log(x) if x!=0 else math.log(0.1**20))
f,axes=plt.subplots(2,3,sharex=True,sharey=True)
g=sns.factorplot(x="A",y="Sbar",data=Df,ci=None,ax=axes[0,0])
g=sns.factorplot(x="B",y="Sbar",data=Df,ci=None,ax=axes[0,1])
g=sns.factorplot(x="C",y="Sbar",data=Df,ci=None,ax=axes[0,2])
g=sns.factorplot(x="D",y="Sbar",data=Df,ci=None,ax=axes[1,0])
g=sns.factorplot(x="E",y="Sbar",data=Df,ci=None,ax=axes[1,1])
g=sns.factorplot(x="F",y="Sbar",data=Df,ci=None,ax=axes[1,2])
plt.tight_layout()
f.savefig("MainEffPlt.png")
fig1=interaction_plot(Df.A,Df.C,Df.Sbar)
fig2=interaction_plot(Df.A,Df.E,Df.Sbar)
#frames=[DesMat,DesMat]
#Df1=pd.concat(frames)
#Df1.loc[:,"Y"]=Df.S1.tolist()+Df.S2.tolist()
#
#Df1.to_csv("Q9Dat.csv")
f2,axes1=plt.subplots(2,3,sharex=True,sharey=True)
g=sns.factorplot(x="A",y="S_lns2",data=Df,ci=None,ax=axes1[0,0])
g=sns.factorplot(x="B",y="S_lns2",data=Df,ci=None,ax=axes1[0,1])
g=sns.factorplot(x="C",y="S_lns2",data=Df,ci=None,ax=axes1[0,2])
g=sns.factorplot(x="D",y="S_lns2",data=Df,ci=None,ax=axes1[1,0])
g=sns.factorplot(x="E",y="S_lns2",data=Df,ci=None,ax=axes1[1,1])
g=sns.factorplot(x="F",y="S_lns2",data=Df,ci=None,ax=axes1[1,2])
plt.tight_layout()
fig1=interaction_plot(Df.A,Df.C,Df.S_lns2)
fig2=interaction_plot(Df.A,Df.E,Df.S_lns2)
regr=linear_model.LinearRegression()
# Train the model using the training sets
Zs1=Df.S_lns2
R1=regr.fit(DesMat,Zs1)
R1.intercept_
ef2=R1.coef_
ef2
dat=pd.DataFrame({"FactEff":ef2,"Var1":DesMat.columns})
#dat
HalfPlt_V1(dat,'FactEff','Var1','HalfPlotStrn_Q9.png')
#IER critical value at alpha = 5% for this number of effects
LenthsTest(dat,'FactEff',"LenthTestDisper_Str_Q9.csv",IER_Alpha=2.21)
def contrast_l(ef):
contr=int()
if(ef==0):contr=-1/math.sqrt(2)
elif(ef==1):contr=0/math.sqrt(2)
else:contr=1/math.sqrt(2)
return contr
def contrast_q(ef):
contr=int()
if(ef==0):contr=1/math.sqrt(6)
elif(ef==1):contr=-2/math.sqrt(6)
else:contr=1/math.sqrt(6)
return contr
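# contrast_l / contrast_q are the normalized linear and quadratic orthogonal-polynomial contrasts for a 3-level factor (levels coded 0, 1, 2)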
# 6 Main effects
datXy=
|
pd.DataFrame()
|
pandas.DataFrame
|
import torch
import numpy as np
import pandas as pd
import os
import sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
matplotlib.style.use('ggplot')
import seaborn as sns
sns.set(
font_scale=1.5,
style="whitegrid",
rc={
'text.usetex' : False,
'lines.linewidth': 3
}
)
import glob
import torch.optim
import torch
import argparse
import utils
def process_df(quant, dirname, stats_ref=None, args=None, args_model=None, save=True):
global table_format
col_names = ["experiment", "stat", "set", "layer"]
quant = utils.assert_col_order(quant, col_names, id_vars="draw")
keys = list(quant.columns.levels[0].sort_values())
output_root = os.path.join(dirname, f"meta_" + "_".join(keys))
os.makedirs(output_root, exist_ok=True)
idx = pd.IndexSlice
cols_error = idx[:, 'error', :, :]
N_L = len(quant.columns.unique(level="layer")) # number of hidden layers
# errors = quant["error"]
# losses = quant["loss"]
quant.drop("val", axis=1,level="set", inplace=True, errors='ignore')
quant.drop(("test", "loss"), axis=1, inplace=True, errors='ignore')
if save:
quant.to_csv(os.path.join(output_root, 'merge.csv'))
if stats_ref is not None:
stats_ref.to_csv(os.path.join(output_root, 'stats_ref.csv'))
quant.sort_index(axis=1, inplace=True)
quant.loc[:, cols_error] *= 100 # in %
quant.groupby(level=["experiment", "stat", "set"], axis=1, group_keys=False).describe().to_csv(os.path.join(output_root, 'describe.csv'))
df_reset = quant.reset_index()
df_plot = pd.melt(df_reset, id_vars='draw')#.query("layer>0")
# df_plot_no_0 = df_plot.query('layer>0')
quant_ref = None
if stats_ref is not None:
N_S = len(stats_ref)
quant_ref_merge = pd.DataFrame()
stats_ref.loc["error"] = stats_ref["error"].values * 100
for key in keys:
quant_ref_merge = pd.concat([quant_ref_merge, quant_ref])
# N_S_key = len(quant[key].columns.get_level_values("stat").unique())
N_L_key = len(quant[key].columns.get_level_values("layer").unique())
quant_ref_key = stats_ref.iloc[np.tile(np.arange(N_S).reshape(N_S, 1), (N_L_key)).ravel()].to_frame(name="value").droplevel("layer")
quant_ref_merge = pd.concat([quant_ref_merge, quant_ref_key])
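        # repeat each reference statistic once per hidden layer so it aligns with the per-layer columns of quant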
quant_ref = stats_ref.iloc[np.repeat(np.arange(N_S), (N_L))].to_frame(name="value").droplevel("layer").value
try:
utils.to_latex(output_root, (quant-quant_ref_merge.value.values).abs(), table_format, key_err="error")
except:
pass
is_vgg = 'vgg' in dirname
dataset = 'CIFAR10' if 'cifar' in dirname else 'MNIST'
# if args_model is not None:
xlabels=[str(i) for i in range(N_L)]
palette=sns.color_palette(n_colors=len(keys)) # the two experiments
fig, axes = plt.subplots(2, 1, figsize=(4, 8), sharex=False)
# sns.set(font_scale=1,rc={"lines.linewidth":3})
# fig.suptitle("{} {}".format('VGG' if is_vgg else 'FCN', dataset.upper()))
k = 0
#rp.set_axis_labels("layer", "Loss", labelpad=10)
#quant.loc[1, Idx["loss", :, 0]].lineplot(x="layer_ids", y="value", hue="")
for i, stat in enumerate(["loss","error" ]):
for j, setn in enumerate(["train","test"]):
if stat == "loss" and setn=="test":
continue
if stat == "error" and setn=="train":
continue
# axes[k] = rp.axes[j,i]
ax = axes.flatten()[k]
            df_plot = quant.loc[:, idx[:, stat, setn, :]].min(axis=0).to_frame(name="value")
lp = sns.lineplot(
#data=rel_losses.min(axis=0).to_frame(name="loss"),
# data=df_plot_rel if not is_vgg else df_plot_rel.pivot(index="draw", columns=col_order).min(axis=0).to_frame(name="value"),
data=df_plot,
#hue="width",
hue="experiment",
hue_order=keys,
x="layer",
y="value",
legend=None,
# style='set',
ci='sd',
palette=palette,
#style='layer',
markers=False,
ax=ax,
dashes=True,
# linewidth=3.,
#legend_out=True,
#y="value",
)
lp.set(xticks=range(0, len(xlabels)))
# rp.set_xticklabels(xlabels)
# rp.axes[0,0].locator_params(axis='x', nbins=len(xlabels))
# rp.axes[0,1].locator_params(axis='x', nbins=len(xlabels))
# if k == 1:
# if k==1:
lp.set_xticklabels(xlabels)#, rotation=40*(is_vgg))
# else:
# lp.set_xticklabels(len(xlabels)*[None])
ax.set_title("{} {}{}".format(setn.title()+(setn=="train")*"ing", stat.title(), " (%)" if stat=="error" else ''))
# ylabel = stat if stat == "loss" else "error (%)"
ax.set_xlabel("layer index l")
ax.set_ylabel(None)
# ax.tick_params(labelbottom=True)
if quant_ref is not None:
# data_ref = quant_ref[stat, setn].reset_index()
ax.axline((0,quant_ref[stat, setn][0]), slope=0, ls=":", zorder=2, c='g')
# for ax in ax.lines[-1:]: # the last two
# ax.set_linestyle('--')
k += 1
# fig.subplots_adjust(top=0.85)
# if is_vgg:
labels=keys + ["Ref."]
fig.legend(handles=ax.lines, labels=labels, title="Exp.", loc="upper right", borderaxespad=0, bbox_to_anchor=(0.9,0.9))#, bbox_transform=fig.transFigure)
fig.tight_layout()
plt.margins()
fig.savefig(fname=os.path.join(output_root, "train_loss_test_error.pdf"), bbox_inches='tight')
k=0
# sns.set(font_scale=1,rc={"lines.linewidth":3})
fig, axes = plt.subplots(1, 1, figsize=(4, 4), sharex=False)
# fig.suptitle("{} {}".format('VGG' if is_vgg else 'FCN', dataset.upper()))
for i, stat in enumerate(["error"]):
for j, setn in enumerate(["train"]):
if stat == "loss" and setn=="test":
continue
if stat=="error" and setn=="test":
continue
# axes[k] = rp.axes[j,i]
ax = axes
            df_plot = quant.loc[:, idx[:, stat, setn, :]].min(axis=0).to_frame(name="value")
lp = sns.lineplot(
#data=rel_losses.min(axis=0).to_frame(name="loss"),
# data=df_plot_rel if not is_vgg else df_plot_rel.pivot(index="draw", columns=col_order).min(axis=0).to_frame(name="value"),
data=df_plot,
#hue="width",
hue="experiment",
hue_order=keys,
x="layer",
y="value",
legend=None,
# style='set',
ci='sd',
palette=palette,
#style='layer',
markers=False,
ax=ax,
dashes=True,
#legend_out=True,
#y="value",
)
lp.set(xticks=range(0, len(xlabels)))
# rp.set_xticklabels(xlabels)
# rp.axes[0,0].locator_params(axis='x', nbins=len(xlabels))
# rp.axes[0,1].locator_params(axis='x', nbins=len(xlabels))
lp.set_xticklabels(xlabels)#, rotation=40*(is_vgg))
ax.set_title("{} {}{}".format(setn.title()+(setn=="train")*'ing', stat.title(), " (%)" if stat=="error" else ''))
# ylabel = stat if stat == "loss" else "error (%)"
ax.set_xlabel("layer index l")
ax.set_ylabel(None)
if quant_ref is not None:
# data_ref = quant_ref[stat, setn].reset_index()
ax.axline((0,quant_ref[stat, setn][0]), slope=0, ls=":", zorder=2, c='g')
k += 1
labels=keys + ["Ref."]
    fig.legend(handles=ax.lines, labels=labels, title="Exp.", loc="upper right", bbox_to_anchor=(0.9,0.9),borderaxespad=0)#, bbox_transform=fig.transFigure)
plt.margins()
plt.savefig(fname=os.path.join(output_root, "error_train.pdf"), bbox_inches='tight')
if "B" in keys:
df_B = quant["B"]
elif "B2" in keys:
df_B = quant["B2"]
else:
return
n_draws = len(df_B.index)
# vary_draw=copy.deepcopy(df_B)
df_B_plot = pd.melt(df_B.reset_index(), id_vars="draw")
cp = sns.FacetGrid(
data=df_B_plot,
# hue="experiment",
# hue_order=["A", "B"],
col="stat",
col_order=["loss", "error"],
row="set",
row_order=["train", "test"],
sharey= False,
sharex= True,
#y="value",
)
styles=['dotted', 'dashed', 'dashdot', 'solid']
# for i_k, k in enumerate([10, 50, 100, 200]):
draws = len(df_B.index)
df_bound =
|
pd.DataFrame(columns=df_B.columns)
|
pandas.DataFrame
|
#
# Copyright (c) 2020 IBM Corp.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
import os
import tempfile
import unittest
# noinspection PyPackageRequirements
import pytest
from pandas.tests.extension import base
from text_extensions_for_pandas.array.test_span import ArrayTestBase
from text_extensions_for_pandas.array.span import *
from text_extensions_for_pandas.array.token_span import *
class TokenSpanTest(ArrayTestBase):
def test_create(self):
toks = self._make_spans_of_tokens()
s1 = TokenSpan(toks, 0, 1)
self.assertEqual(s1.covered_text, "This")
# Begin too small
with self.assertRaises(ValueError):
TokenSpan(toks, -2, 4)
# End too small
with self.assertRaises(ValueError):
TokenSpan(toks, 1, -1)
# End too big
with self.assertRaises(ValueError):
TokenSpan(toks, 1, 10)
# Begin null, end not null
with self.assertRaises(ValueError):
TokenSpan(toks, TokenSpan.NULL_OFFSET_VALUE, 0)
def test_repr(self):
toks = self._make_spans_of_tokens()
s1 = TokenSpan(toks, 0, 2)
self.assertEqual(repr(s1), "[0, 7): 'This is'")
toks2 = SpanArray(
"This is a really really really really really really really really "
"really long string.",
np.array([0, 5, 8, 10, 17, 24, 31, 38, 45, 52, 59, 66, 73, 78, 84]),
np.array([4, 7, 9, 16, 23, 30, 37, 44, 51, 58, 65, 72, 77, 84, 85]),
)
self._assertArrayEquals(
toks2.covered_text,
[
"This",
"is",
"a",
"really",
"really",
"really",
"really",
"really",
"really",
"really",
"really",
"really",
"long",
"string",
".",
],
)
s2 = TokenSpan(toks2, 0, 4)
self.assertEqual(repr(s2), "[0, 16): 'This is a really'")
s2 = TokenSpan(toks2, 0, 15)
self.assertEqual(
repr(s2),
"[0, 85): 'This is a really really really really really really "
"really really really [...]'"
)
def test_equals(self):
toks = self._make_spans_of_tokens()
other_toks = toks[:-1].copy()
s1 = TokenSpan(toks, 0, 2)
s2 = TokenSpan(toks, 0, 2)
s3 = TokenSpan(toks, 0, 3)
s4 = TokenSpan(other_toks, 0, 2)
s5 = Span(toks.target_text, s4.begin, s4.end)
s6 = Span(toks.target_text, s4.begin, s4.end + 1)
self.assertEqual(s1, s2)
self.assertNotEqual(s1, s3)
self.assertEqual(s1, s4)
self.assertEqual(s1, s5)
self.assertEqual(s5, s1)
self.assertNotEqual(s1, s6)
def test_less_than(self):
toks = self._make_spans_of_tokens()
s1 = TokenSpan(toks, 0, 3)
s2 = TokenSpan(toks, 2, 3)
s3 = TokenSpan(toks, 3, 4)
self.assertLess(s1, s3)
self.assertLessEqual(s1, s3)
self.assertFalse(s1 < s2)
def test_add(self):
toks = self._make_spans_of_tokens()
s1 = TokenSpan(toks, 0, 3)
s2 = TokenSpan(toks, 2, 3)
s3 = TokenSpan(toks, 3, 4)
char_s1 = Span(s1.target_text, s1.begin, s1.end)
char_s2 = Span(s2.target_text, s2.begin, s2.end)
self.assertEqual(s1 + s2, s1)
self.assertEqual(char_s1 + s2, char_s1)
self.assertEqual(s2 + char_s1, char_s1)
self.assertEqual(char_s2 + char_s1, char_s1)
self.assertEqual(s2 + s3, TokenSpan(toks, 2, 4))
def test_hash(self):
toks = self._make_spans_of_tokens()
s1 = TokenSpan(toks, 0, 3)
s2 = TokenSpan(toks, 0, 3)
s3 = TokenSpan(toks, 3, 4)
d = {s1: "foo"}
self.assertEqual(d[s1], "foo")
self.assertEqual(d[s2], "foo")
d[s2] = "bar"
d[s3] = "fab"
self.assertEqual(d[s1], "bar")
self.assertEqual(d[s2], "bar")
self.assertEqual(d[s3], "fab")
class TokenSpanArrayTest(ArrayTestBase):
def _make_spans(self):
toks = self._make_spans_of_tokens()
return TokenSpanArray(toks, [0, 1, 2, 3, 0, 2, 0], [1, 2, 3, 4, 2, 4, 4])
def test_create(self):
arr = self._make_spans()
self._assertArrayEquals(
arr.covered_text,
["This", "is", "a", "test", "This is", "a test", "This is a test"],
)
with self.assertRaises(TypeError):
TokenSpanArray(self._make_spans_of_tokens(), "Not a valid begins list", [42])
def test_dtype(self):
arr = self._make_spans()
self.assertTrue(isinstance(arr.dtype, TokenSpanDtype))
def test_len(self):
self.assertEqual(len(self._make_spans()), 7)
def test_getitem(self):
arr = self._make_spans()
self.assertEqual(arr[2].covered_text, "a")
self._assertArrayEquals(arr[2:4].covered_text, ["a", "test"])
def test_setitem(self):
arr = self._make_spans()
arr[1] = arr[2]
self._assertArrayEquals(arr.covered_text[0:4], ["This", "a", "a", "test"])
arr[3] = None
self._assertArrayEquals(arr.covered_text[0:4], ["This", "a", "a", None])
with self.assertRaises(ValueError):
arr[0] = "Invalid argument for __setitem__()"
arr[0:2] = arr[0]
self._assertArrayEquals(arr.covered_text[0:4], ["This", "This", "a", None])
arr[[0, 1, 3]] = None
self._assertArrayEquals(arr.covered_text[0:4], [None, None, "a", None])
arr[[2, 1, 3]] = arr[[4, 5, 6]]
self._assertArrayEquals(
arr.covered_text[0:4], [None, "a test", "This is", "This is a test"]
)
def test_equals(self):
arr = self._make_spans()
self._assertArrayEquals(arr[0:4] == arr[1], [False, True, False, False])
arr2 = self._make_spans()
self._assertArrayEquals(arr == arr, [True] * 7)
self._assertArrayEquals(arr == arr2, [True] * 7)
self._assertArrayEquals(arr[0:3] == arr[3:6], [False, False, False])
arr3 = SpanArray(arr.target_text, arr.begin, arr.end)
self._assertArrayEquals(arr == arr3, [True] * 7)
self._assertArrayEquals(arr3 == arr, [True] * 7)
def test_not_equals(self):
arr = self._make_spans()
arr2 = self._make_spans()
self._assertArrayEquals(arr[0:4] != arr[1], [True, False, True, True])
self._assertArrayEquals(arr != arr2, [False] * 7)
self._assertArrayEquals(arr[0:3] != arr[3:6], [True, True, True])
def test_concat_same_type(self):
arr = self._make_spans()
arr2 = self._make_spans()
# Type: TokenSpanArray
arr3 = TokenSpanArray._concat_same_type((arr, arr2))
self._assertArrayEquals(arr3.covered_text, np.tile(arr2.covered_text, 2))
def test_from_factorized(self):
arr = self._make_spans()
spans_list = [arr[i] for i in range(len(arr))]
arr2 = TokenSpanArray._from_factorized(spans_list, arr)
self._assertArrayEquals(arr.covered_text, arr2.covered_text)
def test_from_sequence(self):
arr = self._make_spans()
spans_list = [arr[i] for i in range(len(arr))]
arr2 = TokenSpanArray._from_sequence(spans_list)
self._assertArrayEquals(arr.covered_text, arr2.covered_text)
def test_nulls(self):
arr = self._make_spans()
self._assertArrayEquals(arr.isna(), [False] * 7)
self.assertFalse(arr.have_nulls)
arr[2] = TokenSpan.make_null(arr.tokens)
self.assertIsNone(arr.covered_text[2])
self._assertArrayEquals(arr[0:4].covered_text, ["This", "is", None, "test"])
self._assertArrayEquals(arr[0:4].isna(), [False, False, True, False])
self.assertTrue(arr.have_nulls)
def test_copy(self):
arr = self._make_spans()
arr2 = arr.copy()
self._assertArrayEquals(arr.covered_text, arr2.covered_text)
self.assertEqual(arr[1], arr2[1])
arr[1] = TokenSpan.make_null(arr.tokens)
self.assertNotEqual(arr[1], arr2[1])
# Double underscore because you can't call a test case "test_take"
def test_take(self):
arr = self._make_spans()
arr2 = arr.take([1, 1, 2, 3, 5, -1])
self._assertArrayEquals(
arr2.covered_text, ["is", "is", "a", "test", "a test", "This is a test"]
)
arr3 = arr.take([1, 1, 2, 3, 5, -1], allow_fill=True)
self._assertArrayEquals(
arr3.covered_text, ["is", "is", "a", "test", "a test", None]
)
def test_less_than(self):
tokens = self._make_spans_of_tokens()
arr1 = TokenSpanArray(tokens, [0, 2], [4, 3])
s1 = TokenSpan(tokens, 0, 1)
s2 = TokenSpan(tokens, 3, 4)
arr2 = TokenSpanArray(tokens, [0, 3], [0, 4])
self._assertArrayEquals(s1 < arr1, [False, True])
self._assertArrayEquals(s2 > arr1, [False, True])
self._assertArrayEquals(arr1 < s1, [False, False])
self._assertArrayEquals(arr1 < arr2, [False, True])
def test_add(self):
toks = self._make_spans_of_tokens()
s1 = TokenSpan(toks, 0, 3)
s2 = TokenSpan(toks, 2, 3)
s3 = TokenSpan(toks, 3, 4)
s4 = TokenSpan(toks, 2, 4)
s5 = TokenSpan(toks, 0, 3)
char_s1 = Span(s1.target_text, s1.begin, s1.end)
char_s2 = Span(s2.target_text, s2.begin, s2.end)
char_s3 = Span(s3.target_text, s3.begin, s3.end)
char_s4 = Span(s4.target_text, s4.begin, s4.end)
char_s5 = Span(s5.target_text, s5.begin, s5.end)
# TokenSpanArray + TokenSpanArray
self._assertArrayEquals(
TokenSpanArray._from_sequence([s1, s2, s3])
+ TokenSpanArray._from_sequence([s2, s3, s3]),
TokenSpanArray._from_sequence([s1, s4, s3]),
)
# SpanArray + TokenSpanArray
self._assertArrayEquals(
SpanArray._from_sequence([char_s1, char_s2, char_s3])
+ TokenSpanArray._from_sequence([s2, s3, s3]),
SpanArray._from_sequence([char_s1, char_s4, char_s3]),
)
# TokenSpanArray + SpanArray
self._assertArrayEquals(
TokenSpanArray._from_sequence([s1, s2, s3])
+ SpanArray._from_sequence([char_s2, char_s3, char_s3]),
SpanArray._from_sequence([char_s1, char_s4, char_s3]),
)
# TokenSpanArray + TokenSpan
self._assertArrayEquals(
TokenSpanArray._from_sequence([s1, s2, s3]) + s2,
TokenSpanArray._from_sequence([s5, s2, s4]),
)
# TokenSpan + TokenSpanArray
self._assertArrayEquals(
s2 + TokenSpanArray._from_sequence([s1, s2, s3]),
TokenSpanArray._from_sequence([s5, s2, s4]),
)
# TokenSpanArray + Span
self._assertArrayEquals(
TokenSpanArray._from_sequence([s1, s2, s3]) + char_s2,
SpanArray._from_sequence([char_s5, char_s2, char_s4]),
)
# Span + SpanArray
self._assertArrayEquals(
char_s2 + SpanArray._from_sequence([char_s1, char_s2, char_s3]),
SpanArray._from_sequence([char_s5, char_s2, char_s4]),
)
def test_reduce(self):
arr = self._make_spans()
self.assertEqual(arr._reduce("sum"), TokenSpan(arr.tokens, 0, 4))
# Remind ourselves to modify this test after implementing min and max
with self.assertRaises(TypeError):
arr._reduce("min")
def test_make_array(self):
arr = self._make_spans()
arr_series = pd.Series(arr)
toks_list = [arr[0], arr[1], arr[2], arr[3]]
self._assertArrayEquals(
TokenSpanArray.make_array(arr).covered_text,
["This", "is", "a", "test", "This is", "a test", "This is a test"],
)
self._assertArrayEquals(
TokenSpanArray.make_array(arr_series).covered_text,
["This", "is", "a", "test", "This is", "a test", "This is a test"],
)
self._assertArrayEquals(
TokenSpanArray.make_array(toks_list).covered_text,
["This", "is", "a", "test"],
)
def test_begin_and_end(self):
arr = self._make_spans()
self._assertArrayEquals(arr.begin, [0, 5, 8, 10, 0, 8, 0])
self._assertArrayEquals(arr.end, [4, 7, 9, 14, 7, 14, 14])
def test_normalized_covered_text(self):
arr = self._make_spans()
self._assertArrayEquals(
arr.normalized_covered_text,
["this", "is", "a", "test", "this is", "a test", "this is a test"],
)
def test_as_frame(self):
arr = self._make_spans()
df = arr.as_frame()
self._assertArrayEquals(
df.columns, ["begin", "end", "begin_token", "end_token", "covered_text"]
)
self.assertEqual(len(df), len(arr))
class TokenSpanArrayIOTests(ArrayTestBase):
def do_roundtrip(self, df):
with tempfile.TemporaryDirectory() as dirpath:
filename = os.path.join(dirpath, 'token_span_array_test.feather')
df.to_feather(filename)
df_read = pd.read_feather(filename)
pd.testing.assert_frame_equal(df, df_read)
def test_feather(self):
toks = self._make_spans_of_tokens()
# Equal token spans to tokens
ts1 = TokenSpanArray(toks, np.arange(len(toks)), np.arange(len(toks)) + 1)
df1 =
|
pd.DataFrame({"ts1": ts1})
|
pandas.DataFrame
|
# coding: utf-8
import pymysql
import numpy as np
import pandas as pd
import csv
import xgboost as xgb
from numpy import loadtxt
from xgboost import XGBClassifier
from xgboost import plot_importance
from xgboost import plot_tree
# other required python files
import feature
###################### DB connect
db = pymysql.connect(host="", port=3306, user="", passwd="",db="")
### train_set - skeleton
def make_train_set():
SQL = "SELECT order_id, user_id, order_dow, order_hour_of_day FROM orders"
orders_df = pd.read_sql(SQL, db)
SQL = "SELECT order_id FROM order_products__train"
train_df = pd.read_sql(SQL, db)
print("make train set - basic start")
    # ------------------ find the user that matches each train order_id, then check the products that user bought most recently
    # deduplicate order_id >> looks like a count, but it only removes duplicates
train_df= train_df.groupby("order_id").aggregate("count").reset_index()
    # find the user_id for each order_id and merge
train_df = pd.merge(train_df, orders_df, how="inner", on="order_id")
    # merge with prior
    # product list for each user and order_id
train_df = pd.merge(train_df, feature.latest_order(), how="inner", on="user_id")
    # take only product_id, aisle_id (sub-category) and department_id (major category) from the products table and merge
# products_df = pd.read_csv( "products.csv", usecols=["product_id", "aisle_id", "department_id"])
SQL = "SELECT product_id, aisle_id, department_id FROM products"
products_df = pd.read_sql(SQL, db)
train_df = pd.merge(train_df, products_df, how="inner", on="product_id")
del products_df, orders_df, SQL
print("make train set - basic finish")
return train_df
'''
Attach the newly created features.
Many features were built, but only the most accurate one is used.
'''
def train_result():
train_x = make_train_set()
    train_x = pd.merge(train_x, feature.order_ratio_bychance(), how="left", on=["user_id", "product_id"])
return train_x
### train answer : train_y
def make_answer(train_x):
SQL = "SELECT order_id, user_id FROM orders"
orders_df = pd.read_sql(SQL, db)
SQL = "SELECT order_id, product_id, reordered FROM order_products__train"
train_df = pd.read_sql(SQL, db)
print ("train_y start")
answer = pd.merge(train_df, orders_df, how="inner", on="order_id")
del orders_df, train_df
    # drop order_id
answer = answer[["user_id", "product_id", "reordered"]]
    # merge train with the extra info >>>> should train_result() be passed in as the train_x parameter?
train_df =
|
pd.merge(train_x, answer, how="left", on=["user_id", "product_id"])
|
pandas.merge
|
import pandas as pd
import tqdm
from pynput import keyboard
import bird_view.utils.bz_utils as bzu
import bird_view.utils.carla_utils as cu
from bird_view.models.common import crop_birdview
from perception.utils.helpers import get_segmentation_tensor
from perception.utils.segmentation_labels import DEFAULT_CLASSES
from perception.utils.visualization import get_rgb_segmentation, get_segmentation_colors
def _paint(observations, control, diagnostic, debug, env, show=False, use_cv=False, trained_cv=False):
import cv2
import numpy as np
WHITE = (255, 255, 255)
RED = (255, 0, 0)
CROP_SIZE = 192
X = 176
Y = 192 // 2
R = 2
birdview = cu.visualize_birdview(observations['birdview'])
birdview = crop_birdview(birdview)
if 'big_cam' in observations:
canvas = np.uint8(observations['big_cam']).copy()
rgb = np.uint8(observations['rgb']).copy()
else:
canvas = np.uint8(observations['rgb']).copy()
def _stick_together(a, b, axis=1):
if axis == 1:
h = min(a.shape[0], b.shape[0])
r1 = h / a.shape[0]
r2 = h / b.shape[0]
a = cv2.resize(a, (int(r1 * a.shape[1]), int(r1 * a.shape[0])))
b = cv2.resize(b, (int(r2 * b.shape[1]), int(r2 * b.shape[0])))
return np.concatenate([a, b], 1)
else:
h = min(a.shape[1], b.shape[1])
r1 = h / a.shape[1]
r2 = h / b.shape[1]
a = cv2.resize(a, (int(r1 * a.shape[1]), int(r1 * a.shape[0])))
b = cv2.resize(b, (int(r2 * b.shape[1]), int(r2 * b.shape[0])))
return np.concatenate([a, b], 0)
def _stick_together_and_fill(a, b):
# sticks together a and b.
# a should be wider than b, and b will be filled with black pixels to match a's width.
w_diff = a.shape[1] - b.shape[1]
fill = np.zeros(shape=(b.shape[0], w_diff, 3), dtype=np.uint8)
b_filled = np.concatenate([b, fill], axis=1)
return np.concatenate([a, b_filled], axis=0)
def _write(text, i, j, canvas=canvas, fontsize=0.4):
rows = [x * (canvas.shape[0] // 10) for x in range(10+1)]
cols = [x * (canvas.shape[1] // 9) for x in range(9+1)]
cv2.putText(
canvas, text, (cols[j], rows[i]),
cv2.FONT_HERSHEY_SIMPLEX, fontsize, WHITE, 1)
_command = {
1: 'LEFT',
2: 'RIGHT',
3: 'STRAIGHT',
4: 'FOLLOW',
}.get(observations['command'], '???')
if 'big_cam' in observations:
fontsize = 0.8
else:
fontsize = 0.4
_write('Command: ' + _command, 1, 0, fontsize=fontsize)
_write('Velocity: %.1f' % np.linalg.norm(observations['velocity']), 2, 0, fontsize=fontsize)
_write('Steer: %.2f' % control.steer, 4, 0, fontsize=fontsize)
_write('Throttle: %.2f' % control.throttle, 5, 0, fontsize=fontsize)
_write('Brake: %.1f' % control.brake, 6, 0, fontsize=fontsize)
_write('Collided: %s' % diagnostic['collided'], 1, 6, fontsize=fontsize)
_write('Invaded: %s' % diagnostic['invaded'], 2, 6, fontsize=fontsize)
_write('Lights Ran: %d/%d' % (env.traffic_tracker.total_lights_ran, env.traffic_tracker.total_lights), 3, 6, fontsize=fontsize)
_write('Goal: %.1f' % diagnostic['distance_to_goal'], 4, 6, fontsize=fontsize)
_write('Time: %d' % env._tick, 5, 6, fontsize=fontsize)
_write('Time limit: %d' % env._timeout, 6, 6, fontsize=fontsize)
_write('FPS: %.2f' % (env._tick / (diagnostic['wall'])), 7, 6, fontsize=fontsize)
for x, y in debug.get('locations', []):
x = int(X - x / 2.0 * CROP_SIZE)
y = int(Y + y / 2.0 * CROP_SIZE)
S = R // 2
birdview[x-S:x+S+1,y-S:y+S+1] = RED
for x, y in debug.get('locations_world', []):
x = int(X - x * 4)
y = int(Y + y * 4)
S = R // 2
birdview[x-S:x+S+1,y-S:y+S+1] = RED
for x, y in debug.get('locations_birdview', []):
S = R // 2
birdview[x-S:x+S+1,y-S:y+S+1] = RED
for x, y in debug.get('locations_pixel', []):
S = R // 2
if 'big_cam' in observations:
rgb[y-S:y+S+1,x-S:x+S+1] = RED
else:
canvas[y-S:y+S+1,x-S:x+S+1] = RED
for x, y in debug.get('curve', []):
x = int(X - x * 4)
y = int(Y + y * 4)
try:
birdview[x,y] = [155, 0, 155]
except:
pass
if 'target' in debug:
x, y = debug['target'][:2]
x = int(X - x * 4)
y = int(Y + y * 4)
birdview[x-R:x+R+1,y-R:y+R+1] = [0, 155, 155]
#ox, oy = observations['orientation']
#rot = np.array([
# [ox, oy],
# [-oy, ox]])
#u = observations['node'] - observations['position'][:2]
#v = observations['next'] - observations['position'][:2]
#u = rot.dot(u)
#x, y = u
#x = int(X - x * 4)
#y = int(Y + y * 4)
#v = rot.dot(v)
#x, y = v
#x = int(X - x * 4)
#y = int(Y + y * 4)
if 'big_cam' in observations:
_write('Network input/output', 1, 0, canvas=rgb)
_write('Projected output', 1, 0, canvas=birdview)
full = _stick_together(rgb, birdview)
else:
full = _stick_together(canvas, birdview)
if 'image' in debug:
full = _stick_together(full, cu.visualize_predicted_birdview(debug['image'], 0.01))
if 'big_cam' in observations:
full = _stick_together(canvas, full, axis=0)
if use_cv:
semseg = get_segmentation_tensor(observations["semseg"].copy(), classes=DEFAULT_CLASSES)
class_colors = get_segmentation_colors(len(DEFAULT_CLASSES) + 1, class_indxs=DEFAULT_CLASSES)
semseg_rgb = get_rgb_segmentation(semantic_image=semseg, class_colors=class_colors)
semseg_rgb = np.uint8(semseg_rgb)
full = _stick_together_and_fill(full, semseg_rgb)
depth = np.uint8(observations["depth"]).copy()
depth = np.expand_dims(depth, axis=2)
depth = np.repeat(depth, 3, axis=2)
full = _stick_together_and_fill(full, depth)
if trained_cv:
semseg = observations["semseg"].copy()
class_colors = get_segmentation_colors(len(DEFAULT_CLASSES) + 1, class_indxs=DEFAULT_CLASSES)
semseg_rgb = get_rgb_segmentation(semantic_image=semseg, class_colors=class_colors)
semseg_rgb = np.uint8(semseg_rgb)
full = _stick_together_and_fill(full, semseg_rgb)
depth = cv2.normalize(observations["depth"].copy(), None, alpha=0, beta=255,
norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
depth = np.uint8(depth)
depth = np.expand_dims(depth, axis=2)
depth = np.repeat(depth, 3, axis=2)
full = _stick_together_and_fill(full, depth)
if show:
bzu.show_image('canvas', full)
bzu.add_to_video(full)
manual_break = False
def run_single(env, weather, start, target, agent_maker, seed, autopilot, show=False, move_camera=False,
use_cv=False, trained_cv=False):
# HACK: deterministic vehicle spawns.
env.seed = seed
env.init(start=start, target=target, weather=cu.PRESET_WEATHERS[weather])
print("Spawn points: ", (start, target))
if not autopilot:
agent = agent_maker()
else:
agent = agent_maker(env._player, resolution=1, threshold=7.5)
agent.set_route(env._start_pose.location, env._target_pose.location)
diagnostics = list()
result = {
'weather': weather,
'start': start, 'target': target,
'success': None, 't': None,
'total_lights_ran': None,
'total_lights': None,
'collided': None,
}
i = 0
listener = keyboard.Listener(on_release=on_release)
listener.start()
while env.tick():
if i % 50 == 0 and move_camera:
env.move_spectator_to_player()
i = 0 if not move_camera else i + 1
observations = env.get_observations()
if autopilot:
control, _, _, _ = agent.run_step(observations)
else:
control = agent.run_step(observations)
diagnostic = env.apply_control(control)
_paint(observations, control, diagnostic, agent.debug, env, show=show, use_cv=use_cv, trained_cv=trained_cv)
diagnostic.pop('viz_img')
diagnostics.append(diagnostic)
global manual_break
if env.is_failure() or env.is_success() or manual_break:
result['success'] = env.is_success()
result['total_lights_ran'] = env.traffic_tracker.total_lights_ran
result['total_lights'] = env.traffic_tracker.total_lights
result['collided'] = env.collided
result['t'] = env._tick
if manual_break:
print("Manual break activated")
result['success'] = False
manual_break = False
if not result['success']:
print("Evaluation route failed! Start: {}, Target: {}, Weather: {}".format(result["start"],
result["target"],
result["weather"]))
break
listener.stop()
return result, diagnostics
def on_release(key):
#print('{0} released'.format(key))
if key == keyboard.Key.page_down:
#print("pgdown pressed")
global manual_break
manual_break = True
def run_benchmark(agent_maker, env, benchmark_dir, seed, autopilot, resume, max_run=5, show=False, move_camera=False,
use_cv=False, trained_cv=False):
"""
benchmark_dir must be an instance of pathlib.Path
"""
summary_csv = benchmark_dir / 'summary.csv'
diagnostics_dir = benchmark_dir / 'diagnostics'
diagnostics_dir.mkdir(parents=True, exist_ok=True)
summary = list()
total = len(list(env.all_tasks))
if summary_csv.exists() and resume:
summary =
|
pd.read_csv(summary_csv)
|
pandas.read_csv
|
import datetime as dt
import pandas as pd
import logging
# from db.mysql import *
# from db.read import *
# from db.write import *
# from db.remove import *
# from .trigger import *
from ..db.mapping import map_transaction, map_holding
from ..db.write import bulk_save
from ..models import Transaction, Holding
from ..utils.fetch import get_yahoo_bvps, get_yahoo_cr
logger = logging.getLogger('main.trade')
def execute_order(list, type, s):
# Get Db Name
db_name = s.bind.url.database
# for each quote as dict in buy list
for dict in list:
# ticker,price in list format retrieved from dict
ticker, price = zip(*dict.items())
# ticker value
ticker = ticker[0]
# Current Close
cp = float(price[0])
try:
# BUY
if type == 'buy':
# Fetch and Calculate Capital
if db_name == 'tsxci':
bvps = get_yahoo_bvps(ticker+'.TO')
cr = get_yahoo_cr(ticker+'.TO')
elif db_name == 'csi300' and 'SH' in ticker:
bvps = get_yahoo_bvps(ticker.replace('SH','SS'))
cr = get_yahoo_cr(ticker.replace('SH','SS'))
else:
bvps = get_yahoo_bvps(ticker)
cr = get_yahoo_cr(ticker)
if cp > 0 and bvps is not None and cr is not None:
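                    # _get_adjusted_cap is defined elsewhere in this module (not shown here); it presumably converts price, book value per share and current ratio into the capital to allocate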
adjusted_cap = _get_adjusted_cap(cp, bvps, cr)
if adjusted_cap >= cp:
# execute buy function
buy(dict, adjusted_cap, s)
# SELL
elif cp > 0 and type == 'sell':
# print(db_name, dict, adjusted_cap)
# execute buy function
sell(dict, None, s)
except:
logger.debug('Failed to buy %s-%s ', db_name, ticker)
def buy(dict,cap,s):
build_transaction(dict,cap,'buy',s)
build_holding(dict,s)
def sell(dict,cap,engine):
# if there is already a holding table and found ticker in holding table
if build_transaction(dict,cap,'sell',engine) is not False:
# then transaction is completed and holding table is able to build and reflush
build_holding(dict,engine)
def build_transaction(dict, cap, type, s):
# current date
date = dt.datetime.today().strftime("%Y-%m-%d")
# ticker,price in list format retrieved from dict
ticker,price = zip(*dict.items())
# ticker value
ticker = ticker[0]
# price value is float
price = float(price[0])
    # if type is buy, qty is positive and settlement is negative (spending)
if(type == 'buy'):
qty = int(cap/price)
settlement = (price*qty)*-1
    # if type is sell, qty is negative and settlement is positive (income)
elif(type == 'sell'):
        # read holding table; if False, the table does not exist
df_existing_holding = pd.read_sql(s.query(Holding).statement, s.bind, index_col='symbol')
# if there is holding table and ticker is found in holding df
if (ticker in df_existing_holding.index.unique()):
# # if sell off all
# if cap == 10000:
            # retrieve quantity, which is negative
qty = (df_existing_holding[(df_existing_holding.index==ticker)].quantity.tolist()[0])*-1
# # if sell off a half
# elif cap == 5000:
            # # retrieve quantity, which is negative
# qty = ((df_existing_holding[(df_existing_holding.index==ticker)].quantity.tolist()[0])*-1)/2
# settlement amount is positive(income)
settlement = abs(price*qty)
# if no table or no ticker in holding
else:
logger.debug('No table or not holding: %s, cannot %s' % (ticker,type))
return False
# TRANSACTION dict
dict_transaction = {'date':date, 'symbol':ticker, 'type':type,'quantity':qty,'price':price,'settlement':settlement}
# TRANSACTION dataframe
df_transaction = pd.DataFrame.from_records([dict_transaction],index='date')
df_transaction.index =
|
pd.to_datetime(df_transaction.index)
|
pandas.to_datetime
|
#coding=utf-8
import pandas as pd
import numpy as np
import sys
import os
from sklearn import preprocessing
import datetime
import scipy as sc
from sklearn.preprocessing import MinMaxScaler,StandardScaler
from sklearn.externals import joblib
#import joblib
import FEsingle  # project-local feature-engineering helpers used below (assumed importable here)
class FEbase(object):
"""description of class"""
def __init__(self, **kwargs):
pass
def create(self,*DataSetName):
#print (self.__class__.__name__)
(filepath, tempfilename) = os.path.split(DataSetName[0])
(filename, extension) = os.path.splitext(tempfilename)
#bufferstring='savetest2017.csv'
bufferstringoutput=filepath+'/'+filename+'_'+self.__class__.__name__+extension
if(os.path.exists(bufferstringoutput)==False):
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
df_all=self.core(DataSetName)
df_all.to_csv(bufferstringoutput)
return bufferstringoutput
def core(self,df_all,Data_adj_name=''):
return df_all
def real_FE():
return 0
class FEg30eom0110network(FEbase):
    # this version is changed to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
intflag=True
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
        # whether the stock is ST or otherwise restricted
df_all['st_or_otherwrong']=0
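        # ST (special treatment) shares trade with a narrower daily price limit, so an unusual down_limit/up_limit ratio flags them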
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
        ## exclude the STAR Market (688 tickers)
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
        # adjusted price (close * adjustment factor)
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
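        # cross-sectional percentile ranks are shifted by one day (no look-ahead) and then bucketed into coarse integer bins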
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
if(intflag):
df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
if(intflag):
df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
if(intflag):
df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min',True)
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min',True)
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max',True)
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max',True)
df_all,_=FEsingle.HighLowRange(df_all,8,True)
df_all,_=FEsingle.HighLowRange(df_all,25,True)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
        # limit-up / halted flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        ### real price range (distinguish actually high- vs. low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
        # 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
if(intflag):
df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
if(intflag):
df_all['pct_chg_abs_rank']=df_all['pct_chg_abs_rank']*10//2
df_all=FEsingle.PctChgAbsSumRank(df_all,6,True)
df_all=FEsingle.PctChgSumRank(df_all,3,True)
df_all=FEsingle.PctChgSumRank(df_all,6,True)
df_all=FEsingle.PctChgSumRank(df_all,12,True)
df_all=FEsingle.AmountChgRank(df_all,12,True)
#df_all=FEsingle.AmountChgRank(df_all,30)
        # compute percentile ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
if(intflag):
df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
        # drop stocks whose price is too low
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
        # columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
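# --- illustrative sketch (added; not part of the original class) ------------
# The classes in this file repeatedly use one idiom: rank a column
# cross-sectionally per trade_date, shift it by one row per ts_code so only
# yesterday's information is used, then bin the percentile into integers
# (rank * 19.9 // 1 yields buckets 0..19).  The toy frame below only
# demonstrates that arithmetic.
def _demo_rank_shift_bin():
    import pandas as pd
    toy = pd.DataFrame({
        'trade_date': [20200101, 20200101, 20200102, 20200102],
        'ts_code':    ['000001.SZ', '600000.SH', '000001.SZ', '600000.SH'],
        'total_mv':   [100.0, 400.0, 120.0, 380.0],
    })
    toy['total_mv_rank'] = toy.groupby('trade_date')['total_mv'].rank(pct=True)
    toy['total_mv_rank'] = toy.groupby('ts_code')['total_mv_rank'].shift(1)
    toy['total_mv_rank'] = toy['total_mv_rank'] * 19.9 // 1   # integer buckets 0..19 (NaN on the first day)
    return toy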
class FEg30eom0110onlinew6d(FEbase):
    # this version switches to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all['sm_amount_pos']=df_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['lg_amount_pos']=df_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['net_mf_amount_pos']=df_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['sm_amount_pos']=df_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_all['lg_amount_pos']=df_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_all['net_mf_amount_pos']=df_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_all['sm_amount']=df_all.groupby('ts_code')['sm_amount'].shift(1)
df_all['lg_amount']=df_all.groupby('ts_code')['lg_amount'].shift(1)
df_all['net_mf_amount']=df_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
        # flag ST or otherwise abnormal stocks
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
        ## exclude STAR Market (688) stocks
#print(df_all)
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
        # price after applying the adjustment factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
        # limit-up / halted flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        ### real price range (distinguish actually high- vs. low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
        # 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
#df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
        # compute percentile ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
        # drop stocks whose price is too low
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
        # columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
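# --- illustrative sketch (added; the interpretation is an assumption) -------
# The st_or_otherwrong flag above is derived from the ratio of the daily
# down-limit to the up-limit price: ordinary A-shares (+/-10%) give roughly
# 0.9/1.1 ~= 0.82, while ST shares (+/-5%) give roughly 0.95/1.05 ~= 0.90,
# so keeping rows with 0.58 < down_limit/up_limit < 0.85 keeps only the
# ordinary (non-ST) stocks despite the flag's name.
def _demo_st_flag(pre_close=10.0, is_st=False):
    pct = 0.05 if is_st else 0.10
    up_limit = pre_close * (1 + pct)
    down_limit = pre_close * (1 - pct)
    limit_percent = down_limit / up_limit
    return 1 if 0.58 < limit_percent < 0.85 else 0   # 1 = ordinary stock, 0 = ST/other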
class FE_a23(FEbase):
    # this version switches to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
        # flag ST or otherwise abnormal stocks
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
        ## exclude STAR Market (688) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
        # price after applying the adjustment factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
        # limit-up / halted flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        ### real price range (distinguish actually high- vs. low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
        # 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
        # compute percentile ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
        # drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
        # columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
        # planned version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
        # question mark here: flagged for review
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
        df_all['ts_code'] = df_all['ts_code'].astype('str') # convert the original int dtype to string
        df_all['ts_code'] = df_all['ts_code'].str.zfill(6)  # zero-pad to 6 digits; the .str accessor is required here
print(df_all)
        ## exclude STAR Market (688) stocks
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
        # price after applying the adjustment factor
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
        # limit-up / halted flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        # 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
        # compute percentile ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
        # drop stocks whose market value / price is too low
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
        # columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
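# --- assumed helper sketch (added) ------------------------------------------
# rollingRankSciPyB() is used above (real_price_pos, *_amount_pos) but is not
# defined in this excerpt.  The sketch below is one plausible implementation
# consistent with how it is called from rolling(20).apply(): rank the newest
# value inside the window and scale the rank to (0, 1].
def _rolling_rank_sketch(window_values):
    import numpy as np
    from scipy.stats import rankdata
    w = np.asarray(window_values)
    return rankdata(w)[-1] / len(w)   # percentile position of the last element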
class FE_a29(FEbase):
    # this version switches to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
        # flag ST or otherwise abnormal stocks
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
        ## exclude STAR Market (688) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
        # price after applying the adjustment factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
        # limit-up / halted flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        ### real price range (distinguish actually high- vs. low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
        # 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
        # compute percentile ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
        # drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>15]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
        # columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
#df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
        # planned version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
        # question mark here: flagged for review
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
        df_all['ts_code'] = df_all['ts_code'].astype('str') # convert the original int dtype to string
        df_all['ts_code'] = df_all['ts_code'].str.zfill(6)  # zero-pad to 6 digits; the .str accessor is required here
print(df_all)
        ## exclude STAR Market (688) stocks
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
        # price after applying the adjustment factor
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
        # limit-up / halted flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        # 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
        # compute percentile ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
        # drop stocks whose market value / price is too low
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
        # columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
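# --- assumed helper sketch (added) ------------------------------------------
# FEsingle.InputChgSum() is called above with windows 5/12/25 but its source
# is not part of this excerpt.  Judging by the commented sm_amount_25 /
# sm_amount_12 diff lines, it appears to write a per-stock rolling aggregate
# back as '<col>_<window>'.  A minimal sketch under that assumption:
def _input_chg_sum_sketch(df, window, col):
    df[f'{col}_{window}'] = (df.groupby('ts_code')[col]
                               .rolling(window).sum()
                               .reset_index(0, drop=True))
    return df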
class FE_a29_Volatility(FEbase):
    # this version switches to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
        # flag ST or otherwise abnormal stocks
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
        ## exclude STAR Market (688) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
        # price after applying the adjustment factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
        # limit-up / halted flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        ### real price range (distinguish actually high- vs. low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
        # 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
        # compute percentile ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
        # drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>15]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
        # columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
#df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
        # planned version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
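        # The 20-day rolling rank (rollingRankSciPyB is assumed to return the
        # percentile position of the latest value within its window) plus the
        # shift(1) above mean every money-flow feature only uses information
        # available before the current bar, i.e. no look-ahead into today.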
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
        # Question mark here -- to be revisited
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
        df_all['ts_code'] = df_all['ts_code'].astype('str') # convert the original int dtype to string
        df_all['ts_code'] = df_all['ts_code'].str.zfill(6) # the .str accessor prefix is required for string methods
print(df_all)
        ## Exclude STAR Market (688xxx) tickers
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
        # Adjusted (split/dividend-corrected) price
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
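        # In this live build today's adj_factor may not be published yet, so the
        # adjusted price is reconstructed from yesterday's adjusted close scaled by
        # today's pct_chg (an approximation that ignores same-day corporate actions).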
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
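        # Bucket the percentile rank into 20 integer bins 0..19,
        # e.g. 0.53 * 19.9 // 1 == 10.0.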
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
        # Flag limit-up days
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
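        # Thresholds are heuristics: >9.4% approximates the regular 10% daily
        # limit-up, and the 4.8%-5.2% band approximates the 5% limit that applies
        # to ST stocks; both are excluded later by the high_stop==0 filter.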
        # 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
        # Percentile-rank the three intraday ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
        # Filter out low-priced / low-turnover stocks
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
        # Columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
        dwdw=1 # leftover debug placeholder; real_FE writes today_train.csv and returns nothing
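# ---------------------------------------------------------------------------
# Illustrative usage sketch (not executed). Assumptions: the FE_* classes share
# the FEbase interface shown below, core() takes DataSetName as a list of five
# CSV paths ordered [daily, adj_factor, limit, moneyflow, long], and real_FE()
# reads the real_*_now.csv files and writes today_train.csv.
#
#   fe = FE_a31()
#   df_train = fe.core(['daily.csv', 'adj.csv', 'limit.csv',
#                       'moneyflow.csv', 'long.csv'])   # offline training table
#   fe.real_FE()                                        # today's feature rows
#
# File names above are placeholders, not the project's actual paths.
# ---------------------------------------------------------------------------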
class FE_a31(FEbase):
    # Prediction-horizon variant (PredictDaysTrend below is called with 5 days)
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
        df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
        # Flag ST / otherwise-restricted stocks via the narrowed price-limit band
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
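        # Rationale (inferred): with the usual +/-10% daily limits,
        # down_limit/up_limit ~= 0.9/1.1 ~= 0.82, inside (0.58, 0.85), while ST
        # stocks with +/-5% limits give ~= 0.95/1.05 ~= 0.90, outside the band.
        # So st_or_otherwrong == 1 marks ordinary (non-ST) names, which are the
        # rows kept by the st_or_otherwrong == 1 filter later in this method.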
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
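        # dt.dayofweek encodes Monday as 0 .. Sunday as 6,
        # e.g. pd.to_datetime('20240105', format='%Y%m%d').dayofweek == 4 (Friday).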
        ## Exclude STAR Market (688xxx) tickers
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
        # Adjusted (split/dividend-corrected) price
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
        # Flag limit-up days
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        ### Real price range (distinguish genuinely high- vs. low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
        # 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
        # Percentile-rank the three intraday ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
        # Drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
df_all=df_all[df_all['total_mv_rank']<6]
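        # Keeps only the smallest-cap buckets: total_mv_rank takes integer values
        # 0..19, so < 6 retains roughly the bottom 30% by total market value.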
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
        # Columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
#df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
        # Live feature build for the planned new-model version
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
        df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
        # Question mark here -- to be revisited
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
        df_all['ts_code'] = df_all['ts_code'].astype('str') # convert the original int dtype to string
        df_all['ts_code'] = df_all['ts_code'].str.zfill(6) # the .str accessor prefix is required for string methods
print(df_all)
        ## Exclude STAR Market (688xxx) tickers
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
        # Adjusted (split/dividend-corrected) price
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
        # Flag limit-up days
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        # 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
        # Percentile-rank the three intraday ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
        # Filter out low-priced / low-turnover stocks
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
        # Columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
        dwdw=1 # leftover debug placeholder; real_FE writes today_train.csv and returns nothing
class FE_a31_full(FEbase):
    # Prediction-horizon variant (PredictDaysTrend below is called with 5 days)
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
        df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
        # ST / restricted-stock flag (disabled in this variant)
#df_all['st_or_otherwrong']=0
#df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
        ## Exclude STAR Market (688xxx) tickers
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
        # Adjusted (split/dividend-corrected) price
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
        # Limit-up flag (disabled in this variant)
#df_all['high_stop']=0
#df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        ### Real price range (distinguish genuinely high- vs. low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
        # 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
        # Percentile-rank the three intraday ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
        # Drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
df_all=df_all[df_all['total_mv_rank']<6]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
        # Columns not used for now
#df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['amount','close','real_price'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
        # Live feature build for the planned new-model version
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
        df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
        # Question mark here -- to be revisited
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
        df_all['ts_code'] = df_all['ts_code'].astype('str') # convert the original int dtype to string
        df_all['ts_code'] = df_all['ts_code'].str.zfill(6) # the .str accessor prefix is required for string methods
print(df_all)
        ## Exclude STAR Market (688xxx) tickers
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
        # Adjusted (split/dividend-corrected) price
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
        # Flag limit-up days
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        # 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
        # Percentile-rank the three intraday ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
        # Filter out low-priced / low-turnover stocks
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
        # Columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
        dwdw=1 # leftover debug placeholder; real_FE writes today_train.csv and returns nothing
class FE_a29_full(FEbase):
    # Prediction-horizon variant (PredictDaysTrend below is called with 5 days)
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
        df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
        # ST / restricted-stock flag (disabled in this variant)
#df_all['st_or_otherwrong']=0
#df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
        ## Exclude STAR Market (688xxx) tickers
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
        # Adjusted (split/dividend-corrected) price
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
        # Limit-up flag (disabled in this variant)
#df_all['high_stop']=0
#df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        ### Real price range (distinguish genuinely high- vs. low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
        # 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
        # Percentile-rank the three intraday ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
        # Drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']<6]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
        # Columns not used for now
#df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['amount','close','real_price'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
        # Live feature build for the planned new-model version
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
        df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
        # Question mark here -- to be revisited
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
        df_all['ts_code'] = df_all['ts_code'].astype('str') # convert the original int dtype to string
        df_all['ts_code'] = df_all['ts_code'].str.zfill(6) # the .str accessor prefix is required for string methods
print(df_all)
        ## Exclude STAR Market (688xxx) tickers
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
        # Adjusted (split/dividend-corrected) price
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
        # Flag limit-up days
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        # 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#compute cross-sectional ranks for the three price ratios (open/high/low vs. pre-close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#drop stocks with too low a price or turnover
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FE_qliba2(FEbase):
#this version switches to 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
#===================================================================================================================================#
#adjusted price
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
df_all=FEsingle.PredictDaysTrend(df_all,5)
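#NOTE: the class comment above says 3-day prediction, but PredictDaysTrend is called here with a 5-day horizon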
print(df_all)
df_all=df_all.loc[:,['ts_code','trade_date','tomorrow_chg','tomorrow_chg_rank']]
print(df_all.dtypes)
print(df_all)
#===================================================================================================================================#
#load qlib features
###df_qlib_1=pd.read_csv('zzztest.csv',header=0)
###df_qlib_2=pd.read_csv('zzztest2.csv',header=0)
##df_qlib_1=pd.read_csv('2013.csv',header=0)
###df_qlib_1=df_qlib_1.iloc[:,0:70]
##df_qlib_all_l=df_qlib_1.iloc[:,0:2]
##df_qlib_all_r=df_qlib_1.iloc[:,70:]
##df_qlib_1 = pd.concat([df_qlib_all_l,df_qlib_all_r],axis=1)
##print(df_qlib_1.head(10))
##df_qlib_2=pd.read_csv('2015.csv',header=0)
##df_qlib_all_l=df_qlib_2.iloc[:,0:2]
##df_qlib_all_r=df_qlib_2.iloc[:,70:]
##df_qlib_2 = pd.concat([df_qlib_all_l,df_qlib_all_r],axis=1)
##df_qlib_3=pd.read_csv('2017.csv',header=0)
##df_qlib_all_l=df_qlib_3.iloc[:,0:2]
##df_qlib_all_r=df_qlib_3.iloc[:,70:]
##df_qlib_3 = pd.concat([df_qlib_all_l,df_qlib_all_r],axis=1)
##df_qlib_4=pd.read_csv('2019.csv',header=0)
##df_qlib_all_l=df_qlib_4.iloc[:,0:2]
##df_qlib_all_r=df_qlib_4.iloc[:,70:]
##df_qlib_4 = pd.concat([df_qlib_all_l,df_qlib_all_r],axis=1)
##df_qlib_all=pd.concat([df_qlib_2,df_qlib_1])
##df_qlib_all=pd.concat([df_qlib_3,df_qlib_all])
##df_qlib_all=pd.concat([df_qlib_4,df_qlib_all])
##df_qlib_all.drop_duplicates()
##print(df_qlib_all.head(10))
##df_qlib_all.drop(['LABEL0'],axis=1,inplace=True)
##df_qlib_all.to_csv("13to21_first70plus.csv")
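#load the cached qlib feature file produced once by the commented-out merge above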
df_qlib_all=pd.read_csv('13to21_first70plus.csv',header=0)
#df_qlib_all.drop(['LABEL0'],axis=1,inplace=True)
print(df_qlib_all)
df_qlib_all.rename(columns={'datetime':'trade_date','instrument':'ts_code','score':'mix'}, inplace = True)
print(df_qlib_all.dtypes)
print(df_qlib_all)
df_qlib_all['trade_date'] = pd.to_datetime(df_qlib_all['trade_date'], format='%Y-%m-%d')
df_qlib_all['trade_date']=df_qlib_all['trade_date'].apply(lambda x: x.strftime('%Y%m%d'))
df_qlib_all['trade_date'] = df_qlib_all['trade_date'].astype(int)
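#convert qlib-style instrument codes (e.g. 'SH600000') to tushare-style codes (e.g. '600000.SH')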
df_qlib_all['ts_codeL'] = df_qlib_all['ts_code'].str[:2]
df_qlib_all['ts_codeR'] = df_qlib_all['ts_code'].str[2:]
df_qlib_all['ts_codeR'] = df_qlib_all['ts_codeR'].apply(lambda s: s+'.')
df_qlib_all['ts_code']=df_qlib_all['ts_codeR'].str.cat(df_qlib_all['ts_codeL'])
df_qlib_all.drop(['ts_codeL','ts_codeR'],axis=1,inplace=True)
print(df_qlib_all.dtypes)
print(df_qlib_all)
df_qlib_all=df_qlib_all.fillna(value=0)
df_all=pd.merge(df_all, df_qlib_all, how='left', on=['ts_code','trade_date'])
print(df_all)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#planned version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
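#net small-order and large-order money flow: buy amount minus sell amount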
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
#question mark here -- revisit this later
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #the .str accessor prefix is required here
print(df_all)
##exclude STAR Market (688-prefixed) stocks
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#adjusted price
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
#limit-up flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#compute cross-sectional ranks for the three price ratios (open/high/low vs. pre-close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#drop stocks with too low a price or turnover
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FEonlinew_a31(FEbase):
#this version switches to 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
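#multi-window (5/12/25 day) aggregates of the shifted money-flow series via FEsingle.InputChgSum (defined elsewhere)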
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
print(df_money_all)
df_all=
|
pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
|
pandas.merge
|
import pandas as pd
intervention_df = pd.read_csv("intervention_data.csv",dtype=str)
county_df = pd.read_csv("county_data.csv",dtype=str)
df =
|
pd.merge(intervention_df,county_df, how="left", left_on=["location_1","location_2"] , right_on=["STNAME","CTYNAME"])
|
pandas.merge
|
#!/usr/bin/env
import bisect
import time
import itertools
import scipy.optimize
import numpy as np
import pandas as pd
import pyomo.environ as p
from . import normalizer
from rogp.util.numpy import _pyomo_to_np
class Box:
def __init__(self, x, c, mu, cov, warping, invcov=None, hinv=None,
parent=None):
self.x = x
self.c = c
self.mu = mu
self.cov = cov
self.warping = warping
self.warp_inv = warping.warp_inv
self.warp_inv_scalar = warping.warp_inv_scalar
if invcov is None:
invcov = np.linalg.inv(cov)
self.invcov = invcov
if hinv is None:
self.ub, hinv_u = self.f(x[:, 1:2])
self.lb, hinv_l = self.f(x[:, 0:1])
self.hinv = np.concatenate((hinv_l, hinv_u), axis=1)
else:
self.hinv = hinv
bounds = np.matmul(c, hinv)
self.lb = bounds[0]
self.ub = bounds[1]
if parent is None:
self.max_corner = np.where(x != mu)
self.min_corner = np.where(x == mu)
else:
self.max_corner = parent.max_corner
self.min_corner = parent.min_corner
self.parent = parent
def f(self, x):
hinv = self.warp_inv(x)
return np.matmul(self.c, hinv), hinv
def get_children(self, lb, eps=1e-3):
children = []
axis = np.argmax(self.x[:, 1] - self.x[:, 0])
midpoint = (self.x[axis, 1] + self.x[axis, 0])/2
bracket = tuple(self.hinv[axis, :])
hinv_new = self.warp_inv_scalar(midpoint, bracket)
for i in range(2):
x = self.x.copy()
x[axis, i] = midpoint
hinv = self.hinv.copy()
hinv[axis, i] = hinv_new
if self.on_boundary(x):
child = Box(x, self.c, self.mu, self.cov, self.warping,
invcov=self.invcov, hinv=hinv, parent=self)
if child.ub >= lb:
children.append(child)
return children
def on_boundary(self, x=None):
if x is None:
x = self.x
# diff = x[self.max_corner] - self.mu
# c = np.matmul(np.matmul(diff.T, self.invcov), diff)[0, 0]
# if c < 1:
# return False
# diff = x[self.min_corner] - self.mu
# c = np.matmul(np.matmul(diff.T, self.invcov), diff)[0, 0]
# if c > 1:
# return False
# return True
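# evaluate the quadratic form (x-mu)^T invcov (x-mu) at every corner of the box; the box straddles the ellipsoid boundary iff the smallest value is <= 1 and the largest is >= 1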
cons_vals = []
for corner in itertools.product(*zip(x[:, 0], x[:, 1])):
diff = np.array(corner)[:, None] - self.mu
c = np.matmul(np.matmul(diff.T, self.invcov), diff)[0, 0]
cons_vals.append(c)
return min(cons_vals) <= 1 and max(cons_vals) >= 1
# for i in range(self.N):
# xm = self.mu[i, 0] - np.sqrt(np.diag(self.cov)[i])
# # if self.projection[i, 0] - ()self.mu[i, 0] > eps:
# if self.projection[i, 0] - xm > eps and self.grad[i, 0] > eps:
# x = self.x.copy()
# hinv = self.hinv.copy()
# x[i, 0] = self.projection[i, 0]
# hinv[i, 0] = self.projection_hinv[i, 0]
# child = Node(x, self.c, self.mu, self.cov,
# self.warping,
# invcov=self.invcov,
# hinv=hinv, parent=self)
# if child.ub >= lb:
# children.append(child)
# self.children = children
class Node:
def __init__(self, x, c, mu, cov, warping, invcov=None, hinv=None,
parent=None):
self.x = x
self.N = x.shape[0]
self.c = c
self.mu = mu
self.cov = cov
self.warping = warping
if invcov is None:
invcov = np.linalg.inv(cov)
self.invcov = invcov
if hinv is None:
self.ub, self.hinv = self.f(x)
else:
self.hinv = hinv
self.ub = np.matmul(c, hinv)
self.parent = parent
def f(self, x):
hinv = self.warping(x)
return np.matmul(self.c, hinv), hinv
def phi(self, x):
diff = x - self.mu
return np.matmul(np.matmul(diff.T, self.invcov), diff)[0, 0]
def solve_quadratic(self, e):
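# solve phi(x + t*e) = 1 for t, i.e. the step along direction e that lands on the ellipsoid boundary (smaller root of the quadratic); returns None if that direction never reaches the boundary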
diff = self.x - self.mu
c = np.matmul(np.matmul(diff.T, self.invcov), diff) - 1
b = 2*np.matmul(np.matmul(diff.T, self.invcov), e)
a = np.matmul(np.matmul(e.T, self.invcov), e)
if (b**2 - 4*a*c) >= 0:
return (-b - np.sqrt(b**2 - 4*a*c))/(2*a)
else:
return None
def _project(self, e):
t = self.solve_quadratic(e)
if t is not None:
projection = self.x + e*t
grad = np.matmul(self.invcov, projection - self.mu)
return projection, grad
else:
return None, None
def project(self):
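# project the current corner x onto the ellipsoid boundary along a search direction e, adjusting e until no component of the gradient of the quadratic form is negative; sets self.projection, self.grad and the node lower bound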
eps = 1e-4
diff = self.x - self.mu
sig = 1.0
t = None
n_iter = 0
# Make sure we're hitting the ellipsoid
# TODO: Replace this by linesearch
while t is None:
e = -diff - np.sqrt(np.diag(self.cov)[:, None])*sig
t = self.solve_quadratic(e)
sig = sig/1.01
n_iter += 1
if n_iter > 10000:
import ipdb; ipdb.set_trace()
projection, grad = self._project(e)
e_original = e.copy()
n_iter = 0
# Check if any element of gradient is negative
if np.any(grad < -eps):
def f(s, e, i):
e[i] = s
projection, grad = self._project(e)
if grad is None:
return -1
return np.min(grad)
i = np.where(grad < 0)
# Walk along axis for which gradient is negative until it becomes
# zero
if f(0, e, i) > 0:
# res = scipy.optimize.root_scalar(f, (e, i), bracket=(-0.1, 0))
try:
s_min = np.min(e_original[i])
res = scipy.optimize.root_scalar(f, (e, i),
bracket=(s_min, 0))
except:
import ipdb; ipdb.set_trace()
e[i] = res.root
projection, grad = self._project(e)
else:
# Set e[i] = 0.5*e[i]
# walk along on major axis (p.projection - c.x == 0) until
# gradient = 0
dd = self.parent.projection - self.x
j = np.where(dd == 0)
dd[i] = dd[i]/2
# Fix s_min
s_min = -0.5
try:
res = scipy.optimize.root_scalar(f, (dd, j), bracket=(s_min,0))
dd[j] = res.root
projection, grad = self._project(dd)
e = dd
except:
import ipdb; ipdb.set_trace()
self.projection, self.grad = self._project(e)
self.e = e
self.lb, self.projection_hinv = self.f(self.projection)
for k in range(self.x.shape[0]):
if self.x[k] - self.projection[k] < eps and not grad[k] < eps:
import ipdb; ipdb.set_trace()
return self.projection, self.lb
def get_children(self, lb, eps=1e-3):
children = []
for i in range(self.N):
xm = self.mu[i, 0] - np.sqrt(np.diag(self.cov)[i])
# if self.projection[i, 0] - ()self.mu[i, 0] > eps:
if self.projection[i, 0] - xm > eps and self.grad[i, 0] > eps:
x = self.x.copy()
hinv = self.hinv.copy()
x[i, 0] = self.projection[i, 0]
hinv[i, 0] = self.projection_hinv[i, 0]
child = Node(x, self.c, self.mu, self.cov,
self.warping,
invcov=self.invcov,
hinv=hinv, parent=self)
if child.ub >= lb:
children.append(child)
self.children = children
return children
class Tree():
def __init__(self, mu, cov, wf, c):
self.lb = float('-inf')
x = np.sqrt(np.diag(cov)[:, None]) + mu
self.mu = mu
root = Node(x, c, mu, cov, wf)
self.nodes = [root]
self.ubs = [root.ub]
self.ub = root.ub
self.x_lb = None
self.x_ub = root.x
def pop(self):
node = self.nodes.pop()
_ = self.ubs.pop()
return node
def insort(self, node):
i = bisect.bisect_right(self.ubs, node.ub)
self.ubs.insert(i, node.ub)
self.nodes.insert(i, node)
def is_empty(self):
return not bool(self.nodes)
def update_ub(self):
self.ub = self.ubs[-1]
self.x_ub = self.nodes[-1].x
def prune(self):
pass
def solve(self, eps=1e-3, max_iter=10000):
ts = time.time()
n_iter = 0
data = []
data2 = []
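# best-first branch and bound: pop the node with the largest upper bound, project it onto the ellipsoid to tighten the lower bound, keep only children whose upper bounds still exceed the incumbent, and stop on convergence or the iteration cap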
while self.nodes:
# import ipdb; ipdb.set_trace()
node = self.pop()
# Project node onto ellipsoid
node.project()
n_iter += 1
# Update lower bound
if node.lb > self.lb:
self.lb = node.lb
self.x_lb = node.projection
self.prune()
# Add children or fathom
for child in node.get_children(self.lb):
# if child.ub > self.ub:
# import ipdb;ipdb.set_trace()
self.insort(child)
# Stop if no nodes left to explore
if self.is_empty():
break
# Update upper bound
self.update_ub()
if n_iter % 100 == 0:
print(self.lb, self.ub, abs((self.ub-self.lb)/self.ub),
node.x.flatten(), node.projection.flatten(),
node.grad.flatten())
data.append(node.x.flatten())
data2.append(node.projection.flatten())
# Stop when converged
if abs((self.ub - self.lb)/self.ub) <= eps:
break
# Or when maximum iterations are reached
if n_iter > max_iter:
break
# import ipdb; ipdb.set_trace()
self.n_iter = n_iter
self.time = time.time() - ts
df = pd.DataFrame(np.array(data))
df['type'] = 'corner'
df2 = pd.DataFrame(np.array(data2))
df2['type'] = 'projection'
dfmu =
|
pd.DataFrame(self.mu.T)
|
pandas.DataFrame
|
from warnings import simplefilter
import ntpath
import os
import pandas as pd
import pickle
import run
from colorama import Fore
from pandas import DataFrame
# ignore all future warnings
simplefilter(action='ignore', category=FutureWarning)
simplefilter(action='ignore', category=UserWarning)
dir_path = os.path.dirname(os.path.realpath(__file__))
def input_path():
"""
Check path of the mounted system image and return nothing.
"""
print(
Fore.GREEN + 'Provide full path of mounted system image (.vhdx) ' + Fore.YELLOW +
'e.g. F:\C\Windows or F:\C ')
print(Fore.GREEN)
path = str(input('Path:')).strip()
mount = path[0:2]
# print (mount)
if ntpath.ismount(mount):
# print (mount + ' is mounted')
if path == mount + '\C':
sig_scan(path)
else:
sig_scan(path)
else:
print(Fore.YELLOW + '\nError -provide correct path. Mounted system image -try again \n')
input_path()
return 0
def sig_scan(path):
"""
Receives the location of the mounted files and runs sigcheck.exe and save the output for later analysis
"""
dir_path = os.path.dirname(os.path.realpath(__file__))
sigcheck = dir_path + r'\bin\sigcheck.exe'
options = '-s -c -e -h -v -vt -w -nobanner'
save = dir_path + r'\csvFiles\sigcheckToOrganise.csv'
sig_cmd = sigcheck + ' ' + options + ' ' + save + ' ' + path
print(Fore.YELLOW + '\nThis execution might take some time....')
os.system(sig_cmd)
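# a file of <= 317 bytes presumably holds only the CSV header (sigcheck produced no rows), so prompt for the path again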
if os.stat(dir_path + r'\csvFiles\sigcheckToOrganise.csv').st_size <= 317:
print('Try again\n')
input_path()
else:
analysis()
return 0
def analysis():
"""
Analyse the output generated by sigcheck.exe using Machine Learning
"""
save = dir_path + r'\csvFiles\sigcheckToOrganise.csv'
sigs = pd.read_csv(save, encoding='utf-16', delimiter=',')
bigdata = sigs[['Path', 'Verified', 'Machine Type', 'Publisher', 'Description', 'MD5', 'VT detection']]
organised = DataFrame(bigdata)
path_organised = organised['Path']
df1 = organised.loc[organised['Verified'] == 'Unsigned']
df1 = DataFrame(df1)
# ML part #
filename = dir_path + r'\ML\cmdModel.sav'
vectfile = dir_path + r'\ML\vecFile.sav'
se_model = pickle.load(open(filename, 'rb'))
load_vect = pickle.load(open(vectfile, 'rb'))
text = load_vect.transform(path_organised)
print_this = se_model.predict(text)
print_prob = se_model.predict_proba(text) * 100
listdf =
|
pd.DataFrame(print_this)
|
pandas.DataFrame
|
import typer
import spotipy
import pandas as pd
import os
from loguru import logger
from spotify_smart_playlists.helpers import spotify_auth
from toolz import thread_last, mapcat, partition_all
from typing import List
def main(library_file: str, artists_file: str):
logger.info("Initializing Spotify client.")
spotify = spotipy.Spotify(client_credentials_manager=spotify_auth())
logger.info(f"Reading library from {library_file}.")
library_frame =
|
pd.read_csv(library_file)
|
pandas.read_csv
|
import os
import numpy as np
import pandas as pd
#postive_300 = pd.read_pickle(r'300_pos_exs.pkl')
#postive_300 = pd.read_pickle(r'63_1a2o_neg_exs.pkl')
#postive_300 = pd.read_pickle(r'1000_decoy_exs.pkl')
#postive_300 = pd.read_pickle(r'1000_pos_exs.pkl')
#print("postive samples:", len(postive_300))
def reformat_image(ex):
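# map raw distances to a similarity-style image: treat 0 as the cap value 20, rescale from [2, 20] to [0, 1], then invert so smaller distances get values nearer 1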
ex[ex == 0] = 20
ex = (ex - 2)/(20-2)
ex = 1.0 - ex
return ex
def fill_diagonal_distance_map(dist):
new_list = []
channel_number = dist.shape[0]
for j in range(channel_number):
Ndist = dist[j:j+1, :, :]
Ndist = np.squeeze(Ndist)
np.fill_diagonal(Ndist, 1)
new_list.append(Ndist)
return np.array(new_list)
def check_one_distance_map(dist, k):
Ndist = dist[k:k+1, :, :]
Ndist = np.squeeze(Ndist)
Ndist =
|
pd.DataFrame(Ndist)
|
pandas.DataFrame
|
#!/usr/bin/env python
# assume periodic-boundary conditions i.e. import from pyscf.pbc
import numpy as np
import pandas as pd
def ao_on_grid(cell):
from pyscf.pbc.dft import gen_grid,numint
coords = gen_grid.gen_uniform_grids(cell)
aoR = numint.eval_ao(cell,coords)
return aoR.astype(complex)
# end def ao_on_grid
def get_pyscf_psir(mo_vec,cell):
"""
Inputs:
mo_vec: 1D vector of AO coefficients representing a single MO
cell: pyscf.pbc.gto.Cell object, used to exact AO on grid
Output:
moR: MO on grid
"""
# get molecular orbital
aoR = ao_on_grid(cell)
rgrid_shape = np.array(cell.gs)*2+1 # shape of real-space grid
assert np.prod(rgrid_shape) == aoR.shape[0]
moR = np.dot(aoR,mo_vec)
return moR.reshape(rgrid_shape)
# end def get_pyscf_psir
def mo_coeff_to_psig(mo_coeff,aoR,cell_gs,cell_vol,int_gvecs=None):
"""
!!!! assume mo_coeff are already sorted from lowest to highest energy
Inputs:
mo_coeff: molecular orbital in AO basis, each column is an MO, shape (nao,nmo)
aoR: atomic orbitals on a real-space grid, each column is an AO, shape (ngrid,nao)
cell_gs: 2*cell_gs+1 should be the shape of real-space grid (e.g. (5,5,5))
cell_vol: cell volume, used for FFT normalization
int_gvecs: specify the order of plane-waves using reciprocal lattice points
Outputs:
3. plane-wave coefficients representing the MOs, shape (ngrid,nmo)
"""
# provide the order of reciprocal lattice vectors to skip
if int_gvecs is None: # use internal order
nx,ny,nz = cell_gs
from itertools import product
int_gvecs = np.array([gvec for gvec in product(
range(-nx,nx+1),range(-ny,ny+1),range(-nz,nz+1))],dtype=int)
else:
assert (int_gvecs.dtype is int)
# end if
npw = len(int_gvecs) # number of plane waves
# put molecular orbitals on real-space grid
moR = np.dot(aoR,mo_coeff)
nao,nmo = moR.shape
rgrid_shape = 2*np.array(cell_gs)+1
assert nao == np.prod(rgrid_shape)
# for each MO, FFT to get psig
psig = np.zeros([nmo,npw,2]) # store real & complex
for istate in range(nmo):
# fill real-space FFT grid
rgrid = moR[:,istate].reshape(rgrid_shape)
# get plane-wave coefficients (on reciprocal-space FFT grid)
moG = np.fft.fftn(rgrid)/np.prod(rgrid_shape)*cell_vol
# transfer plane-wave coefficients to psig in specified order
for igvec in range(npw):
comp_val = moG[tuple(int_gvecs[igvec])]
psig[istate,igvec,:] = comp_val.real,comp_val.imag
# end for igvec
# end for istate
return int_gvecs,psig
# end def mo_coeff_to_psig
def save_eigensystem(mf,gvec_fname = 'gvectors.dat'
,eigsys_fname = 'eigensystem.json',save=True):
import os
import pandas as pd
if os.path.isfile(eigsys_fname) and os.path.isfile(gvec_fname):
gvecs = np.loadtxt(gvec_fname)
eig_df =
|
pd.read_json(eigsys_fname)
|
pandas.read_json
|
# AUTOGENERATED! DO NOT EDIT! File to edit: notebooks/04_Create_Acs_Indicators_Original.ipynb (unless otherwise specified).
__all__ = ['racdiv', 'pasi', 'elheat', 'empl', 'fam', 'female', 'femhhs', 'heatgas', 'hh40inc', 'hh60inc', 'hh75inc',
'hhchpov', 'hhm75', 'hhpov', 'hhs', 'hsdipl', 'lesshs', 'male', 'nilf', 'othrcom', 'p2more', 'pubtran',
'age5', 'age24', 'age64', 'age18', 'age65', 'affordm', 'affordr', 'bahigher', 'carpool', 'drvalone',
'hh25inc', 'mhhi', 'nohhint', 'novhcl', 'paa', 'ppac', 'phisp', 'pwhite', 'sclemp', 'tpop', 'trav14',
'trav29', 'trav45', 'trav44', 'unempl', 'unempr', 'walked']
# Cell
#File: racdiv.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B02001 - Race
# Universe: Total Population
# Uses ACS Table B03002 - HISPANIC OR LATINO ORIGIN BY RACE
# Universe: Total Population
# Table Creates: racdiv, paa, pwhite, pasi, phisp, p2more, ppac
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def racdiv( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B02001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
fileName = ''
for name in glob.glob('AcsDataClean/B03002*5y'+str(year)+'_est.csv'):
fileName = name
df_hisp = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
df_hisp = df_hisp.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
df_hisp = df_hisp.sum(numeric_only=True)
# Append the one column from the other ACS Table
df['B03002_012E_Total_Hispanic_or_Latino'] = df_hisp['B03002_012E_Total_Hispanic_or_Latino']
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
df1['African-American%'] = df[ 'B02001_003E_Total_Black_or_African_American_alone' ] / df[ 'B02001_001E_Total' ] * 100
df1['White%'] = df[ 'B02001_002E_Total_White_alone' ] / df[ 'B02001_001E_Total' ] * 100
df1['American Indian%'] = df[ 'B02001_004E_Total_American_Indian_and_Alaska_Native_alone' ]/ df[ 'B02001_001E_Total' ] * 100
df1['Asian%'] = df[ 'B02001_005E_Total_Asian_alone' ] / df[ 'B02001_001E_Total' ] * 100
df1['Native Hawaii/Pac Islander%'] = df[ 'B02001_006E_Total_Native_Hawaiian_and_Other_Pacific_Islander_alone'] / df[ 'B02001_001E_Total' ] * 100
df1['Hisp %'] = df['B03002_012E_Total_Hispanic_or_Latino'] / df[ 'B02001_001E_Total' ] * 100
# =1-(POWER(%AA/100,2)+POWER(%White/100,2)+POWER(%AmerInd/100,2)+POWER(%Asian/100,2) + POWER(%NativeAm/100,2))*(POWER(%Hispanci/100,2) + POWER(1-(%Hispanic/100),2))
df1['Diversity_index'] = ( 1- (
( df1['African-American%'] /100 )**2
+( df1['White%'] /100 )**2
+( df1['American Indian%'] /100 )**2
+( df1['Asian%'] /100 )**2
+( df1['Native Hawaii/Pac Islander%'] /100 )**2
)*(
( df1['Hisp %'] /100 )**2
+(1-( df1['Hisp %'] /100) )**2
) ) * 100
return df1['Diversity_index']
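# A minimal sketch (hypothetical shares, not real data) of how the Diversity_index above is computed, with every share as a fraction:
#   race_term = 1 - (aa**2 + white**2 + amind**2 + asian**2 + nhpi**2)
#   hisp_term = hisp**2 + (1 - hisp)**2
#   Diversity_index = race_term * hisp_term * 100
# e.g. aa=0.62, white=0.28, amind=0.004, asian=0.024, nhpi=0.001, hisp=0.05 gives roughly 48.6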
# Cell
#File: pasi.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B03002 - HISPANIC OR LATINO ORIGIN BY RACE
# Universe: Total Population
# Table Creates: racdiv, paa, pwhite, pasi, phisp, p2more, ppac
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def pasi( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B03002*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
# NOTE: bare column access carried over from racdiv(); it has no effect here, since B03002_012E is already part of this table
df['B03002_012E_Total_Hispanic_or_Latino']
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
tot = df[ 'B03002_001E_Total' ]
df1['Asian%NH'] = df[ 'B03002_006E_Total_Not_Hispanic_or_Latino_Asian_alone' ]/ tot * 100
return df1['Asian%NH']
# Cell
#File: elheat.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B25040 - HOUSE HEATING FUEL
# Universe - Occupied housing units
# Table Creates: elheat, heatgas
#purpose: Produce Sustainability - Percent of Residences Heated by Electricity Indicator
#input: Year
#output:
import pandas as pd
import glob
def elheat( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B25040*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B25040_004E','B25040_001E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B25040_004E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B25040_001E']
for col in columns:
denominators = addKey(df, denominators, col)
# construct the denominator, returns 0 iff the other two rows are equal.
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation + final mods
# ( value[1] / nullif(value[2],0) )*100
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.sum(axis=1)
fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
return fi['final']
"""
/* <elheat_14> */ --
WITH tbl AS (
select csa,
( value[1] / nullif(value[2],0) )*100::numeric as result
from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY['B25040_004E','B25040_001E'])
)
update vital_signs.data
set elheat = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014';
"""
# Cell
#File: empl.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B23001 - SEX BY AGE BY EMPLOYMENT STATUS FOR THE POPULATION 16 YEARS AND OVER
# Universe - Population 16 years and over
# Table Creates: empl, unempl, unempr, nilf
#purpose: Produce Workforce and Economic Development - Percent Population 16-64 Employed Indicator
#input: Year
#output:
import pandas as pd
import glob
def empl( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B23001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B23001_003E', 'B23001_010E', 'B23001_017E', 'B23001_024E', 'B23001_031E', 'B23001_038E', 'B23001_045E', 'B23001_052E', 'B23001_059E', 'B23001_066E', 'B23001_089E', 'B23001_096E', 'B23001_103E', 'B23001_110E', 'B23001_117E', 'B23001_124E', 'B23001_131E', 'B23001_138E', 'B23001_145E', 'B23001_152E', 'B23001_007E', 'B23001_014E', 'B23001_021E', 'B23001_028E', 'B23001_035E', 'B23001_042E', 'B23001_049E', 'B23001_056E', 'B23001_063E', 'B23001_070E', 'B23001_093E', 'B23001_100E', 'B23001_107E', 'B23001_114E', 'B23001_121E', 'B23001_128E', 'B23001_135E', 'B23001_142E', 'B23001_149E', 'B23001_156E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B23001_007E', 'B23001_014E', 'B23001_021E', 'B23001_028E', 'B23001_035E', 'B23001_042E', 'B23001_049E', 'B23001_056E', 'B23001_063E', 'B23001_070E', 'B23001_093E', 'B23001_100E', 'B23001_107E', 'B23001_114E', 'B23001_121E', 'B23001_128E', 'B23001_135E', 'B23001_142E', 'B23001_149E', 'B23001_156E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B23001_003E', 'B23001_010E', 'B23001_017E', 'B23001_024E', 'B23001_031E', 'B23001_038E', 'B23001_045E', 'B23001_052E', 'B23001_059E', 'B23001_066E', 'B23001_089E', 'B23001_096E', 'B23001_103E', 'B23001_110E', 'B23001_117E', 'B23001_124E', 'B23001_131E', 'B23001_138E', 'B23001_145E', 'B23001_152E']
for col in columns:
denominators = addKey(df, denominators, col)
# construct the denominator, returns 0 iff the other two rows are equal.
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
# (value[21]+value[22]+value[23]+value[24]+value[25]+value[26]+value[27]+value[28]+value[29]+value[30]+value[31]+value[32]+value[33]+value[34]+value[35]+value[36]+value[37]+value[38]+value[39]+value[40]) --civil labor force empl 16-64
#/
#nullif( (value[1]+value[2]+value[3]+value[4]+value[5]+value[6]+value[7]+value[8]+value[9]+value[10]+value[11]+value[12]+value[13]+value[14]+value[15]+value[16]+value[17]+value[18]+value[19]+value[20]) -- population 16 to 64 ,0) )*100
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.sum(axis=1)
fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
return fi['final']
"""
/* <empl_14> */ --
WITH tbl AS (
select csa,
( ( value[21]+value[22]+value[23]+value[24]+value[25]+value[26]+value[27]+value[28]+value[29]+value[30]+value[31]+value[32]+value[33]+value[34]+value[35]+value[36]+value[37]+value[38]+value[39]+value[40]) --civil labor force empl 16-64 / nullif( (value[1]+value[2]+value[3]+value[4]+value[5]+value[6]+value[7]+value[8]+value[9]+value[10]+value[11]+value[12]+value[13]+value[14]+value[15]+value[16]+value[17]+value[18]+value[19]+value[20]) -- population 16 to 64 ,0) )*100::numeric
as result
from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY[ 'B23001_003E','B23001_010E','B23001_017E','B23001_024E','B23001_031E','B23001_038E','B23001_045E','B23001_052E','B23001_059E','B23001_066E','B23001_089E','B23001_096E','B23001_103E','B23001_110E','B23001_117E','B23001_124E','B23001_131E','B23001_138E','B23001_145E','B23001_152E','B23001_007E','B23001_014E','B23001_021E','B23001_028E','B23001_035E','B23001_042E','B23001_049E','B23001_056E','B23001_063E','B23001_070E','B23001_093E','B23001_100E','B23001_107E','B23001_114E','B23001_121E','B23001_128E','B23001_135E','B23001_142E','B23001_149E','B23001_156E'])
)
update vital_signs.data
set empl = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014';
"""
# Cell
#File: fam.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B11005 - HOUSEHOLDS BY PRESENCE OF PEOPLE UNDER 18 YEARS BY HOUSEHOLD TYPE
# Universe: Households
# Table Creates: hhs, fam, femhhs
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def fam( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B11005*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
# DIFFERENCES IN TABLE NAMES EXIST BETWEEN 16 and 17. 17 has no comma.
rootStr = 'B11005_007E_Total_Households_with_one_or_more_people_under_18_years_Family_households_Other_family_Female_householder'
str16 = rootStr + ',_no_husband_present'
str17 = rootStr + '_no_husband_present'
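# NOTE: rootStr/str16/str17 are defined here but only used in femhhs(); the group-and-sum below repeats the aggregation already done above (grouping by the CSA index level) and is effectively a no-op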
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
# Delete Unassigned--Jail
df = df[df.index != 'Unassigned--Jail']
# Move Baltimore to Bottom
bc = df.loc[ 'Baltimore City' ]
df = df.drop( df.index[1] )
df.loc[ 'Baltimore City' ] = bc
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
# Actually produce the data
df1['total'] = df[ 'B11005_001E_Total' ]
df1['18Under'] = df[ 'B11005_002E_Total_Households_with_one_or_more_people_under_18_years' ] / df1['total'] * 100
return df1['18Under']
# Cell
#File: female.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B01001 - SEX BY AGE
# Universe: Total population
# Table Creates: tpop, female, male, age5 age18 age24 age64 age65
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def female( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B01001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
# df.columns
total = df['B01001_001E_Total']
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
df1['onlyTheLadies'] = df[ 'B01001_026E_Total_Female' ]
return df1['onlyTheLadies']
# Cell
#File: femhhs.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B11005 - HOUSEHOLDS BY PRESENCE OF PEOPLE UNDER 18 YEARS BY HOUSEHOLD TYPE
# Universe: Households
# Table Creates: male, hhs, fam, femhhs
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def femhhs( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B11005*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be opperated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
# DIFFERENCES IN TABLE NAMES EXIST BETWEEN 16 and 17. 17 has no comma.
rootStr = 'B11005_007E_Total_Households_with_one_or_more_people_under_18_years_Family_households_Other_family_Female_householder'
str16 = rootStr + ',_no_husband_present'
str17 = rootStr + '_no_husband_present'
str19 = rootStr + ',_no_spouse_present'
femhh = str17 if year == '17' else str19 if year == '19' else str16
# Actually produce the data
df1['total'] = df[ 'B11005_001E_Total' ]
df1['18Under'] = df[ 'B11005_002E_Total_Households_with_one_or_more_people_under_18_years' ] / df1['total'] * 100
df1['FemaleHH'] = df[ femhh ] / df['B11005_002E_Total_Households_with_one_or_more_people_under_18_years'] * 100
df1['FamHHChildrenUnder18'] = df['B11005_003E_Total_Households_with_one_or_more_people_under_18_years_Family_households']
df1['FamHHChildrenOver18'] = df['B11005_012E_Total_Households_with_no_people_under_18_years_Family_households']
df1['FamHH'] = df1['FamHHChildrenOver18'] + df1['FamHHChildrenUnder18']
return df1['FemaleHH']
# Cell
#File: heatgas.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B25040 - HOUSE HEATING FUEL
# Universe - Occupied housing units
# Table Creates: elheat, heatgas
#purpose: Produce Sustainability - Percent of Residences Heated by Gas Indicator
#input: Year
#output:
import pandas as pd
import glob
def heatgas( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B25040*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be opperated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B25040_002E','B25040_001E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B25040_002E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B25040_001E']
for col in columns:
denominators = addKey(df, denominators, col)
# construct the denominator, returns 0 iff the other two rows are equal.
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
# ( value[1] / nullif(value[2],0) )*100
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.sum(axis=1)
fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
return fi['final']
"""
/* <heatgas_14> */ --
WITH tbl AS (
select csa,
( value[1] / nullif(value[2],0) )*100::numeric as result
from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY['B25040_002E','B25040_001E'])
)
update vital_signs.data
set heatgas = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014';
"""
# Cell
#File: hh40inc.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B19001 - HOUSEHOLD INCOME V
# HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS)
# Table Creates: hh25 hh40 hh60 hh75 hhm75, mhhi
#purpose: Produce Household Income 25K-40K Indicator
#input: Year
#output:
import pandas as pd
import glob
def hh40inc( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B19001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be opperated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# val1.__class__.__name__
#
# create a new dataframe for giggles
fi = pd.DataFrame()
# append into that dataframe col 001
key = getColName(df, '001')
val = getColByName(df, '001')
fi[key] = val
# append into that dataframe col 006
key = getColName(df, '006')
val = getColByName(df, '006')
fi[key] = val
# append into that dataframe col 007
key = getColName(df, '007')
val = getColByName(df, '007')
fi[key] = val
# append into that dataframe col 008
key = getColName(df, '008')
val = getColByName(df, '008')
fi[key] = val
# Delete Rows where the 'denominator' column is 0 -> like the Jail
fi = fi[fi[fi.columns[0]] != 0]
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
#~~~~~~~~~~~~~~~
return fi.apply(lambda x: ( ( x[fi.columns[1] ]+ x[fi.columns[2] ]+ x[fi.columns[3] ] ) / x[fi.columns[0]])*100, axis=1)
"""
/* hh40inc */ --
WITH tbl AS (
select csa,
( (value[1] + value[2] + value[3]) / value[4] )*100 as result
from vital_signs.get_acs_vars_csa_and_bc('2013',ARRAY['B19001_006E','B19001_007E','B19001_008E','B19001_001E'])
)
UPDATE vital_signs.data
set hh40inc = result from tbl where data.csa = tbl.csa and data_year = '2013';
"""
# Cell
#File: hh60inc.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B19001 - HOUSEHOLD INCOME V
# HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS)
# Table Creates: hh25 hh40 hh60 hh75 hhm75, mhhi
#purpose: Produce Household Income 40-60K Indicator
#input: Year
#output:
import pandas as pd
import glob
def hh60inc( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B19001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be opperated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# val1.__class__.__name__
#
# create a new dataframe for giggles
fi = pd.DataFrame()
# append into that dataframe col 001
key = getColName(df, '001')
val = getColByName(df, '001')
fi[key] = val
# append into that dataframe col 009
key = getColName(df, '009')
val = getColByName(df, '009')
fi[key] = val
# append into that dataframe col 010
key = getColName(df, '010')
val = getColByName(df, '010')
fi[key] = val
# append into that dataframe col 011
key = getColName(df, '011')
val = getColByName(df, '011')
fi[key] = val
# Delete Rows where the 'denominator' column is 0 -> like the Jail
fi = fi[fi[fi.columns[0]] != 0]
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
#~~~~~~~~~~~~~~~
return fi.apply(lambda x: ( ( x[fi.columns[1] ]+ x[fi.columns[2] ]+ x[fi.columns[3] ] ) / x[fi.columns[0]])*100, axis=1)
"""
/* hh60inc */ --
WITH tbl AS (
select csa,
( (value[1] + value[2] + value[3]) / value[4] )*100 as result
from vital_signs.get_acs_vars_csa_and_bc('2013',ARRAY['B19001_009E','B19001_010E','B19001_011E','B19001_001E'])
)
UPDATE vital_signs.data
set hh60inc = result from tbl where data.csa = tbl.csa and data_year = '2013';
"""
# Cell
#File: hh75inc.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B19001 - HOUSEHOLD INCOME V
# HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS)
# Table Creates: hh25 hh40 hh60 hh75 hhm75, mhhi
#purpose: Produce Household Income 60-75K Indicator
#input: Year
#output:
import pandas as pd
import glob
def hh75inc( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B19001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be opperated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# val1.__class__.__name__
#
# create a new dataframe for giggles
fi = pd.DataFrame()
# append into that dataframe col 001
key = getColName(df, '001')
val = getColByName(df, '001')
fi[key] = val
# append into that dataframe col 012
key = getColName(df, '012')
val = getColByName(df, '012')
fi[key] = val
# Delete Rows where the 'denominator' column is 0 -> like the Jail
fi = fi[fi[fi.columns[0]] != 0]
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
#~~~~~~~~~~~~~~~
#12/1
return fi.apply(lambda x: ( x[fi.columns[1] ] / x[fi.columns[0]])*100, axis=1)
"""
/* hh75inc */ --
WITH tbl AS (
select csa,
( value[1] / value[2] )*100 as result
from vital_signs.get_acs_vars_csa_and_bc('2013',ARRAY['B19001_012E','B19001_001E'])
)
UPDATE vital_signs.data
set hh75inc = result from tbl where data.csa = tbl.csa and data_year = '2013';
"""
# Cell
#File: hhchpov.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B17001 - POVERTY STATUS IN THE PAST 12 MONTHS BY SEX BY AGE
# Universe: Population for whom poverty status is determined more information
#purpose: Produce Household Poverty Indicator
#input: Year
#output:
import pandas as pd
import glob
def hhchpov( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B17001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B17001_004E', 'B17001_005E', 'B17001_006E', 'B17001_007E', 'B17001_008E', 'B17001_009E', 'B17001_018E', 'B17001_019E', 'B17001_020E', 'B17001_021E', 'B17001_022E', 'B17001_023E', 'B17001_033E', 'B17001_034E', 'B17001_035E', 'B17001_036E', 'B17001_037E', 'B17001_038E', 'B17001_047E', 'B17001_048E', 'B17001_049E', 'B17001_050E', 'B17001_051E', 'B17001_052E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B17001_004E', 'B17001_005E', 'B17001_006E', 'B17001_007E', 'B17001_008E', 'B17001_009E', 'B17001_018E', 'B17001_019E', 'B17001_020E', 'B17001_021E', 'B17001_022E', 'B17001_023E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B17001_004E', 'B17001_005E', 'B17001_006E', 'B17001_007E', 'B17001_008E', 'B17001_009E', 'B17001_018E', 'B17001_019E', 'B17001_020E', 'B17001_021E', 'B17001_022E', 'B17001_023E', 'B17001_033E', 'B17001_034E', 'B17001_035E', 'B17001_036E', 'B17001_037E', 'B17001_038E', 'B17001_047E', 'B17001_048E', 'B17001_049E', 'B17001_050E', 'B17001_051E', 'B17001_052E']
for col in columns:
denominators = addKey(df, denominators, col)
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.sum(axis=1)
fi = fi[fi['denominator'] != 0] #Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
#~~~~~~~~~~~~~~~
# Step 4)
# Add Special Baltimore City Data
#~~~~~~~~~~~~~~~
url = 'https://api.census.gov/data/20'+str(year)+'/acs/acs5/subject?get=NAME,S1701_C03_002E&for=county%3A510&in=state%3A24&key=<KEY>'
table = pd.read_json(url, orient='records')
fi['final']['Baltimore City'] = float(table.loc[1, table.columns[1]])
return fi['final']
"""
/* <hhchpov_14> */
WITH tbl AS (
select csa,
( (value[1] + value[2] + value[3] + value[4] + value[5] + value[6] + value[7] + value[8] + value[9] + value[10] + value[11] + value[12])
/ nullif(
(value[1] + value[2] + value[3] + value[4] + value[5] + value[6] + value[7] + value[8] + value[9] + value[10] + value[11] + value[12] + value[13] + value[14] + value[15] + value[16] + value[17] + value[18] + value[19] + value[20] + value[21] + value[22] + value[23] + value[24] ),
0)
) * 100::numeric as result
from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY['B17001_004E','B17001_005E','B17001_006E','B17001_007E','B17001_008E','B17001_009E','B17001_018E','B17001_019E','B17001_020E','B17001_021E','B17001_022E','B17001_023E','B17001_033E','B17001_034E','B17001_035E','B17001_036E','B17001_037E','B17001_038E','B17001_047E','B17001_048E','B17001_049E','B17001_050E','B17001_051E','B17001_052E'])
)
update vital_signs.data
set hhchpov = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014';
"""
# Cell
#File: hhm75.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B19001 - HOUSEHOLD INCOME V
# HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS)
# Table Creates: hh25 hh40 hh60 hh75 hhm75, mhhi
#purpose: Produce Household Income Over 75K Indicator
#input: Year
#output:
import pandas as pd
import glob
def hhm75( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B19001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# val1.__class__.__name__
#
# create a new dataframe for giggles
fi =
|
pd.DataFrame()
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
import scikits.bootstrap as sci
def time_across_length(runs, ids, times, sequence_lengths):
df_min = pd.DataFrame({"run": runs, "time": times}, index=ids)
df_min = df_min.pivot(index=df_min.index, columns="run")["time"]
df_min = df_min.min(axis=1)
df_min = pd.DataFrame(df_min, columns=["time"])
df_lengths = pd.DataFrame.from_dict(sequence_lengths, orient="index")
df_lengths.columns = ["length"] # pandas 0.22 from-dict does not allow doing it dire.
def fill_not_solved(df):
for count in range(1, len(df_lengths)):
df = df.fillna(5000 + 500 * count ** 1.6, limit=1)
return df
length_grouped = df_lengths.join(df_min, how="outer").groupby("length")
length_grouped = length_grouped.transform(fill_not_solved)
df = df_lengths.join(length_grouped, how="outer")
df = df.set_index("length")
return df.sort_index()
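# Minimal usage sketch for time_across_length (hypothetical inputs, kept as a
# comment): runs/ids/times are parallel lists of solver runs and
# sequence_lengths maps each id to its instance length; the result is the best
# (minimum) time per instance, indexed and sorted by length, with unsolved
# instances filled by the escalating penalty in fill_not_solved.
#
#   time_across_length(runs=[0, 1, 0], ids=["a", "a", "b"], times=[1.2, 0.9, 3.4],
#                      sequence_lengths={"a": 100, "b": 250})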
def _add_timeout_row(df, timeout):
last_row = df.tail(1)
last_row.index = [timeout]
return df.append(last_row)
def _add_start_row(df):
start_row = pd.DataFrame({column: [0] for column in df.columns}, index=[1e-10])
return start_row.append(df)
def solved_across_time_per_run(runs, times, ci_alpha, dataset_size, timeout):
# 1.
df = pd.DataFrame({"run": runs, "times": times})
df = df.sort_values("times")
# 2.
# Duplicates make inverting with pivot impossible so count duplicates and add in 3
# From https://stackoverflow.com/a/41269427
df = (
df.groupby(df.columns.tolist()).size().reset_index().rename(columns={0: "counts"})
)
df = df.set_index("times")
df = df.pivot(index=df.index, columns="run")["counts"]
# 3.
df = df.fillna(0)
df = df.apply(np.cumsum)
# 4.
mean = df.mean(axis=1)
mean.name = "mean"
low_ci, high_ci = sci.ci(
df.T, alpha=ci_alpha, statfunction=lambda x: np.average(x, axis=0)
)
low_ci =
|
pd.Series(low_ci, index=df.index, name=f"low_ci_{ci_alpha}")
|
pandas.Series
|
from FINE.component import Component, ComponentModeling
from FINE import utils
import warnings
import pyomo.environ as pyomo
import pandas as pd
class Transmission(Component):
"""
Doc
"""
def __init__(self, esM, name, commodity, losses=0, distances=None,
hasCapacityVariable=True, capacityVariableDomain='continuous', capacityPerPlantUnit=1,
hasIsBuiltBinaryVariable=False, bigM=None,
operationRateMax=None, operationRateFix=None, tsaWeight=1,
locationalEligibility=None, capacityMin=None, capacityMax=None, sharedPotentialID=None,
capacityFix=None, isBuiltFix=None,
investPerCapacity=0, investIfBuilt=0, opexPerOperation=0, opexPerCapacity=0,
opexIfBuilt=0, interestRate=0.08, economicLifetime=10):
# TODO add unit checks
# Set general component data
utils.checkCommodities(esM, {commodity})
self._name, self._commodity = name, commodity
self._distances = utils.checkAndSetDistances(esM, distances)
self._losses = utils.checkAndSetTransmissionLosses(esM, losses, distances)
# Set design variable modeling parameters
utils.checkDesignVariableModelingParameters(capacityVariableDomain, hasCapacityVariable,
hasIsBuiltBinaryVariable, bigM)
self._hasCapacityVariable = hasCapacityVariable
self._capacityVariableDomain = capacityVariableDomain
self._capacityPerPlantUnit = capacityPerPlantUnit
self._hasIsBuiltBinaryVariable = hasIsBuiltBinaryVariable
self._bigM = bigM
# Set economic data
self._investPerCapacity = utils.checkAndSetCostParameter(esM, name, investPerCapacity, '2dim')
self._investIfBuilt = utils.checkAndSetCostParameter(esM, name, investIfBuilt, '2dim')
self._opexPerOperation = utils.checkAndSetCostParameter(esM, name, opexPerOperation, '2dim')
self._opexPerCapacity = utils.checkAndSetCostParameter(esM, name, opexPerCapacity, '2dim')
self._opexIfBuilt = utils.checkAndSetCostParameter(esM, name, opexIfBuilt, '2dim')
self._interestRate = utils.checkAndSetCostParameter(esM, name, interestRate, '2dim')
self._economicLifetime = utils.checkAndSetCostParameter(esM, name, economicLifetime, '2dim')
self._CCF = self.getCapitalChargeFactor()
# Set location-specific operation parameters
if operationRateMax is not None and operationRateFix is not None:
operationRateMax = None
warnings.warn('If operationRateFix is specified, the operationRateMax parameter is not required.\n' +
'The operationRateMax time series was set to None.')
utils.checkOperationTimeSeriesInputParameters(esM, operationRateMax, locationalEligibility, '2dim')
utils.checkOperationTimeSeriesInputParameters(esM, operationRateFix, locationalEligibility, '2dim')
self._fullOperationRateMax = utils.setFormattedTimeSeries(operationRateMax)
self._aggregatedOperationRateMax = None
self._operationRateMax = utils.setFormattedTimeSeries(operationRateMax)
self._fullOperationRateFix = utils.setFormattedTimeSeries(operationRateFix)
self._aggregatedOperationRateFix = None
self._operationRateFix = utils.setFormattedTimeSeries(operationRateFix)
self._tsaWeight = tsaWeight
# Set location-specific design parameters
self._sharedPotentialID = sharedPotentialID
utils.checkLocationSpecficDesignInputParams(esM, hasCapacityVariable, hasIsBuiltBinaryVariable,
capacityMin, capacityMax, capacityFix,
locationalEligibility, isBuiltFix, sharedPotentialID,
'2dim')
self._capacityMin, self._capacityMax, self._capacityFix = capacityMin, capacityMax, capacityFix
self._isBuiltFix = isBuiltFix
# Set locational eligibility
operationTimeSeries = operationRateFix if operationRateFix is not None else operationRateMax
self._locationalEligibility = utils.setLocationalEligibility(esM, locationalEligibility, capacityMax,
capacityFix, isBuiltFix,
hasCapacityVariable, operationTimeSeries,
'2dim')
# Variables at optimum (set after optimization)
self._capacityVariablesOptimum = None
self._isBuiltVariablesOptimum = None
self._operationVariablesOptimum = None
def getCapitalChargeFactor(self):
""" Computes and returns capital charge factor (inverse of annuity factor) """
return 1 / self._interestRate - 1 / (pow(1 + self._interestRate, self._economicLifetime) * self._interestRate)
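# The expression above is algebraically the annuity present-value factor
# CCF = (1 - (1 + i)^-n) / i. Worked example (using the constructor defaults
# interestRate=0.08, economicLifetime=10):
#   (1 - 1.08**-10) / 0.08 ~= 6.71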
def addToEnergySystemModel(self, esM):
esM._isTimeSeriesDataClustered = False
if self._name in esM._componentNames:
if esM._componentNames[self._name] == TransmissionModeling.__name__:
warnings.warn('Component identifier ' + self._name + ' already exists. Data will be overwritten.')
else:
raise ValueError('Component name ' + self._name + ' is not unique.')
else:
esM._componentNames.update({self._name: TransmissionModeling.__name__})
mdl = TransmissionModeling.__name__
if mdl not in esM._componentModelingDict:
esM._componentModelingDict.update({mdl: TransmissionModeling()})
esM._componentModelingDict[mdl]._componentsDict.update({self._name: self})
def setTimeSeriesData(self, hasTSA):
self._operationRateMax = self._aggregatedOperationRateMax if hasTSA else self._fullOperationRateMax
self._operationRateFix = self._aggregatedOperationRateFix if hasTSA else self._fullOperationRateFix
def getDataForTimeSeriesAggregation(self):
fullOperationRate = self._fullOperationRateFix if self._fullOperationRateFix is not None \
else self._fullOperationRateMax
if fullOperationRate is not None:
fullOperationRate = fullOperationRate.copy()
uniqueIdentifiers = [self._name + "_operationRate_" + locationIn + '_' + locationOut
for locationIn, locationOut in fullOperationRate.columns]
compData =
|
pd.DataFrame(index=fullOperationRate.index, columns=uniqueIdentifiers)
|
pandas.DataFrame
|
#! /usr/bin/python
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import pysam,os
import pandas as pd
import numpy as np
import seaborn as sns
from resources.utils import readBED
DWINDOW = 2500
FIGHEIGHT= 4
FIGWIDTH = 16
XLABEL = 'Chromosome'
YLABEL = 'Reads'
PALETTE = 'husl'
DPI = 400
#Define some human chromosome names
#add more sets as needed
#all and only these names will plot
withCH = ['chr1','chr2','chr3','chr4','chr5','chr6','chr7','chr8','chr9',
'chr10','chr11','chr12','chr13','chr14','chr15','chr16','chr17',
'chr18','chr19','chr20','chr21','chr22','chrX','chrY','chrM']
CHROM = {'hs37d5': ['1','2','3','4','5','6','7','8','9','10','11','12','13',
'14','15','16','17','18','19','20','21','22','X','Y','MT'],
'hg19' : withCH,
'hg38' : withCH
}
def main(parser):
args = parser.parse_args()
bam = pysam.AlignmentFile(args.inBAM)
if args.nameFormat == 'all':
useChroms = bam.references
elif args.nameFormat is None:
useChroms = None
print(f'Guessing chromosome name format')
for name,chroms in list(CHROM.items()):
if chroms[0] in bam.references:
useChroms = chroms
break
else:
useChroms = CHROM[args.nameFormat]
if not useChroms:
raise CoveragePlot_Exception('Chromosome set not found. Check CHROM definition in script')
print(f'Using {args.nameFormat if args.nameFormat else name} chromosome name format')
#get all covered positions in a df
#recommended only for sparse (targeted) coverage!
print('Reading coverage')
cov = pd.DataFrame([(chrom,pile.pos,pile.nsegments)
for chrom in bam.references
for pile in bam.pileup(chrom,stepper='all')
if chrom in useChroms],
columns=['chr','pos','coverage'])
#map to get nbins given window size and contig length
nbins = {chrom : length//args.window + 1
for chrom,length in zip(bam.references,bam.lengths)}
#vector of intervals
intervals = pd.cut(cov.pos,range(0,max(bam.lengths),args.window))
#average for each window
print('Calculating mean values')
try:
meancov = cov.groupby(['chr',intervals]).coverage.mean()
except:
raise CoveragePlot_Exception(f'No coverage found for any of contigs {useChroms}')
#index of intervals for ordering results
cats = intervals.cat.categories
#if targets BED, set index to interval bin number
if args.targets:
targets = readBED(args.targets)
targets.index = pd.cut(targets.eval('(start+end)/2'),cats).cat.codes
#else set targets to empty df
else:
targets =
|
pd.DataFrame(columns=['ctg'])
|
pandas.DataFrame
|
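# Note on the coverage snippet above: positions are bucketed into fixed-size
# windows via pd.cut(cov.pos, range(0, max(bam.lengths), args.window)) and
# groupby(['chr', intervals]).coverage.mean() yields one mean depth per window;
# with the default window of 2500, a pileup at position 6100 falls in the
# (5000, 7500] bin of its contig.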
import numpy as np
import pandas as pd
import random
random.seed(666)
import re
import os
import collections
import importlib
import itertools
from collections import Iterable
# e_mobil_noe_folder_path=r'Q:\2030\03 Projekte\DG KU\2.01.30170.1.0 - e-mobil EV Simulationen\\'
# e_mobil_noe_scenario_material_folder_path=r'Q:\2030\03 Projekte\DG KU\2.01.30170.1.0 - e-mobil EV Simulationen\
# 05 Simulationen\scenario_material\\'
# emobil_noe_scenario_name = test_scen[0]
#
# # Load mapping file. Cumulative mapping file (not meant to be changed for scenarios)
# df_mapping = pd.read_excel(e_mobil_noe_folder_path +
# r'05 Simulationen\Netzdaten\\' +
# 'Netzelemente_v3highpen.xlsx',
# sheet_name='nodes loads mapping',
# dtype={'ev_1': str, 'ev_2': str})
#
# df_load_map = pd.read_csv(e_mobil_noe_scenario_material_folder_path +
# 'df_input_map_loads_3P_high_pen_v1.csv',
# sep=';', decimal=',', index_col=0, header=0)
#
# # Load controller settings
# df_cont_setting = pd.read_csv(e_mobil_noe_scenario_material_folder_path +
# 'df_cont_set_' + emobil_noe_scenario_name + '.csv',
# sep=',', decimal='.', index_col=0)
class ScenarioPreparator:
def __init__(self, grid, loads_df, configuration, max_pen=0.8):
self.new_df_mapping = None
self.new_df_load_map = None
self.new_df_cont_setting = None
self.evse_dist = None
self.ev_scen_df = None
self.heatpump_df = None
print("----------------------------------------------------")
self.grid = grid
self.loads_df = loads_df
self.configuration = configuration
self.yearly_consumptions_of_profiles = None
self.max_pen = max_pen
print("starting preparing {} for EV simulations.".format(self.grid))
self.create_new_df_mapping()
self.calc_yearly_consumption_of_profiles()
self.create_new_df_load_map()
self.create_evse_distribution()
self.calc_yearly_consumption_of_profiles()
self.generate_ev_scenario(penetration=self.max_pen)
def create_evse_distribution(self):
# evse_dist_path = os.path.join(os.path.dirname(__file__), r"evse_distribution")
# self.evse_dist = pd.read_csv(os.path.join(evse_dist_path, grid + '_evse_distribution.csv'))
evse_list = []
for i, el in enumerate(self.grid.loads_df.index.str.split(' ')):
evse_list.append('EVSE_' + el[-1])
# anzahl_wohneinheiten = []
# for list in self.oscd_grid.loads_df['Alias Name 1'].str.split(';'):
# anzahl_wohneinheiten.append(list[0].split(' ')[-1])
# self.evse_df = pd.DataFrame(data={'Port 1': self.oscd_grid.loads_df['Port 1'].to_list(),},
# # 'Anzahl Wohneinheiten': anzahl_wohneinheiten},
# index=new_index) # columns=['Port 1', 'Anzahl Wohneinheiten']
# df = pd.DataFrame()
# df['node_name'] = self.new_df_cont_setting['node_name'].tolist()
# df['UN'] = 0.4
# df['object_name'] = self.new_df_cont_setting.index
# df['object_type'] = 'evse'
# df['object_number'] = [row.split('_')[1] for row in df['object_name']]
# self.new_df_mapping = pd.concat([self.new_df_mapping, df])
# self.new_df_mapping.sort_values(by=['node_name'], inplace=True)
self.new_df_load_map = self.new_df_load_map.assign(evse=evse_list)
def create_new_df_mapping(self):
new_df_mapping = pd.DataFrame()
object_names = list()
object_numbers = list()
for row in self.loads_df.iterrows():
name = row[1]['id']
object_names.append(name)
number = row[0]
object_numbers.append(number)
new_df_mapping['node_name'] = [str(el).replace(',', '').replace(' ', '_') for el
in self.loads_df['node'].tolist()]
new_df_mapping['UN'] = 0.4
new_df_mapping['object_name'] =
|
pd.Series(object_names)
|
pandas.Series
|
import os
import numpy as np
import pandas as pd
import statsmodels.api as sm
from datetime import datetime
from numpy.linalg import inv
from scipy.stats import t
from coinbase_analysis.coinbase_utilities import regression
def calc_residuals(df):
x = df.iloc[:, 0] # BTC/USD
y = df.iloc[:, 1] # ETH/USD
X1 = sm.add_constant(x)
# Y1 = sm.add_constant(y)
ols1 = sm.OLS(y, X1).fit()
# ols2 = sm.OLS(x, Y1).fit()
# calculate residuals here
residuals = ols1.resid
# residuals2 = ols2.resid
return residuals
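# The residuals returned above feed the ADF-style regression assembled in
# test_stationarity below: the first difference of the residual (deltay1) is
# regressed on the lagged residual (y-1), a drift constant and the lagged
# difference (deltay-1); a significantly negative coefficient on y-1 suggests
# the residual series is stationary, i.e. the two price series are
# cointegrated (an Engle-Granger style check).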
def test_stationarity(residuals):
adf_data = pd.DataFrame(residuals)
adf_data.columns = ["y"]
adf_data["drift_constant"] = 1
# Lag residual
adf_data["y-1"] = adf_data["y"].shift(1)
adf_data.dropna(inplace=True)
# Diff between residual and lag residual
adf_data["deltay1"] = adf_data["y"] - adf_data["y-1"]
# Lag difference
adf_data["deltay-1"] = adf_data["deltay1"].shift(1)
adf_data.dropna(inplace=True)
target_y =
|
pd.DataFrame(adf_data["deltay1"], columns=["deltay1"])
|
pandas.DataFrame
|
import pandas as pd
from abc import ABC
from geopandas import GeoDataFrame
from carto.do_dataset import DODataset
from . import subscriptions
from ....utils.geom_utils import set_geometry
from ....utils.logger import log
_DATASET_READ_MSG = '''To load it as a DataFrame you can do:
df = pandas.read_csv('{}')
'''
_GEOGRAPHY_READ_MSG = '''To load it as a GeoDataFrame you can do:
from cartoframes.utils import decode_geometry
df = pandas.read_csv('{}')
gdf = GeoDataFrame(df, geometry=decode_geometry(df['geom']))
'''
GEOM_COL = 'geom'
class CatalogEntity(ABC):
"""This is an internal class the rest of the classes related to the catalog discovery extend.
It contains:
- Properties: `id`, `slug` (a shorter ID).
- Static methods: `get`, `get_all`, `get_list` to retrieve elements or lists of objects in the catalog such as
datasets, categories, variables, etc.
- Instance methods to convert to pandas Series, Python dict, compare instances, etc.
As a rule of thumb you don't directly use this class, it is documented for inheritance purposes.
"""
id_field = 'id'
_entity_repo = None
export_excluded_fields = ['summary_json', 'geom_coverage']
def __init__(self, data):
self.data = data
@property
def id(self):
"""The ID of the entity."""
return self.data[self.id_field]
@property
def slug(self):
"""The slug (short ID) of the entity."""
try:
return self.data['slug']
except KeyError:
return None
@classmethod
def get(cls, id_):
"""Get an instance of an entity by ID or slug.
Args:
id_ (str):
ID or slug of a catalog entity.
Raises:
CatalogError: if there's a problem when connecting to the catalog or no entities are found.
"""
return cls._entity_repo.get_by_id(id_)
@classmethod
def get_all(cls, filters=None):
"""List all instances of an entity.
Args:
filters (dict, optional):
Dict containing pairs of entity properties and its value to be used as filters to query the available
entities. If none is provided, no filters will be applied to the query.
"""
return cls._entity_repo.get_all(filters)
@classmethod
def get_list(cls, id_list):
"""Get a list of instance of an entity by a list of IDs or slugs.
Args:
id_list (list):
List of ID or slugs of entities in the catalog to retrieve instances.
Raises:
CatalogError: if there's a problem when connecting to the catalog or no entities are found.
"""
return cls._entity_repo.get_by_id_list(id_list)
def to_series(self):
"""Converts the entity instance to a pandas Series."""
return
|
pd.Series(self.data)
|
pandas.Series
|
import numpy as np
import pandas as pd
def normalize_minmax(fname_in, fname_out, min_val=0, max_val=1, columns=None):
data =
|
pd.read_csv(fname_in + '.csv', delimiter=',')
|
pandas.read_csv
|
import warnings
warnings.filterwarnings("ignore")
from utils import setup_seed, get_time_dif, view_gpu_info
import pandas as pd
from copy import deepcopy
import math
import random
from tqdm import tqdm
import numpy as np
import json
import re
def denoising(source_file=r'../data/raw_data/train.xlsx', target_file=r'../data/raw_data/train_denoised.xlsx'):
"""
Denoise the raw train.xlsx data and save the result as train_denoised.xlsx.
Main processing steps: 1 strip extra spaces 2 normalise fullwidth '|' to '|' 3 convert letters to lowercase 4 drop invalid constraint operators 5 convert float(nan) to ''
@return:
"""
def process_field(field):
"""
Process a field: 1 remove spaces 2 normalise fullwidth '|' to '|' 3 convert everything to lowercase
@param field
@return: field
"""
field = field.replace(' ','').replace('|', '|')
field = field.lower()
return field
def question_replace(question):
"""
Denoise the question text
:param question:
:return:
"""
question = question.replace('二十', '20')
question = question.replace('三十', '30')
question = question.replace('四十', '40')
question = question.replace('五十', '50')
question = question.replace('六十', '60')
question = question.replace('七十', '70')
question = question.replace('八十', '80')
question = question.replace('九十', '90')
question = question.replace('一百', '100')
question = question.replace('十块', '10块')
question = question.replace('十元', '10元')
question = question.replace('六块', '6块')
question = question.replace('一个月', '1个月')
question = question.replace('2O', '20')
if '一元一个g' not in question:
question = question.replace('一元', '1元')
if 'train' in source_file:
question = question.replace(' ', '_')
else:
question = question.replace(' ', '')
question = question.lower()
return question
raw_data = pd.read_excel(source_file)
# Training data
if 'train' in source_file:
raw_data['有效率'] = [1] * len(raw_data)
for index, row in raw_data.iterrows():
row['用户问题'] = question_replace(row['用户问题'])
# TODO check whether this is valid
# if row['实体'] == '畅享套餐促销优惠':
# row['实体'] = '畅享套餐促销优惠活动'
for index_row in range(len(row)):
field = str(row.iloc[index_row])
if field == 'nan':
field = ''
row.iloc[index_row] = process_field(field)
if not (row['约束算子'] == 'min' or row['约束算子'] == 'max'):
row['约束算子'] = ''
raw_data.iloc[index] = row
# Test data
else:
length = []
columns = raw_data.columns
for column in columns:
length.append(len(str(raw_data.iloc[1].at[column])))
max_id = np.argmax(length)
for index, row in raw_data.iterrows():
# raw_data.loc[index, 'query'] = question_replace(row['query'])
raw_data.loc[index, columns[max_id]] = question_replace(row[columns[max_id]])
# print(raw_data)
raw_data.to_excel(target_file, index=False)
def read_synonyms():
"""
Read synonyms.txt and convert it into a dict
@return:
"""
synonyms_dict = {}
with open(r'../data/raw_data/synonyms.txt', 'r') as f:
lines = f.readlines()
for line in lines:
line = line.strip('\n')
ent, syn_str = line.split()
syns = syn_str.split('|')
synonyms_dict[ent] = syns
return synonyms_dict
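# Illustrative line format expected in synonyms.txt (placeholder names, not
# taken from the real file): an entity, whitespace, then '|'-separated synonyms,
#   entityA synonym1|synonym2
# which read_synonyms turns into {'entityA': ['synonym1', 'synonym2']}.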
def create_argument_data(synonyms_dict):
"""
Augment the data by synonym replacement and save it as a new xlsx
@param synonyms_dict: loaded from synonyms.txt
@return:
"""
raw_data = pd.read_excel(r'../data/raw_data/train_denoised.xlsx')
new_data = []
for index, row in raw_data.iterrows():
question = row['用户问题']
for k, vs in synonyms_dict.items():
if k in question:
if '无' in vs:
continue
for v in vs:
row_temp = deepcopy(row)
row_temp['用户问题'] = question.replace(k, v)
new_data.append(row_temp)
new_data =
|
pd.DataFrame(new_data)
|
pandas.DataFrame
|
"""
THIS ROUTINE IS NO LONGER USED, SINCE ITS DATA SOURCE WAS DISCONTINUED
"""
# TODO Clear this routine from the project
import numpy as np
import pandas as pd
import datetime as dt
from tqdm import tqdm
import matplotlib.pyplot as plt
from quantfin.calendars import DayCounts
from quantfin.data import grab_connection, tracker_uploader
query = 'select * from raw_tesouro_nacional'
conn = grab_connection()
df_raw = pd.read_sql(query, con=conn)
all_trackers = pd.DataFrame()
# ===============
# ===== LFT =====
# ===============
df = df_raw[df_raw['bond_type'] == 'LFT']
df_buy = df.pivot('reference_date', 'maturity', 'preco_compra').dropna(how='all')
df_sell = df.pivot('reference_date', 'maturity', 'preco_venda').dropna(how='all')
# ----- curto -----
df_tracker =
|
pd.DataFrame(columns=['Current', 'Ammount', 'Price', 'Notional'])
|
pandas.DataFrame
|
from .io import read_annotations, save_annotations
import warnings
import glob
import os
import os.path as path
import numpy as np
import pandas as pd
class AnnotationFormat:
"""
Class containing useful data for accessing and manipulating annotations.
I've tried to extract as many "magic constants" out of the actual methods as
possible so that they can be grouped here and changed easily in the future.
"""
# Column Names
LEFT_COL = "Begin Time (s)"
RIGHT_COL = "End Time (s)"
TOP_COL = "High Freq (Hz)"
BOT_COL = "Low Freq (Hz)"
CLASS_COL = "Species"
CLASS_CONF_COL = "Species Confidence"
CALL_UNCERTAINTY_COL = "Call Uncertainty"
# Column which cannot be left as NA or NaN
REQUIRED_COLS = [
LEFT_COL,
RIGHT_COL,
TOP_COL,
BOT_COL,
CLASS_COL,
CLASS_CONF_COL,
CALL_UNCERTAINTY_COL
]
# Dictionary mapping annotator's noisy labels to a constant class name
CLASS_LABEL_MAP = {
"humpback whale": "hb",
"hb whale": "hb",
"hb?": "hb",
"hhb": "hb",
"hb": "hb",
"jn": "hb",
"sea lion": "sl",
"sl": "sl",
"rockfish": "rf",
"rf": "rf",
"killer whale": "kw",
"kw": "kw",
"?": "?",
"mech": "?",
"mechanical": "?"
}
# Boxes need to span at least 1ms and 1/100 Hz
# If a box is dropped for this reason, it was likely created by mistake.
BOX_MIN_DURATION = 1e-3
BOX_MIN_FREQ_RANGE = 1e-2
# Useful glob patterns for finding annotation files and misspellings
PATTERN = "*.*-*.txt"
BAD_PATTERNS = ["*.*_*.txt"]
_format = AnnotationFormat()
def get_all_classes(annotation_paths, verbose=False):
"""
Returns a list of all classes seen in the annotation files.
Parameters
annotation_paths : list of str
paths to the .txt annotation files (eg: ['/foo/bar/annots.txt'])
verbose : bool, optional (default: False)
flag to control whether debug information is printed
Returns
classes : list of str
List containing all unique classes
"""
classes = set()
for annot_fname in annotation_paths:
classes.update(list(read_annotations(annot_fname)[_format.CLASS_COL].unique()))
classes = sorted([s for s in list(classes)])
if verbose:
print("Classes found: ", classes)
return classes
def get_area(annotation):
"""
Calculates the area of a single annotation box.
Parameters
annotation : pandas Series
a single annotation
Returns
area : float
Area of the bounding box (Hz*Seconds)
"""
return ((annotation[_format.RIGHT_COL] - annotation[_format.LEFT_COL])
* (annotation[_format.TOP_COL] - annotation[_format.BOT_COL]))
def get_all_annotations_in_directory(directory, check_misnomers=True):
"""
Uses glob to construct a list of paths to each file in the provided
directory which matches the correct formatting of an annotation file name.
Parameters
directory : str
path to the directory of interest
check_misnomers : bool, optional (default: True)
flag to control whether to warn about potential filename mistakes
Returns
good_results : List of str
Paths found in the given directory which match the filename pattern
"""
good_results = glob.glob(path.join(directory, _format.PATTERN))
if check_misnomers:
# Check if there are any incorrectly named files that may be overlooked
bad_results = []
for bad_pattern in _format.BAD_PATTERNS:
bad_results.extend(glob.glob(path.join(directory, bad_pattern)))
if len(bad_results) > 0:
warnings.warn(
"({}) Some files in {} may be incorrectly named: " \
"[\n {}\n]".format(
"get_all_annotations_in_directory",
directory,
",\n ".join(bad_results)
)
)
return good_results
def levenshteinDistanceDP(token1, token2):
"""
Efficiently calculates the Levenshtein distance (edit distance) between two
strings. Useful for determining if a column name has been misspelled.
The cost of insertions, deletions, and substitutions are all set to 1.
Parameters
token1 : str
first token
token2 : str
second token
Returns
distance : int
the number of single-character edits required to turn token1 into token2
"""
distances = np.zeros((len(token1) + 1, len(token2) + 1))
for t1 in range(len(token1) + 1):
distances[t1][0] = t1
for t2 in range(len(token2) + 1):
distances[0][t2] = t2
a, b, c = 0, 0, 0
for t1 in range(1, len(token1) + 1):
for t2 in range(1, len(token2) + 1):
if (token1[t1-1] == token2[t2-1]):
distances[t1][t2] = distances[t1 - 1][t2 - 1]
else:
a = distances[t1][t2 - 1]
b = distances[t1 - 1][t2]
c = distances[t1 - 1][t2 - 1]
if (a <= b and a <= c):
distances[t1][t2] = a + 1
elif (b <= a and b <= c):
distances[t1][t2] = b + 1
else:
distances[t1][t2] = c + 1
return distances[len(token1)][len(token2)]
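# Example of how this is used in clean_annotations below: column names within
# one edit of a required column are accepted and renamed, e.g.
#   levenshteinDistanceDP("Specie", "Species")     -> 1.0  (dist <= 1, matched)
#   levenshteinDistanceDP("Species ID", "Species") -> 3.0  (not matched)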
def _print_n_rejected(n_rejected, reason):
if n_rejected > 0:
print("Rejecting {} annotation(s) for {}".format(
n_rejected,
reason
))
def clean_annotations(annotations, verbose=False):
"""
Cleans a single DataFrame of annotations by identifying invalid annotations
and separating them from the valid annotations.
Additionally checks for other formatting issues such as misnamed columns.
Parameters
annotations : DataFrame
a set of annotations from a single recording
verbose : bool, optional (default: False)
flag to control whether debug information is printed
Returns
valid_annotations : DataFrame
the annotations that passed every filter
invalid_annotations : DataFrame
the annotations that failed at least one filter
"""
annotations = annotations.copy()
original_size = len(annotations)
# Check for misnamed columns
column_map = {}
for req_col in _format.REQUIRED_COLS:
# For each required column, find the column with a dist <= 1.
matches = []
for col in annotations.columns:
dist = levenshteinDistanceDP(col, req_col)
if dist <= 1:
matches.append(col)
if dist > 0:
column_map[col] = req_col
if len(matches) > 1:
warnings.warn(
"({}) Required Column '{}' matches multiple " \
"columns: [{}]".format(
"clean_annotations",
req_col,
", ".join(matches)
)
)
# This required column is ambiguous. Stop and reject all.
# TODO: Write logic to combine ambiguous columns automatically
return pd.DataFrame(columns=annotations.columns), annotations
if len(matches) == 0:
warnings.warn(
"({}) Required Column '{}' does not match any existing " \
"columns: [{}]".format(
"clean_annotations",
req_col,
", ".join(list(annotations.columns))
)
)
# This required column was not found. Stop and reject all.
return pd.DataFrame(columns=annotations.columns), annotations
if len(column_map) > 0:
if verbose:
print(
"Applying column name corrections: [{}]".format(
", ".join(
["'{}':'{}'".format(k,v) for k,v in column_map.items()]
)
)
)
annotations.rename(columns=column_map, inplace=True)
# As we filter, place slices of invalid annotations here
invalid_annots = []
# ------------------- Fill default values where missing -------------------
default_values = {
_format.CLASS_CONF_COL: 5,
_format.CALL_UNCERTAINTY_COL: 0
}
annotations.fillna(default_values, inplace=True)
# If they had NaNs, then the columns would have been upcast to float64, so
# cast them back to int64
annotations = annotations.astype({
_format.CLASS_CONF_COL: "int64",
_format.CALL_UNCERTAINTY_COL: "int64"
})
# ------------------------ Filter by Species label ------------------------
classes = annotations[_format.CLASS_COL].str.lower()
valid_mask = classes.isin(list(_format.CLASS_LABEL_MAP.keys()))
if verbose:
_print_n_rejected((~valid_mask).sum(), "bad species labels")
invalid_annots.append(annotations.loc[~valid_mask])
annotations = annotations.loc[valid_mask]
annotations[_format.CLASS_COL] = \
annotations[_format.CLASS_COL].str.lower().map(_format.CLASS_LABEL_MAP)
# ----------- Filter by Invalid Confidence / Uncertainty Values -----------
valid_mask = (
(annotations[_format.CLASS_CONF_COL] >= 1) \
& (annotations[_format.CLASS_CONF_COL] <= 5) \
& (annotations[_format.CALL_UNCERTAINTY_COL].isin([0,1]))
)
if verbose:
_print_n_rejected(
(~valid_mask).sum(),
"bad confidence or uncertainty values"
)
invalid_annots.append(annotations.loc[~valid_mask])
annotations = annotations.loc[valid_mask]
# ---------------------- Filter by any remaining NAs ----------------------
valid_mask = ~(annotations[_format.REQUIRED_COLS].isna().any(axis=1))
if verbose:
_print_n_rejected((~valid_mask).sum(), "missing a required value")
invalid_annots.append(annotations.loc[~valid_mask])
annotations = annotations.loc[valid_mask]
# --------------------- Filter by Invalid Box Extents ---------------------
valid_mask = (
(annotations[_format.RIGHT_COL] - annotations[_format.LEFT_COL] \
>= _format.BOX_MIN_DURATION) \
& (annotations[_format.TOP_COL] - annotations[_format.BOT_COL] \
>= _format.BOX_MIN_FREQ_RANGE)
)
if verbose:
_print_n_rejected((~valid_mask).sum(), "invalid box extents")
invalid_annots.append(annotations.loc[~valid_mask])
annotations = annotations.loc[valid_mask]
if verbose:
filtered_size = len(annotations)
print("{:2%} of annotations passed all filters".format(
filtered_size / original_size
))
invalid_annots =
|
pd.concat(invalid_annots)
|
pandas.concat
|
from __future__ import division
from datetime import datetime
import sys
if sys.version_info < (3, 3):
import mock
else:
from unittest import mock
import pandas as pd
import numpy as np
from nose.tools import assert_almost_equal as aae
import bt
import bt.algos as algos
def test_algo_name():
class TestAlgo(algos.Algo):
pass
actual = TestAlgo()
assert actual.name == 'TestAlgo'
class DummyAlgo(algos.Algo):
def __init__(self, return_value=True):
self.return_value = return_value
self.called = False
def __call__(self, target):
self.called = True
return self.return_value
def test_algo_stack():
algo1 = DummyAlgo(return_value=True)
algo2 = DummyAlgo(return_value=False)
algo3 = DummyAlgo(return_value=True)
target = mock.MagicMock()
stack = bt.AlgoStack(algo1, algo2, algo3)
actual = stack(target)
assert not actual
assert algo1.called
assert algo2.called
assert not algo3.called
def test_run_once():
algo = algos.RunOnce()
assert algo(None)
assert not algo(None)
assert not algo(None)
def test_run_period():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunPeriod()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
target.now = None
assert not algo(target)
# run on first date
target.now = dts[0]
assert not algo(target)
# run on first supplied date
target.now = dts[1]
assert algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert not algo(target)
algo = algos.RunPeriod(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
# run on first date
target.now = dts[0]
assert not algo(target)
# first supplied date
target.now = dts[1]
assert not algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert algo(target)
# date not in index
target.now = datetime(2009, 2, 15)
assert not algo(target)
def test_run_daily():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunDaily()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('',[algo]),
data
)
target.data = backtest.data
target.now = dts[1]
assert algo(target)
def test_run_weekly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunWeekly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert not algo(target)
# new week
target.now = dts[3]
assert algo(target)
algo = algos.RunWeekly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert algo(target)
# new week
target.now = dts[3]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8),datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_monthly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunMonthly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert not algo(target)
# new month
target.now = dts[31]
assert algo(target)
algo = algos.RunMonthly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert algo(target)
# new month
target.now = dts[31]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_quarterly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunQuarterly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert not algo(target)
# new quarter
target.now = dts[90]
assert algo(target)
algo = algos.RunQuarterly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert algo(target)
# new quarter
target.now = dts[90]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_yearly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunYearly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert not algo(target)
# new year
target.now = dts[365]
assert algo(target)
algo = algos.RunYearly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert algo(target)
# new year
target.now = dts[365]
assert not algo(target)
def test_run_on_date():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunOnDate('2010-01-01', '2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-03')
assert not algo(target)
def test_rebalance():
algo = algos.Rebalance()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 1000
assert s.capital == 0
c1 = s['c1']
assert c1.value == 1000
assert c1.position == 10
assert c1.weight == 1.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 1000
assert s.capital == 0
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 1000
assert c2.position == 10
assert c2.weight == 1.
def test_rebalance_with_commissions():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 999
assert s.capital == 99
c1 = s['c1']
assert c1.value == 900
assert c1.position == 9
assert c1.weight == 900 / 999.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 997
assert s.capital == 97
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 900
assert c2.position == 9
assert c2.weight == 900. / 997
def test_rebalance_with_cash():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
# set cash amount
s.temp['cash'] = 0.5
assert algo(s)
assert s.value == 999
assert s.capital == 599
c1 = s['c1']
assert c1.value == 400
assert c1.position == 4
assert c1.weight == 400.0 / 999
s.temp['weights'] = {'c2': 1}
# change cash amount
s.temp['cash'] = 0.25
assert algo(s)
assert s.value == 997
assert s.capital == 297
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 700
assert c2.position == 7
assert c2.weight == 700.0 / 997
def test_select_all():
algo = algos.SelectAll()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
s.setup(data)
s.update(dts[0])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# make sure don't keep nan
s.update(dts[1])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
# if specify include_no_data then 2
algo = algos.SelectAll(include_no_data=True)
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_weight_equally():
algo = algos.WeighEqually()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.update(dts[0])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert 'c1' in weights
assert weights['c1'] == 0.5
assert 'c2' in weights
assert weights['c2'] == 0.5
def test_weight_specified():
algo = algos.WeighSpecified(c1=0.6, c2=0.4)
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.update(dts[0])
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert 'c1' in weights
assert weights['c1'] == 0.6
assert 'c2' in weights
assert weights['c2'] == 0.4
def test_select_has_data():
algo = algos.SelectHasData(min_count=3, lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts =
|
pd.date_range('2010-01-01', periods=10)
|
pandas.date_range
|
import pandas as data
import os.path
from datetime import datetime
cut_off = datetime.strptime('28-03-2020', '%d-%m-%Y').date() #Extract historical data till this date.
base_dir = os.path.join(os.path.dirname(__file__), "../") #Obtain the path to the base directory for absolute addressing.
def init_bucket(frame, date, region):
'''Initialize a new bucket if it does not previously exist at frame[date][region].'''
if(frame.get(date) == None): #Partition does not exist. Create new.
frame[date] = dict()
if(frame[date].get(region) == None): #Bucket does not exist. Create new.
frame[date][region] = [0, 0, 0]
def generate_dataset(record):
'''Generate a dataframe from an existing record.'''
rows = [[region] + tally for region, tally in record.items()]
df = data.DataFrame(data = rows, columns = ["Region", "Confirmed", "Recovered", "Deceased"])
df = df.sort_values(by = "Region")
df = df.append(df.sum(numeric_only = True), ignore_index = True)
df.iloc[-1, 0] = "National Total"
return(df)
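# Illustrative record structure consumed by generate_dataset (hypothetical
# numbers): {region: [confirmed, recovered, deceased]}, e.g.
#   generate_dataset({"Kerala": [10, 2, 0], "Delhi": [5, 0, 1]})
# returns one row per region (sorted by name) plus a "National Total" row with
# the column sums.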
increments = dict() #Increments in cases per day bucketted over regions.
#Load historical data.
patients_dataset =
|
data.read_csv("https://raw.githubusercontent.com/covid19india/CovidCrowd/master/data/raw_data.csv", parse_dates = ['Date Announced', 'Status Change Date'], dayfirst = True)
|
pandas.read_csv
|
from stockquant.util.models import TaskTable
from stockquant.util import logger
from datetime import datetime as dtime
from stockquant.odl.models import BS_Daily, BS_Stock_Basic
from stockquant.util.stringhelper import TaskEnum
from stockquant.odl.baostock.util import query_history_k_data_plus
from stockquant.util.database import engine, session_scope
from sqlalchemy import func, or_, and_
import datetime
import pandas as pd
_logger = logger.Logger(__name__).get_log()
def update_task():
"""
Update the task table - BS daily historical A-share K-line data
"""
# Delete historical task records
TaskTable.del_with_task(TaskEnum.BS日线历史A股K线数据)
with session_scope() as sm:
# Using the BS stock basic info and each stock's latest trade date in the A-share K-line data, find all stocks that need updating and the date range to update
cte = sm.query(BS_Daily.code, func.max(BS_Daily.date).label("mx_date")).group_by(BS_Daily.code).cte("cte")
query = sm.query(
BS_Stock_Basic.code, BS_Stock_Basic.ts_code, BS_Stock_Basic.ipoDate, BS_Stock_Basic.outDate, cte.c.mx_date
)
query = query.join(cte, BS_Stock_Basic.code == cte.c.code, isouter=True)
query = query.filter(
or_(
and_(BS_Stock_Basic.outDate == None, cte.c.mx_date < dtime.now().date()), # noqa
cte.c.mx_date == None,
BS_Stock_Basic.outDate > cte.c.mx_date,
)
)
codes = query.all()
tasklist = []
for c in codes:
tasktable = TaskTable(
task=TaskEnum.BS日线历史A股K线数据.value,
task_name=TaskEnum.BS日线历史A股K线数据.name,
ts_code=c.ts_code,
bs_code=c.code,
begin_date=c.ipoDate if c.mx_date is None else c.mx_date + datetime.timedelta(days=1),
end_date=c.outDate if c.outDate is not None else dtime.now().date(),
)
tasklist.append(tasktable)
sm.bulk_save_objects(tasklist)
_logger.info("生成{}条任务记录".format(len(codes)))
def _load_data(dic: dict):
"""
docstring
"""
content = dic["result"]
bs_code = dic["bs_code"]
# frequency = dic["frequency"]
# adjustflag = dic["adjustflag"]
if content.empty:
return
table_name = BS_Daily.__tablename__
try:
content["date"] = pd.to_datetime(content["date"], format="%Y-%m-%d")
# content['code'] =
content["open"] = pd.to_numeric(content["open"], errors="coerce")
content["high"] = pd.to_numeric(content["high"], errors="coerce")
content["low"] = pd.to_numeric(content["low"], errors="coerce")
content["close"] = pd.to_numeric(content["close"], errors="coerce")
content["preclose"] = pd.to_numeric(content["preclose"], errors="coerce")
content["volume"] = pd.to_numeric(content["volume"], errors="coerce")
content["amount"] = pd.to_numeric(content["amount"], errors="coerce")
# content['adjustflag'] =
content["turn"] = pd.to_numeric(content["turn"], errors="coerce")
content["tradestatus"] = pd.to_numeric(content["tradestatus"], errors="coerce").astype(bool)
content["pctChg"] =
|
pd.to_numeric(content["pctChg"], errors="coerce")
|
pandas.to_numeric
|
import pandas as pd
from sklearn import model_selection as skl
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
data =
|
pd.read_csv('insurance.csv')
|
pandas.read_csv
|
import numpy as np
import pandas as pd
import os
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
import warnings
import random
random.seed(10)
np.random.seed(42)
warnings.filterwarnings('ignore')
user_input = input("Enter the path of your file: ")
assert os.path.exists(user_input), "I did not find the file at, "+str(user_input)
data = pd.read_csv(user_input)
test=data.copy()
data=data.rename(columns={"Seat Fare Type 1":"f1","Seat Fare Type 2":"f2"})
data=data.dropna(subset=["f1","f2"], how='all')
test=data.copy()
test["Service Date"]=pd.to_datetime(test["Service Date"],format='%d-%m-%Y %H:%M')
test["RecordedAt"]=pd.to_datetime(test["RecordedAt"],format='%d-%m-%Y %H:%M')
test["Service Date"]-test["RecordedAt"]
data["timediff"]=test["Service Date"]-test["RecordedAt"]
test["timediff"]=test["Service Date"]-test["RecordedAt"]
days=test["timediff"].dt.days
hours=test["timediff"].dt.components["hours"]
mins=test["timediff"].dt.components["minutes"]
test["abstimediff"]=days*24*60+hours*60+mins
test["f1"]=test["f1"].astype(str)
test["f1_1"]=test.f1.str.split(',')
#print(test)
test["f2"]=test["f2"].astype(str)
test["f2_1"]=test.f2.str.split(',')
test=test.reset_index(drop=True)
arr=[]
var=[]
for i in range(0,len(test["f1_1"])):
if test["f1_1"][i][0]=='nan':
arr.append(pd.to_numeric(test["f2_1"][i]).mean())
var.append(pd.to_numeric(test["f2_1"][i]).std())
#print(x)
else:
arr.append(pd.to_numeric(test["f1_1"][i]).mean())
var.append(pd.to_numeric(test["f1_1"][i]).std())
test["meanfare"]=arr
test["devfare"]=var
test["abstimediff"]=(test["abstimediff"]-test["abstimediff"].mean())/test["abstimediff"].std()
test["meanfare"]=(test["meanfare"]-test["meanfare"].mean())/test["meanfare"].std()
test["is_type1"]=1
test.loc[test["f1"]=='nan',"is_type1"]=0
test["devfare"]=(test["devfare"]-test["devfare"].mean())/test["devfare"].std()
processed_data = test
#print(processed_data)
data = processed_data
data["is_weekend"]=0
data.loc[data["Service Date"].dt.dayofweek==5,"is_weekend"]=1
data.loc[data["Service Date"].dt.dayofweek==6,"is_weekend"]=1
data_copy=data.copy()
data=data.drop(["f1","f2","Service Date","RecordedAt","timediff","f1_1","f2_1"],axis=1)
data["maxtimediff"]=data["abstimediff"]
data=data_copy
data=data.drop(["f1","f2","Service Date","RecordedAt","timediff","f1_1","f2_1"],axis=1)
#print(data)
data=data.groupby("Bus").agg(['mean','max'])
data=data.drop([( 'is_weekend', 'max'),( 'is_type1', 'max'),],axis=1)
data=data.drop([( 'devfare', 'max'),( 'meanfare', 'max'),],axis=1)
data_copy=data.copy()
data=data_copy
data.columns = ['{}_{}'.format(x[0], x[1]) for x in data.columns]
#print(data)
data=data.reset_index()
X=data.drop("Bus",axis=1)
features = X
#data = features
#print(data)
pca = PCA(n_components=2)
pca_result = pca.fit_transform(X)
model1 = KMeans(n_clusters=6)
model1.fit(pca_result)
centroids1 = model1.cluster_centers_
labels = model1.labels_
bus = data["Bus"]
bus=pd.DataFrame(bus)
y=pd.concat((bus,pd.DataFrame(pca_result),pd.DataFrame(labels,columns = ["Cluster"])),axis=1)
y = y.rename(columns = {0:"pca1",1:"pca2"})
# print(y)
cluster=[]
for i in range(6):
cluster.append(y[y["Cluster"]==i])
# print(labels)
X0=cluster[0][["pca1","pca2"]].to_numpy()
m0 = KMeans(n_clusters=2)
m0.fit(X0)
X1=cluster[1][["pca1","pca2"]].to_numpy()
m1 = KMeans(n_clusters=7)
m1.fit(X1)
X2=cluster[2][["pca1","pca2"]].to_numpy()
m2 = KMeans(n_clusters=6)
m2.fit(X2)
X3=cluster[3][["pca1","pca2"]].to_numpy()
m3 = KMeans(n_clusters=3)
m3.fit(X3)
X4=cluster[4][["pca1","pca2"]].to_numpy()
m4 = KMeans(n_clusters=2)
m4.fit(X4)
X5=cluster[5][["pca1","pca2"]].to_numpy()
m5 = KMeans(n_clusters=6)
m5.fit(X5)
def leader_follower(cluster): #only bus and prob for a particular cluster sorted
cluster["Follows"] = ""
cluster["Confidence Score 1"] = ""
cluster["Is followed by"] = ""
cluster["Confidence Score 2"] = ""
maxprob = cluster["Probability"][0]
leader = cluster["Bus"][0]
#confidence_score_1 = cluster["Probability"][0]
cluster["Follows"][0] = "Independent"
cluster["Confidence Score 1"][0] = 1-cluster["Probability"][0]
#confidence_score_2 =
if len(cluster)==1:
return cluster
follower = cluster["Bus"][1]
for i in range(1,len(cluster)):
cluster["Follows"][i] = leader
cluster["Confidence Score 1"][i] = cluster["Probability"][i]/cluster["Probability"][i-1]
leader = cluster["Bus"][i]
#confidence_score_1 = cluster["Probability"][i]
for i in range(0,len(cluster)-1):
cluster["Is followed by"][i] = follower
follower = cluster["Bus"][i+1]
cluster["Confidence Score 2"][i] = cluster["Probability"][i+1]/cluster["Probability"][i]
#cluster["Is followed by"][len(cluster)-1] = ""
#cluster["Confidence Score 2"][i]
return cluster
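# leader_follower assumes its input is one sub-cluster sorted by Probability in
# descending order: the top bus is marked "Independent", every other bus
# "Follows" the bus ranked just above it, and the confidence scores are ratios
# of consecutive probabilities (1 - probability for the independent leader).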
def dist_from_own_centre(pca_result,centroids,labels):
arr=np.zeros(len(labels))
for i in range(len(labels)):
arr[i]=1/((np.sum((pca_result[i] - centroids[labels[i]])**2))**0.5+1e-8)
return arr
def dist_from_other_centre(pca_result,centroids,labels):
arr=np.zeros(len(labels))
for i in range(len(labels)):
for j in range(len(centroids)):
arr[i] += 1/((np.sum((pca_result[i] - centroids[j])**2))**0.5+1e-8)
return arr
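# The "Probability" used below is a soft cluster-membership score: the inverse
# distance to a bus's own sub-cluster centroid divided by the sum of inverse
# distances to all centroids, so values near 1 mean the point lies much closer
# to its own centroid than to the others (the 1e-8 term avoids division by zero).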
prob0 = dist_from_own_centre(X0,m0.cluster_centers_,m0.labels_)/dist_from_other_centre(X0,m0.cluster_centers_,m0.labels_)
cluster[0]["Probability"] = prob0
cluster[0]["labels"] = m0.labels_
output=[]
result=[]
for i in range(max(m0.labels_)+1):
output.append(cluster[0][cluster[0]["labels"]==i])
output[i] = output[i].sort_values("Probability",ascending = False)
output[i] = output[i].reset_index()
result.append(leader_follower(output[i]))
Y0 = result[0]
for i in range(1,len(result)):
Y0 = pd.concat((Y0,result[i]))
Y0=Y0.set_index("index")
# print(Y0)
prob1 = dist_from_own_centre(X1,m1.cluster_centers_,m1.labels_)/dist_from_other_centre(X1,m1.cluster_centers_,m1.labels_)
cluster[1]["Probability"] = prob1
cluster[1]["labels"] = m1.labels_
output=[]
result=[]
for i in range(max(m1.labels_)+1):
output.append(cluster[1][cluster[1]["labels"]==i])
output[i] = output[i].sort_values("Probability",ascending = False)
output[i] = output[i].reset_index()
result.append(leader_follower(output[i]))
Y1 = result[0]
for i in range(1,len(result)):
Y1 =
|
pd.concat((Y1,result[i]))
|
pandas.concat
|
import os
import re
from urllib import request
import numpy as np
import pandas as pd
import altair as alt
data_folder = (os.path.join(os.path.dirname(__file__), 'data_files')
if '__file__' in locals() else 'data_files')
COL_REGION = 'Country/Region'
pd.set_option('display.max_colwidth', 300)
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 10000)
SAVE_JHU_DATA = False
class SourceData:
df_mappings = pd.read_csv(os.path.join(data_folder, 'mapping_countries.csv'))
mappings = {'replace.country': dict(df_mappings.dropna(subset=['Name'])
.set_index('Country')['Name']),
'map.continent': dict(df_mappings.set_index('Name')['Continent'])
}
@classmethod
def _cache_csv_path(cls, name):
return os.path.join(data_folder, f'covid_jhu/{name}_transposed.csv')
@classmethod
def _save_covid_df(cls, df, name):
df.T.to_csv(cls._cache_csv_path(name))
@classmethod
def _load_covid_df(cls, name):
df = pd.read_csv(cls._cache_csv_path(name), index_col=0).T
df[df.columns[2:]] = df[df.columns[2:]].apply(pd.to_numeric, errors='coerce')
return df
@classmethod
def _download_covid_df(cls, name):
url = ('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/'
f'csse_covid_19_time_series/time_series_covid19_{name}_global.csv')
df = pd.read_csv(url)
return df
@classmethod
def get_covid_dataframe(cls, name):
df = cls._download_covid_df(name)
if SAVE_JHU_DATA:
cls._save_covid_df(df, name)
# rename countries
df[COL_REGION] = df[COL_REGION].replace(cls.mappings['replace.country'])
return df
@staticmethod
def get_dates(df):
return df.columns[~df.columns.isin(['Province/State', COL_REGION, 'Lat', 'Long'])]
class AgeAdjustedData:
# https://population.un.org/wpp/Download/Standard/Population/
# https://population.un.org/wpp/Download/Files/1_Indicators%20(Standard)/EXCEL_FILES/1_Population/WPP2019_POP_F07_1_POPULATION_BY_AGE_BOTH_SEXES.xlsx
csv_path = os.path.join(data_folder, 'world_pop_age_2020.csv')
class Cols:
# o = original
o4 = '0-4'
o9 = '5-9'
o14 = '10-14'
o19 = '15-19'
o24 = '20-24'
o29 = '25-29'
o34 = '30-34'
o39 = '35-39'
o44 = '40-44'
o49 = '45-49'
o54 = '50-54'
o59 = '55-59'
o64 = '60-64'
o69 = '65-69'
o74 = '70-74'
o79 = '75-79'
o84 = '80-84'
o89 = '85-89'
o94 = '90-94'
o99 = '95-99'
o100p = '100+'
# https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3590771
# ny = new york
ny17 = 'ny17' # 0-17
ny44 = 'ny44' # 18-44
ny64 = 'ny64' # 45-64
ny74 = 'ny74' # 65-74
ny75p = 'ny75p' # 75+
@classmethod
def load(cls):
df_raw = pd.read_csv(cls.csv_path)
df_filt = df_raw[df_raw['Type'].isin(['Subregion', 'Country/Area'])]
df_filt = (df_filt
.drop(columns=['Index', 'Variant', 'Notes', 'Country code', 'Parent code',
'Reference date (as of 1 July)', 'Type'])
.rename(columns={'Region, subregion, country or area *': COL_REGION}))
# adjust country names
df_filt[COL_REGION] = df_filt[COL_REGION].map({
'United States of America': 'US',
'China, Taiwan Province of China': 'Taiwan*',
'United Republic of Tanzania': 'Tanzania',
'Iran (Islamic Republic of)': 'Iran',
'Republic of Korea': 'South Korea',
'Bolivia (Plurinational State of)': 'Bolivia',
'Venezuela (Bolivarian Republic of)': 'Venezuela',
'Republic of Moldova': 'Moldova',
'Russian Federation': 'Russia',
'State of Palestine': 'West Bank and Gaza',
'Côte d\'Ivoire': 'Cote d\'Ivoire',
'Democratic Republic of the Congo': 'Congo (Kinshasa)',
'Congo': 'Congo (Brazzaville)',
'Syrian Arab Republic': 'Syria',
'Myanmar': 'Burma',
'Viet Nam': 'Vietnam',
'Brunei Darussalam': 'Brunei',
'Lao People\'s Democratic Republic': 'Laos'
}).fillna(df_filt[COL_REGION])
df_num = df_filt.set_index(COL_REGION)
# convert to numbers
df_num = df_num.apply(lambda s:
pd.Series(s)
.str.replace(' ', '')
.apply(pd.to_numeric, errors='coerce'))
population_s = df_num.sum(1) * 1000
# convert to ratios
df_pct = (df_num.T / df_num.sum(1)).T
        # calculate NY bucket percentages
cols = cls.Cols
df_pct[cols.ny17] = df_pct[[cols.o4, cols.o9,
cols.o14, cols.o19]].sum(1)
df_pct[cols.ny44] = df_pct[[cols.o24, cols.o29,
cols.o34, cols.o39,
cols.o44]].sum(1)
df_pct[cols.ny64] = df_pct[[cols.o49,
cols.o54, cols.o59,
cols.o64]].sum(1)
df_pct[cols.ny74] = df_pct[[cols.o69, cols.o74]].sum(1)
df_pct[cols.ny75p] = df_pct[[cols.o79,
cols.o84, cols.o89,
cols.o94, cols.o99,
cols.o100p]].sum(1)
# check: df_pct[[cols.ny17, cols.ny44, cols.ny64, cols.ny74, cols.ny75p]].sum(1)
# calculate IFR
# https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3590771
# Table 1
ifr_s = pd.Series(np.dot(df_pct
[[cols.ny17, cols.ny44, cols.ny64, cols.ny74, cols.ny75p]],
[0.00002, 0.00087, 0.00822, 0.02626, 0.07137]),
index=df_pct.index)
## icu need estimation
## https://www.imperial.ac.uk/media/imperial-college/medicine/sph/ide/gida-fellowships/Imperial-College-COVID19-NPI-modelling-16-03-2020.pdf
## 4.4% serious symptomatic cases for UK
        ## adjusting here by age using the ratio of IFRs
## adjusting by UK's past testing bias (14) since the 4.4% figure is for reported cases
icu_percent_s = 0.044 * (ifr_s / ifr_s['United Kingdom']) / 14
return ifr_s, population_s, icu_percent_s
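    # Illustrative check of the IFR weighted sum above (hypothetical age mix): a country
    # with NY-bucket shares 0.20 / 0.40 / 0.25 / 0.10 / 0.05 gives
    #   0.20*0.00002 + 0.40*0.00087 + 0.25*0.00822 + 0.10*0.02626 + 0.05*0.07137 ~= 0.0086,
    # i.e. an age-adjusted IFR of roughly 0.9%.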
class ScrapedTableBase:
page = 'https://page.com/table'
file_name = 'file.csv'
@classmethod
def csv_path(cls):
return os.path.join(data_folder, cls.file_name)
@classmethod
def scrape(cls):
# !pip install beautifulsoup4
# !pip install lxml
import bs4
# read html
source = request.urlopen(cls.page).read()
soup = bs4.BeautifulSoup(source, 'lxml')
# get pandas df
table = soup.find_all('table')
return pd.read_html(str(table))[0]
@classmethod
def load(cls):
if not os.path.exists(cls.csv_path()):
cls.download()
return pd.read_csv(cls.csv_path())
@classmethod
def download(cls):
df = cls.scrape()
df.to_csv(cls.csv_path(), index=False)
class HostpitalBeds(ScrapedTableBase):
file_name = 'hospital_beds.csv'
page = 'https://en.wikipedia.org/wiki/List_of_countries_by_hospital_beds'
@classmethod
def download(cls):
df_wiki = cls.scrape()
        # clean up the wiki df
df_wiki = df_wiki.droplevel([0, 1], axis=1)
rename_map = {'Country/territory': 'country',
'ICU-CCB beds/100,000 inhabitants': 'icu_per_100k',
df_wiki.columns[df_wiki.columns.str.startswith('Occupancy')][0]: 'occupancy',
'2017': 'beds_per_1000_2017',
}
df_clean = df_wiki.rename(rename_map, axis=1)[rename_map.values()]
df_clean['icu_per_100k'] = pd.to_numeric(df_clean['icu_per_100k'].str
.replace(r'\[\d*\]', ''))
# load df for asian countries
# file manually created from
# https://www.researchgate.net/publication/338520008_Critical_Care_Bed_Capacity_in_Asian_Countries_and_Regions
df_asia = pd.read_csv(os.path.join(data_folder, 'ccb_asian_countries.csv'))
df_clean = pd.concat([df_clean,
df_asia[~df_asia['country'].isin(df_clean['country'])]])
df_clean.to_csv(cls.csv_path(), index=False)
class EmojiFlags(ScrapedTableBase):
file_name = 'emoji_flags.csv'
page = 'https://apps.timwhitlock.info/emoji/tables/iso3166'
emoji_col = 'emoji_code'
@classmethod
def download(cls):
df = cls.scrape()
df_filt = df.rename(columns={'Name': COL_REGION,
'Unicode': cls.emoji_col}
).drop(columns=['Emoji'])
# rename countries
df_filt[COL_REGION] = df_filt[COL_REGION].map({
'United States': 'US',
'Taiwan': 'Taiwan*',
'Macedonia': 'North Macedonia',
'Cape Verde': 'Cabo Verde',
'Saint Vincent and The Grenadines': 'Saint Vincent and the Grenadines',
'Palestinian Territory': 'West Bank and Gaza',
'Côte D\'Ivoire': 'Cote d\'Ivoire',
'Syrian Arab Republic': 'Syria',
'Myanmar': 'Burma',
'Viet Nam': 'Vietnam',
'Brunei Darussalam': 'Brunei',
'Lao People\'s Democratic Republic': 'Laos',
'Czech Republic': 'Czechia',
}).fillna(df_filt[COL_REGION])
# congo
df_filt.loc[df_filt['ISO'] == 'CD', COL_REGION] = 'Congo (Kinshasa)'
df_filt.loc[df_filt['ISO'] == 'CG', COL_REGION] = 'Congo (Brazzaville)'
# convert emoji hex codes to decimal
df_filt[cls.emoji_col] = df_filt[cls.emoji_col].apply(
lambda s: ''.join(f'&#{int(hex, 16)};'
for hex in re.findall(r'U\+(\S+)', s)))
df_filt.to_csv(cls.csv_path(), index=False)
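    # Example of the hex-to-HTML-entity conversion above (illustrative): the scraped
    # Unicode column value 'U+1F1FA U+1F1F8' (US flag) becomes '🇺🇸',
    # which renders as the flag emoji when the overview table is displayed as HTML.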
class CovidData:
COL_REGION = COL_REGION
ABS_COLS = ['Cases.total', 'Deaths.total', 'Cases.new', 'Deaths.new']
PER_100K_COLS = [f'{c}.per100k' for c in ABS_COLS]
CASES_COLS = ABS_COLS[::2] + PER_100K_COLS[::2]
EST_COLS = [f'{c}.est' for c in CASES_COLS]
dft_cases = SourceData.get_covid_dataframe('confirmed')
dft_deaths = SourceData.get_covid_dataframe('deaths')
dft_recovered = SourceData.get_covid_dataframe('recovered')
dt_cols_all = SourceData.get_dates(dft_cases)
cur_date = pd.to_datetime(dt_cols_all[-1]).date().isoformat()
PREV_LAG = 5
# modeling constants
## testing bias
death_lag = 8
## ICU spare capacity
# occupancy 66% for us:
# https://www.sccm.org/Blog/March-2020/United-States-Resource-Availability-for-COVID-19
# occupancy average 75% for OECD:
# https://www.oecd-ilibrary.org/social-issues-migration-health/health-at-a-glance-2019_4dd50c09-en
icu_spare_capacity_ratio = 0.3
def __init__(self, days_offset=0):
assert days_offset <= 0, 'day_offest can only be 0 or negative (in the past)'
self.dt_cols = self.dt_cols_all[:(len(self.dt_cols_all) + days_offset)]
self.dft_cases_backfilled = self._cases_with_backfilled_unreported_days()
self.dfc_cases = self.dft_cases_backfilled[self.dt_cols[-1]]
self.dfc_deaths = self.dft_deaths.groupby(COL_REGION)[self.dt_cols[-1]].sum()
def _cases_with_backfilled_unreported_days(self):
def backfill_missing(series, backfill_prev_threshold=50):
"""
Fills 0 diff days between days with large measurements by spreading the
future's "catch up" day's cases on the zero days.
:param series: pandas series of daily cases
:param backfill_prev_threshold: number of cases per day after which a 0 day
is considered a missing measurement rather than a true zero
:return: backfilled series of daily cases
"""
out = [series[0]]
missing = 0
for cur in series[1:]:
if cur == 0:
if out[-1] >= backfill_prev_threshold:
# a lot of cases on previous appended day
missing += 1 # increase missing days
else:
# normal: too few cases previously, a zero is plausible
out.append(cur)
elif cur > 0:
if missing:
# catching up by backfilling from current value
out.extend([cur / (missing + 1)] * (missing + 1))
missing = 0 # reset missing condition
else:
# normal: cases accumulating
out.append(cur)
else: # cur < 0
# some kind of data adjustment (e.g. France)
if missing: # reset missing
out.extend([0] * missing)
missing = 0
out.append(cur)
if missing: # finished on missing (no "catch up" day until now)
out.extend([0] * missing)
return pd.Series(out, index=series.index)
cases = self.dft_cases.groupby(self.COL_REGION).sum()[self.dt_cols_all]
diffs = cases.diff(axis=1)
diffs.iloc[:, 0] = cases.iloc[:, 0] # replace resulting nans in first date's data
fixed = diffs.apply(backfill_missing, axis=1)
imputed_cases = fixed.cumsum(axis=1)
return imputed_cases
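    # Worked example of backfill_missing (illustrative): daily diffs [100, 0, 0, 90]
    # with the default threshold become [100, 30, 30, 30] -- the two zero days after a
    # 100-case day are treated as missing reports, and the 90-case "catch up" day is
    # spread evenly over the three days (90 / 3 = 30) before re-cumulating.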
def lagged_cases(self, lag=PREV_LAG):
return self.dft_cases_backfilled[self.dt_cols[-lag]]
def lagged_deaths(self, lag=PREV_LAG):
return self.dft_deaths.groupby(COL_REGION)[self.dt_cols[-lag]].sum()
def add_last_dates(self, df):
def last_date(s):
non_zero_s = s[4:][s[4:] > 0]
if len(non_zero_s):
return pd.to_datetime(non_zero_s.index[-1]).date().isoformat()
else:
return float('nan')
df['last_case_date'] = (self.dft_cases.groupby(COL_REGION).sum().diff(axis=1)
.apply(last_date, axis=1))
df['last_death_date'] = (self.dft_deaths.groupby(COL_REGION).sum().diff(axis=1)
.apply(last_date, axis=1))
return df
def overview_table(self):
df_table = (pd.DataFrame({'Cases.total': self.dfc_cases,
'Deaths.total': self.dfc_deaths,
'Cases.total.prev': self.lagged_cases(),
'Deaths.total.prev': self.lagged_deaths()})
.sort_values(by=['Cases.total', 'Deaths.total'], ascending=[False, False])
.reset_index())
df_table.rename(columns={'index': COL_REGION}, inplace=True)
for c in self.ABS_COLS[:2]:
df_table[c.replace('total', 'new')] = (df_table[c] - df_table[f'{c}.prev']).clip(0) # DATA BUG
df_table['Fatality Rate'] = (100 * df_table['Deaths.total'] / df_table['Cases.total']).round(1)
df_table['Continent'] = df_table[COL_REGION].map(SourceData.mappings['map.continent'])
# remove problematic
df_table = df_table[~df_table[COL_REGION].isin(['Cape Verde', 'Cruise Ship', 'Kosovo'])]
return df_table
@classmethod
def beds_df(cls):
df_beds = HostpitalBeds.load().rename(columns={'country': COL_REGION})
df_beds[COL_REGION] = df_beds[COL_REGION].map({
'United States': 'US',
'United Kingdom (more)': 'United Kingdom',
'Czech Republic': 'Czechia',
}).fillna(df_beds[COL_REGION])
return df_beds.set_index(COL_REGION)
def overview_table_with_extra_data(self):
df = (self.overview_table()
.drop(['Cases.total.prev', 'Deaths.total.prev'], axis=1)
.set_index(COL_REGION, drop=True)
.sort_values('Cases.new', ascending=False))
df['Fatality Rate'] /= 100
df['emoji_flag'] = EmojiFlags.load().set_index(COL_REGION)[EmojiFlags.emoji_col]
df['emoji_flag'] = df['emoji_flag'].fillna('')
df = self.add_last_dates(df)
(df['age_adjusted_ifr'],
df['population'],
df['age_adjusted_icu_percentage']) = AgeAdjustedData.load()
df.dropna(subset=['population'], inplace=True)
for col, per_100k_col in zip(self.ABS_COLS, self.PER_100K_COLS):
df[per_100k_col] = df[col] * 1e5 / df['population']
return df
def table_with_estimated_cases(self):
"""
Assumptions:
- unbiased (if everyone is tested) mortality rate is
around 1.5% (from what was found in heavily tested countries)
- it takes on average 8 days after being reported case (tested positive)
to die and become reported death.
- testing ratio / bias (how many are suspected tested) of countries
didn't change significantly during the last 8 days.
- Recent new cases can be adjusted using the same testing_ratio bias.
"""
df = self.overview_table_with_extra_data()
lagged_mortality_rate = (self.dfc_deaths + 1) / (self.lagged_cases(self.death_lag) + 2)
testing_bias = lagged_mortality_rate / df['age_adjusted_ifr']
testing_bias[testing_bias < 1] = 1
df['lagged_fatality_rate'] = lagged_mortality_rate
df['testing_bias'] = testing_bias
for col, est_col in zip(self.CASES_COLS, self.EST_COLS):
df[est_col] = df['testing_bias'] * df[col]
return df.sort_values('Cases.new.est', ascending=False)
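    # Illustrative numbers for the testing-bias adjustment above: if a country's lagged
    # fatality rate is 6% while its age-adjusted IFR is 1%, testing_bias = 6, so reported
    # case counts are scaled up six-fold in the ".est" columns; a bias below 1 is clipped
    # to 1 (reported cases are never scaled down).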
def table_with_icu_capacities(self):
df = self.table_with_estimated_cases()
df_beds = self.beds_df()
df['icu_capacity_per100k'] = df_beds['icu_per_100k']
df['icu_spare_capacity_per100k'] = df['icu_capacity_per100k'] * self.icu_spare_capacity_ratio
return df
@classmethod
def filter_df(cls, df, cases_filter=1000, deaths_filter=20, population_filter=3e5):
return df[((df['Cases.total'] > cases_filter) |
(df['Deaths.total'] > deaths_filter)) &
(df['population'] > population_filter)][df.columns.sort_values()]
@classmethod
def rename_long_names(cls, df):
return df.rename(index={'Bosnia and Herzegovina': 'Bosnia',
'United Arab Emirates': 'UAE',
'Central African Republic': 'CAR (Africa)',
})
def smoothed_growth_rates(self, n_days):
recent_dates = self.dt_cols[-n_days:]
cases = (self.dft_cases_backfilled[recent_dates] + 1) # with pseudo counts
diffs = self.dft_cases_backfilled.diff(axis=1)[recent_dates]
diffs[diffs < 0] = 0 # total cases cannot go down
cases, diffs = cases.T, diffs.T # broadcasting works correctly this way
# daily rate is new / (total - new)
daily_growth_rates = cases / (cases - diffs)
        # dates with a larger number of cases have higher sampling accuracy,
        # so their measurements deserve more confidence
sampling_weights = (cases / cases.sum(0))
weighted_mean = (daily_growth_rates * sampling_weights).sum(0)
weighted_std = ((daily_growth_rates - weighted_mean).pow(2) *
sampling_weights).sum(0).pow(0.5)
return weighted_mean - 1, weighted_std
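    # Sketch of the weighting above (illustrative): with two days of daily growth rates
    # 1.10 and 1.30 and case counts 100 and 300, the sampling weights are 0.25 and 0.75,
    # so the weighted mean rate is 0.25*1.10 + 0.75*1.30 = 1.25, i.e. a 25% daily growth
    # estimate that leans toward the better-sampled (larger-count) day.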
def table_with_projections(self, projection_days=(7, 14, 30), debug_dfs=False):
df = self.table_with_icu_capacities()
df['affected_ratio'] = df['Cases.total'] / df['population']
df['growth_rate'], df['growth_rate_std'] = self.smoothed_growth_rates(n_days=self.PREV_LAG)
past_active, past_recovered = self._calculate_recovered_and_active_until_now(df)
df['transmission_rate'], df['transmission_rate_std'] = Model.growth_to_infection_rate(
growth=df['growth_rate'],
rec=past_recovered[-1],
act=past_active[-1],
growth_std=df['growth_rate_std'])
df, traces = Model.run_model_forward(
df,
past_active=past_active.copy(),
past_recovered=past_recovered.copy(),
projection_days=projection_days)
if debug_dfs:
debug_dfs = Model.timeseries_for_countries(
debug_countries=df.index,
traces=traces,
simulation_start_day=len(past_recovered) - 1,
infection_rate=df['transmission_rate'])
return df, debug_dfs
return df
def _calculate_recovered_and_active_until_now(self, df):
# estimated daily cases ratio of population
lagged_cases_ratios = (self.dft_cases_backfilled[self.dt_cols].T *
df['testing_bias'].T / df['population'].T).T
# protect from testing bias over-inflation
lagged_cases_ratios[lagged_cases_ratios > 1] = 1
# run through history and estimate recovered and active using:
# https://covid19dashboards.com/outstanding_cases/#Appendix:-Methodology-of-Predicting-Recovered-Cases
actives, recs = [], []
zeros_series = lagged_cases_ratios[self.dt_cols[0]] * 0 # this is to have consistent types
for day in range(len(self.dt_cols)):
prev_rec = recs[day - 1] if day > 0 else zeros_series
tot_lagged_9 = lagged_cases_ratios[self.dt_cols[day - 9]] if day >= 9 else zeros_series
new_recs = prev_rec + (tot_lagged_9 - prev_rec) * Model.recovery_lagged9_rate
new_recs[new_recs > 1] = 1
recs.append(new_recs)
actives.append(lagged_cases_ratios[self.dt_cols[day]] - new_recs)
return actives, recs
class Model:
## recovery estimation
recovery_lagged9_rate = 0.07
## sir model
rec_rate_simple = 0.05
@classmethod
def run_model_forward(cls,
df,
past_active,
past_recovered,
projection_days,
):
sus, act, rec = cls._run_sir_mode(
past_recovered, past_active, df['growth_rate'], n_days=projection_days[-1])
# sample more growth rates
sus_lists = [[s] for s in sus]
act_lists = [[a] for a in act]
rec_lists = [[r] for r in rec]
for ratio in np.linspace(-1, 1, 10):
pert_growth = df['growth_rate'] + ratio * df['growth_rate_std']
pert_growth[pert_growth < 0] = 0
sus_other, act_other, rec_other = cls._run_sir_mode(
past_recovered, past_active, pert_growth, n_days=projection_days[-1])
for s_list, s in zip(sus_lists, sus_other):
s_list.append(s)
for a_list, a in zip(act_lists, act_other):
a_list.append(a)
for r_list, r in zip(rec_lists, rec_other):
r_list.append(r)
def list_to_max_min(l):
concated = [pd.concat(sub_l, axis=1) for sub_l in l]
max_list, min_list = zip(*[(d.max(1), d.min(1)) for d in concated])
return max_list, min_list
sus_max, sus_min = list_to_max_min(sus_lists)
act_max, act_min = list_to_max_min(act_lists)
rec_max, rec_min = list_to_max_min(rec_lists)
day_one = len(past_recovered)
for day in [1] + list(projection_days):
ind = day_one + day - 1
suffix = f'.+{day}d' if day > 1 else ''
icu_max = df['age_adjusted_icu_percentage'] * 1e5
df[f'needICU.per100k{suffix}'] = act[ind] * icu_max
df[f'needICU.per100k{suffix}.max'] = act_max[ind] * icu_max
df[f'needICU.per100k{suffix}.min'] = act_min[ind] * icu_max
df[f'needICU.per100k{suffix}.err'] = (act_max[ind] - act_min[ind]) * icu_max / 2
df[f'affected_ratio.est{suffix}'] = 1 - sus[ind]
df[f'affected_ratio.est{suffix}.max'] = 1 - sus_min[ind]
df[f'affected_ratio.est{suffix}.min'] = 1 - sus_max[ind]
df[f'affected_ratio.est{suffix}.err'] = (sus_max[ind] - sus_min[ind]) / 2
traces = {
'sus_center': sus, 'sus_max': sus_max, 'sus_min': sus_min,
'act_center': act, 'act_max': act_max, 'act_min': act_min,
'rec_center': rec, 'rec_max': rec_max, 'rec_min': rec_min,
}
return df, traces
@classmethod
def growth_to_infection_rate(cls, growth, rec, act, growth_std=None):
daily_delta = growth
tot = rec + act
active = act
# Explanation of the formula below:
# daily delta = delta total / total
# daily delta = new-infected / total
# daily_delta = infect_rate * active * (1 - tot) / tot, so solving for infect_rate:
infect_rate = (daily_delta * tot) / ((1 - tot) * active)
# standard deviation
infect_std = 0
if growth_std is not None:
# higher bound
infect_higher = ((daily_delta + growth_std) * tot) / ((1 - tot) * active)
# lower bound
growth_lower = daily_delta - growth_std
growth_lower[growth_lower < 0] = 0
infect_lower = (growth_lower * tot) / ((1 - tot) * active)
infect_std = (infect_higher - infect_lower) / 2
return infect_rate, infect_std
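    # Worked example of the rearrangement above (illustrative): with growth = 5% per day,
    # total (rec + act) = 2% of the population and active = 1%, the implied transmission
    # rate is (0.05 * 0.02) / ((1 - 0.02) * 0.01) ~= 0.102, i.e. the active fraction
    # infects about 10.2% of its own size per day while almost everyone is still susceptible.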
@classmethod
def _run_sir_mode(cls, past_rec, past_act, growth, n_days):
rec, act = past_rec.copy(), past_act.copy()
infect_rate, _ = cls.growth_to_infection_rate(growth, rec[-1], act[-1])
# simulate
for i in range(n_days):
# calculate susceptible
sus = 1 - rec[-1] - act[-1]
# calculate new recovered
actives_lagged_9 = act[-9]
delta_rec = actives_lagged_9 * cls.recovery_lagged9_rate
delta_rec_simple = act[-1] * cls.rec_rate_simple
# limit recovery rate to simple SIR model where
# lagged rate estimation becomes too high (on the downward slopes)
delta_rec[delta_rec > delta_rec_simple] = delta_rec_simple[delta_rec > delta_rec_simple]
new_recovered = rec[-1] + delta_rec
# calculate new active
delta_infect = act[-1] * sus * infect_rate
new_active = act[-1] + delta_infect - delta_rec
new_active[new_active < 0] = 0
# update
rec.append(new_recovered)
act.append(new_active)
sus = [1 - r - a for r, a in zip(rec, act)]
return sus, act, rec
@staticmethod
def timeseries_for_countries(debug_countries, traces,
simulation_start_day, infection_rate):
dfs = []
for debug_country in debug_countries:
debug = [{'day': day - simulation_start_day,
'Susceptible': traces['sus_center'][day][debug_country],
'Susceptible.max': traces['sus_max'][day][debug_country],
'Susceptible.min': traces['sus_min'][day][debug_country],
'Infected': traces['act_center'][day][debug_country],
'Infected.max': traces['act_max'][day][debug_country],
'Infected.min': traces['act_min'][day][debug_country],
'Removed': traces['rec_center'][day][debug_country],
'Removed.max': traces['rec_max'][day][debug_country],
'Removed.min': traces['rec_min'][day][debug_country],
}
for day in range(len(traces['rec_center']))]
title = (f"{debug_country}: "
f"Transmission Rate: {infection_rate[debug_country]:.1%}. "
f"S/I/R init: {debug[0]['Susceptible']:.1%},"
f"{debug[0]['Infected']:.1%},{debug[0]['Removed']:.1%}")
df = pd.DataFrame(debug).set_index('day')
df['title'] = title
df['country'] = debug_country
dfs.append(df)
return dfs
def altair_sir_plot(df_alt, default_country):
alt.data_transformers.disable_max_rows()
select_country = alt.selection_single(
name='Select',
fields=['country'],
init={'country': default_country},
bind=alt.binding_select(options=sorted(df_alt['country'].unique()))
)
title = (alt.Chart(df_alt[['country', 'title']].drop_duplicates())
.mark_text(dy=-180, dx=0, size=16)
.encode(text='title:N')
.transform_filter(select_country))
base = alt.Chart(df_alt).encode(x='day:Q')
line_cols = ['Infected', 'Removed'] # 'Susceptible'
colors = ['red', 'green']
lines = (base.mark_line()
.transform_fold(line_cols)
.encode(x=alt.X('day:Q', title=f'days relative to today ({CovidData.cur_date})'),
y=alt.Y('value:Q',
axis=alt.Axis(format='%', title='Percentage of Population')),
color=alt.Color('key:N',
scale=alt.Scale(domain=line_cols, range=colors))))
import functools
bands = functools.reduce(alt.Chart.__add__,
[base.mark_area(opacity=0.1, color=color)
.encode(y=f'{col}\.max:Q', y2=f'{col}\.min:Q')
for col, color in zip(line_cols, colors)])
today_line = (alt.Chart(
|
pd.DataFrame({'x': [0]})
|
pandas.DataFrame
|
# -*- coding: cp1254 -*-
"""
This script creates Landslide Susceptibility Map (LSM) with Multi Layer Perceptron Model
<NAME> (2018)
"""
#////////////////////IMPORTING THE REQUIRED LIBRARIES/////////////////////////
import arcpy
import os
from arcpy.sa import *
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import StandardScaler
arcpy.env.overwriteOutput = True
#////////////////////////////Getting Input Parameters//////////////////////////
rec=arcpy.GetParameterAsText(0)#The folder including output data of Data Preparation script
sf=arcpy.GetParameterAsText(1)#Folder where the output files are saved
koordinat=arcpy.GetParameterAsText(2)#Coordinate system of map
cell_size=arcpy.GetParameterAsText(3)#Cell size
wt=str(arcpy.GetParameterAsText(4))#weighting data type (Frequency ratio or Information value)
h_layer=arcpy.GetParameterAsText(5)#hidden layer size
act=arcpy.GetParameterAsText(6)#activation
slv=arcpy.GetParameterAsText(7)#solver
alpha=float(arcpy.GetParameterAsText(8))#Alpha
l_rate=arcpy.GetParameterAsText(9)#learning rate
l_rate_init=float(arcpy.GetParameterAsText(10))#learning rate init
max_it=int(arcpy.GetParameterAsText(11))#maximum number of iteration
mom=float(arcpy.GetParameterAsText(12))#momentum
arcpy.env.workspace=rec
#//////////////////checking whether the hidden layer size is single- or multi-layer/////////////////
h_layer=h_layer.split(";")
layer_lst=[]
for h in h_layer:
h=int(h)
layer_lst.append(h)
if len(layer_lst)==1:
hls=layer_lst[0]
else:
hls=tuple(layer_lst)#tuple for Hidden layer size parameter
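# Example (illustrative): a tool input of "100;50;25" yields hls = (100, 50, 25), i.e. three
# hidden layers, while a single value "100" yields the scalar hls = 100; scikit-learn's
# hidden_layer_sizes parameter accepts a tuple and wraps a plain int as one hidden layer.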
#/////////showing parameter on screen//////////////////////////////////////////
arcpy.AddMessage("Hidden layer size:---------------:{}".format(hls))
arcpy.AddMessage("Activation function:-------------:{}".format(act))
arcpy.AddMessage("Solver:--------------------------:{}".format(slv))
arcpy.AddMessage("Alpha----------------------------:{}".format(alpha))
arcpy.AddMessage("Learning rate:-------------------:{}".format(l_rate))
arcpy.AddMessage("Learning rate init---------------:{}".format(l_rate_init))
arcpy.AddMessage("Max iter:------------------------:{}".format(max_it))
arcpy.AddMessage("Momentum-------------------------:{}".format(mom))
os.chdir(rec)
#///////////////////////////Starting Analysis///////////////////////////////////
arcpy.AddMessage("Starting analysis with MLP algorithm")
#//////Checking weighting data type: frequency ratio or information value////////////
if wt=="frequency ratio":
trn="train_fr.csv"
pre="pre_fr.csv"
else:
trn="train_iv.csv"
pre="pre_iv.csv"
#Loading Train data
veriler=pd.read_csv(trn)
veriler=veriler.replace(-9999,"NaN")
#Loading Analysis data
analiz=pd.read_csv(pre)
analiz=analiz.replace(-9999,"NaN")
#Preparing parameters
va,vb=veriler.shape
aa,ab=analiz.shape
parametreler=veriler.iloc[:,2:vb].values
#Preparing label (class) data
cls=veriler.iloc[:,1:2].values
##preparing Analysis data
pre=analiz.iloc[:,2:ab-2].values
#preparing Coordinate data
koor=analiz.iloc[:,ab-2:ab].values
s_train=va
s_analiz=aa
koor=pd.DataFrame(data=koor,index=range(aa),columns=["x","y"])
#Imputing NaN values with the column median
imputer= Imputer(missing_values='NaN', strategy = 'median', axis=0 )
parametreler=imputer.fit_transform(parametreler)
pre=imputer.fit_transform(pre)
cls=imputer.fit_transform(cls)
sc1=StandardScaler()
sc1.fit(parametreler)
parametreler=sc1.transform(parametreler)
pre=sc1.transform(pre)
pre=pd.DataFrame(data=pre)
x=
|
pd.DataFrame(data=parametreler)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
import os
import re
import sys
from datetime import datetime
from random import randint
from time import sleep
import numpy as np
import pandas.util.testing as tm
import pytest
import pytz
from pandas import DataFrame, NaT, compat
from pandas.compat import range, u
from pandas.compat.numpy import np_datetime64_compat
from pandas_gbq import gbq
try:
import mock
except ImportError:
from unittest import mock
TABLE_ID = 'new_test'
def _skip_local_auth_if_in_travis_env():
if _in_travis_environment():
pytest.skip("Cannot run local auth in travis environment")
def _skip_if_no_private_key_path():
if not _get_private_key_path():
pytest.skip("Cannot run integration tests without a "
"private key json file path")
def _skip_if_no_private_key_contents():
if not _get_private_key_contents():
raise pytest.skip("Cannot run integration tests without a "
"private key json contents")
def _in_travis_environment():
return 'TRAVIS_BUILD_DIR' in os.environ and \
'GBQ_PROJECT_ID' in os.environ
def _get_dataset_prefix_random():
return ''.join(['pandas_gbq_', str(randint(1, 100000))])
def _get_project_id():
project = os.environ.get('GBQ_PROJECT_ID')
if not project:
pytest.skip(
"Cannot run integration tests without a project id")
return project
def _get_private_key_path():
if _in_travis_environment():
return os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])
else:
return os.environ.get('GBQ_GOOGLE_APPLICATION_CREDENTIALS')
def _get_private_key_contents():
key_path = _get_private_key_path()
if key_path is None:
return None
with open(key_path) as f:
return f.read()
@pytest.fixture(autouse=True, scope='module')
def _test_imports():
try:
import pkg_resources # noqa
except ImportError:
raise ImportError('Could not import pkg_resources (setuptools).')
gbq._test_google_api_imports()
@pytest.fixture
def project():
return _get_project_id()
def _check_if_can_get_correct_default_credentials():
# Checks if "Application Default Credentials" can be fetched
# from the environment the tests are running in.
# See https://github.com/pandas-dev/pandas/issues/13577
import google.auth
from google.auth.exceptions import DefaultCredentialsError
try:
credentials, _ = google.auth.default(scopes=[gbq.GbqConnector.scope])
except (DefaultCredentialsError, IOError):
return False
return gbq._try_credentials(_get_project_id(), credentials) is not None
def clean_gbq_environment(dataset_prefix, private_key=None):
dataset = gbq._Dataset(_get_project_id(), private_key=private_key)
all_datasets = dataset.datasets()
retry = 3
while retry > 0:
try:
retry = retry - 1
for i in range(1, 10):
dataset_id = dataset_prefix + str(i)
if dataset_id in all_datasets:
table = gbq._Table(_get_project_id(), dataset_id,
private_key=private_key)
# Table listing is eventually consistent, so loop until
# all tables no longer appear (max 30 seconds).
table_retry = 30
all_tables = dataset.tables(dataset_id)
while all_tables and table_retry > 0:
for table_id in all_tables:
try:
table.delete(table_id)
except gbq.NotFoundException:
pass
sleep(1)
table_retry = table_retry - 1
all_tables = dataset.tables(dataset_id)
dataset.delete(dataset_id)
retry = 0
except gbq.GenericGBQException as ex:
# Build in retry logic to work around the following errors :
# An internal error occurred and the request could not be...
# Dataset ... is still in use
error_message = str(ex).lower()
if ('an internal error occurred' in error_message or
'still in use' in error_message) and retry > 0:
sleep(30)
else:
raise ex
def make_mixed_dataframe_v2(test_size):
# create df to test for all BQ datatypes except RECORD
bools = np.random.randint(2, size=(1, test_size)).astype(bool)
flts = np.random.randn(1, test_size)
ints = np.random.randint(1, 10, size=(1, test_size))
strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
times = [datetime.now(pytz.timezone('US/Arizona'))
for t in range(test_size)]
return DataFrame({'bools': bools[0],
'flts': flts[0],
'ints': ints[0],
'strs': strs[0],
'times': times[0]},
index=range(test_size))
def test_generate_bq_schema_deprecated():
# 11121 Deprecation of generate_bq_schema
with pytest.warns(FutureWarning):
df = make_mixed_dataframe_v2(10)
gbq.generate_bq_schema(df)
@pytest.fixture(params=['local', 'service_path', 'service_creds'])
def auth_type(request):
auth = request.param
if auth == 'local':
if _in_travis_environment():
pytest.skip("Cannot run local auth in travis environment")
elif auth == 'service_path':
if _in_travis_environment():
pytest.skip("Only run one auth type in Travis to save time")
_skip_if_no_private_key_path()
elif auth == 'service_creds':
_skip_if_no_private_key_contents()
else:
raise ValueError
return auth
@pytest.fixture()
def credentials(auth_type):
if auth_type == 'local':
return None
elif auth_type == 'service_path':
return _get_private_key_path()
elif auth_type == 'service_creds':
return _get_private_key_contents()
else:
raise ValueError
@pytest.fixture()
def gbq_connector(project, credentials):
return gbq.GbqConnector(project, private_key=credentials)
class TestGBQConnectorIntegration(object):
def test_should_be_able_to_make_a_connector(self, gbq_connector):
assert gbq_connector is not None, 'Could not create a GbqConnector'
def test_should_be_able_to_get_valid_credentials(self, gbq_connector):
credentials = gbq_connector.get_credentials()
assert credentials.valid
def test_should_be_able_to_get_a_bigquery_client(self, gbq_connector):
bigquery_client = gbq_connector.get_client()
assert bigquery_client is not None
def test_should_be_able_to_get_schema_from_query(self, gbq_connector):
schema, pages = gbq_connector.run_query('SELECT 1')
assert schema is not None
def test_should_be_able_to_get_results_from_query(self, gbq_connector):
schema, pages = gbq_connector.run_query('SELECT 1')
assert pages is not None
class TestGBQConnectorIntegrationWithLocalUserAccountAuth(object):
@pytest.fixture(autouse=True)
def setup(self, project):
_skip_local_auth_if_in_travis_env()
self.sut = gbq.GbqConnector(project, auth_local_webserver=True)
def test_get_application_default_credentials_does_not_throw_error(self):
if _check_if_can_get_correct_default_credentials():
# Can get real credentials, so mock it out to fail.
from google.auth.exceptions import DefaultCredentialsError
with mock.patch('google.auth.default',
side_effect=DefaultCredentialsError()):
credentials = self.sut.get_application_default_credentials()
else:
credentials = self.sut.get_application_default_credentials()
assert credentials is None
def test_get_application_default_credentials_returns_credentials(self):
if not _check_if_can_get_correct_default_credentials():
pytest.skip("Cannot get default_credentials "
"from the environment!")
from google.auth.credentials import Credentials
credentials = self.sut.get_application_default_credentials()
assert isinstance(credentials, Credentials)
def test_get_user_account_credentials_bad_file_returns_credentials(self):
from google.auth.credentials import Credentials
with mock.patch('__main__.open', side_effect=IOError()):
credentials = self.sut.get_user_account_credentials()
assert isinstance(credentials, Credentials)
def test_get_user_account_credentials_returns_credentials(self):
from google.auth.credentials import Credentials
credentials = self.sut.get_user_account_credentials()
assert isinstance(credentials, Credentials)
class TestGBQUnit(object):
def test_should_return_credentials_path_set_by_env_var(self):
env = {'PANDAS_GBQ_CREDENTIALS_FILE': '/tmp/dummy.dat'}
with mock.patch.dict('os.environ', env):
assert gbq._get_credentials_file() == '/tmp/dummy.dat'
@pytest.mark.parametrize(
('input', 'type_', 'expected'), [
(1, 'INTEGER', int(1)),
(1, 'FLOAT', float(1)),
pytest.param('false', 'BOOLEAN', False, marks=pytest.mark.xfail),
pytest.param(
'0e9', 'TIMESTAMP',
np_datetime64_compat('1970-01-01T00:00:00Z'),
marks=pytest.mark.xfail),
('STRING', 'STRING', 'STRING'),
])
def test_should_return_bigquery_correctly_typed(
self, input, type_, expected):
result = gbq._parse_data(
dict(fields=[dict(name='x', type=type_, mode='NULLABLE')]),
rows=[[input]]).iloc[0, 0]
assert result == expected
def test_to_gbq_should_fail_if_invalid_table_name_passed(self):
with pytest.raises(gbq.NotFoundException):
gbq.to_gbq(DataFrame(), 'invalid_table_name', project_id="1234")
def test_to_gbq_with_no_project_id_given_should_fail(self):
with pytest.raises(TypeError):
gbq.to_gbq(DataFrame(), 'dataset.tablename')
def test_read_gbq_with_no_project_id_given_should_fail(self):
with pytest.raises(TypeError):
gbq.read_gbq('SELECT 1')
def test_that_parse_data_works_properly(self):
from google.cloud.bigquery.table import Row
test_schema = {'fields': [
{'mode': 'NULLABLE', 'name': 'column_x', 'type': 'STRING'}]}
field_to_index = {'column_x': 0}
values = ('row_value',)
test_page = [Row(values, field_to_index)]
test_output = gbq._parse_data(test_schema, test_page)
correct_output = DataFrame({'column_x': ['row_value']})
tm.assert_frame_equal(test_output, correct_output)
def test_read_gbq_with_invalid_private_key_json_should_fail(self):
with pytest.raises(gbq.InvalidPrivateKeyFormat):
gbq.read_gbq('SELECT 1', project_id='x', private_key='y')
def test_read_gbq_with_empty_private_key_json_should_fail(self):
with pytest.raises(gbq.InvalidPrivateKeyFormat):
gbq.read_gbq('SELECT 1', project_id='x', private_key='{}')
def test_read_gbq_with_private_key_json_wrong_types_should_fail(self):
with pytest.raises(gbq.InvalidPrivateKeyFormat):
gbq.read_gbq(
'SELECT 1', project_id='x',
private_key='{ "client_email" : 1, "private_key" : True }')
def test_read_gbq_with_empty_private_key_file_should_fail(self):
with tm.ensure_clean() as empty_file_path:
with pytest.raises(gbq.InvalidPrivateKeyFormat):
gbq.read_gbq('SELECT 1', project_id='x',
private_key=empty_file_path)
def test_read_gbq_with_corrupted_private_key_json_should_fail(self):
_skip_if_no_private_key_contents()
with pytest.raises(gbq.InvalidPrivateKeyFormat):
gbq.read_gbq(
'SELECT 1', project_id='x',
private_key=re.sub('[a-z]', '9', _get_private_key_contents()))
def test_should_read(project, credentials):
query = 'SELECT "PI" AS valid_string'
df = gbq.read_gbq(query, project_id=project, private_key=credentials)
tm.assert_frame_equal(df, DataFrame({'valid_string': ['PI']}))
class TestReadGBQIntegration(object):
@pytest.fixture(autouse=True)
def setup(self, project, credentials):
# - PER-TEST FIXTURES -
# put here any instruction you want to be run *BEFORE* *EVERY* test is
# executed.
self.gbq_connector = gbq.GbqConnector(
project, private_key=credentials)
self.credentials = credentials
def test_should_properly_handle_valid_strings(self):
query = 'SELECT "PI" AS valid_string'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(df, DataFrame({'valid_string': ['PI']}))
def test_should_properly_handle_empty_strings(self):
query = 'SELECT "" AS empty_string'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(df, DataFrame({'empty_string': [""]}))
def test_should_properly_handle_null_strings(self):
query = 'SELECT STRING(NULL) AS null_string'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(df, DataFrame({'null_string': [None]}))
def test_should_properly_handle_valid_integers(self):
query = 'SELECT INTEGER(3) AS valid_integer'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(df, DataFrame({'valid_integer': [3]}))
def test_should_properly_handle_nullable_integers(self):
query = '''SELECT * FROM
(SELECT 1 AS nullable_integer),
(SELECT NULL AS nullable_integer)'''
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(
df, DataFrame({'nullable_integer': [1, None]}).astype(object))
def test_should_properly_handle_valid_longs(self):
query = 'SELECT 1 << 62 AS valid_long'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(
df, DataFrame({'valid_long': [1 << 62]}))
def test_should_properly_handle_nullable_longs(self):
query = '''SELECT * FROM
(SELECT 1 << 62 AS nullable_long),
(SELECT NULL AS nullable_long)'''
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(
df, DataFrame({'nullable_long': [1 << 62, None]}).astype(object))
def test_should_properly_handle_null_integers(self):
query = 'SELECT INTEGER(NULL) AS null_integer'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(df, DataFrame({'null_integer': [None]}))
def test_should_properly_handle_valid_floats(self):
from math import pi
query = 'SELECT PI() AS valid_float'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(df, DataFrame(
{'valid_float': [pi]}))
def test_should_properly_handle_nullable_floats(self):
from math import pi
query = '''SELECT * FROM
(SELECT PI() AS nullable_float),
(SELECT NULL AS nullable_float)'''
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(
df, DataFrame({'nullable_float': [pi, None]}))
def test_should_properly_handle_valid_doubles(self):
from math import pi
query = 'SELECT PI() * POW(10, 307) AS valid_double'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(df, DataFrame(
{'valid_double': [pi * 10 ** 307]}))
def test_should_properly_handle_nullable_doubles(self):
from math import pi
query = '''SELECT * FROM
(SELECT PI() * POW(10, 307) AS nullable_double),
(SELECT NULL AS nullable_double)'''
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(
df, DataFrame({'nullable_double': [pi * 10 ** 307, None]}))
def test_should_properly_handle_null_floats(self):
query = 'SELECT FLOAT(NULL) AS null_float'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(df, DataFrame({'null_float': [np.nan]}))
def test_should_properly_handle_timestamp_unix_epoch(self):
query = 'SELECT TIMESTAMP("1970-01-01 00:00:00") AS unix_epoch'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(df, DataFrame(
{'unix_epoch': [np.datetime64('1970-01-01T00:00:00.000000Z')]}))
def test_should_properly_handle_arbitrary_timestamp(self):
query = 'SELECT TIMESTAMP("2004-09-15 05:00:00") AS valid_timestamp'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(df, DataFrame({
'valid_timestamp': [np.datetime64('2004-09-15T05:00:00.000000Z')]
}))
def test_should_properly_handle_null_timestamp(self):
query = 'SELECT TIMESTAMP(NULL) AS null_timestamp'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=self.credentials)
tm.assert_frame_equal(df,
|
DataFrame({'null_timestamp': [NaT]})
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
import sys, os
import datetime, time
from math import ceil, floor # ceil: round up to an integer, floor: round down to an integer
import math
import pickle
import uuid
import base64
import subprocess
from subprocess import Popen
import PyQt5
from PyQt5 import QtCore, QtGui, uic
from PyQt5 import QAxContainer
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import (QApplication, QLabel, QLineEdit, QMainWindow, QDialog, QMessageBox, QProgressBar)
from PyQt5.QtWidgets import *
from PyQt5.QAxContainer import *
import numpy as np
from numpy import NaN, Inf, arange, isscalar, asarray, array
import pandas as pd
import pandas.io.sql as pdsql
from pandas import DataFrame, Series
# Google SpreadSheet Read/Write
import gspread # (extra module to install)
from oauth2client.service_account import ServiceAccountCredentials # (extra module to install)
from df2gspread import df2gspread as d2g # (extra module to install)
from string import ascii_uppercase # list of uppercase letters
from bs4 import BeautifulSoup
import requests
import logging
import logging.handlers
import sqlite3
import telepot # Telegram bot (extra module to install)
from slacker import Slacker # Slack bot (extra module to install)
import csv
import FinanceDataReader as fdr
# Google Spreadsheet Setting *******************************
scope = ['https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive']
json_file_name = './secret/xtrader-276902-f5a8b77e2735.json'
credentials = ServiceAccountCredentials.from_json_keyfile_name(json_file_name, scope)
gc = gspread.authorize(credentials)
# XTrader-Stocklist URL
# spreadsheet_url = 'https://docs.google.com/spreadsheets/d/1pLi849EDnjZnaYhphkLButple5bjl33TKZrCoMrim3k/edit#gid=0' # Test Sheet
spreadsheet_url = 'https://docs.google.com/spreadsheets/d/1XE4sk0vDw4fE88bYMDZuJbnP4AF9CmRYHKY6fCXABw4/edit#gid=0' # Sheeet
testsheet_url = 'https://docs.google.com/spreadsheets/d/1pLi849EDnjZnaYhphkLButple5bjl33TKZrCoMrim3k/edit#gid=0'
# connect to the spreadsheet and set up the worksheets
doc = gc.open_by_url(spreadsheet_url)
doc_test = gc.open_by_url(testsheet_url)
shortterm_buy_sheet = doc.worksheet('매수모니터링')
shortterm_sell_sheet = doc.worksheet('매도모니터링')
shortterm_strategy_sheet = doc.worksheet('ST bot')
shortterm_history_sheet = doc.worksheet('매매이력')
condition_history_sheet = doc_test.worksheet('조건식이력')
price_monitoring_sheet = doc_test.worksheet('주가모니터링')
shortterm_history_cols = ['번호', '종목명', '매수가', '매수수량', '매수일', '매수전략', '매수조건', '매도가', '매도수량',
'매도일', '매도전략', '매도구간', '수익률(계산)','수익률', '수익금', '세금+수수료', '확정 수익금']
shortterm_analysis_cols = ['번호', '종목명', '우선순위', '일봉1', '일봉2', '일봉3', '일봉4', '주봉1', '월봉1', '거래량', '기관수급', '외인수급', '개인']
condition_history_cols = ['종목명', '매수가', '매수일','매도가', '매도일', '수익률(계산)', '수익률', '수익금', '세금+수수료']
# alphabet list used to build column letters when updating the Google spreadsheet
alpha_list = list(ascii_uppercase)
# SQLITE DB Setting *****************************************
DATABASE = 'stockdata.db'
def sqliteconn():
conn = sqlite3.connect(DATABASE)
return conn
# return the stock code, stock name and market type from the DB for a given stock name
def get_code(종목명체크):
    # stock names may arrive with wrong spacing or capitalisation, so the DB stores a
    # normalised 종목명체크 column (spaces removed, lower-cased); the name received from
    # Google is normalised the same way to find the matching row, and the canonical
    # stock name stored in the DB is returned for further use
종목명체크 = 종목명체크.lower().replace(' ', '')
query = """
select 종목코드, 종목명, 시장구분
from 종목코드
where (종목명체크 = '%s')
""" % (종목명체크)
conn = sqliteconn()
df = pd.read_sql(query, con=conn)
conn.close()
return list(df[['종목코드', '종목명', '시장구분']].values)[0]
# return a properly zero-padded code when the stock code was read back as an int
def fix_stockcode(data):
if len(data)< 6:
for i in range(6 - len(data)):
data = '0'+data
return data
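# Example (illustrative): a code read back from CSV as '5930' is padded to '005930'
# so it matches the 6-digit codes the DB and the Kiwoom API expect.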
# import the Google spreadsheet and return it as a DataFrame
def import_googlesheet():
try:
        # 1. check the buy-monitoring sheet and select candidate stocks to buy
        row_data = shortterm_buy_sheet.get_all_values() # fetch the data of the '매수모니터링' (buy monitoring) sheet
        # remember the column positions (indexes) of the key fields for input-error checks
idx_strategy = row_data[0].index('기본매도전략')
idx_buyprice = row_data[0].index('매수가1')
idx_sellprice = row_data[0].index('목표가')
        # add the stock-code and market columns that will be filled from the DB
        # header: 번호, 종목명, 매수모니터링, 비중, 시가위치, 매수가1, 매수가2, 매수가3, 기존매도전략, 목표가
row_data[0].insert(2, '종목코드')
row_data[0].insert(3, '시장')
for row in row_data[1:]:
try:
                code, name, market = get_code(row[1]) # look up stock code, canonical name and market from the stock name (get_code)
except Exception as e:
name = ''
code = ''
market = ''
print('구글 매수모니터링 시트 종목명 오류 : %s' % (row[1]))
logger.error('구글 매수모니터링 시트 오류 : %s' % (row[1]))
Telegram('[XTrader]구글 매수모니터링 시트 오류 : %s' % (row[1]))
            row[1] = name # store the canonical stock name
row.insert(2, code)
row.insert(3, market)
data = pd.DataFrame(data=row_data[1:], columns=row_data[0])
        # preliminary data clean-up
data = data[(data['매수모니터링'] == '1') & (data['종목코드']!= '')]
data = data[row_data[0][:row_data[0].index('목표가')+1]]
del data['매수모니터링']
data.to_csv('%s_googlesheetdata.csv'%(datetime.date.today().strftime('%Y%m%d')), encoding='euc-kr', index=False)
        # 2. check the sell-monitoring sheet (번호, 종목명, 보유일, 매도전략, 매도가)
        row_data = shortterm_sell_sheet.get_all_values() # fetch the data of the '매도모니터링' (sell monitoring) sheet
        # remember the column positions (indexes) of the key fields for input-error checks
idx_holding = row_data[0].index('보유일')
idx_strategy = row_data[0].index('매도전략')
idx_loss = row_data[0].index('손절가')
idx_sellprice = row_data[0].index('목표가')
if len(row_data) > 1:
for row in row_data[1:]:
try:
                    code, name, market = get_code(row[1]) # look up stock code, canonical name and market from the stock name (get_code)
if row[idx_holding] == '' : raise Exception('보유일 오류')
if row[idx_strategy] == '': raise Exception('매도전략 오류')
if row[idx_loss] == '': raise Exception('손절가 오류')
if row[idx_strategy] == '4' and row[idx_sellprice] == '': raise Exception('목표가 오류')
except Exception as e:
if str(e) != '보유일 오류' and str(e) != '매도전략 오류' and str(e) != '손절가 오류'and str(e) != '목표가 오류': e = '종목명 오류'
print('구글 매도모니터링 시트 오류 : %s, %s' % (row[1], e))
logger.error('구글 매도모니터링 시트 오류 : %s, %s' % (row[1], e))
Telegram('[XTrader]구글 매도모니터링 시트 오류 : %s, %s' % (row[1], e))
# print(data)
print('[XTrader]구글 시트 확인 완료')
# Telegram('[XTrader]구글 시트 확인 완료')
# logger.info('[XTrader]구글 시트 확인 완료')
return data
except Exception as e:
        # if the Google sheet import fails, fall back to the csv backed up on the last successful run
print("import_googlesheet Error : %s"%e)
logger.error("import_googlesheet Error : %s"%e)
backup_file = datetime.date.today().strftime('%Y%m%d') + '_googlesheetdata.csv'
if backup_file in os.listdir():
data = pd.read_csv(backup_file, encoding='euc-kr')
data = data.fillna('')
data = data.astype(str)
data['종목코드'] = data['종목코드'].apply(fix_stockcode)
print("import googlesheet backup_file")
logger.info("import googlesheet backup_file")
return data
# Telegram Setting *****************************************
with open('./secret/telegram_token.txt', mode='r') as tokenfile:
TELEGRAM_TOKEN = tokenfile.readline().strip()
with open('./secret/chatid.txt', mode='r') as chatfile:
CHAT_ID = int(chatfile.readline().strip())
bot = telepot.Bot(TELEGRAM_TOKEN)
with open('./secret/Telegram.txt', mode='r') as tokenfile:
r = tokenfile.read()
TELEGRAM_TOKEN_yoo = r.split('\n')[0].split(', ')[1]
CHAT_ID_yoo = r.split('\n')[1].split(', ')[1]
bot_yoo = telepot.Bot(TELEGRAM_TOKEN_yoo)
telegram_enable = True
def Telegram(str, send='all'):
try:
if telegram_enable == True:
# if send == 'mc':
# bot.sendMessage(CHAT_ID, str)
# else:
# bot.sendMessage(CHAT_ID, str)
# bot_yoo.sendMessage(CHAT_ID_yoo, str)
bot.sendMessage(CHAT_ID, str)
else:
pass
except Exception as e:
Telegram('[StockTrader]Telegram Error : %s' % e, send='mc')
# Slack Setting ***********************************************
# with open('./secret/slack_token.txt', mode='r') as tokenfile:
# SLACK_TOKEN = tokenfile.readline().strip()
# slack = Slacker(SLACK_TOKEN)
# slack_enable = False
# def Slack(str):
# if slack_enable == True:
# slack.chat.post_message('#log', str)
# else:
# pass
# holding-period calculation after a buy *****************************************
today = datetime.date.today()
def holdingcal(base_date, excluded=(6, 7)): # e.g. base_date = '2018-06-23'
    yy = int(base_date[:4]) # year
    mm = int(base_date[5:7]) # month
    dd = int(base_date[8:10]) # day
base_d = datetime.date(yy, mm, dd)
delta = 0
while base_d <= today:
if base_d.isoweekday() not in excluded:
delta += 1
base_d += datetime.timedelta(days=1)
    return delta # the current day itself is also counted as 1 day
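# Worked example (illustrative): if today were Friday 2018-06-29, holdingcal('2018-06-23')
# counts Mon 25th through Fri 29th and returns 5 -- Saturday/Sunday (isoweekday 6, 7) are
# excluded, and the purchase day itself counts as day 1 when it is a weekday.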
# tick-price (hoga) calculation for limit-up / current price *************************************
def hogacal(price, diff, market, option):
    # diff 0: the limit-up tick itself, -1: one tick below the limit-up price
if option == '현재가':
cal_price = price
elif option == '상한가':
cal_price = price * 1.3
if cal_price < 1000:
hogaunit = 1
elif cal_price < 5000:
hogaunit = 5
elif cal_price < 10000:
hogaunit = 10
elif cal_price < 50000:
hogaunit = 50
elif cal_price < 100000 and market == "KOSPI":
hogaunit = 100
elif cal_price < 500000 and market == "KOSPI":
hogaunit = 500
elif cal_price >= 500000 and market == "KOSPI":
hogaunit = 1000
elif cal_price >= 50000 and market == "KOSDAQ":
hogaunit = 100
cal_price = int(cal_price / hogaunit) * hogaunit + (hogaunit * diff)
return cal_price
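# Worked example (illustrative): hogacal(52000, 0, "KOSPI", "상한가") first computes the 30%
# limit-up price 67600, picks the 100-won tick size for the 50,000-100,000 KOSPI band, and
# floors to the tick: int(67600 / 100) * 100 + 100*0 = 67600. With diff=-1 the result is
# one tick lower, 67500.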
# crawl the current price of a single stock ******************************************
def crawler_price(code):
code = code[1:]
url = 'https://finance.naver.com/item/sise.nhn?code=%s' % (code)
response = requests.get(url)
soup = BeautifulSoup(response.text, 'html.parser')
tag = soup.find("td", {"class": "num"})
return int(tag.text.replace(',',''))
로봇거래계좌번호 = None
주문딜레이 = 0.25
초당횟수제한 = 5
## Kiwoom Securities rate limit - reading once every 3.7 seconds has been safe so far
주문지연 = 3700 # 3.7 seconds
로봇스크린번호시작 = 9000
로봇스크린번호종료 = 9999
# data arrangement for the table view
class PandasModel(QtCore.QAbstractTableModel):
def __init__(self, data=None, parent=None):
QtCore.QAbstractTableModel.__init__(self, parent)
self._data = data
if data is None:
self._data = DataFrame()
def rowCount(self, parent=None):
# return len(self._data.values)
return len(self._data.index)
def columnCount(self, parent=None):
return self._data.columns.size
def data(self, index, role=Qt.DisplayRole):
if index.isValid():
if role == Qt.DisplayRole:
# return QtCore.QVariant(str(self._data.values[index.row()][index.column()]))
return str(self._data.values[index.row()][index.column()])
# return QtCore.QVariant()
return None
def headerData(self, column, orientation, role=Qt.DisplayRole):
if role != Qt.DisplayRole:
return None
if orientation == Qt.Horizontal:
return self._data.columns[column]
return int(column + 1)
def update(self, data):
self._data = data
self.reset()
def reset(self):
self.beginResetModel()
# unnecessary call to actually clear data, but recommended by design guidance from Qt docs
# left blank in preliminary testing
self.endResetModel()
def flags(self, index):
return QtCore.Qt.ItemIsEnabled
# stock-information classes used by the portfolios
# portfolio entry for TradeShortTerm
class CPortStock_ShortTerm(object):
def __init__(self, 번호, 매수일, 종목코드, 종목명, 시장, 매수가, 매수조건, 보유일, 매도전략, 매도구간별조건, 매도구간=1, 매도가=0, 수량=0):
self.번호 = 번호
self.매수일 = 매수일
self.종목코드 = 종목코드
self.종목명 = 종목명
self.시장 = 시장
self.매수가 = 매수가
self.매수조건 = 매수조건
self.보유일 = 보유일
self.매도전략 = 매도전략
self.매도구간별조건 = 매도구간별조건
self.매도구간 = 매도구간
self.매도가 = 매도가
self.수량 = 수량
if self.매도전략 == '2' or self.매도전략 == '3':
            self.목표도달 = False # whether the target (sell) price has been reached; if still False when a zone cut happens, sell the full position
            self.매도조건 = '' # zone sell: B, target sell: T
elif self.매도전략 == '4':
self.sellcount = 0
            self.매도단위수량 = 0 # for strategy 4 the default sell unit is 1/3 of the held quantity
self.익절가1도달 = False
self.익절가2도달 = False
self.목표가도달 = False
# portfolio entry for TradeLongTerm
class CPortStock_LongTerm(object):
def __init__(self, 매수일, 종목코드, 종목명, 시장, 매수가, 수량=0):
self.매수일 = 매수일
self.종목코드 = 종목코드
self.종목명 = 종목명
self.시장 = 시장
self.매수가 = 매수가
self.수량 = 수량
# portfolio entry for the base robot
class CPortStock(object):
def __init__(self, 매수일, 종목코드, 종목명, 시장, 매수가, 보유일, 매도전략, 매도구간=0, 매도전략변경1=False, 매도전략변경2=False, 수량=0):
self.매수일 = 매수일
self.종목코드 = 종목코드
self.종목명 = 종목명
self.시장 = 시장
self.매수가 = 매수가
self.보유일 = 보유일
self.매도전략 = 매도전략
self.매도구간 = 매도구간
self.매도전략변경1 = 매도전략변경1
self.매도전략변경2 = 매도전략변경2
self.수량 = 수량
# CTrade: base class for trading robots - talks to the Kiwoom OpenAPI to place orders etc.
class CTrade(object):
def __init__(self, sName, UUID, kiwoom=None, parent=None):
"""
:param sName: 로봇이름
:param UUID: 로봇구분용 id
:param kiwoom: 키움OpenAPI
:param parent: 나를 부른 부모 - 보통은 메인윈도우
"""
# print("CTrade : __init__")
self.sName = sName
self.UUID = UUID
        self.sAccount = None # trading account number
self.kiwoom = kiwoom
self.parent = parent
        self.running = False # running state
        self.portfolio = dict() # portfolio bookkeeping {'stock code': stock info}
        self.현재가 = dict() # current price of each stock
    # read the stocks matched by a saved condition (search formula)
def GetCodes(self, Index, Name, Type):
logger.info("[%s]조건 검색식 종목 읽기"%(self.sName))
# self.kiwoom.OnReceiveTrCondition[str, str, str, int, int].connect(self.OnReceiveTrCondition)
# self.kiwoom.OnReceiveConditionVer[int, str].connect(self.OnReceiveConditionVer)
# self.kiwoom.OnReceiveRealCondition[str, str, str, str].connect(self.OnReceiveRealCondition)
try:
self.getConditionLoad()
print('getload 완료')
print('조건 검색 :', Name, int(Index), Type)
            codelist = self.sendCondition("0156", Name, int(Index), Type) # run the stock search immediately with the selected condition formula
print('GetCodes :', self.codeList)
return self.codeList
except Exception as e:
print("GetCondition_Error")
print(e)
def getConditionLoad(self):
print('getConditionLoad')
self.kiwoom.dynamicCall("GetConditionLoad()")
        # the loop is terminated in the receiveConditionVer() event method
self.ConditionLoop = QEventLoop()
self.ConditionLoop.exec_()
def getConditionNameList(self):
print('getConditionNameList')
data = self.kiwoom.dynamicCall("GetConditionNameList()")
conditionList = data.split(';')
del conditionList[-1]
conditionDictionary = {}
for condition in conditionList:
key, value = condition.split('^')
conditionDictionary[int(key)] = value
# print(conditionDictionary)
return conditionDictionary
    # query a condition formula
def sendCondition(self, screenNo, conditionName, conditionIndex, isRealTime):
print("CTrade : sendCondition", screenNo, conditionName, conditionIndex, isRealTime)
isRequest = self.kiwoom.dynamicCall("SendCondition(QString, QString, int, int)",
screenNo, conditionName, conditionIndex, isRealTime)
        # the loop is terminated in the receiveTrCondition() event method
        # for real-time searches the loop must not be applied, so that the query is registered immediately
        # if self.조건검색타입 ==0:
self.ConditionLoop = QEventLoop()
self.ConditionLoop.exec_()
    # stop a condition-formula query
def sendConditionStop(self, screenNo, conditionName, conditionIndex):
# print("CTrade : sendConditionStop", screenNo, conditionName, conditionIndex)
isRequest = self.kiwoom.dynamicCall("SendConditionStop(QString, QString, int)",
screenNo, conditionName, conditionIndex)
    # receive the stocks currently held in the account
def InquiryList(self, _repeat=0):
# print("CTrade : InquiryList")
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "계좌번호", self.sAccount)
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "비밀번호입력매체구분", '00')
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "조회구분", '1')
ret = self.kiwoom.dynamicCall('CommRqData(QString, QString, int, QString)', "계좌평가잔고내역요청", "opw00018", _repeat, '{:04d}'.format(self.sScreenNo))
        self.InquiryLoop = QEventLoop() # block until the account holdings are received so the robot can use them right away, then release the loop
self.InquiryLoop.exec_()
    # request return rate, profit and fees for today's sold stocks (daily realised P/L per stock request)
def DailyProfit(self, 금일매도종목):
_repeat = 0
# self.sAccount = 로봇거래계좌번호
# self.sScreenNo = self.ScreenNumber
시작일자 = datetime.date.today().strftime('%Y%m%d')
cnt = 1
for 종목코드 in 금일매도종목:
# print(self.sScreenNo, 종목코드, 시작일자)
self.update_cnt = len(금일매도종목) - cnt
cnt += 1
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "계좌번호", self.sAccount)
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "종목코드", 종목코드)
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "시작일자", 시작일자)
ret = self.kiwoom.dynamicCall('CommRqData(QString, QString, int, QString)', "일자별종목별실현손익요청", "OPT10072",
_repeat, '{:04d}'.format(self.sScreenNo))
            self.DailyProfitLoop = QEventLoop() # block until the realised P/L response is received so the robot can use it right away, then release the loop
self.DailyProfitLoop.exec_()
    # upload the daily realised P/L response to Google Sheets
    def DailyProfitUpload(self, 매도결과):
        # 매도결과 = [stock name, filled qty, purchase price, fill price, today's P/L, P/L %, today's commission, today's tax]
print(매도결과)
if self.sName == 'TradeShortTerm':
history_sheet = shortterm_history_sheet
history_cols = shortterm_history_cols
elif self.sName == 'TradeCondition':
history_sheet = condition_history_sheet
history_cols = condition_history_cols
try:
code_row = history_sheet.findall(매도결과[0])[-1].row
계산수익률 = round((int(float(매도결과[3])) / int(float(매도결과[2])) - 1) * 100, 2)
cell = alpha_list[history_cols.index('매수가')] + str(code_row) # 매입단가
history_sheet.update_acell(cell, int(float(매도결과[2])))
cell = alpha_list[history_cols.index('매도가')] + str(code_row) # 체결가
history_sheet.update_acell(cell, int(float(매도결과[3])))
cell = alpha_list[history_cols.index('수익률(계산)')] + str(code_row) # 수익률 계산
history_sheet.update_acell(cell, 계산수익률)
cell = alpha_list[history_cols.index('수익률')] + str(code_row) # 손익율
history_sheet.update_acell(cell, 매도결과[5])
cell = alpha_list[history_cols.index('수익금')] + str(code_row) # 당일매도손익
history_sheet.update_acell(cell, int(float(매도결과[4])))
cell = alpha_list[history_cols.index('세금+수수료')] + str(code_row) # 당일매매수수료 + 당일매매세금
history_sheet.update_acell(cell, int(float(매도결과[6])) + int(float(매도결과[7])))
self.DailyProfitLoop.exit()
if self.update_cnt == 0:
print('금일 실현 손익 구글 업로드 완료')
Telegram("[StockTrader]금일 실현 손익 구글 업로드 완료")
logger.info("[StockTrader]금일 실현 손익 구글 업로드 완료")
except:
self.DailyProfitLoop.exit() # 강제 루프 해제
print('[StockTrader]CTrade:DailyProfitUpload_%s 매도 이력 없음' % 매도결과[0])
logger.error('CTrade:DailyProfitUpload_%s 매도 이력 없음' % 매도결과[0])
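# Hedged worked example of the 수익률(계산) formula used above (comment only): with 매입단가 10,000원
# and 체결가 10,550원, round((10550 / 10000 - 1) * 100, 2) == 5.5, i.e. a 5.5% realized return.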
# 포트폴리오의 상태
def GetStatus(self):
# print("CTrade : GetStatus")
try:
result = []
for p, v in self.portfolio.items():
result.append('%s(%s)[P%s/V%s/D%s]' % (v.종목명.strip(), v.종목코드, v.매수가, v.수량, v.매수일))
return [self.__class__.__name__, self.sName, self.UUID, self.sScreenNo, self.running, len(self.portfolio), ','.join(result)]
except Exception as e:
print('CTrade_GetStatus Error', e)
logger.error('CTrade_GetStatus Error : %s' % e)
def GenScreenNO(self):
"""
:return: 키움증권에서 요구하는 스크린번호를 생성
"""
# print("CTrade : GenScreenNO")
self.SmallScreenNumber += 1
if self.SmallScreenNumber > 9999:
self.SmallScreenNumber = 0
return self.sScreenNo * 10000 + self.SmallScreenNumber
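# Hedged example of the screen-number scheme above (comment only; assumes self.sScreenNo == 5000):
# the first call returns 5000 * 10000 + 1 == 50000001, the next one 50000002, and once the small
# counter passes 9999 it wraps back to 0, so each request gets its own number within the robot's range.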
def GetLoginInfo(self, tag):
"""
:param tag:
:return: 로그인정보 호출
"""
# print("CTrade : GetLoginInfo")
return self.kiwoom.dynamicCall('GetLoginInfo("%s")' % tag)
def KiwoomConnect(self):
"""
:return: 키움증권OpenAPI의 CallBack에 대응하는 처리함수를 연결
"""
# print("CTrade : KiwoomConnect")
try:
self.kiwoom.OnEventConnect[int].connect(self.OnEventConnect)
self.kiwoom.OnReceiveMsg[str, str, str, str].connect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].connect(self.OnReceiveTrData)
self.kiwoom.OnReceiveChejanData[str, int, str].connect(self.OnReceiveChejanData)
self.kiwoom.OnReceiveRealData[str, str, str].connect(self.OnReceiveRealData)
self.kiwoom.OnReceiveTrCondition[str, str, str, int, int].connect(self.OnReceiveTrCondition)
self.kiwoom.OnReceiveConditionVer[int, str].connect(self.OnReceiveConditionVer)
self.kiwoom.OnReceiveRealCondition[str, str, str, str].connect(self.OnReceiveRealCondition)
except Exception as e:
print("CTrade : [%s]KiwoomConnect Error :"&(self.sName, e))
# logger.info("%s : connected" % self.sName)
def KiwoomDisConnect(self):
"""
:return: Callback 연결해제
"""
# print("CTrade : KiwoomDisConnect")
try:
self.kiwoom.OnEventConnect[int].disconnect(self.OnEventConnect)
except Exception:
pass
try:
self.kiwoom.OnReceiveMsg[str, str, str, str].disconnect(self.OnReceiveMsg)
except Exception:
pass
try:
self.kiwoom.OnReceiveTrCondition[str, str, str, int, int].disconnect(self.OnReceiveTrCondition)
except Exception:
pass
try:
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].disconnect(self.OnReceiveTrData)
except Exception:
pass
try:
self.kiwoom.OnReceiveChejanData[str, int, str].disconnect(self.OnReceiveChejanData)
except Exception:
pass
try:
self.kiwoom.OnReceiveConditionVer[int, str].disconnect(self.OnReceiveConditionVer)
except Exception:
pass
try:
self.kiwoom.OnReceiveRealCondition[str, str, str, str].disconnect(self.OnReceiveRealCondition)
except Exception:
pass
try:
self.kiwoom.OnReceiveRealData[str, str, str].disconnect(self.OnReceiveRealData)
except Exception:
pass
# logger.info("%s : disconnected" % self.sName)
def KiwoomAccount(self):
"""
:return: 계좌정보를 읽어옴
"""
# print("CTrade : KiwoomAccount")
ACCOUNT_CNT = self.GetLoginInfo('ACCOUNT_CNT')
ACC_NO = self.GetLoginInfo('ACCNO')
self.account = ACC_NO.split(';')[0:-1]
self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "계좌번호", self.account[0])
self.kiwoom.dynamicCall('CommRqData(QString, QString, int, QString)', "d+2예수금요청", "opw00001", 0, '{:04d}'.format(self.sScreenNo))
self.depositLoop = QEventLoop() # self.d2_deposit를 로봇에서 바로 쓸 수 있도록하기 위해서 예수금을 받고나서 루프해제시킴
self.depositLoop.exec_()
# logger.debug("보유 계좌수: %s 계좌번호: %s [%s]" % (ACCOUNT_CNT, self.account[0], ACC_NO))
def KiwoomSendOrder(self, sRQName, sScreenNo, sAccNo, nOrderType, sCode, nQty, nPrice, sHogaGb, sOrgOrderNo):
"""
OpenAPI 메뉴얼 참조
:param sRQName:
:param sScreenNo:
:param sAccNo:
:param nOrderType:
:param sCode:
:param nQty:
:param nPrice:
:param sHogaGb:
:param sOrgOrderNo:
:return:
"""
# print("CTrade : KiwoomSendOrder")
try:
order = self.kiwoom.dynamicCall(
'SendOrder(QString, QString, QString, int, QString, int, int, QString, QString)',
[sRQName, sScreenNo, sAccNo, nOrderType, sCode, nQty, nPrice, sHogaGb, sOrgOrderNo])
return order
except Exception as e:
print('CTrade_KiwoomSendOrder Error ', e)
Telegram('[StockTrader]CTrade_KiwoomSendOrder Error: %s' % e, send='mc')
logger.error('CTrade_KiwoomSendOrder Error : %s' % e)
# -거래구분값 확인(2자리)
#
# 00 : 지정가
# 03 : 시장가
# 05 : 조건부지정가
# 06 : 최유리지정가
# 07 : 최우선지정가
# 10 : 지정가IOC
# 13 : 시장가IOC
# 16 : 최유리IOC
# 20 : 지정가FOK
# 23 : 시장가FOK
# 26 : 최유리FOK
# 61 : 장전 시간외단일가매매
# 81 : 장후 시간외종가
# 62 : 시간외단일가매매
#
# -매매구분값 (1 자리)
# 1 : 신규매수
# 2 : 신규매도
# 3 : 매수취소
# 4 : 매도취소
# 5 : 매수정정
# 6 : 매도정정
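# Hedged usage sketch of KiwoomSendOrder with the codes above (comment only; the RQName, stock code
# and quantity are placeholder values, not taken from this project):
#   # new market-price buy: nOrderType=1 (신규매수), sHogaGb='03' (시장가) so nPrice must be 0
#   ret = self.KiwoomSendOrder('B_005930', self.GenScreenNO(), self.sAccount, 1, '005930', 10, 0, '03', 0)
#   # the actual 주문번호 is not in ret; it arrives in OnReceiveTrData and is mapped via 주문등록()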
def KiwoomSetRealReg(self, sScreenNo, sCode, sRealType='0'):
"""
OpenAPI 메뉴얼 참조
:param sScreenNo:
:param sCode:
:param sRealType:
:return:
"""
# print("CTrade : KiwoomSetRealReg")
ret = self.kiwoom.dynamicCall('SetRealReg(QString, QString, QString, QString)', sScreenNo, sCode, '9001;10',
sRealType)
return ret
def KiwoomSetRealRemove(self, sScreenNo, sCode):
"""
OpenAPI 메뉴얼 참조
:param sScreenNo:
:param sCode:
:return:
"""
# print("CTrade : KiwoomSetRealRemove")
ret = self.kiwoom.dynamicCall('SetRealRemove(QString, QString)', sScreenNo, sCode)
return ret
def OnEventConnect(self, nErrCode):
"""
OpenAPI 메뉴얼 참조
:param nErrCode:
:return:
"""
# print("CTrade : OnEventConnect")
logger.debug('OnEventConnect [%s]' % nErrCode)
def OnReceiveMsg(self, sScrNo, sRQName, sTRCode, sMsg):
"""
OpenAPI 메뉴얼 참조
:param sScrNo:
:param sRQName:
:param sTRCode:
:param sMsg:
:return:
"""
# print("CTrade : OnReceiveMsg")
logger.debug('OnReceiveMsg [%s] [%s] [%s] [%s]' % (sScrNo, sRQName, sTRCode, sMsg))
# self.InquiryLoop.exit()
def OnReceiveTrData(self, sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg):
"""
OpenAPI 메뉴얼 참조
:param sScrNo:
:param sRQName:
:param sTRCode:
:param sRecordName:
:param sPreNext:
:param nDataLength:
:param sErrorCode:
:param sMessage:
:param sSPlmMsg:
:return:
"""
# print('CTrade : OnReceiveTrData')
try:
# logger.debug('OnReceiveTrData [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] ' % (sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg))
if self.sScreenNo != int(sScrNo[:4]):
return
if 'B_' in sRQName or 'S_' in sRQName:
주문번호 = self.kiwoom.dynamicCall('CommGetData(QString, QString, QString, int, QString)', sTRCode, "", sRQName, 0, "주문번호")
# logger.debug("화면번호: %s sRQName : %s 주문번호: %s" % (sScrNo, sRQName, 주문번호))
self.주문등록(sRQName, 주문번호)
if sRQName == "d+2예수금요청":
data = self.kiwoom.dynamicCall('CommGetData(QString, QString, QString, int, QString)',sTRCode, "", sRQName, 0, "d+2추정예수금")
# 입력된 문자열에 대해 lstrip 메서드를 통해 문자열 왼쪽에 존재하는 '-' 또는 '0'을 제거. 그리고 format 함수를 통해 천의 자리마다 콤마를 추가한 문자열로 변경
strip_data = data.lstrip('-0')
if strip_data == '':
strip_data = '0'
format_data = format(int(strip_data), ',d')
if data.startswith('-'):
format_data = '-' + format_data
self.sAsset = format_data
self.depositLoop.exit() # self.d2_deposit를 로봇에서 바로 쓸 수 있도록하기 위해서 예수금을 받고나서 루프해제시킴
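# Hedged worked example of the formatting above (comment only): raw data '000012345000' ->
# lstrip('-0') -> '12345000' -> format(..., ',d') -> '12,345,000'; for '-000012345000' the leading
# '-' is stripped together with the zeros but re-attached afterwards, giving '-12,345,000'.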
if sRQName == "계좌평가잔고내역요청":
print("계좌평가잔고내역요청_수신")
cnt = self.kiwoom.dynamicCall('GetRepeatCnt(QString, QString)', sTRCode, sRQName)
self.CList = []
for i in range(0, cnt):
S = self.kiwoom.dynamicCall('CommGetData(QString, QString, QString, int, QString)', sTRCode, "", sRQName, i, '종목번호').strip().lstrip('0')
# print(S)
if len(S) > 0 and S[0] == '-':
S = '-' + S[1:].lstrip('0')
S = self.종목코드변환(S) # 종목코드 맨 첫 'A'를 삭제하기 위함
self.CList.append(S)
# logger.debug("%s" % row)
if sPreNext == '2':
self.remained_data = True
self.InquiryList(_repeat=2)
else:
self.remained_data = False
print(self.CList)
self.InquiryLoop.exit()
if sRQName == "일자별종목별실현손익요청":
try:
data_idx = ['종목명', '체결량', '매입단가', '체결가', '당일매도손익', '손익율', '당일매매수수료', '당일매매세금']
result = []
for idx in data_idx:
data = self.kiwoom.dynamicCall('CommGetData(QString, QString, QString, int, QString)', sTRCode,
"",
sRQName, 0, idx)
result.append(data.strip())
self.DailyProfitUpload(result)
except Exception as e:
print(e)
logger.error('일자별종목별실현손익요청 Error : %s' % e)
except Exception as e:
print('CTrade_OnReceiveTrData Error ', e)
Telegram('[StockTrader]CTrade_OnReceiveTrData Error : %s' % e, send='mc')
logger.error('CTrade_OnReceiveTrData Error : %s' % e)
def OnReceiveChejanData(self, sGubun, nItemCnt, sFidList):
"""
OpenAPI 메뉴얼 참조
:param sGubun:
:param nItemCnt:
:param sFidList:
:return:
"""
# logger.debug('OnReceiveChejanData [%s] [%s] [%s]' % (sGubun, nItemCnt, sFidList))
# 주문체결시 순서
# 1 구분:0 GetChejanData(913) = '접수'
# 2 구분:0 GetChejanData(913) = '체결'
# 3 구분:1 잔고정보
"""
# sFid별 주요데이터는 다음과 같습니다.
# "9201" : "계좌번호"
# "9203" : "주문번호"
# "9001" : "종목코드"
# "913" : "주문상태"
# "302" : "종목명"
# "900" : "주문수량"
# "901" : "주문가격"
# "902" : "미체결수량"
# "903" : "체결누계금액"
# "904" : "원주문번호"
# "905" : "주문구분"
# "906" : "매매구분"
# "907" : "매도수구분"
# "908" : "주문/체결시간"
# "909" : "체결번호"
# "910" : "체결가"
# "911" : "체결량"
# "10" : "현재가"
# "27" : "(최우선)매도호가"
# "28" : "(최우선)매수호가"
# "914" : "단위체결가"
# "915" : "단위체결량"
# "919" : "거부사유"
# "920" : "화면번호"
# "917" : "신용구분"
# "916" : "대출일"
# "930" : "보유수량"
# "931" : "매입단가"
# "932" : "총매입가"
# "933" : "주문가능수량"
# "945" : "당일순매수수량"
# "946" : "매도/매수구분"
# "950" : "당일총매도손일"
# "951" : "예수금"
# "307" : "기준가"
# "8019" : "손익율"
# "957" : "신용금액"
# "958" : "신용이자"
# "918" : "만기일"
# "990" : "당일실현손익(유가)"
# "991" : "당일실현손익률(유가)"
# "992" : "당일실현손익(신용)"
# "993" : "당일실현손익률(신용)"
# "397" : "파생상품거래단위"
# "305" : "상한가"
# "306" : "하한가"
"""
# print("CTrade : OnReceiveChejanData")
try:
# 접수
if sGubun == "0":
# logger.debug('OnReceiveChejanData: 접수 [%s] [%s] [%s]' % (sGubun, nItemCnt, sFidList))
화면번호 = self.kiwoom.dynamicCall('GetChejanData(QString)', 920)
if len(화면번호.replace(' ','')) == 0 : # 로봇 실행중 영웅문으로 주문 발생 시 화면번호가 ' '로 들어와 에러발생함 방지
print('다른 프로그램을 통한 거래 발생')
Telegram('다른 프로그램을 통한 거래 발생', send='mc')
logger.info('다른 프로그램을 통한 거래 발생')
return
elif self.sScreenNo != int(화면번호[:4]):
return
param = dict()
param['sGubun'] = sGubun
param['계좌번호'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 9201)
param['주문번호'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 9203)
param['종목코드'] = self.종목코드변환(self.kiwoom.dynamicCall('GetChejanData(QString)', 9001))
param['주문업무분류'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 912)
# 접수 / 체결 확인
# 주문상태(10:원주문, 11:정정주문, 12:취소주문, 20:주문확인, 21:정정확인, 22:취소확인, 90-92:주문거부)
param['주문상태'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 913) # 접수 or 체결 확인
param['종목명'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 302).strip()
param['주문수량'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 900)
param['주문가격'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 901)
param['미체결수량'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 902)
param['체결누계금액'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 903)
param['원주문번호'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 904)
param['주문구분'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 905)
param['매매구분'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 906)
param['매도수구분'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 907)
param['체결시간'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 908)
param['체결번호'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 909)
param['체결가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 910)
param['체결량'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 911)
param['현재가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 10)
param['매도호가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 27)
param['매수호가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 28)
param['단위체결가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 914).strip()
param['단위체결량'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 915)
param['화면번호'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 920)
param['당일매매수수료'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 938)
param['당일매매세금'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 939)
param['체결수량'] = int(param['주문수량']) - int(param['미체결수량'])
logger.debug('접수 - 주문상태:{주문상태} 계좌번호:{계좌번호} 체결시간:{체결시간} 주문번호:{주문번호} 체결번호:{체결번호} 종목코드:{종목코드} 종목명:{종목명} 체결량:{체결량} 체결가:{체결가} 단위체결가:{단위체결가} 주문수량:{주문수량} 체결수량:{체결수량} 단위체결량:{단위체결량} 미체결수량:{미체결수량} 당일매매수수료:{당일매매수수료} 당일매매세금:{당일매매세금}'.format(**param))
# if param["주문상태"] == "접수":
# self.접수처리(param)
# if param["주문상태"] == "체결": # 매도의 경우 체결로 안들어옴
# self.체결처리(param)
self.체결처리(param)
# 잔고통보
if sGubun == "1":
# logger.debug('OnReceiveChejanData: 잔고통보 [%s] [%s] [%s]' % (sGubun, nItemCnt, sFidList))
param = dict()
param['sGubun'] = sGubun
param['계좌번호'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 9201)
param['종목코드'] = self.종목코드변환(self.kiwoom.dynamicCall('GetChejanData(QString)', 9001))
param['신용구분'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 917)
param['대출일'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 916)
param['종목명'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 302).strip()
param['현재가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 10)
param['보유수량'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 930)
param['매입단가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 931)
param['총매입가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 932)
param['주문가능수량'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 933)
param['당일순매수량'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 945)
param['매도매수구분'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 946)
param['당일총매도손익'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 950)
param['예수금'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 951)
param['매도호가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 27)
param['매수호가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 28)
param['기준가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 307)
param['손익율'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 8019)
param['신용금액'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 957)
param['신용이자'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 958)
param['만기일'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 918)
param['당일실현손익_유가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 990)
param['당일실현손익률_유가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 991)
param['당일실현손익_신용'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 992)
param['당일실현손익률_신용'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 993)
param['담보대출수량'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 959)
logger.debug('잔고통보 - 계좌번호:{계좌번호} 종목명:{종목명} 보유수량:{보유수량} 매입단가:{매입단가} 총매입가:{총매입가} 손익율:{손익율} 당일총매도손익:{당일총매도손익} 당일순매수량:{당일순매수량}'.format(**param))
self.잔고처리(param)
# 특이신호
if sGubun == "3":
logger.debug('OnReceiveChejanData: 특이신호 [%s] [%s] [%s]' % (sGubun, nItemCnt, sFidList))
pass
except Exception as e:
print('CTrade_OnReceiveChejanData Error ', e)
Telegram('[StockTrader]CTrade_OnReceiveChejanData Error : %s' % e, send='mc')
logger.error('CTrade_OnReceiveChejanData Error : %s' % e)
def OnReceiveRealData(self, sRealKey, sRealType, sRealData):
"""
OpenAPI 메뉴얼 참조
:param sRealKey:
:param sRealType:
:param sRealData:
:return:
"""
# logger.debug('OnReceiveRealData [%s] [%s] [%s]' % (sRealKey, sRealType, sRealData))
_now = datetime.datetime.now()
try:
if _now.strftime('%H:%M:%S') < '09:00:00': # 9시 이전 데이터 버림(장 시작 전에 테이터 들어오는 것도 많으므로 버리기 위함)
return
if sRealKey not in self.실시간종목리스트: # 리스트에 없는 데이터 버림
return
if sRealType == "주식시세" or sRealType == "주식체결":
param = dict()
param['종목코드'] = self.종목코드변환(sRealKey)
param['체결시간'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 20).strip()
param['현재가'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 10).strip()
param['전일대비'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 11).strip()
param['등락률'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 12).strip()
param['매도호가'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 27).strip()
param['매수호가'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 28).strip()
param['누적거래량'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 13).strip()
param['시가'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 16).strip()
param['고가'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 17).strip()
param['저가'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 18).strip()
param['거래회전율'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 31).strip()
param['시가총액'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 311).strip()
self.실시간데이터처리(param)
except Exception as e:
print('CTrade_OnReceiveRealData Error ', e)
Telegram('[StockTrader]CTrade_OnReceiveRealData Error : %s' % e, send='mc')
logger.error('CTrade_OnReceiveRealData Error : %s' % e)
def OnReceiveTrCondition(self, sScrNo, strCodeList, strConditionName, nIndex, nNext):
print('OnReceiveTrCondition')
try:
if strCodeList == "":
self.ConditionLoop.exit()
return []
self.codeList = strCodeList.split(';')
del self.codeList[-1]
# print(self.codeList)
logger.info("[%s]조건 검색 완료"%(self.sName))
self.ConditionLoop.exit()
print('OnReceiveTrCondition :', self.codeList)
return self.codeList
except Exception as e:
print("OnReceiveTrCondition_Error")
print(e)
def OnReceiveConditionVer(self, lRet, sMsg):
print('OnReceiveConditionVer')
try:
self.condition = self.getConditionNameList()
except Exception as e:
print("CTrade : OnReceiveConditionVer_Error")
finally:
self.ConditionLoop.exit()
def OnReceiveRealCondition(self, sTrCode, strType, strConditionName, strConditionIndex):
# print("CTrade : OnReceiveRealCondition")
# OpenAPI 메뉴얼 참조
# :param sTrCode:
# :param strType:
# :param strConditionName:
# :param strConditionIndex:
# :return:
_now = datetime.datetime.now().strftime('%H:%M:%S')
if (_now >= '10:00:00' and _now < '13:00:00') or _now >= '15:17:00': # 10시부터 13시 이전 데이터 버림, 15시 17분 당일 매도 처리 후 데이터 버림
return
# logger.info('OnReceiveRealCondition [%s] [%s] [%s] [%s]' % (sTrCode, strType, strConditionName, strConditionIndex))
print("실시간조검검색_종목코드: %s %s / Time : %s"%(sTrCode, "종목편입" if strType == "I" else "종목이탈", _now))
if strType == 'I':
self.실시간조건처리(sTrCode)
def 종목코드변환(self, code): # TR 통해서 받은 종목 코드에 A가 붙을 경우 삭제
return code.replace('A', '')
def 정량매수(self, sRQName, 종목코드, 매수가, 수량):
# sRQName = '정량매수%s' % self.sScreenNo
sScreenNo = self.GenScreenNO() # 주문을 낼때 마다 스크린번호를 생성
sAccNo = self.sAccount
nOrderType = 1 # (1:신규매수, 2:신규매도 3:매수취소, 4:매도취소, 5:매수정정, 6:매도정정)
sCode = 종목코드
nQty = 수량
nPrice = 매수가
sHogaGb = self.매수방법 # 00:지정가, 03:시장가, 05:조건부지정가, 06:최유리지정가, 07:최우선지정가, 10:지정가IOC, 13:시장가IOC, 16:최유리IOC, 20:지정가FOK, 23:시장가FOK, 26:최유리FOK, 61:장개시전시간외, 62:시간외단일가매매, 81:시간외종가
if sHogaGb in ['03', '07', '06']:
nPrice = 0
sOrgOrderNo = 0
ret = self.parent.KiwoomSendOrder(sRQName, sScreenNo, sAccNo, nOrderType, sCode, nQty, nPrice, sHogaGb, sOrgOrderNo)
return ret
def 정액매수(self, sRQName, 종목코드, 매수가, 매수금액):
# sRQName = '정액매수%s' % self.sScreenNo
try:
sScreenNo = self.GenScreenNO()
sAccNo = self.sAccount
nOrderType = 1 # (1:신규매수, 2:신규매도 3:매수취소, 4:매도취소, 5:매수정정, 6:매도정정)
sCode = 종목코드
nQty = 매수금액 // 매수가
nPrice = 매수가
sHogaGb = self.매수방법 # 00:지정가, 03:시장가, 05:조건부지정가, 06:최유리지정가, 07:최우선지정가, 10:지정가IOC, 13:시장가IOC, 16:최유리IOC, 20:지정가FOK, 23:시장가FOK, 26:최유리FOK, 61:장개시전시간외, 62:시간외단일가매매, 81:시간외종가
if sHogaGb in ['03', '07', '06']:
nPrice = 0
sOrgOrderNo = 0
# logger.debug('주문 - %s %s %s %s %s %s %s %s %s', sRQName, sScreenNo, sAccNo, nOrderType, sCode, nQty, nPrice, sHogaGb, sOrgOrderNo)
ret = self.parent.KiwoomSendOrder(sRQName, sScreenNo, sAccNo, nOrderType, sCode, nQty, nPrice, sHogaGb,
sOrgOrderNo)
return ret
except Exception as e:
print('CTrade_정액매수 Error ', e)
Telegram('[StockTrader]CTrade_정액매수 Error : %s' % e, send='mc')
logger.error('CTrade_정액매수 Error : %s' % e)
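# Hedged worked example for 정액매수 (comment only): with 매수금액 200,000원 and 매수가 3,219원,
# nQty = 200000 // 3219 == 62 shares; the remainder stays uninvested because // rounds down.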
def 정량매도(self, sRQName, 종목코드, 매도가, 수량):
# sRQName = '정량매도%s' % self.sScreenNo
try:
sScreenNo = self.GenScreenNO()
sAccNo = self.sAccount
nOrderType = 2 # (1:신규매수, 2:신규매도 3:매수취소, 4:매도취소, 5:매수정정, 6:매도정정)
sCode = 종목코드
nQty = 수량
nPrice = 매도가
sHogaGb = self.매도방법 # 00:지정가, 03:시장가, 05:조건부지정가, 06:최유리지정가, 07:최우선지정가, 10:지정가IOC, 13:시장가IOC, 16:최유리IOC, 20:지정가FOK, 23:시장가FOK, 26:최유리FOK, 61:장개시전시간외, 62:시간외단일가매매, 81:시간외종가
if sHogaGb in ['03', '07', '06']:
nPrice = 0
sOrgOrderNo = 0
ret = self.parent.KiwoomSendOrder(sRQName, sScreenNo, sAccNo, nOrderType, sCode, nQty, nPrice, sHogaGb,
sOrgOrderNo)
return ret
except Exception as e:
print('[%s]정량매도 Error : %s' % (self.sName, e))
Telegram('[StockTrader][%s]정량매도 Error : %s' % (self.sName, e), send='mc')
logger.error('[%s]정량매도 Error : %s' % (self.sName, e))
def 정액매도(self, sRQName, 종목코드, 매도가, 수량):
# sRQName = '정액매도%s' % self.sScreenNo
sScreenNo = self.GenScreenNO()
sAccNo = self.sAccount
nOrderType = 2 # (1:신규매수, 2:신규매도 3:매수취소, 4:매도취소, 5:매수정정, 6:매도정정)
sCode = 종목코드
nQty = 수량
nPrice = 매도가
sHogaGb = self.매도방법 # 00:지정가, 03:시장가, 05:조건부지정가, 06:최유리지정가, 07:최우선지정가, 10:지정가IOC, 13:시장가IOC, 16:최유리IOC, 20:지정가FOK, 23:시장가FOK, 26:최유리FOK, 61:장개시전시간외, 62:시간외단일가매매, 81:시간외종가
if sHogaGb in ['03', '07', '06']:
nPrice = 0
sOrgOrderNo = 0
ret = self.parent.KiwoomSendOrder(sRQName, sScreenNo, sAccNo, nOrderType, sCode, nQty, nPrice, sHogaGb,
sOrgOrderNo)
return ret
def 주문등록(self, sRQName, 주문번호):
self.주문번호_주문_매핑[주문번호] = sRQName
Ui_계좌정보조회, QtBaseClass_계좌정보조회 = uic.loadUiType("./UI/계좌정보조회.ui")
class 화면_계좌정보(QDialog, Ui_계좌정보조회):
def __init__(self, sScreenNo, kiwoom=None, parent=None):
super(화면_계좌정보, self).__init__(parent) # Initialize하는 형식
self.setAttribute(Qt.WA_DeleteOnClose)
self.setupUi(self)
self.sScreenNo = sScreenNo
self.kiwoom = kiwoom
self.parent = parent
self.model = PandasModel()
self.tableView.setModel(self.model)
self.columns = ['종목번호', '종목명', '현재가', '보유수량', '매입가', '매입금액', '평가금액', '수익률(%)', '평가손익', '매매가능수량']
self.보이는컬럼 = ['종목번호', '종목명', '현재가', '보유수량', '매입가', '매입금액', '평가금액', '수익률(%)', '평가손익', '매매가능수량'] # 주당 손익 -> 수익률(%)
self.result = []
self.KiwoomAccount()
def KiwoomConnect(self):
self.kiwoom.OnReceiveMsg[str, str, str, str].connect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].connect(self.OnReceiveTrData)
def KiwoomDisConnect(self):
self.kiwoom.OnReceiveMsg[str, str, str, str].disconnect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].disconnect(self.OnReceiveTrData)
def KiwoomAccount(self):
ACCOUNT_CNT = self.kiwoom.dynamicCall('GetLoginInfo("ACCOUNT_CNT")')
ACC_NO = self.kiwoom.dynamicCall('GetLoginInfo("ACCNO")')
self.account = ACC_NO.split(';')[0:-1] # 계좌번호가 ;가 붙어서 나옴(에로 계좌가 3개면 111;222;333)
self.comboBox.clear()
self.comboBox.addItems(self.account)
logger.debug("보유 계좌수: %s 계좌번호: %s [%s]" % (ACCOUNT_CNT, self.account[0], ACC_NO))
def OnReceiveMsg(self, sScrNo, sRQName, sTrCode, sMsg):
logger.debug('OnReceiveMsg [%s] [%s] [%s] [%s]' % (sScrNo, sRQName, sTrCode, sMsg))
def OnReceiveTrData(self, sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg):
# logger.debug('OnReceiveTrData [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] ' % (sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg))
if self.sScreenNo != int(sScrNo):
return
logger.debug('OnReceiveTrData [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] ' % (
sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg))
if sRQName == "계좌평가잔고내역요청":
cnt = self.kiwoom.dynamicCall('GetRepeatCnt(QString, QString)', sTRCode, sRQName)
for i in range(0, cnt):
row = []
for j in self.columns:
# print(j)
S = self.kiwoom.dynamicCall('CommGetData(QString, QString, QString, int, QString)', sTRCode, "", sRQName, i, j).strip().lstrip('0')
# print(S)
if len(S) > 0 and S[0] == '-':
S = '-' + S[1:].lstrip('0')
row.append(S)
self.result.append(row)
# logger.debug("%s" % row)
if sPreNext == '2':
self.Request(_repeat=2)
else:
self.model.update(DataFrame(data=self.result, columns=self.보이는컬럼))
print(self.result)
for i in range(len(self.columns)):
self.tableView.resizeColumnToContents(i)
def Request(self, _repeat=0):
계좌번호 = self.comboBox.currentText().strip()
logger.debug("계좌번호 %s" % 계좌번호)
# KOA StudioSA에서 opw00018 확인
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "계좌번호", 계좌번호) # 8132495511
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "비밀번호입력매체구분", '00')
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "조회구분", '1')
ret = self.kiwoom.dynamicCall('CommRqData(QString, QString, int, QString)', "계좌평가잔고내역요청", "opw00018", _repeat,'{:04d}'.format(self.sScreenNo))
# 조회 버튼(QtDesigner에서 조회버튼 누르고 오른쪽 하단에 시그널/슬롯편집기를 보면 조회버튼 시그널(clicked), 슬롯(Inquiry())로 확인가능함
def inquiry(self):
self.result = []
self.Request(_repeat=0)
def robot_account(self):
global 로봇거래계좌번호
로봇거래계좌번호 = self.comboBox.currentText().strip()
# sqlite3 사용
try:
with sqlite3.connect(DATABASE) as conn:
cursor = conn.cursor()
robot_account = pickle.dumps(로봇거래계좌번호, protocol=pickle.HIGHEST_PROTOCOL, fix_imports=True)
_robot_account = base64.encodebytes(robot_account)
cursor.execute("REPLACE into Setting(keyword, value) values (?, ?)",
['robotaccount', _robot_account])
conn.commit()
print("로봇 계좌 등록 완료")
except Exception as e:
print('robot_account', e)
Ui_일자별주가조회, QtBaseClass_일자별주가조회 = uic.loadUiType("./UI/일자별주가조회.ui")
class 화면_일별주가(QDialog, Ui_일자별주가조회):
def __init__(self, sScreenNo, kiwoom=None, parent=None):
super(화면_일별주가, self).__init__(parent)
self.setAttribute(Qt.WA_DeleteOnClose)
self.setupUi(self)
self.setWindowTitle('일자별 주가 조회')
self.sScreenNo = sScreenNo
self.kiwoom = kiwoom
self.parent = parent
self.model = PandasModel()
self.tableView.setModel(self.model)
self.columns = ['일자', '현재가', '거래량', '시가', '고가', '저가', '거래대금']
self.result = []
d = today
self.lineEdit_date.setText(str(d))
def KiwoomConnect(self):
self.kiwoom.OnReceiveMsg[str, str, str, str].connect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].connect(self.OnReceiveTrData)
def KiwoomDisConnect(self):
self.kiwoom.OnReceiveMsg[str, str, str, str].disconnect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].disconnect(self.OnReceiveTrData)
def OnReceiveMsg(self, sScrNo, sRQName, sTrCode, sMsg):
logger.debug('OnReceiveMsg [%s] [%s] [%s] [%s]' % (sScrNo, sRQName, sTrCode, sMsg))
def OnReceiveTrData(self, sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg):
# logger.debug('OnReceiveTrData [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] ' % (sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg))
if self.sScreenNo != int(sScrNo):
return
if sRQName == "주식일봉차트조회":
종목코드 = ''
cnt = self.kiwoom.dynamicCall('GetRepeatCnt(QString, QString)', sTRCode, sRQName)
for i in range(0, cnt):
row = []
for j in self.columns:
S = self.kiwoom.dynamicCall('CommGetData(QString, QString, QString, int, QString)', sTRCode, "",
sRQName, i, j).strip().lstrip('0')
if len(S) > 0 and S[0] == '-':
S = '-' + S[1:].lstrip('0')
row.append(S)
self.result.append(row)
if sPreNext == '2':
QTimer.singleShot(주문지연, lambda: self.Request(_repeat=2))
else:
df = DataFrame(data=self.result, columns=self.columns)
df['종목코드'] = self.종목코드
self.model.update(df[['종목코드'] + self.columns])
for i in range(len(self.columns)):
self.tableView.resizeColumnToContents(i)
def Request(self, _repeat=0):
self.종목코드 = self.lineEdit_code.text().strip()
기준일자 = self.lineEdit_date.text().strip().replace('-', '')
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "종목코드", self.종목코드)
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "기준일자", 기준일자)
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "수정주가구분", '1')
ret = self.kiwoom.dynamicCall('CommRqData(QString, QString, int, QString)', "주식일봉차트조회", "OPT10081", _repeat,
'{:04d}'.format(self.sScreenNo))
def inquiry(self):
self.result = []
self.Request(_repeat=0)
Ui_분별주가조회, QtBaseClass_분별주가조회 = uic.loadUiType("./UI/분별주가조회.ui")
class 화면_분별주가(QDialog, Ui_분별주가조회):
def __init__(self, sScreenNo, kiwoom=None, parent=None):
super(화면_분별주가, self).__init__(parent)
self.setAttribute(Qt.WA_DeleteOnClose)
self.setupUi(self)
self.setWindowTitle('분별 주가 조회')
self.sScreenNo = sScreenNo
self.kiwoom = kiwoom
self.parent = parent
self.model = PandasModel()
self.tableView.setModel(self.model)
self.columns = ['체결시간', '현재가', '시가', '고가', '저가', '거래량']
self.result = []
def KiwoomConnect(self):
self.kiwoom.OnReceiveMsg[str, str, str, str].connect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].connect(self.OnReceiveTrData)
def KiwoomDisConnect(self):
self.kiwoom.OnReceiveMsg[str, str, str, str].disconnect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].disconnect(self.OnReceiveTrData)
def OnReceiveMsg(self, sScrNo, sRQName, sTrCode, sMsg):
logger.debug('OnReceiveMsg [%s] [%s] [%s] [%s]' % (sScrNo, sRQName, sTrCode, sMsg))
def OnReceiveTrData(self, sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg):
# logger.debug('OnReceiveTrData [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] ' % (sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg))
print('화면_분별주가 : OnReceiveTrData')
if self.sScreenNo != int(sScrNo):
return
if sRQName == "주식분봉차트조회":
종목코드 = ''
cnt = self.kiwoom.dynamicCall('GetRepeatCnt(QString, QString)', sTRCode, sRQName)
for i in range(0, cnt):
row = []
for j in self.columns:
S = self.kiwoom.dynamicCall('CommGetData(QString, QString, QString, int, QString)', sTRCode, "",
sRQName, i, j).strip().lstrip('0')
if len(S) > 0 and (S[0] == '-' or S[0] == '+'):
S = S[1:].lstrip('0')
row.append(S)
self.result.append(row)
# df = DataFrame(data=self.result, columns=self.columns)
# df.to_csv('분봉.csv', encoding='euc-kr')
if sPreNext == '2':
QTimer.singleShot(주문지연, lambda: self.Request(_repeat=2))
else:
df = DataFrame(data=self.result, columns=self.columns)
df.to_csv('분봉.csv', encoding='euc-kr', index=False)
df['종목코드'] = self.종목코드
self.model.update(df[['종목코드'] + self.columns])
for i in range(len(self.columns)):
self.tableView.resizeColumnToContents(i)
def Request(self, _repeat=0):
self.종목코드 = self.lineEdit_code.text().strip()
틱범위 = self.comboBox_min.currentText()[0:2].strip()
if 틱범위[0] == '0':
틱범위 = 틱범위[1:]
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "종목코드", self.종목코드)
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "틱범위", 틱범위)
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "수정주가구분", '1')
ret = self.kiwoom.dynamicCall('CommRqData(QString, QString, int, QString)', "주식분봉차트조회", "OPT10080", _repeat,
'{:04d}'.format(self.sScreenNo))
def inquiry(self):
self.result = []
self.Request(_repeat=0)
Ui_업종정보, QtBaseClass_업종정보 = uic.loadUiType("./UI/업종정보조회.ui")
class 화면_업종정보(QDialog, Ui_업종정보):
def __init__(self, sScreenNo, kiwoom=None, parent=None):
super(화면_업종정보, self).__init__(parent)
self.setAttribute(Qt.WA_DeleteOnClose)
self.setupUi(self)
self.setWindowTitle('업종정보 조회')
self.sScreenNo = sScreenNo
self.kiwoom = kiwoom
self.parent = parent
self.model = PandasModel()
self.tableView.setModel(self.model)
self.columns = ['종목코드', '종목명', '현재가', '대비기호', '전일대비', '등락률', '거래량', '비중', '거래대금', '상한', '상승', '보합', '하락', '하한',
'상장종목수']
self.result = []
d = today
self.lineEdit_date.setText(str(d))
def KiwoomConnect(self):
self.kiwoom.OnReceiveMsg[str, str, str, str].connect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].connect(self.OnReceiveTrData)
def KiwoomDisConnect(self):
self.kiwoom.OnReceiveMsg[str, str, str, str].disconnect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].disconnect(self.OnReceiveTrData)
def OnReceiveMsg(self, sScrNo, sRQName, sTrCode, sMsg):
logger.debug('OnReceiveMsg [%s] [%s] [%s] [%s]' % (sScrNo, sRQName, sTrCode, sMsg))
def OnReceiveTrData(self, sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage,
sSPlmMsg):
# logger.debug('OnReceiveTrData [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] ' % (sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg))
if self.sScreenNo != int(sScrNo):
return
if sRQName == "업종정보조회":
종목코드 = ''
cnt = self.kiwoom.dynamicCall('GetRepeatCnt(QString, QString)', sTRCode, sRQName)
for i in range(0, cnt):
row = []
for j in self.columns:
S = self.kiwoom.dynamicCall('CommGetData(QString, QString, QString, int, QString)', sTRCode, "",
sRQName, i, j).strip().lstrip('0')
if len(S) > 0 and S[0] == '-':
S = '-' + S[1:].lstrip('0')
row.append(S)
self.result.append(row)
if sPreNext == '2':
QTimer.singleShot(주문지연, lambda: self.Request(_repeat=2))
else:
df = DataFrame(data=self.result, columns=self.columns)
df['업종코드'] = self.업종코드
df.to_csv("업종정보.csv")
self.model.update(df[['업종코드'] + self.columns])
for i in range(len(self.columns)):
self.tableView.resizeColumnToContents(i)
def Request(self, _repeat=0):
self.업종코드 = self.lineEdit_code.text().strip()
기준일자 = self.lineEdit_date.text().strip().replace('-', '')
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "업종코드", self.업종코드)
ret = self.kiwoom.dynamicCall('CommRqData(QString, QString, int, QString)', "업종정보조회", "OPT20003", _repeat,
'{:04d}'.format(self.sScreenNo))
def inquiry(self):
self.result = []
self.Request(_repeat=0)
Ui_업종별주가조회, QtBaseClass_업종별주가조회 = uic.loadUiType("./UI/업종별주가조회.ui")
class 화면_업종별주가(QDialog, Ui_업종별주가조회):
def __init__(self, sScreenNo, kiwoom=None, parent=None):
super(화면_업종별주가, self).__init__(parent)
self.setAttribute(Qt.WA_DeleteOnClose)
self.setupUi(self)
self.setWindowTitle('업종별 주가 조회')
self.sScreenNo = sScreenNo
self.kiwoom = kiwoom
self.parent = parent
self.model = PandasModel()
self.tableView.setModel(self.model)
self.columns = ['현재가', '거래량', '일자', '시가', '고가', '저가', '거래대금', '대업종구분', '소업종구분', '종목정보', '수정주가이벤트', '전일종가']
self.result = []
d = today
self.lineEdit_date.setText(str(d))
def KiwoomConnect(self):
self.kiwoom.OnReceiveMsg[str, str, str, str].connect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].connect(self.OnReceiveTrData)
def KiwoomDisConnect(self):
self.kiwoom.OnReceiveMsg[str, str, str, str].disconnect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].disconnect(self.OnReceiveTrData)
def OnReceiveMsg(self, sScrNo, sRQName, sTrCode, sMsg):
logger.debug('OnReceiveMsg [%s] [%s] [%s] [%s]' % (sScrNo, sRQName, sTrCode, sMsg))
def OnReceiveTrData(self, sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage,
sSPlmMsg):
# logger.debug('OnReceiveTrData [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] ' % (sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg))
if self.sScreenNo != int(sScrNo):
return
if sRQName == "업종일봉조회":
종목코드 = ''
cnt = self.kiwoom.dynamicCall('GetRepeatCnt(QString, QString)', sTRCode, sRQName)
for i in range(0, cnt):
row = []
for j in self.columns:
S = self.kiwoom.dynamicCall('CommGetData(QString, QString, QString, int, QString)', sTRCode, "",
sRQName, i, j).strip().lstrip('0')
if len(S) > 0 and S[0] == '-':
S = '-' + S[1:].lstrip('0')
row.append(S)
self.result.append(row)
if sPreNext == '2':
QTimer.singleShot(주문지연, lambda: self.Request(_repeat=2))
else:
df = DataFrame(data=self.result, columns=self.columns)
df['업종코드'] = self.업종코드
self.model.update(df[['업종코드'] + self.columns])
for i in range(len(self.columns)):
self.tableView.resizeColumnToContents(i)
def Request(self, _repeat=0):
self.업종코드 = self.lineEdit_code.text().strip()
기준일자 = self.lineEdit_date.text().strip().replace('-', '')
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "업종코드", self.업종코드)
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "기준일자", 기준일자)
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "수정주가구분", '1')
ret = self.kiwoom.dynamicCall('CommRqData(QString, QString, int, QString)', "업종일봉조회", "OPT20006", _repeat,
'{:04d}'.format(self.sScreenNo))
def inquiry(self):
self.result = []
self.Request(_repeat=0)
class 화면_종목별투자자(QDialog, Ui_일자별주가조회):
def __init__(self, sScreenNo, kiwoom=None, parent=None):
super(화면_종목별투자자, self).__init__(parent)
self.setAttribute(Qt.WA_DeleteOnClose)
self.setupUi(self)
self.setWindowTitle('종목별 투자자 조회')
self.sScreenNo = sScreenNo
self.kiwoom = kiwoom
self.parent = parent
self.model = PandasModel()
self.tableView.setModel(self.model)
self.columns = ['일자', '현재가', '전일대비', '누적거래대금', '개인투자자', '외국인투자자', '기관계', '금융투자', '보험', '투신', '기타금융', '은행',
'연기금등', '국가', '내외국인', '사모펀드', '기타법인']
self.result = []
d = today
self.lineEdit_date.setText(str(d))
def KiwoomConnect(self):
self.kiwoom.OnReceiveMsg[str, str, str, str].connect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].connect(self.OnReceiveTrData)
def KiwoomDisConnect(self):
self.kiwoom.OnReceiveMsg[str, str, str, str].disconnect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].disconnect(self.OnReceiveTrData)
def OnReceiveMsg(self, sScrNo, sRQName, sTrCode, sMsg):
logger.debug('OnReceiveMsg [%s] [%s] [%s] [%s]' % (sScrNo, sRQName, sTrCode, sMsg))
def OnReceiveTrData(self, sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg):
# logger.debug('OnReceiveTrData [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] ' % (sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg))
if self.sScreenNo != int(sScrNo):
return
if sRQName == "종목별투자자조회":
cnt = self.kiwoom.dynamicCall('GetRepeatCnt(QString, QString)', sTRCode, sRQName)
for i in range(0, cnt):
row = []
for j in self.columns:
S = self.kiwoom.dynamicCall('CommGetData(QString, QString, QString, int, QString)', sTRCode, "",
sRQName, i, j).strip().lstrip('0')
row.append(S)
self.result.append(row)
if sPreNext == '2':
QTimer.singleShot(주문지연, lambda: self.Request(_repeat=2))
else:
df = DataFrame(data=self.result, columns=self.columns)
df['종목코드'] = self.lineEdit_code.text().strip()
df_new = df[['종목코드'] + self.columns]
self.model.update(df_new)
for i in range(len(self.columns)):
self.tableView.resizeColumnToContents(i)
def Request(self, _repeat=0):
종목코드 = self.lineEdit_code.text().strip()
기준일자 = self.lineEdit_date.text().strip().replace('-', '')
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "일자", 기준일자)
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "종목코드", 종목코드)
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, int)', "금액수량구분", 2) # 1:금액, 2:수량
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, int)', "매매구분", 0) # 0:순매수, 1:매수, 2:매도
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, int)', "단위구분", 1) # 1000:천주, 1:단주
ret = self.kiwoom.dynamicCall('CommRqData(QString, QString, int, QString)', "종목별투자자조회", "OPT10060", _repeat,
'{:04d}'.format(self.sScreenNo))
def inquiry(self):
self.result = []
self.Request(_repeat=0)
Ui_TradeShortTerm, QtBaseClass_TradeShortTerm = uic.loadUiType("./UI/TradeShortTerm.ui")
class 화면_TradeShortTerm(QDialog, Ui_TradeShortTerm):
def __init__(self, parent):
super(화면_TradeShortTerm, self).__init__(parent)
self.setupUi(self)
self.parent = parent
self.model = PandasModel()
self.tableView.setModel(self.model)
self.result = []
def inquiry(self):
# Google spreadsheet 사용
try:
self.data = import_googlesheet()
print(self.data)
self.model.update(self.data)
for i in range(len(self.data.columns)):
self.tableView.resizeColumnToContents(i)
except Exception as e:
print('화면_TradeShortTerm : inquiry Error ', e)
logger.error('화면_TradeShortTerm : inquiry Error : %s' % e)
class CTradeShortTerm(CTrade): # 로봇 추가 시 __init__ : 복사, Setting, 초기조건:전략에 맞게, 데이터처리~Run:복사
def __init__(self, sName, UUID, kiwoom=None, parent=None):
self.sName = sName
self.UUID = UUID
self.sAccount = None
self.kiwoom = kiwoom
self.parent = parent
self.running = False
self.주문결과 = dict()
self.주문번호_주문_매핑 = dict()
self.주문실행중_Lock = dict()
self.portfolio = dict()
self.실시간종목리스트 = []
self.매수모니터링체크 = False
self.SmallScreenNumber = 9999
self.d = today
# 구글 스프레드시트에서 읽은 DataFrame에서 로봇별 종목리스트 셋팅
def set_stocklist(self, data):
self.Stocklist = dict()
self.Stocklist['컬럼명'] = list(data.columns)
for 종목코드 in data['종목코드'].unique():
temp_list = data[data['종목코드'] == 종목코드].values[0]
self.Stocklist[종목코드] = {
'번호': temp_list[self.Stocklist['컬럼명'].index('번호')],
'종목명': temp_list[self.Stocklist['컬럼명'].index('종목명')],
'종목코드': 종목코드,
'시장': temp_list[self.Stocklist['컬럼명'].index('시장')],
'투자비중': float(temp_list[self.Stocklist['컬럼명'].index('비중')]), # 저장 후 setting 함수에서 전략의 단위투자금을 곱함
'시가위치': list(map(float, temp_list[self.Stocklist['컬럼명'].index('시가위치')].split(','))),
'매수가': list(
int(float(temp_list[list(data.columns).index(col)].replace(',', ''))) for col in data.columns if
'매수가' in col and temp_list[list(data.columns).index(col)] != ''),
'매도전략': temp_list[self.Stocklist['컬럼명'].index('기본매도전략')],
'매도가': list(
int(float(temp_list[list(data.columns).index(col)].replace(',', ''))) for col in data.columns if
'목표가' in col and temp_list[list(data.columns).index(col)] != '')
}
return self.Stocklist
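# Hedged sketch of the spreadsheet DataFrame expected by set_stocklist (comment only; the column names
# are inferred from the lookups above and the row values are placeholders):
#   data = pd.DataFrame([{'번호': '1.001', '종목명': '샘플종목', '종목코드': '123456', '시장': 'KOSDAQ',
#                         '비중': '10', '시가위치': '-5,5', '매수가1': '1,000', '매수가2': '', '매수가3': '',
#                         '기본매도전략': '1', '목표가1': '1,200'}])
#   self.set_stocklist(data)  # -> {'컬럼명': [...], '123456': {'매수가': [1000], '매도가': [1200], ...}}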
# RobotAdd 함수에서 초기화 다음 셋팅 실행해서 설정값 넘김
def Setting(self, sScreenNo, 매수방법='00', 매도방법='03', 종목리스트=pd.DataFrame()):
try:
self.sScreenNo = sScreenNo
self.실시간종목리스트 = []
self.매수방법 = 매수방법
self.매도방법 = 매도방법
self.종목리스트 = 종목리스트
self.Stocklist = self.set_stocklist(self.종목리스트) # 번호, 종목명, 종목코드, 시장, 비중, 시가위치, 매수가, 매도전략, 매도가
self.Stocklist['전략'] = {
'단위투자금': '',
'모니터링종료시간': '',
'보유일': '',
'투자금비중': '',
'매도구간별조건': [],
'전략매도가': [],
}
row_data = shortterm_strategy_sheet.get_all_values()
for data in row_data:
if data[0] == '단위투자금':
self.Stocklist['전략']['단위투자금'] = int(data[1])
elif data[0] == '매수모니터링 종료시간':
if len(data[1][:-3]) == 1:
data[1] = '0' + data[1]
self.Stocklist['전략']['모니터링종료시간'] = data[1] + ':00'
elif data[0] == '보유일':
self.Stocklist['전략']['보유일'] = int(data[1])
elif data[0] == '투자금 비중':
self.Stocklist['전략']['투자금비중'] = float(data[1][:-1])
# elif data[0] == '손절율':
# self.Stocklist['전략']['매도구간별조건'].append(float(data[1][:-1]))
# elif data[0] == '시가 위치':
# self.Stocklist['전략']['시가위치'] = list(map(int, data[1].split(',')))
elif '구간' in data[0]:
if data[0][-1] != '1' and data[0][-1] != '2':
self.Stocklist['전략']['매도구간별조건'].append(float(data[1][:-1]))
elif '손절가' == data[0]:
self.Stocklist['전략']['전략매도가'].append(float(data[1].replace('%', '')))
elif '본전가' == data[0]:
self.Stocklist['전략']['전략매도가'].append(float(data[1].replace('%', '')))
elif '익절가' in data[0]:
self.Stocklist['전략']['전략매도가'].append(float(data[1].replace('%', '')))
self.Stocklist['전략']['매도구간별조건'].insert(0, self.Stocklist['전략']['전략매도가'][0]) # 손절가
self.Stocklist['전략']['매도구간별조건'].insert(1, self.Stocklist['전략']['전략매도가'][1]) # 본전가
for code in self.Stocklist.keys():
if code == '컬럼명' or code == '전략':
continue
else:
self.Stocklist[code]['단위투자금'] = int(
self.Stocklist[code]['투자비중'] * self.Stocklist['전략']['단위투자금'])
self.Stocklist[code]['시가체크'] = False
self.Stocklist[code]['매수상한도달'] = False
self.Stocklist[code]['매수조건'] = 0
self.Stocklist[code]['매수총수량'] = 0 # 분할매수에 따른 수량체크
self.Stocklist[code]['매수수량'] = 0 # 분할매수 단위
self.Stocklist[code]['매수주문완료'] = 0 # 분할매수에 따른 매수 주문 수
self.Stocklist[code]['매수가전략'] = len(self.Stocklist[code]['매수가']) # 매수 전략에 따른 매수가 지정 수량
if self.Stocklist[code]['매도전략'] == '4':
self.Stocklist[code]['매도가'].append(self.Stocklist['전략']['전략매도가'])
print(self.Stocklist)
except Exception as e:
print('CTradeShortTerm_Setting Error :', e)
Telegram('[XTrader]CTradeShortTerm_Setting Error : %s' % e, send='mc')
logger.error('CTradeShortTerm_Setting Error : %s' % e)
# 수동 포트폴리오 생성
def manual_portfolio(self):
self.portfolio = dict()
self.Stocklist = {
'024840': {'번호': '8.030', '종목명': 'KBI메탈', '종목코드': '024840', '시장': 'KOSDAQ', '매수전략': '1', '매수가': [1468],
'매수조건': 2, '수량': 310, '매도전략': '1', '매도가': [], '매수일': '2020/08/26 09:56:54'},
'097800': {'번호': '7.099', '종목명': '윈팩', '종목코드': '097800', '시장': 'KOSDAQ', '매수전략': '1', '매수가': [3219],
'매수조건': 1, '수량': 310, '매도전략': '4', '매도가': [3700], '매수일': '2020/05/29 09:22:39'},
'297090': {'번호': '7.101', '종목명': '씨에스베어링', '종목코드': '297090', '시장': 'KOSDAQ', '매수전략': '1', '매수가': [5000],
'매수조건': 3, '수량': 15, '매도전략': '2', '매도가': [], '매수일': '2020/06/03 09:12:15'},
}
self.strategy = {'전략': {'단위투자금': 200000, '모니터링종료시간': '10:30:00', '보유일': 20,
'투자금비중': 70.0, '매도구간별조건': [-2.7, 0.3, -3.0, -4.0, -5.0, -7.0],
'전략매도가': [-2.7, 0.3, 3.0, 6.0]}}
for code in list(self.Stocklist.keys()):
self.portfolio[code] = CPortStock_ShortTerm(번호=self.Stocklist[code]['번호'], 종목코드=code,
종목명=self.Stocklist[code]['종목명'],
시장=self.Stocklist[code]['시장'],
매수가=self.Stocklist[code]['매수가'][0],
매수조건=self.Stocklist[code]['매수조건'],
보유일=self.strategy['전략']['보유일'],
매도전략=self.Stocklist[code]['매도전략'],
매도가=self.Stocklist[code]['매도가'],
매도구간별조건=self.strategy['전략']['매도구간별조건'], 매도구간=1,
수량=self.Stocklist[code]['수량'],
매수일=self.Stocklist[code]['매수일'])
# google spreadsheet 매매이력 생성
def save_history(self, code, status):
# 매매이력 sheet에 해당 종목(매수된 종목)이 있으면 row를 반환 아니면 예외처리 -> 신규 매수로 처리
# 매수 이력 : 체결처리, 매수, 미체결수량 0에서 이력 저장
# 매도 이력 : 체결처리, 매도, 미체결수량 0에서 이력 저장
if status == '매도모니터링':
row = []
row.append(self.portfolio[code].번호)
row.append(self.portfolio[code].종목명)
row.append(self.portfolio[code].매수가)
shortterm_sell_sheet.append_row(row)
try:
code_row = shortterm_history_sheet.findall(self.portfolio[code].종목명)[-1].row # 종목명이 있는 모든 셀을 찾아서 맨 아래에 있는 셀을 선택
cell = alpha_list[shortterm_history_cols.index('매도가')] + str(code_row) # 매수 이력에 있는 종목이 매도가 되었는지 확인
sell_price = shortterm_history_sheet.acell(str(cell)).value
# 매도 이력은 추가 매도(매도전략2의 경우)나 신규 매도인 경우라 매도 이력 유무와 상관없음
if status == '매도': # 매도 이력은 포트폴리오에서 종목 pop을 하므로 Stocklist 데이터 사용
cell = alpha_list[shortterm_history_cols.index('매도가')] + str(code_row)
shortterm_history_sheet.update_acell(cell, self.portfolio[code].매도체결가)
cell = alpha_list[shortterm_history_cols.index('매도수량')] + str(code_row)
수량 = shortterm_history_sheet.acell(cell).value # 분할 매도의 경우 이전 매도 수량이 기록되어 있음
if 수량 != '': self.portfolio[code].매도수량 += int(수량) # 매도수량은 주문 수량이므로 기존 수량을 합해줌
shortterm_history_sheet.update_acell(cell, self.portfolio[code].매도수량)
cell = alpha_list[shortterm_history_cols.index('매도일')] + str(code_row)
shortterm_history_sheet.update_acell(cell, datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'))
cell = alpha_list[shortterm_history_cols.index('매도전략')] + str(code_row)
shortterm_history_sheet.update_acell(cell, self.portfolio[code].매도전략)
cell = alpha_list[shortterm_history_cols.index('매도구간')] + str(code_row)
shortterm_history_sheet.update_acell(cell, self.portfolio[code].매도구간)
계산수익률 = round((self.portfolio[code].매도체결가 / self.portfolio[code].매수가 - 1) * 100, 2)
cell = alpha_list[shortterm_history_cols.index('수익률(계산)')] + str(code_row) # 수익률 계산
shortterm_history_sheet.update_acell(cell, 계산수익률)
# 매수 이력은 있으나 매도 이력이 없음 -> 매도 전 추가 매수
if sell_price == '':
if status == '매수': # 포트폴리오 데이터 사용
cell = alpha_list[shortterm_history_cols.index('매수가')] + str(code_row)
shortterm_history_sheet.update_acell(cell, self.portfolio[code].매수가)
cell = alpha_list[shortterm_history_cols.index('매수수량')] + str(code_row)
shortterm_history_sheet.update_acell(cell, self.portfolio[code].수량)
cell = alpha_list[shortterm_history_cols.index('매수일')] + str(code_row)
shortterm_history_sheet.update_acell(cell, self.portfolio[code].매수일)
cell = alpha_list[shortterm_history_cols.index('매수조건')] + str(code_row)
shortterm_history_sheet.update_acell(cell, self.portfolio[code].매수조건)
else: # 매도가가 기록되어 거래가 완료된 종목으로 판단하여 예외발생으로 신규 매수 추가함
raise Exception('매매완료 종목')
except Exception as e:
try:
# logger.debug('CTradeShortTerm_save_history Error1 : 종목명:%s, %s' % (self.portfolio[code].종목명, e))
row = []
row_buy = []
if status == '매수':
row.append(self.portfolio[code].번호)
row.append(self.portfolio[code].종목명)
row.append(self.portfolio[code].매수가)
row.append(self.portfolio[code].수량)
row.append(self.portfolio[code].매수일)
row.append(self.portfolio[code].매수조건)
shortterm_history_sheet.append_row(row)
except Exception as e:
print('CTradeShortTerm_save_history Error2 : 종목명:%s, %s' % (self.portfolio[code].종목명, e))
Telegram('[XTrader]CTradeShortTerm_save_history Error2 : 종목명:%s, %s' % (self.portfolio[code].종목명, e),
send='mc')
logger.error('CTradeShortTerm_save_history Error : 종목명:%s, %s' % (self.portfolio[code].종목명, e))
# 매수 전략별 매수 조건 확인
def buy_strategy(self, code, price):
result = False
condition = self.Stocklist[code]['매수조건'] # 초기값 0
qty = self.Stocklist[code]['매수수량'] # 초기값 0
현재가, 시가, 고가, 저가, 전일종가 = price # 시세 = [현재가, 시가, 고가, 저가, 전일종가]
매수가 = self.Stocklist[code]['매수가'] # [매수가1, 매수가2, 매수가3]
시가위치하한 = self.Stocklist[code]['시가위치'][0]
시가위치상한 = self.Stocklist[code]['시가위치'][1]
# 1. 금일시가 위치 체크(초기 한번)하여 매수조건(1~6)과 주문 수량 계산
if self.Stocklist[code]['시가체크'] == False: # 종목별로 초기에 한번만 시가 위치 체크를 하면 되므로 별도 함수 미사용
매수가.append(시가)
매수가.sort(reverse=True)
band = 매수가.index(시가) # band = 0 : 매수가1 이상, band=1: 매수가1, 2 사이, band=2: 매수가2,3 사이
매수가.remove(시가)
if band == len(매수가): # 매수가 지정한 구간보다 시가가 아래일 경우로 초기값이 result=False, condition=0 리턴
self.Stocklist[code]['시가체크'] = True
self.Stocklist[code]['매수조건'] = 0
self.Stocklist[code]['매수수량'] = 0
return False, 0, 0
else:
# 단위투자금으로 매수가능한 총 수량 계산, band = 0 : 매수가1, band=1: 매수가2, band=2: 매수가3 로 계산
self.Stocklist[code]['매수총수량'] = self.Stocklist[code]['단위투자금'] // 매수가[band]
if band == 0: # 시가가 매수가1보다 높은 경우
# 시가가 매수가1의 시가범위에 포함 : 조건 1, 2, 3
if 매수가[band] * (1 + 시가위치하한 / 100) <= 시가 and 시가 < 매수가[band] * (1 + 시가위치상한 / 100):
condition = len(매수가)
self.Stocklist[code]['매수가전략'] = len(매수가)
qty = self.Stocklist[code]['매수총수량'] // condition
else: # 시가 위치에 미포함
self.Stocklist[code]['시가체크'] = True
self.Stocklist[code]['매수조건'] = 0
self.Stocklist[code]['매수수량'] = 0
return False, 0, 0
else: # 시가가 매수가 중간인 경우 - 매수가1&2사이(band 1) : 조건 4,5 / 매수가2&3사이(band 2) : 조건 6
for i in range(band): # band 1일 경우 매수가 1은 불필요하여 삭제, band 2 : 매수가 1, 2 삭제(band수 만큼 삭제 실행)
매수가.pop(0)
if 매수가[0] * (1 + 시가위치하한 / 100) <= 시가: # 시가범위 포함
# 조건 4 = 매수가길이 1 + band 1 + 2(=band+1) -> 4 = 1 + 2*1 + 1
# 조건 5 = 매수가길이 2 + band 1 + 2(=band+1) -> 5 = 2 + 2*1 + 1
# 조건 6 = 매수가길이 1 + band 2 + 3(=band+1) -> 6 = 1 + 2*2 + 1
condition = len(매수가) + (2 * band) + 1
self.Stocklist[code]['매수가전략'] = len(매수가)
qty = self.Stocklist[code]['매수총수량'] // (condition % 2 + 1)
else:
self.Stocklist[code]['시가체크'] = True
self.Stocklist[code]['매수조건'] = 0
self.Stocklist[code]['매수수량'] = 0
return False, 0, 0
self.Stocklist[code]['시가체크'] = True
self.Stocklist[code]['매수조건'] = condition
self.Stocklist[code]['매수수량'] = qty
else: # 시가 위치 체크를 한 두번째 데이터 이후에는 condition이 0이면 바로 매수 불만족 리턴시킴
if condition == 0: # condition 0은 매수 조건 불만족
return False, 0, 0
# 매수조건 확정, 매수 수량 계산 완료
# 매수상한에 미도달한 상태로 매수가로 내려왔을 때 매수
# 현재가가 해당조건에서의 시가위치 상한 이상으로 오르면 매수상한도달을 True로 해서 매수하지 않게 함
if 현재가 >= 매수가[0] * (1 + 시가위치상한 / 100): self.Stocklist[code]['매수상한도달'] = True
if self.Stocklist[code]['매수주문완료'] < self.Stocklist[code]['매수가전략'] and self.Stocklist[code]['매수상한도달'] == False:
if 현재가 == 매수가[0]:
result = True
self.Stocklist[code]['매수주문완료'] += 1
print("매수모니터링 만족_종목:%s, 시가:%s, 조건:%s, 현재가:%s, 체크결과:%s, 수량:%s" % (
self.Stocklist[code]['종목명'], 시가, condition, 현재가, result, qty))
logger.debug("매수모니터링 만족_종목:%s, 시가:%s, 조건:%s, 현재가:%s, 체크결과:%s, 수량:%s" % (
self.Stocklist[code]['종목명'], 시가, condition, 현재가, result, qty))
return result, condition, qty
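# Hedged worked example of the condition numbering above (comment only): with three 매수가 levels and
# the opening price sitting between 매수가1 and 매수가2, band == 1 and 매수가1 is popped, so
# condition = len(매수가) + 2*band + 1 = 2 + 2 + 1 = 5 and qty = 매수총수량 // (5 % 2 + 1) = 매수총수량 // 2.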
# 매도 구간 확인
def profit_band_check(self, 현재가, 매수가):
band_list = [0, 3, 5, 10, 15, 25]
# print('현재가, 매수가', 현재가, 매수가)
ratio = round((현재가 - 매수가) / 매수가 * 100, 2)
# print('ratio', ratio)
if ratio < 3:
return 1
elif ratio in band_list:
return band_list.index(ratio) + 1
else:
band_list.append(ratio)
band_list.sort()
band = band_list.index(ratio)
band_list.remove(ratio)
return band
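# Hedged worked example of profit_band_check (comment only): 매수가 10,000원 and 현재가 10,700원 give
# ratio = round((10700 - 10000) / 10000 * 100, 2) = 7.0; 7.0 is not in [0, 3, 5, 10, 15, 25], so it is
# inserted and sorted to [0, 3, 5, 7.0, 10, 15, 25], where its index 3 is returned as the band.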
# 매도 전략별 매도 조건 확인
def sell_strategy(self, code, price):
# print('%s 매도 조건 확인' % code)
try:
result = False
band = self.portfolio[code].매도구간 # 이전 매도 구간 받음
매도방법 = self.매도방법 # '03' : 시장가
qty_ratio = 1 # 매도 수량 결정 : 보유수량 * qty_ratio
현재가, 시가, 고가, 저가, 전일종가 = price # 시세 = [현재가, 시가, 고가, 저가, 전일종가]
매수가 = self.portfolio[code].매수가
# 전략 1, 2, 3과 4 별도 체크
strategy = self.portfolio[code].매도전략
# 전략 1, 2, 3
if strategy != '4':
# 매도를 위한 수익률 구간 체크(매수가 대비 현재가의 수익률 조건에 다른 구간 설정)
new_band = self.profit_band_check(현재가, 매수가)
if (hogacal(시가, 0, self.portfolio[code].시장, '상한가')) <= 현재가:
band = 7
if band < new_band: # 이전 구간보다 현재 구간이 높을 경우(시세가 올라간 경우)만
band = new_band # 구간을 현재 구간으로 변경(반대의 경우는 구간 유지)
if band == 1 and 현재가 <= 매수가 * (1 + (self.portfolio[code].매도구간별조건[0] / 100)):
result = True
elif band == 2 and 현재가 <= 매수가 * (1 + (self.portfolio[code].매도구간별조건[1] / 100)):
result = True
elif band == 3 and 현재가 <= 고가 * (1 + (self.portfolio[code].매도구간별조건[2] / 100)):
result = True
elif band == 4 and 현재가 <= 고가 * (1 + (self.portfolio[code].매도구간별조건[3] / 100)):
result = True
elif band == 5 and 현재가 <= 고가 * (1 + (self.portfolio[code].매도구간별조건[4] / 100)):
result = True
elif band == 6 and 현재가 <= 고가 * (1 + (self.portfolio[code].매도구간별조건[5] / 100)):
result = True
elif band == 7 and 현재가 >= (hogacal(시가, -3, self.Stocklist[code]['시장'], '상한가')):
매도방법 = '00' # 지정가
result = True
self.portfolio[code].매도구간 = band # 포트폴리오에 매도구간 업데이트
try:
if strategy == '2' or strategy == '3': # 매도전략 2(기존 5)
if strategy == '2':
목표가 = self.portfolio[code].매도가[0]
elif strategy == '3':
목표가 = (hogacal(시가 * 1.1, 0, self.Stocklist[code]['시장'], '현재가'))
매도조건 = self.portfolio[code].매도조건 # 매도가 실행된 조건 '': 매도 전, 'B':구간매도, 'T':목표가매도
target_band = self.profit_band_check(목표가, 매수가)
if band < target_band: # 현재가구간이 목표가구간 미만일때 전량매도
qty_ratio = 1
else: # 현재가구간이 목표가구간 이상일 때
if 현재가 == 목표가: # 목표가 도달 시 절반 매도
self.portfolio[code].목표도달 = True # 목표가 도달 여부 True
if 매도조건 == '': # 매도이력이 없는 경우 목표가매도 'T', 절반 매도
self.portfolio[code].매도조건 = 'T'
result = True
if self.portfolio[code].수량 == 1:
qty_ratio = 1
else:
qty_ratio = 0.5
elif 매도조건 == 'B': # 구간 매도 이력이 있을 경우 절반매도가 된 상태이므로 남은 전량매도
result = True
qty_ratio = 1
elif 매도조건 == 'T': # 목표가 매도 이력이 있을 경우 매도미실행
result = False
else: # 현재가가 목표가가 아닐 경우 구간 매도 실행(매도실행여부는 결정된 상태)
if self.portfolio[code].목표도달 == False: # 목표가 도달을 못한 경우면 전량매도
qty_ratio = 1
else:
if 매도조건 == '': # 매도이력이 없는 경우 구간매도 'B', 절반 매도
self.portfolio[code].매도조건 = 'B'
if self.portfolio[code].수량 == 1:
qty_ratio = 1
else:
qty_ratio = 0.5
elif 매도조건 == 'B': # 구간 매도 이력이 있을 경우 매도미실행
result = False
elif 매도조건 == 'T': # 목표가 매도 이력이 있을 경우 전량매도
qty_ratio = 1
except Exception as e:
print('sell_strategy 매도전략 2 Error :', e)
logger.error('CTradeShortTerm_sell_strategy 종목 : %s 매도전략 2 Error : %s' % (code, e))
Telegram('[XTrader]CTradeShortTerm_sell_strategy 종목 : %s 매도전략 2 Error : %s' % (code, e), send='mc')
result = False
return 매도방법, result, qty_ratio
# print('종목코드 : %s, 현재가 : %s, 시가 : %s, 고가 : %s, 매도구간 : %s, 결과 : %s' % (code, 현재가, 시가, 고가, band, result))
return 매도방법, result, qty_ratio
# 전략 4(지정가 00 매도)
else:
매도방법 = '00' # 지정가
try:
# 전략 4의 매도가 = [목표가(원), [손절가(%), 본전가(%), 1차익절가(%), 2차익절가(%)]]
# 1. 매수 후 손절가까지 하락시 매도주문 -> 손절가, 전량매도로 끝
if 현재가 <= 매수가 * (1 + self.portfolio[code].매도가[1][0] / 100):
self.portfolio[code].매도구간 = 0
result = True
qty_ratio = 1
# 2. 1차익절가 도달시 매도주문 -> 1차익절가, 1/3 매도
elif self.portfolio[code].익절가1도달 == False and 현재가 >= 매수가 * (
1 + self.portfolio[code].매도가[1][2] / 100):
self.portfolio[code].매도구간 = 1
self.portfolio[code].익절가1도달 = True
result = True
if self.portfolio[code].수량 == 1:
qty_ratio = 1
elif self.portfolio[code].수량 == 2:
qty_ratio = 0.5
else:
qty_ratio = 0.3
# 3. 2차익절가 도달못하고 본전가까지 하락 또는 고가 -3%까지시 매도주문 -> 1차익절가, 나머지 전량 매도로 끝
elif self.portfolio[code].익절가1도달 == True and self.portfolio[code].익절가2도달 == False and (
(현재가 <= 매수가 * (1 + self.portfolio[code].매도가[1][1] / 100)) or (현재가 <= 고가 * 0.97)):
self.portfolio[code].매도구간 = 1.5
result = True
qty_ratio = 1
# 4. 2차 익절가 도달 시 매도주문 -> 2차 익절가, 1/3 매도
elif self.portfolio[code].익절가1도달 == True and self.portfolio[code].익절가2도달 == False and 현재가 >= 매수가 * (
1 + self.portfolio[code].매도가[1][3] / 100):
self.portfolio[code].매도구간 = 2
self.portfolio[code].익절가2도달 = True
result = True
if self.portfolio[code].수량 == 1:
qty_ratio = 1
else:
qty_ratio = 0.5
# 5. 목표가 도달못하고 2차익절가까지 하락 시 매도주문 -> 2차익절가, 나머지 전량 매도로 끝
elif self.portfolio[code].익절가2도달 == True and self.portfolio[code].목표가도달 == False and (
(현재가 <= 매수가 * (1 + self.portfolio[code].매도가[1][2] / 100)) or (현재가 <= 고가 * 0.97)):
self.portfolio[code].매도구간 = 2.5
result = True
qty_ratio = 1
# 6. 목표가 도달 시 매도주문 -> 목표가, 나머지 전량 매도로 끝
elif self.portfolio[code].목표가도달 == False and 현재가 >= self.portfolio[code].매도가[0]:
self.portfolio[code].매도구간 = 3
self.portfolio[code].목표가도달 = True
result = True
qty_ratio = 1
return 매도방법, result, qty_ratio
except Exception as e:
print('sell_strategy 매도전략 4 Error :', e)
logger.error('CTradeShortTerm_sell_strategy 종목 : %s 매도전략 4 Error : %s' % (code, e))
Telegram('[XTrader]CTradeShortTerm_sell_strategy 종목 : %s 매도전략 4 Error : %s' % (code, e), send='mc')
result = False
return 매도방법, result, qty_ratio
except Exception as e:
print('CTradeShortTerm_sell_strategy Error ', e)
Telegram('[XTrader]CTradeShortTerm_sell_strategy Error : %s' % e, send='mc')
logger.error('CTradeShortTerm_sell_strategy Error : %s' % e)
result = False
qty_ratio = 1
return 매도방법, result, qty_ratio
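    # Illustrative walk-through (hypothetical numbers, not taken from any sheet): how the strategy-4
    # 매도가 structure [목표가(원), [손절가(%), 본전가(%), 1차익절가(%), 2차익절가(%)]] maps to price levels.
    # Assume 매수가 = 10,000 and 매도가 = [11,500, [-2.0, 0.0, 3.0, 7.0]]:
    #   손절가 (stop-loss)          10,000 * (1 - 2.0/100) =  9,800  -> sell everything
    #   본전가 (break-even)         10,000 * (1 + 0.0/100) = 10,000  -> sell the rest after the 1st take-profit
    #   1차익절가 (1st take-profit)  10,000 * (1 + 3.0/100) = 10,300  -> sell about 1/3
    #   2차익절가 (2nd take-profit)  10,000 * (1 + 7.0/100) = 10,700  -> sell about half of the remainder
    #   목표가 (target)             11,500                            -> sell whatever is left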
# 보유일 전략 : 보유기간이 보유일 이상일 경우 전량 매도 실행(Mainwindow 타이머에서 시간 체크)
def hold_strategy(self):
if self.holdcheck == True:
print('보유일 만기 매도 체크')
try:
for code in list(self.portfolio.keys()):
보유기간 = holdingcal(self.portfolio[code].매수일)
print('종목명 : %s, 보유일 : %s, 보유기간 : %s' % (self.portfolio[code].종목명, self.portfolio[code].보유일, 보유기간))
if 보유기간 >= int(self.portfolio[code].보유일) and self.주문실행중_Lock.get('S_%s' % code) is None and \
self.portfolio[code].수량 != 0:
self.portfolio[code].매도구간 = 0
(result, order) = self.정량매도(sRQName='S_%s' % code, 종목코드=code, 매도가=self.portfolio[code].매수가,
수량=self.portfolio[code].수량)
if result == True:
self.주문실행중_Lock['S_%s' % code] = True
Telegram('[XTrader]정량매도(보유일만기) : 종목코드=%s, 종목명=%s, 수량=%s' % (
code, self.portfolio[code].종목명, self.portfolio[code].수량))
logger.info('정량매도(보유일만기) : 종목코드=%s, 종목명=%s, 수량=%s' % (
code, self.portfolio[code].종목명, self.portfolio[code].수량))
else:
Telegram('[XTrader]정액매도실패(보유일만기) : 종목코드=%s, 종목명=%s, 수량=%s' % (
code, self.portfolio[code].종목명, self.portfolio[code].수량))
logger.info('정량매도실패(보유일만기) : 종목코드=%s, 종목명=%s, 수량=%s' % (
code, self.portfolio[code].종목명, self.portfolio[code].수량))
except Exception as e:
print("hold_strategy Error :", e)
# 포트폴리오 생성
def set_portfolio(self, code, buyprice, condition):
try:
self.portfolio[code] = CPortStock_ShortTerm(번호=self.Stocklist[code]['번호'], 종목코드=code,
종목명=self.Stocklist[code]['종목명'],
시장=self.Stocklist[code]['시장'], 매수가=buyprice,
매수조건=condition, 보유일=self.Stocklist['전략']['보유일'],
매도전략=self.Stocklist[code]['매도전략'],
매도가=self.Stocklist[code]['매도가'],
매도구간별조건=self.Stocklist['전략']['매도구간별조건'],
매수일=datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'))
self.Stocklist[code]['매수일'] = datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S') # 매매이력 업데이트를 위해 매수일 추가
except Exception as e:
print('CTradeShortTerm_set_portfolio Error ', e)
Telegram('[XTrader]CTradeShortTerm_set_portfolio Error : %s' % e, send='mc')
logger.error('CTradeShortTerm_set_portfolio Error : %s' % e)
# Robot_Run이 되면 실행됨 - 매수/매도 종목을 리스트로 저장
def 초기조건(self, codes):
# 매수총액 계산하기
# 금일매도종목 리스트 변수 초기화
# 매도할종목 : 포트폴리오에 있던 종목 추가
# 매수할종목 : 구글에서 받은 종목 추가
self.parent.statusbar.showMessage("[%s] 초기조건준비" % (self.sName))
self.금일매도종목 = [] # 장 마감 후 금일 매도한 종목에 대해서 매매이력 정리 업데이트(매도가, 손익률 등)
self.매도할종목 = []
self.매수할종목 = []
self.매수총액 = 0
self.holdcheck = False
for code in codes: # 구글 시트에서 import된 매수 모니커링 종목은 '매수할종목'에 추가
self.매수할종목.append(code)
# 포트폴리오에 있는 종목은 매도 관련 전략 재확인(구글시트) 및 '매도할종목'에 추가
if len(self.portfolio) > 0:
row_data = shortterm_sell_sheet.get_all_values()
idx_holding = row_data[0].index('보유일')
idx_strategy = row_data[0].index('매도전략')
idx_loss = row_data[0].index('손절가')
idx_sellprice = row_data[0].index('목표가')
for row in row_data[1:]:
code, name, market = get_code(row[1]) # 종목명으로 종목코드, 종목명, 시장 받아서(get_code 함수) 추가
if code in list(self.portfolio.keys()):
self.portfolio[code].보유일 = row[idx_holding]
self.portfolio[code].매도전략 = row[idx_strategy]
self.portfolio[code].매도가 = [] # 매도 전략 변경에 따라 매도가 초기화
# 매도구간별조건 = [손절가(%), 본전가(%), 구간3 고가대비(%), 구간4 고가대비(%), 구간5 고가대비(%), 구간6 고가대비(%)]
self.portfolio[code].매도구간별조건 = []
self.portfolio[code].매도구간별조건.append(round(((int(float(row[idx_loss].replace(',', ''))) / self.portfolio[code].매수가) - 1) * 100, 1)) # 손절가를 퍼센트로 변환하여 업데이트
for idx in range(1, len(self.Stocklist['전략']['매도구간별조건'])): # Stocklist의 매도구간별조건 전체를 바로 append할 경우 모든 종목이 동일한 값으로 들어감
self.portfolio[code].매도구간별조건.append(self.Stocklist['전략']['매도구간별조건'][idx])
if self.portfolio[code].매도전략 == '4': # 매도가 = [목표가(원), [손절가(%), 본전가(%), 1차익절가(%), 2차익절가(%)]]
self.portfolio[code].매도가.append(int(float(row[idx_sellprice].replace(',', ''))))
self.portfolio[code].매도가.append([])
for idx in range(len(self.Stocklist['전략']['전략매도가'])): # Stocklist의 전략매도가 전체를 바로 append할 경우 모든 종목이 동일한 값으로 들어감
self.portfolio[code].매도가[1].append(self.Stocklist['전략']['전략매도가'][idx])
self.portfolio[code].매도가[1][0] = self.portfolio[code].매도구간별조건[0] # float(row[idx_loss].replace('%', ''))
self.portfolio[code].sellcount = 0
self.portfolio[code].매도단위수량 = 0 # 전략4의 기본 매도 단위는 보유수량의 1/3
self.portfolio[code].익절가1도달 = False
self.portfolio[code].익절가2도달 = False
self.portfolio[code].목표가도달 = False
else:
if self.portfolio[code].매도전략 == '2' or self.portfolio[code].매도전략 == '3':
self.portfolio[code].목표도달 = False # 목표가(매도가) 도달 체크(False 상태로 구간 컷일경우 전량 매도)
self.portfolio[code].매도조건 = '' # 구간매도 : B, 목표매도 : T
for port_code in list(self.portfolio.keys()):
# 로봇 시작 시 포트폴리오 종목의 매도구간(전일 매도모니터링)을 1로 초기화
# 구간이 내려가는 건 반영하지 않으므로 초기화를 시켜서 다시 구간 체크 시작하기 위함
self.portfolio[port_code].매도구간 = 1 # 매도 구간은 로봇 실행 시 마다 초기화시킴
# 매수총액계산
self.매수총액 += (self.portfolio[port_code].매수가 * self.portfolio[port_code].수량)
# 포트폴리오에 있는 종목이 구글에서 받아서 만든 Stocklist에 없을 경우만 추가함
# 이 조건이 없을 경우 구글에서 받은 전략들이 아닌 과거 전략이 포트폴리오에서 넘어감
# 근데 포트폴리오에 있는 종목을 왜 Stocklist에 넣어야되는지 모르겠음(내가 하고도...)
if port_code not in list(self.Stocklist.keys()):
self.Stocklist[port_code] = {
'번호': self.portfolio[port_code].번호,
'종목명': self.portfolio[port_code].종목명,
'종목코드': self.portfolio[port_code].종목코드,
'시장': self.portfolio[port_code].시장,
'매수조건': self.portfolio[port_code].매수조건,
'매수가': self.portfolio[port_code].매수가,
'매도전략': self.portfolio[port_code].매도전략,
'매도가': self.portfolio[port_code].매도가
}
self.매도할종목.append(port_code)
# for stock in df_keeplist['종목번호'].values: # 보유 종목 체크해서 매도 종목에 추가 → 로봇이 두개 이상일 경우 중복되므로 미적용
# self.매도할종목.append(stock)
# 종목명 = df_keeplist[df_keeplist['종목번호']==stock]['종목명'].values[0]
# 매입가 = df_keeplist[df_keeplist['종목번호']==stock]['매입가'].values[0]
# 보유수량 = df_keeplist[df_keeplist['종목번호']==stock]['보유수량'].values[0]
# print('종목코드 : %s, 종목명 : %s, 매입가 : %s, 보유수량 : %s' %(stock, 종목명, 매입가, 보유수량))
# self.portfolio[stock] = CPortStock_ShortTerm(종목코드=stock, 종목명=종목명, 매수가=매입가, 수량=보유수량, 매수일='')
def 실시간데이터처리(self, param):
try:
if self.running == True:
체결시간 = '%s %s:%s:%s' % (str(self.d), param['체결시간'][0:2], param['체결시간'][2:4], param['체결시간'][4:])
종목코드 = param['종목코드']
현재가 = abs(int(float(param['현재가'])))
전일대비 = int(float(param['전일대비']))
등락률 = float(param['등락률'])
매도호가 = abs(int(float(param['매도호가'])))
매수호가 = abs(int(float(param['매수호가'])))
누적거래량 = abs(int(float(param['누적거래량'])))
시가 = abs(int(float(param['시가'])))
고가 = abs(int(float(param['고가'])))
저가 = abs(int(float(param['저가'])))
거래회전율 = abs(float(param['거래회전율']))
시가총액 = abs(int(float(param['시가총액'])))
종목명 = self.parent.CODE_POOL[종목코드][1] # pool[종목코드] = [시장구분, 종목명, 주식수, 전일종가, 시가총액]
전일종가 = self.parent.CODE_POOL[종목코드][3]
시세 = [현재가, 시가, 고가, 저가, 전일종가]
self.parent.statusbar.showMessage("[%s] %s %s %s %s" % (체결시간, 종목코드, 종목명, 현재가, 전일대비))
self.wr.writerow([체결시간, 종목코드, 종목명, 현재가, 전일대비])
# 매수 조건
# 매수모니터링 종료 시간 확인
if current_time < self.Stocklist['전략']['모니터링종료시간']:
if 종목코드 in self.매수할종목 and 종목코드 not in self.금일매도종목:
# 매수총액 + 종목단위투자금이 투자총액보다 작음 and 매수주문실행중Lock에 없음 -> 추가매수를 위해서 and 포트폴리오에 없음 조건 삭제
if (self.매수총액 + self.Stocklist[종목코드]['단위투자금'] < self.투자총액) and self.주문실행중_Lock.get(
'B_%s' % 종목코드) is None and len(
self.Stocklist[종목코드]['매수가']) > 0: # and self.portfolio.get(종목코드) is None
# 매수 전략별 모니터링 체크
buy_check, condition, qty = self.buy_strategy(종목코드, 시세)
if buy_check == True and (self.Stocklist[종목코드]['단위투자금'] // 현재가 > 0):
(result, order) = self.정량매수(sRQName='B_%s' % 종목코드, 종목코드=종목코드, 매수가=현재가, 수량=qty)
if result == True:
if self.portfolio.get(종목코드) is None: # 포트폴리오에 없으면 신규 저장
self.set_portfolio(종목코드, 현재가, condition)
self.주문실행중_Lock['B_%s' % 종목코드] = True
Telegram('[XTrader]매수주문 : 종목코드=%s, 종목명=%s, 매수가=%s, 매수조건=%s, 매수수량=%s' % (
종목코드, 종목명, 현재가, condition, qty))
logger.info('매수주문 : 종목코드=%s, 종목명=%s, 매수가=%s, 매수조건=%s, 매수수량=%s' % (
종목코드, 종목명, 현재가, condition, qty))
else:
Telegram('[XTrader]매수실패 : 종목코드=%s, 종목명=%s, 매수가=%s, 매수조건=%s' % (
종목코드, 종목명, 현재가, condition))
logger.info('매수실패 : 종목코드=%s, 종목명=%s, 매수가=%s, 매수조건=%s' % (종목코드, 종목명, 현재가, condition))
else:
if self.매수모니터링체크 == False:
for code in self.매수할종목:
if self.portfolio.get(code) is not None and code not in self.매도할종목:
Telegram('[XTrader]매수모니터링마감 : 종목코드=%s, 종목명=%s 매도모니터링 전환' % (종목코드, 종목명))
logger.info('매수모니터링마감 : 종목코드=%s, 종목명=%s 매도모니터링 전환' % (종목코드, 종목명))
self.매수할종목.remove(code)
self.매도할종목.append(code)
self.매수모니터링체크 = True
logger.info('매도할 종목 :%s' % self.매도할종목)
# 매도 조건
if 종목코드 in self.매도할종목:
# 포트폴리오에 있음 and 매도주문실행중Lock에 없음 and 매수주문실행중Lock에 없음
if self.portfolio.get(종목코드) is not None and self.주문실행중_Lock.get(
'S_%s' % 종목코드) is None: # and self.주문실행중_Lock.get('B_%s' % 종목코드) is None:
# 매도 전략별 모니터링 체크
매도방법, sell_check, ratio = self.sell_strategy(종목코드, 시세)
if sell_check == True:
if 매도방법 == '00':
(result, order) = self.정액매도(sRQName='S_%s' % 종목코드, 종목코드=종목코드, 매도가=현재가,
수량=round(self.portfolio[종목코드].수량 * ratio))
else:
(result, order) = self.정량매도(sRQName='S_%s' % 종목코드, 종목코드=종목코드, 매도가=현재가,
수량=round(self.portfolio[종목코드].수량 * ratio))
if result == True:
self.주문실행중_Lock['S_%s' % 종목코드] = True
Telegram('[XTrader]매도주문 : 종목코드=%s, 종목명=%s, 매도가=%s, 매도전략=%s, 매도구간=%s, 수량=%s' % (
종목코드, 종목명, 현재가, self.portfolio[종목코드].매도전략, self.portfolio[종목코드].매도구간,
int(self.portfolio[종목코드].수량 * ratio)))
if self.portfolio[종목코드].매도전략 == '2':
logger.info(
'매도주문 : 종목코드=%s, 종목명=%s, 매도가=%s, 매도전략=%s, 매도구간=%s, 목표도달=%s, 매도조건=%s, 수량=%s' % (
종목코드, 종목명, 현재가, self.portfolio[종목코드].매도전략, self.portfolio[종목코드].매도구간,
self.portfolio[종목코드].목표도달, self.portfolio[종목코드].매도조건,
int(self.portfolio[종목코드].수량 * ratio)))
else:
logger.info('매도주문 : 종목코드=%s, 종목명=%s, 매도가=%s, 매도전략=%s, 매도구간=%s, 수량=%s' % (
종목코드, 종목명, 현재가, self.portfolio[종목코드].매도전략, self.portfolio[종목코드].매도구간,
int(self.portfolio[종목코드].수량 * ratio)))
                            else:
                                Telegram('[XTrader]매도실패 : 종목코드=%s, 종목명=%s, 매도가=%s, 매도전략=%s, 매도구간=%s, 수량=%s' % (
                                    종목코드, 종목명, 현재가, self.portfolio[종목코드].매도전략, self.portfolio[종목코드].매도구간,
                                    self.portfolio[종목코드].수량 * ratio))
                                logger.info('매도실패 : 종목코드=%s, 종목명=%s, 매도가=%s, 매도전략=%s, 매도구간=%s, 수량=%s' % (
                                    종목코드, 종목명, 현재가, self.portfolio[종목코드].매도전략, self.portfolio[종목코드].매도구간,
                                    self.portfolio[종목코드].수량 * ratio))
except Exception as e:
print('CTradeShortTerm_실시간데이터처리 Error : %s, %s' % (종목명, e))
Telegram('[XTrader]CTradeShortTerm_실시간데이터처리 Error : %s, %s' % (종목명, e), send='mc')
logger.error('CTradeShortTerm_실시간데이터처리 Error :%s, %s' % (종목명, e))
def 접수처리(self, param):
pass
def 체결처리(self, param):
종목코드 = param['종목코드']
주문번호 = param['주문번호']
self.주문결과[주문번호] = param
주문수량 = int(param['주문수량'])
미체결수량 = int(param['미체결수량'])
체결가 = int(0 if (param['체결가'] is None or param['체결가'] == '') else param['체결가']) # 매입가 동일
단위체결량 = int(0 if (param['단위체결량'] is None or param['단위체결량'] == '') else param['단위체결량'])
당일매매수수료 = int(0 if (param['당일매매수수료'] is None or param['당일매매수수료'] == '') else param['당일매매수수료'])
당일매매세금 = int(0 if (param['당일매매세금'] is None or param['당일매매세금'] == '') else param['당일매매세금'])
# 매수
if param['매도수구분'] == '2':
if self.주문번호_주문_매핑.get(주문번호) is not None:
주문 = self.주문번호_주문_매핑[주문번호]
매수가 = int(주문[2:])
# 단위체결가 = int(0 if (param['단위체결가'] is None or param['단위체결가'] == '') else param['단위체결가'])
# logger.debug('매수-------> %s %s %s %s %s' % (param['종목코드'], param['종목명'], 매수가, 주문수량 - 미체결수량, 미체결수량))
P = self.portfolio.get(종목코드)
if P is not None:
P.종목명 = param['종목명']
P.매수가 = 체결가 # 단위체결가
P.수량 += 단위체결량 # 추가 매수 대비해서 기존 수량에 체결된 수량 계속 더함(주문수량 - 미체결수량)
P.매수일 = datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')
else:
logger.error('ERROR 포트에 종목이 없음 !!!!')
if 미체결수량 == 0:
try:
self.주문실행중_Lock.pop(주문)
if self.Stocklist[종목코드]['매수주문완료'] >= self.Stocklist[종목코드]['매수가전략']:
self.매수할종목.remove(종목코드)
self.매도할종목.append(종목코드)
Telegram('[XTrader]분할 매수 완료_종목명:%s, 종목코드:%s 매수가:%s, 수량:%s' % (P.종목명, 종목코드, P.매수가, P.수량))
logger.info('분할 매수 완료_종목명:%s, 종목코드:%s 매수가:%s, 수량:%s' % (P.종목명, 종목코드, P.매수가, P.수량))
self.Stocklist[종목코드]['수량'] = P.수량
self.Stocklist[종목코드]['매수가'].pop(0)
self.매수총액 += (P.매수가 * P.수량)
logger.debug('체결처리완료_종목명:%s, 매수총액계산완료:%s' % (P.종목명, self.매수총액))
self.save_history(종목코드, status='매수')
Telegram('[XTrader]매수체결완료_종목명:%s, 매수가:%s, 수량:%s' % (P.종목명, P.매수가, P.수량))
logger.info('매수체결완료_종목명:%s, 매수가:%s, 수량:%s' % (P.종목명, P.매수가, P.수량))
except Exception as e:
Telegram('[XTrader]체결처리_매수 에러 종목명:%s, %s ' % (P.종목명, e), send='mc')
logger.error('체결처리_매수 에러 종목명:%s, %s ' % (P.종목명, e))
# 매도
if param['매도수구분'] == '1':
if self.주문번호_주문_매핑.get(주문번호) is not None:
주문 = self.주문번호_주문_매핑[주문번호]
매도가 = int(주문[2:])
try:
if 미체결수량 == 0:
self.주문실행중_Lock.pop(주문)
P = self.portfolio.get(종목코드)
if P is not None:
P.종목명 = param['종목명']
self.portfolio[종목코드].매도체결가 = 체결가
self.portfolio[종목코드].매도수량 = 주문수량
self.save_history(종목코드, status='매도')
Telegram('[XTrader]매도체결완료_종목명:%s, 체결가:%s, 수량:%s' % (param['종목명'], 체결가, 주문수량))
logger.info('매도체결완료_종목명:%s, 체결가:%s, 수량:%s' % (param['종목명'], 체결가, 주문수량))
except Exception as e:
Telegram('[XTrader]체결처리_매도 Error : %s' % e, send='mc')
logger.error('체결처리_매도 Error : %s' % e)
# 메인 화면에 반영
self.parent.RobotView()
def 잔고처리(self, param):
# print('CTradeShortTerm : 잔고처리')
종목코드 = param['종목코드']
P = self.portfolio.get(종목코드)
if P is not None:
P.매수가 = int(0 if (param['매입단가'] is None or param['매입단가'] == '') else param['매입단가'])
P.수량 = int(0 if (param['보유수량'] is None or param['보유수량'] == '') else param['보유수량'])
if P.수량 == 0:
self.portfolio.pop(종목코드)
self.매도할종목.remove(종목코드)
if 종목코드 not in self.금일매도종목: self.금일매도종목.append(종목코드)
logger.info('잔고처리_포트폴리오POP %s ' % 종목코드)
# 메인 화면에 반영
self.parent.RobotView()
def Run(self, flag=True, sAccount=None):
self.running = flag
ret = 0
# self.manual_portfolio()
for code in list(self.portfolio.keys()):
print(self.portfolio[code].__dict__)
logger.info(self.portfolio[code].__dict__)
if flag == True:
print("%s ROBOT 실행" % (self.sName))
try:
Telegram("[XTrader]%s ROBOT 실행" % (self.sName))
self.sAccount = sAccount
self.투자총액 = floor(int(d2deposit.replace(",", "")) * (self.Stocklist['전략']['투자금비중'] / 100))
print('로봇거래계좌 : ', 로봇거래계좌번호)
print('D+2 예수금 : ', int(d2deposit.replace(",", "")))
print('투자 총액 : ', self.투자총액)
print('Stocklist : ', self.Stocklist)
# self.최대포트수 = floor(int(d2deposit.replace(",", "")) / self.단위투자금 / len(self.parent.robots))
# print(self.최대포트수)
self.주문결과 = dict()
self.주문번호_주문_매핑 = dict()
self.주문실행중_Lock = dict()
codes = list(self.Stocklist.keys())
codes.remove('전략')
codes.remove('컬럼명')
self.초기조건(codes)
print("매도 : ", self.매도할종목)
print("매수 : ", self.매수할종목)
print("매수총액 : ", self.매수총액)
print("포트폴리오 매도모니터링 수정")
for code in list(self.portfolio.keys()):
print(self.portfolio[code].__dict__)
logger.info(self.portfolio[code].__dict__)
self.실시간종목리스트 = self.매도할종목 + self.매수할종목
logger.info("오늘 거래 종목 : %s %s" % (self.sName, ';'.join(self.실시간종목리스트) + ';'))
self.KiwoomConnect() # MainWindow 외에서 키움 API구동시켜서 자체적으로 API데이터송수신가능하도록 함
if len(self.실시간종목리스트) > 0:
self.f = open('data_result.csv', 'a', newline='')
self.wr = csv.writer(self.f)
self.wr.writerow(['체결시간', '종목코드', '종목명', '현재가', '전일대비'])
ret = self.KiwoomSetRealReg(self.sScreenNo, ';'.join(self.실시간종목리스트) + ';')
logger.debug("실시간데이타요청 등록결과 %s" % ret)
except Exception as e:
print('CTradeShortTerm_Run Error :', e)
Telegram('[XTrader]CTradeShortTerm_Run Error : %s' % e, send='mc')
logger.error('CTradeShortTerm_Run Error : %s' % e)
else:
Telegram("[XTrader]%s ROBOT 실행 중지" % (self.sName))
print('Stocklist : ', self.Stocklist)
ret = self.KiwoomSetRealRemove(self.sScreenNo, 'ALL')
self.f.close()
del self.f
del self.wr
if self.portfolio is not None:
# 구글 매도모니터링 시트 기존 종목 삭제
num_data = shortterm_sell_sheet.get_all_values()
for i in range(len(num_data)):
shortterm_sell_sheet.delete_rows(2)
for code in list(self.portfolio.keys()):
# 매수 미체결 종목 삭제
if self.portfolio[code].수량 == 0:
self.portfolio.pop(code)
else:
# 포트폴리오 종목은 구글 매도모니터링 시트에 추가하여 전략 수정가능
self.save_history(code, status='매도모니터링')
if len(self.금일매도종목) > 0:
try:
Telegram("[XTrader]%s 금일 매도 종목 손익 Upload : %s" % (self.sName, self.금일매도종목))
logger.info("%s 금일 매도 종목 손익 Upload : %s" % (self.sName, self.금일매도종목))
self.parent.statusbar.showMessage("금일 매도 종목 손익 Upload")
self.DailyProfit(self.금일매도종목)
except Exception as e:
print('%s 금일매도종목 결과 업로드 Error : %s' % (self.sName, e))
finally:
                    del self.DailyProfitLoop # 금일매도결과 업데이트 시 QEventLoop 사용으로 로봇 저장 시 pickle 에러 발생하여 삭제시킴
self.KiwoomDisConnect() # 로봇 클래스 내에서 일별종목별실현손익 데이터를 받고나서 연결 해제시킴
# 메인 화면에 반영
self.parent.RobotView()
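# Illustrative sketch (not wired into the robots above): a profit-ratio band lookup of the kind
# the robots' profit_band_check() methods implement, expressed with the bisect module.
# The band edges below are only an example; each robot keeps its own sell_band list.
import bisect

def profit_band_sketch(buy_price, current_price, band_edges=(0, 3, 5, 10, 15, 25)):
    """Return a 1-based band index for the profit ratio of current_price over buy_price."""
    ratio = (current_price - buy_price) / buy_price * 100
    # bisect_right counts how many edges are <= ratio, which is exactly the band index (minimum band is 1)
    return max(1, bisect.bisect_right(band_edges, ratio))

# Example: profit_band_sketch(10000, 10600) -> ratio 6.0% -> band 3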
# 장기 투자용 : 현재 미리 선정한 종목에 대해서 로봇 시작과 동시에 매수 실행 적용
class CTradeLongTerm(CTrade): # 로봇 추가 시 __init__ : 복사, Setting, 초기조건:전략에 맞게, 데이터처리~Run:복사
def __init__(self, sName, UUID, kiwoom=None, parent=None):
self.sName = sName
self.UUID = UUID
self.sAccount = None
self.kiwoom = kiwoom
self.parent = parent
self.running = False
self.주문결과 = dict()
self.주문번호_주문_매핑 = dict()
self.주문실행중_Lock = dict()
self.portfolio = dict()
self.실시간종목리스트 = []
self.SmallScreenNumber = 9999
self.d = today
# RobotAdd 함수에서 초기화 다음 셋팅 실행해서 설정값 넘김
def Setting(self, sScreenNo, 매수방법='03', 매도방법='03', 종목리스트=[]):
self.sScreenNo = sScreenNo
self.실시간종목리스트 = []
self.매수방법 = 매수방법
self.매도방법 = 매도방법
# Robot_Run이 되면 실행됨 - 매수/매도 종목을 리스트로 저장
def 초기조건(self):
# 매수총액 계산하기
# 금일매도종목 리스트 변수 초기화
# 매도할종목 : 포트폴리오에 있던 종목 추가
# 매수할종목 : 구글에서 받은 종목 추가
self.parent.statusbar.showMessage("[%s] 초기조건준비" % (self.sName))
self.금일매도종목 = [] # 장 마감 후 금일 매도한 종목에 대해서 매매이력 정리 업데이트(매도가, 손익률 등)
self.매도할종목 = []
self.매수할종목 = []
self.Stocklist = dict()
df = pd.read_csv('매수종목.csv', encoding='euc-kr')
codes= df['종목'].to_list()
qtys = df['수량'].to_list()
for 종목코드, 수량 in zip(codes, qtys):
code, name, market = get_code(종목코드)
self.Stocklist[code] = {
'종목명' : name,
'종목코드' : code,
'시장구분' : market,
'매수수량' : 수량
}
self.매수할종목 = list(self.Stocklist.keys())
# 포트폴리오에 있는 종목은 매도 관련 전략 재확인(구글시트) 및 '매도할종목'에 추가
if len(self.portfolio) > 0:
for port_code in list(self.portfolio.keys()):
self.매도할종목.append(port_code)
def 실시간데이터처리(self, param):
try:
if self.running == True:
체결시간 = '%s %s:%s:%s' % (str(self.d), param['체결시간'][0:2], param['체결시간'][2:4], param['체결시간'][4:])
종목코드 = param['종목코드']
현재가 = abs(int(float(param['현재가'])))
전일대비 = int(float(param['전일대비']))
등락률 = float(param['등락률'])
매도호가 = abs(int(float(param['매도호가'])))
매수호가 = abs(int(float(param['매수호가'])))
누적거래량 = abs(int(float(param['누적거래량'])))
시가 = abs(int(float(param['시가'])))
고가 = abs(int(float(param['고가'])))
저가 = abs(int(float(param['저가'])))
거래회전율 = abs(float(param['거래회전율']))
시가총액 = abs(int(float(param['시가총액'])))
종목명 = self.parent.CODE_POOL[종목코드][1] # pool[종목코드] = [시장구분, 종목명, 주식수, 전일종가, 시가총액]
시장구분 = self.parent.CODE_POOL[종목코드][0]
전일종가 = self.parent.CODE_POOL[종목코드][3]
시세 = [현재가, 시가, 고가, 저가, 전일종가]
self.parent.statusbar.showMessage("[%s] %s %s %s %s" % (체결시간, 종목코드, 종목명, 현재가, 전일대비))
# 매수 조건
# 매수모니터링 종료 시간 확인
if current_time >= "09:00:00":
if 종목코드 in self.매수할종목 and 종목코드 not in self.금일매도종목 and self.주문실행중_Lock.get('B_%s' % 종목코드) is None:
                        (result, order) = self.정량매수(sRQName='B_%s' % 종목코드, 종목코드=종목코드, 매수가=현재가, 수량=self.Stocklist[종목코드]['매수수량'])
if result == True:
self.portfolio[종목코드] = CPortStock_LongTerm(종목코드=종목코드, 종목명=종목명, 시장=시장구분, 매수가=현재가, 매수일=datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'))
self.주문실행중_Lock['B_%s' % 종목코드] = True
                            Telegram('[StockTrader]매수주문 : 종목코드=%s, 종목명=%s, 매수가=%s, 매수수량=%s' % (종목코드, 종목명, 현재가, self.Stocklist[종목코드]['매수수량']))
                            logger.info('매수주문 : 종목코드=%s, 종목명=%s, 매수가=%s, 매수수량=%s' % (종목코드, 종목명, 현재가, self.Stocklist[종목코드]['매수수량']))
else:
Telegram('[StockTrader]매수실패 : 종목코드=%s, 종목명=%s, 매수가=%s' % (종목코드, 종목명, 현재가))
logger.info('매수실패 : 종목코드=%s, 종목명=%s, 매수가=%s' % (종목코드, 종목명, 현재가))
# 매도 조건
if 종목코드 in self.매도할종목:
pass
except Exception as e:
print('CTradeLongTerm_실시간데이터처리 Error : %s, %s' % (종목명, e))
Telegram('[StockTrader]CTradeLongTerm_실시간데이터처리 Error : %s, %s' % (종목명, e), send='mc')
logger.error('CTradeLongTerm_실시간데이터처리 Error :%s, %s' % (종목명, e))
def 접수처리(self, param):
pass
def 체결처리(self, param):
종목코드 = param['종목코드']
주문번호 = param['주문번호']
self.주문결과[주문번호] = param
주문수량 = int(param['주문수량'])
미체결수량 = int(param['미체결수량'])
체결가 = int(0 if (param['체결가'] is None or param['체결가'] == '') else param['체결가']) # 매입가 동일
단위체결량 = int(0 if (param['단위체결량'] is None or param['단위체결량'] == '') else param['단위체결량'])
당일매매수수료 = int(0 if (param['당일매매수수료'] is None or param['당일매매수수료'] == '') else param['당일매매수수료'])
당일매매세금 = int(0 if (param['당일매매세금'] is None or param['당일매매세금'] == '') else param['당일매매세금'])
# 매수
if param['매도수구분'] == '2':
if self.주문번호_주문_매핑.get(주문번호) is not None:
주문 = self.주문번호_주문_매핑[주문번호]
매수가 = int(주문[2:])
# 단위체결가 = int(0 if (param['단위체결가'] is None or param['단위체결가'] == '') else param['단위체결가'])
# logger.debug('매수-------> %s %s %s %s %s' % (param['종목코드'], param['종목명'], 매수가, 주문수량 - 미체결수량, 미체결수량))
P = self.portfolio.get(종목코드)
if P is not None:
P.종목명 = param['종목명']
P.매수가 = 체결가 # 단위체결가
P.수량 += 단위체결량 # 추가 매수 대비해서 기존 수량에 체결된 수량 계속 더함(주문수량 - 미체결수량)
P.매수일 = datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')
else:
logger.error('ERROR 포트에 종목이 없음 !!!!')
if 미체결수량 == 0:
try:
self.주문실행중_Lock.pop(주문)
self.매수할종목.remove(종목코드)
self.매도할종목.append(종목코드)
Telegram('[StockTrader]매수체결완료_종목명:%s, 매수가:%s, 수량:%s' % (P.종목명, P.매수가, P.수량))
logger.info('매수체결완료_종목명:%s, 매수가:%s, 수량:%s' % (P.종목명, P.매수가, P.수량))
except Exception as e:
Telegram('[XTrader]체결처리_매수 에러 종목명:%s, %s ' % (P.종목명, e), send='mc')
logger.error('체결처리_매수 에러 종목명:%s, %s ' % (P.종목명, e))
# 매도
if param['매도수구분'] == '1':
if self.주문번호_주문_매핑.get(주문번호) is not None:
주문 = self.주문번호_주문_매핑[주문번호]
매도가 = int(주문[2:])
try:
if 미체결수량 == 0:
self.주문실행중_Lock.pop(주문)
P = self.portfolio.get(종목코드)
if P is not None:
P.종목명 = param['종목명']
self.portfolio[종목코드].매도체결가 = 체결가
self.portfolio[종목코드].매도수량 = 주문수량
Telegram('[StockTrader]매도체결완료_종목명:%s, 체결가:%s, 수량:%s' % (param['종목명'], 체결가, 주문수량))
logger.info('매도체결완료_종목명:%s, 체결가:%s, 수량:%s' % (param['종목명'], 체결가, 주문수량))
except Exception as e:
Telegram('[StockTrader]체결처리_매도 Error : %s' % e, send='mc')
logger.error('체결처리_매도 Error : %s' % e)
# 메인 화면에 반영
self.parent.RobotView()
def 잔고처리(self, param):
# print('CTradeShortTerm : 잔고처리')
종목코드 = param['종목코드']
P = self.portfolio.get(종목코드)
if P is not None:
P.매수가 = int(0 if (param['매입단가'] is None or param['매입단가'] == '') else param['매입단가'])
P.수량 = int(0 if (param['보유수량'] is None or param['보유수량'] == '') else param['보유수량'])
if P.수량 == 0:
self.portfolio.pop(종목코드)
self.매도할종목.remove(종목코드)
if 종목코드 not in self.금일매도종목: self.금일매도종목.append(종목코드)
logger.info('잔고처리_포트폴리오POP %s ' % 종목코드)
# 메인 화면에 반영
self.parent.RobotView()
def Run(self, flag=True, sAccount=None):
self.running = flag
ret = 0
# self.manual_portfolio()
# for code in list(self.portfolio.keys()):
# print(self.portfolio[code].__dict__)
# logger.info(self.portfolio[code].__dict__)
if flag == True:
print("%s ROBOT 실행" % (self.sName))
try:
Telegram("[StockTrader]%s ROBOT 실행" % (self.sName))
self.sAccount = sAccount
self.투자총액 = floor(int(d2deposit.replace(",", "")) / len(self.parent.robots))
print('로봇거래계좌 : ', 로봇거래계좌번호)
print('D+2 예수금 : ', int(d2deposit.replace(",", "")))
print('투자 총액 : ', self.투자총액)
# self.최대포트수 = floor(int(d2deposit.replace(",", "")) / self.단위투자금 / len(self.parent.robots))
# print(self.최대포트수)
self.주문결과 = dict()
self.주문번호_주문_매핑 = dict()
self.주문실행중_Lock = dict()
self.초기조건()
print("매도 : ", self.매도할종목)
print("매수 : ", self.매수할종목)
self.실시간종목리스트 = self.매도할종목 + self.매수할종목
logger.info("오늘 거래 종목 : %s %s" % (self.sName, ';'.join(self.실시간종목리스트) + ';'))
self.KiwoomConnect() # MainWindow 외에서 키움 API구동시켜서 자체적으로 API데이터송수신가능하도록 함
if len(self.실시간종목리스트) > 0:
ret = self.KiwoomSetRealReg(self.sScreenNo, ';'.join(self.실시간종목리스트) + ';')
logger.debug("[%s]실시간데이타요청 등록결과 %s" % (self.sName, ret))
except Exception as e:
print('CTradeShortTerm_Run Error :', e)
Telegram('[XTrader]CTradeShortTerm_Run Error : %s' % e, send='mc')
logger.error('CTradeShortTerm_Run Error : %s' % e)
else:
Telegram("[StockTrader]%s ROBOT 실행 중지" % (self.sName))
ret = self.KiwoomSetRealRemove(self.sScreenNo, 'ALL')
if self.portfolio is not None:
for code in list(self.portfolio.keys()):
# 매수 미체결 종목 삭제
if self.portfolio[code].수량 == 0:
self.portfolio.pop(code)
self.KiwoomDisConnect() # 로봇 클래스 내에서 일별종목별실현손익 데이터를 받고나서 연결 해제시킴
# 메인 화면에 반영
self.parent.RobotView()
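# Minimal sketch of the 매수종목.csv layout consumed by CTradeLongTerm.초기조건 above.
# The '종목' and '수량' columns come straight from that method; the resolver argument stands in
# for the project's get_code() helper (name -> (code, name, market)) and is an assumption here.
import pandas as pd

def load_buy_list_sketch(path, resolver, encoding='euc-kr'):
    """Return {code: {...}} in the same shape 초기조건 builds its Stocklist."""
    df = pd.read_csv(path, encoding=encoding)
    stocklist = {}
    for 종목, 수량 in zip(df['종목'].to_list(), df['수량'].to_list()):
        code, name, market = resolver(종목)
        stocklist[code] = {'종목명': name, '종목코드': code, '시장구분': market, '매수수량': 수량}
    return stocklist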
Ui_TradeCondition, QtBaseClass_TradeCondition = uic.loadUiType("./UI/TradeCondition.ui")
class 화면_TradeCondition(QDialog, Ui_TradeCondition):
# def __init__(self, parent):
def __init__(self, sScreenNo, kiwoom=None, parent=None): #
super(화면_TradeCondition, self).__init__(parent)
# self.setAttribute(Qt.WA_DeleteOnClose) # 위젯이 닫힐때 내용 삭제하는 것으로 창이 닫힐때 정보를 저장해야되는 로봇 세팅 시에는 쓰면 에러남!!
self.setupUi(self)
# print("화면_TradeCondition : __init__")
self.sScreenNo = sScreenNo
self.kiwoom = kiwoom #
self.parent = parent
self.progressBar.setValue(0) # Progressbar 초기 셋팅
self.model = PandasModel()
self.tableView.setModel(self.model)
self.columns = ['종목코드', '종목명']
self.result = []
self.KiwoomConnect()
self.GetCondition()
# 매수 종목 선정을 위한 체크 함수
def pick_stock(self, data):
row = []
cnt = 0
for code in data['종목코드']:
url = 'https://finance.naver.com/item/sise.nhn?code=%s' % (code)
response = requests.get(url)
soup = BeautifulSoup(response.text, 'html.parser')
tag = soup.find_all("td", {"class": "num"})
# tag = soup.find_all("span")
result = []
temp = []
for i in tag:
temp.append(i.text.replace('\t', '').replace('\n', ''))
result.append(code) # 종목코드
result.append(int(temp[5].replace(',',''))) # 전일종가
# result.append(temp[7]) # 시가
# result.append(temp[11]) # 저가
# result.append(temp[9]) # 고가
result.append(int(temp[0].replace(',',''))) # 종가(현재가)
# result.append(temp[6]) # 거래량
row.append(result)
cnt+=1
# Progress Bar 디스플레이(전체 시간 대비 비율)
self.progressBar.setValue(cnt / len(data) * 100)
df = pd.DataFrame(data=row, columns=['종목코드', '전일종가', '종가'])
df_final = pd.merge(data, df, on='종목코드')
df_final = df_final.reset_index(drop=True)
df_final['등락률'] = round((df_final['종가'] - df_final['전일종가'])/df_final['전일종가'] * 100, 1)
df_final = df_final[df_final['등락률'] >= 1][['종목코드', '종목명', '등락률']]
df_final = df_final.reset_index(drop=True)
print(df_final)
return df_final
# 저장된 조건 검색식 목록 읽음
def GetCondition(self):
# 1. 저장된 조건 검색식 목록 불러옴 GetCondition
# 2. 조건식 목록 요청 getConditionLoad
# 3. 목록 요청 응답 이벤트 OnReceiveConditionVer에서
# getConditionNameList로 목록을 딕셔너리로 self.condition에 받음
# 4. GetCondition에서 self.condition을 정리해서 콤보박스에 목록 추가함
try:
# print("화면_TradeCondition : GetCondition")
self.getConditionLoad()
self.df_condition = DataFrame()
self.idx = []
self.conName = []
for index in self.condition.keys(): # condition은 dictionary
# print(self.condition)
self.idx.append(str(index))
self.conName.append(self.condition[index])
# self.sendCondition("0156", self.condition[index], index, 1)
self.df_condition['Index'] = self.idx
self.df_condition['Name'] = self.conName
self.df_condition['Table'] = ">> 조건식 " + self.df_condition['Index'] + " : " + self.df_condition['Name']
self.df_condition['Index'] = self.df_condition['Index'].astype(int)
self.df_condition = self.df_condition.sort_values(by='Index').reset_index(drop=True) # 추가
print(self.df_condition) # 추가
self.comboBox_condition.clear()
self.comboBox_condition.addItems(self.df_condition['Table'].values)
except Exception as e:
print("GetCondition_Error")
print(e)
# 조건검색 해당 종목 요청 메서드
def sendCondition(self, screenNo, conditionName, conditionIndex, isRealTime):
# print("화면_TradeCondition : sendCondition")
"""
종목 조건검색 요청 메서드
이 메서드로 얻고자 하는 것은 해당 조건에 맞는 종목코드이다.
해당 종목에 대한 상세정보는 setRealReg() 메서드로 요청할 수 있다.
요청이 실패하는 경우는, 해당 조건식이 없거나, 조건명과 인덱스가 맞지 않거나, 조회 횟수를 초과하는 경우 발생한다.
조건검색에 대한 결과는
1회성 조회의 경우, receiveTrCondition() 이벤트로 결과값이 전달되며
실시간 조회의 경우, receiveTrCondition()과 receiveRealCondition() 이벤트로 결과값이 전달된다.
:param screenNo: string
:param conditionName: string - 조건식 이름
:param conditionIndex: int - 조건식 인덱스
:param isRealTime: int - 조건검색 조회구분(0: 1회성 조회, 1: 실시간 조회)
"""
        isRequest = self.kiwoom.dynamicCall("SendCondition(QString, QString, int, int)",
                                            screenNo, conditionName, conditionIndex, isRealTime)
# OnReceiveTrCondition() 이벤트 메서드에서 루프 종료
self.conditionLoop = QEventLoop()
self.conditionLoop.exec_()
# 조건 검색 관련 ActiveX와 On시리즈와 붙임(콜백)
def KiwoomConnect(self):
# print("화면_TradeCondition : KiwoomConnect")
self.kiwoom.OnReceiveTrCondition[str, str, str, int, int].connect(self.OnReceiveTrCondition)
self.kiwoom.OnReceiveConditionVer[int, str].connect(self.OnReceiveConditionVer)
self.kiwoom.OnReceiveRealCondition[str, str, str, str].connect(self.OnReceiveRealCondition)
# 조건 검색 관련 ActiveX와 On시리즈 연결 해제
def KiwoomDisConnect(self):
# print("화면_TradeCondition : KiwoomDisConnect")
self.kiwoom.OnReceiveTrCondition[str, str, str, int, int].disconnect(self.OnReceiveTrCondition)
self.kiwoom.OnReceiveConditionVer[int, str].disconnect(self.OnReceiveConditionVer)
self.kiwoom.OnReceiveRealCondition[str, str, str, str].disconnect(self.OnReceiveRealCondition)
# 조건식 목록 요청 메서드
def getConditionLoad(self):
""" 조건식 목록 요청 메서드 """
# print("화면_TradeCondition : getConditionLoad")
self.kiwoom.dynamicCall("GetConditionLoad()")
# OnReceiveConditionVer() 이벤트 메서드에서 루프 종료
self.conditionLoop = QEventLoop()
self.conditionLoop.exec_()
# 조건식 목록 획득 메서드(조건식 목록을 딕셔너리로 리턴)
def getConditionNameList(self):
"""
조건식 획득 메서드
조건식을 딕셔너리 형태로 반환합니다.
이 메서드는 반드시 receiveConditionVer() 이벤트 메서드안에서 사용해야 합니다.
:return: dict - {인덱스:조건명, 인덱스:조건명, ...}
"""
# print("화면_TradeCondition : getConditionNameList")
data = self.kiwoom.dynamicCall("GetConditionNameList()")
conditionList = data.split(';')
del conditionList[-1]
conditionDictionary = {}
for condition in conditionList:
key, value = condition.split('^')
conditionDictionary[int(key)] = value
return conditionDictionary
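    # Worked example (hypothetical raw value) of the parsing above:
    #   "0^급등주;1^갭상승;2^거래량상위;"  ->  split(';') and drop the trailing ''  ->
    #   {0: '급등주', 1: '갭상승', 2: '거래량상위'}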
# 조건검색 세부 종목 조회 요청시 발생되는 이벤트
def OnReceiveTrCondition(self, sScrNo, strCodeList, strConditionName, nIndex, nNext):
logger.debug('main:OnReceiveTrCondition [%s] [%s] [%s] [%s] [%s]' % (sScrNo, strCodeList, strConditionName, nIndex, nNext))
# print("화면_TradeCondition : OnReceiveTrCondition")
"""
(1회성, 실시간) 종목 조건검색 요청시 발생되는 이벤트
:param screenNo: string
:param codes: string - 종목코드 목록(각 종목은 세미콜론으로 구분됨)
:param conditionName: string - 조건식 이름
:param conditionIndex: int - 조건식 인덱스
:param inquiry: int - 조회구분(0: 남은데이터 없음, 2: 남은데이터 있음)
"""
try:
if strCodeList == "":
return
self.codeList = strCodeList.split(';')
del self.codeList[-1]
# print("종목개수: ", len(self.codeList))
# print(self.codeList)
for code in self.codeList:
row = []
# code.append(c)
row.append(code)
n = self.kiwoom.dynamicCall("GetMasterCodeName(QString)", code)
# now = abs(int(self.kiwoom.dynamicCall("GetCommRealData(QString, int)", code, 10)))
# name.append(n)
row.append(n)
# row.append(now)
self.result.append(row)
# self.df_con['종목코드'] = code
# self.df_con['종목명'] = name
# print(self.df_con)
self.data = DataFrame(data=self.result, columns=self.columns)
self.data['종목코드'] = "'" + self.data['종목코드']
# self.data.to_csv('조건식_'+ self.condition_name + '_종목.csv', encoding='euc-kr', index=False)
# print(self.temp)
# 종목에 대한 주가 크롤링 후 최종 종목 선정
# self.data = self.pick_stock(self.data)
self.model.update(self.data)
# self.model.update(self.df_con)
for i in range(len(self.columns)):
self.tableView.resizeColumnToContents(i)
except Exception as e:
print("OnReceiveTrCondition Error : ", e)
finally:
self.conditionLoop.exit()
# 조건식 목록 요청에 대한 응답 이벤트
def OnReceiveConditionVer(self, lRet, sMsg):
logger.debug('main:OnReceiveConditionVer : [이벤트] 조건식 저장 [%s] [%s]' % (lRet, sMsg))
# print("화면_TradeCondition : OnReceiveConditionVer")
"""
getConditionLoad() 메서드의 조건식 목록 요청에 대한 응답 이벤트
:param receive: int - 응답결과(1: 성공, 나머지 실패)
:param msg: string - 메세지
"""
try:
self.condition = self.getConditionNameList() # condition이 리턴되서 오면 GetCondition에서 condition 변수 사용 가능
# print("조건식 개수: ", len(self.condition))
# for key in self.condition.keys():
# print("조건식: ", key, ": ", self.condition[key])
except Exception as e:
print("OnReceiveConditionVer_Error")
finally:
self.conditionLoop.exit()
# print(self.conditionName)
# self.kiwoom.dynamicCall("SendCondition(QString,QString, int, int)", '0156', '갭상승', 0, 0)
# 실시간 종목 조건검색 요청시 발생되는 이벤트
def OnReceiveRealCondition(self, sTrCode, strType, strConditionName, strConditionIndex):
logger.debug('main:OnReceiveRealCondition [%s] [%s] [%s] [%s]' % (sTrCode, strType, strConditionName, strConditionIndex))
# print("화면_TradeCondition : OnReceiveRealCondition")
"""
실시간 종목 조건검색 요청시 발생되는 이벤트
:param code: string - 종목코드
:param event: string - 이벤트종류("I": 종목편입, "D": 종목이탈)
:param conditionName: string - 조건식 이름
:param conditionIndex: string - 조건식 인덱스(여기서만 인덱스가 string 타입으로 전달됨)
"""
print("[receiveRealCondition]")
print("종목코드: ", sTrCode)
print("이벤트: ", "종목편입" if strType == "I" else "종목이탈")
# 조건식 종목 검색 버튼 클릭 시 실행됨(시그널/슬롯 추가)
def inquiry(self):
# print("화면_TradeCondition : inquiry")
try:
self.result = []
index = int(self.df_condition['Index'][self.comboBox_condition.currentIndex()]) # currentIndex() : 현재 콤보박스에서 선택된 index를 받음 int형
self.condition_name = self.condition[index]
print(index, self.condition[index])
self.sendCondition("0156", self.condition[index], index, 0) # 1 : 실시간 조건검색식 종목 조회, 0 : 일회성 조회
except Exception as e:
print("조건 검색 Error: ", e)
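# Standalone sketch of the change-rate filter applied in 화면_TradeCondition.pick_stock above:
# 등락률 = (종가 - 전일종가) / 전일종가 * 100, keeping rows that gained at least 1%. The column
# names and the 1% threshold mirror that method; the function name itself is just illustrative.
def filter_by_change_rate(df, threshold=1.0):
    out = df.copy()
    out['등락률'] = round((out['종가'] - out['전일종가']) / out['전일종가'] * 100, 1)
    return out[out['등락률'] >= threshold][['종목코드', '종목명', '등락률']].reset_index(drop=True)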
class CTradeCondition(CTrade): # 로봇 추가 시 __init__ : 복사, Setting / 초기조건:전략에 맞게, 데이터처리 / Run:복사
def __init__(self, sName, UUID, kiwoom=None, parent=None):
# print("CTradeCondition : __init__")
self.sName = sName
self.UUID = UUID
self.sAccount = None
self.kiwoom = kiwoom
self.parent = parent
self.running = False
self.remained_data = True
self.초기설정상태 = False
self.주문결과 = dict()
self.주문번호_주문_매핑 = dict()
self.주문실행중_Lock = dict()
self.portfolio = dict()
self.CList = []
self.실시간종목리스트 = []
self.SmallScreenNumber = 9999
self.d = today
# 조건식 선택에 의해서 투자금, 매수/도 방법, 포트폴리오 수, 검색 종목 등이 저장됨
def Setting(self, sScreenNo, 포트폴리오수, 조건식인덱스, 조건식명, 조건검색타입, 단위투자금, 매수방법, 매도방법):
# print("CTradeCondition : Setting")
self.sScreenNo = sScreenNo
self.포트폴리오수 = 포트폴리오수
self.조건식인덱스 = 조건식인덱스
self.조건식명 = 조건식명
self.조건검색타입 = int(조건검색타입)
self.단위투자금 = 단위투자금
self.매수방법 = 매수방법
self.매도방법 = 매도방법
self.보유일 = 1
self.익절 = 5 # percent
self.고가대비 = -1 # percent
self.손절 = -2.7 # percent
self.투자금비중 = 70 # 예수금 대비 percent
        print("조건검색 로봇 셋팅 완료 - 조건인덱스 : %s, 조건식명 : %s, 검색타입 : %s"%(self.조건식인덱스, self.조건식명, self.조건검색타입))
        logger.info("조건검색 로봇 셋팅 완료 - 조건인덱스 : %s, 조건식명 : %s, 검색타입 : %s" % (self.조건식인덱스, self.조건식명, self.조건검색타입))
# Robot_Run이 되면 실행됨 - 매도 종목을 리스트로 저장
def 초기조건(self, codes):
# print("CTradeCondition : 초기조건")
self.parent.statusbar.showMessage("[%s] 초기조건준비" % (self.sName))
self.sell_band = [0, 3, 5, 10, 15, 25]
self.매도구간별조건 = [-2.7, 0.5, -2.0, -2.0, -2.0, -2.0]
self.매수모니터링 = True
self.clearcheck = False # 당일청산 체크변수
self.조건검색이벤트 = False
# 매수할 종목은 해당 조건에서 검색된 종목
# 매도할 종목은 이미 매수가 되어 포트폴리오에 저장되어 있는 종목
self.금일매도종목 = []
self.매도할종목 = []
self.매수할종목 = codes
# for code in codes: # 선택한 종목검색식의 종목은 '매수할종목'에 추가
# stock = self.portfolio.get(code) # 초기 로봇 실행 시 포트폴리오는 비어있음
# if stock != None: # 검색한 종목이 포트폴리오에 있으면 '매도할종목'에 추가
# self.매도할종목.append(code)
# else: # 포트폴리오에 없으면 매수종목리스트에 저장
# self.매수할종목.append(code)
for port_code in list(self.portfolio.keys()): # 포트폴리오에 있는 종목은 '매도할종목'에 추가
보유기간 = holdingcal(self.portfolio[port_code].매수일) - 1
if 보유기간 < 3:
self.portfolio[port_code].매도전략 = 5 # 매도지연 종목은 목표가 낮춤 5% -> 3% -> 1%
elif 보유기간 >= 3 and 보유기간 < 5:
self.portfolio[port_code].매도전략 = 3
            elif 보유기간 >= 5:
self.portfolio[port_code].매도전략 = 1
print(self.portfolio[port_code].__dict__)
logger.info(self.portfolio[port_code].__dict__)
self.매도할종목.append(port_code)
# 수동 포트폴리오 생성
def manual_portfolio(self):
self.portfolio = dict()
self.Stocklist = {
'032190': {'종목명': '다우데이타', '종목코드': '032190', '매수가': [16150], '수량': 12, '보유일':1, '매수일': '2020/08/05 09:08:54'},
'047400': {'종목명': '유니온머티리얼', '종목코드': '047400', '매수가': [5350], '수량': 36, '보유일':1, '매수일': '2020/08/05 09:42:55'},
'085660': {'종목명': '차바이오텍', '종목코드': '085660', '매수가': [22100], '수량': 9, '보유일': 1,
'매수일': '2020/08/05 09:08:54'},
            '000020': {'종목명': '동화약품', '종목코드': '000020', '매수가': [25800], '수량': 7, '보유일': 1,
                       '매수일': '2020/08/05 09:42:55'},
}
for code in list(self.Stocklist.keys()):
self.portfolio[code] = CPortStock(종목코드=code, 종목명=self.Stocklist[code]['종목명'],
매수가=self.Stocklist[code]['매수가'][0],
보유일=self.Stocklist[code]['보유일'],
수량=self.Stocklist[code]['수량'],
매수일=self.Stocklist[code]['매수일'])
# google spreadsheet 매매이력 생성
def save_history(self, code, status):
# 매매이력 sheet에 해당 종목(매수된 종목)이 있으면 row를 반환 아니면 예외처리 -> 신규 매수로 처리
try:
code_row = condition_history_sheet.findall(self.portfolio[code].종목명)[
-1].row # 종목명이 있는 모든 셀을 찾아서 맨 아래에 있는 셀을 선택
cell = alpha_list[condition_history_cols.index('매도가')] + str(code_row) # 매수 이력에 있는 종목이 매도가 되었는지 확인
sell_price = condition_history_sheet.acell(str(cell)).value
# 매도 이력은 추가 매도(매도전략5의 경우)나 신규 매도인 경우라 매도 이력 유무와 상관없음
if status == '매도': # 포트폴리오 데이터 사용
cell = alpha_list[condition_history_cols.index('매도가')] + str(code_row)
condition_history_sheet.update_acell(cell, self.portfolio[code].매도가)
cell = alpha_list[condition_history_cols.index('매도일')] + str(code_row)
condition_history_sheet.update_acell(cell, datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'))
계산수익률 = round((self.portfolio[code].매도가 / self.portfolio[code].매수가 - 1) * 100, 2)
cell = alpha_list[condition_history_cols.index('수익률(계산)')] + str(code_row) # 수익률 계산
condition_history_sheet.update_acell(cell, 계산수익률)
# 매수 이력은 있으나 매도 이력이 없음 -> 매도 전 추가 매수
if sell_price == '':
if status == '매수': # 포트폴리오 데이터 사용
cell = alpha_list[condition_history_cols.index('매수가')] + str(code_row)
condition_history_sheet.update_acell(cell, self.portfolio[code].매수가)
cell = alpha_list[condition_history_cols.index('매수일')] + str(code_row)
condition_history_sheet.update_acell(cell, self.portfolio[code].매수일)
else: # 매도가가 기록되어 거래가 완료된 종목으로 판단하여 예외발생으로 신규 매수 추가함
raise Exception('매매완료 종목')
except:
row = []
try:
if status == '매수':
row.append(self.portfolio[code].종목명)
row.append(self.portfolio[code].매수가)
row.append(self.portfolio[code].매수일)
condition_history_sheet.append_row(row)
except Exception as e:
                print('[%s]save_history Error : %s' % (self.sName, e))
                Telegram('[StockTrader][%s]save_history Error : %s' % (self.sName, e), send='mc')
                logger.error('[%s]save_history Error : %s' % (self.sName, e))
# 매수 전략별 매수 조건 확인
def buy_strategy(self, code, price):
result = False
현재가, 시가, 고가, 저가, 전일종가 = price # 시세 = [현재가, 시가, 고가, 저가, 전일종가]
if self.단위투자금 // 현재가 > 0 and 현재가 >= 고가 * (0.99) and 저가 > 전일종가 and 현재가 < 시가 * 1.1 and 시가 <= 전일종가 * 1.05:
result = True
return result
# 매도 구간 확인
def profit_band_check(self, 현재가, 매수가):
# print('현재가, 매수가', 현재가, 매수가)
ratio = round((현재가 - 매수가) / 매수가 * 100, 2)
# print('ratio', ratio)
if ratio < 3:
return 1
elif ratio in self.sell_band:
return self.sell_band.index(ratio) + 1
else:
self.sell_band.append(ratio)
self.sell_band.sort()
band = self.sell_band.index(ratio)
self.sell_band.remove(ratio)
return band
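    # Worked example for the lookup above with sell_band = [0, 3, 5, 10, 15, 25]:
    #   매수가 10,000 / 현재가 10,120 -> ratio  1.2% -> band 1 (ratio < 3)
    #   매수가 10,000 / 현재가 10,400 -> ratio  4.0% -> inserted between 3 and 5 -> band 2
    #   매수가 10,000 / 현재가 11,200 -> ratio 12.0% -> inserted between 10 and 15 -> band 4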
# 매도 전략
def sell_strategy(self, code, price):
result = False
band = self.portfolio[code].매도구간 # 이전 매도 구간 받음
현재가, 시가, 고가, 저가, 전일종가 = price # 시세 = [현재가, 시가, 고가, 저가, 전일종가]
매수가 = self.portfolio[code].매수가
sell_price = 현재가
# 매도를 위한 수익률 구간 체크(매수가 대비 현재가의 수익률 조건에 다른 구간 설정)
new_band = self.profit_band_check(현재가, 매수가)
if (hogacal(시가, 0, self.portfolio[code].시장, '상한가')) <= 현재가:
band = 7
if band < new_band: # 이전 구간보다 현재 구간이 높을 경우(시세가 올라간 경우)만
band = new_band # 구간을 현재 구간으로 변경(반대의 경우는 구간 유지)
# self.sell_band = [0, 3, 5, 10, 15, 25]
        # self.매도구간별조건 = [-2.7, 0.5, -2.0, -2.0, -2.0, -2.0]  # 초기조건()에서 설정한 실제 값
if band == 1 and 현재가 <= 매수가 * (1 + (self.매도구간별조건[0] / 100)):
result = False
elif band == 2 and 현재가 <= 매수가 * (1 + (self.매도구간별조건[1] / 100)): # 3% 이하일 경우 0.3%까지 떨어지면 매도
result = True
elif band == 3 and 현재가 <= 고가 * (1 + (self.매도구간별조건[2] / 100)): # 5% 이상일 경우 고가대비 -3%까지 떨어지면 매도
result = True
elif band == 4 and 현재가 <= 고가 * (1 + (self.매도구간별조건[3] / 100)):
result = True
elif band == 5 and 현재가 <= 고가 * (1 + (self.매도구간별조건[4] / 100)):
result = True
elif band == 6 and 현재가 <= 고가 * (1 + (self.매도구간별조건[5] / 100)):
result = True
elif band == 7 and 현재가 >= (hogacal(시가, -3, self.portfolio[code].시장, '상한가')):
result = True
self.portfolio[code].매도구간 = band # 포트폴리오에 매도구간 업데이트
if current_time >= '15:10:00': # 15시 10분에 매도 처리
result = True
"""
if self.portfolio[code].매도전략변경1 == False and current_time >= '11:00:00' and current_time < '13:00:00':
self.portfolio[code].매도전략변경1 = True
self.portfolio[code].매도전략 = self.portfolio[code].매도전략 * 0.6
elif self.portfolio[code].매도전략변경2 == False and current_time >= '13:00:00':
self.portfolio[code].매도전략변경2 = True
self.portfolio[code].매도전략 = self.portfolio[code].매도전략 * 0.6
if self.portfolio[code].매도전략 < 0.3:
self.portfolio[code].매도전략 = 0.3
# 2. 익절 매도 전략
if 현재가 >= 매수가 * (1 + (self.portfolio[code].매도전략 / 100)):
result = True
sell_price = 현재가
# 3. 고가대비 비율 매도 전략
# elif 현재가 <= 고가 * (1 + (self.고가대비 / 100)):
# result = True
# sell_price = 현재가
# 4. 손절 매도 전략
# elif 현재가 <= 매수가 * (1 + (self.손절 / 100)):
# result = True
# sell_price = 현재가
"""
return result, sell_price
# 당일청산 전략
def clearning_strategy(self):
if self.clearcheck == True:
print('당일청산 매도')
try:
for code in list(self.portfolio.keys()):
if self.주문실행중_Lock.get('S_%s' % code) is None and self.portfolio[code].수량 != 0:
self.portfolio[code].매도구간 = 0
self.매도방법 = '03' # 03:시장가
(result, order) = self.정량매도(sRQName='S_%s' % code, 종목코드=code, 매도가=self.portfolio[code].매수가,
수량=self.portfolio[code].수량)
if result == True:
self.주문실행중_Lock['S_%s' % code] = True
Telegram('[StockTrader]정량매도(당일청산) : 종목코드=%s, 종목명=%s, 수량=%s' % (code, self.portfolio[code].종목명, self.portfolio[code].수량), send='mc')
logger.info('정량매도(당일청산) : 종목코드=%s, 종목명=%s, 수량=%s' % (code, self.portfolio[code].종목명, self.portfolio[code].수량))
else:
Telegram('[StockTrader]정액매도실패(당일청산) : 종목코드=%s, 종목명=%s, 수량=%s' % (code, self.portfolio[code].종목명, self.portfolio[code].수량), send='mc')
logger.info('정량매도실패(당일청산) : 종목코드=%s, 종목명=%s, 수량=%s' % (code, self.portfolio[code].종목명, self.portfolio[code].수량))
except Exception as e:
print("clearning_strategy Error :", e)
# 주문처리
def 실시간데이터처리(self, param):
if self.running == True:
체결시간 = '%s %s:%s:%s' % (str(self.d), param['체결시간'][0:2], param['체결시간'][2:4], param['체결시간'][4:])
종목코드 = param['종목코드']
현재가 = abs(int(float(param['현재가'])))
전일대비 = int(float(param['전일대비']))
등락률 = float(param['등락률'])
매도호가 = abs(int(float(param['매도호가'])))
매수호가 = abs(int(float(param['매수호가'])))
누적거래량 = abs(int(float(param['누적거래량'])))
시가 = abs(int(float(param['시가'])))
고가 = abs(int(float(param['고가'])))
저가 = abs(int(float(param['저가'])))
거래회전율 = abs(float(param['거래회전율']))
시가총액 = abs(int(float(param['시가총액'])))
전일종가 = 현재가 - 전일대비
# MainWindow의 __init__에서 CODE_POOL 변수 선언(self.CODE_POOL = self.get_code_pool()), pool[종목코드] = [시장구분, 종목명, 주식수, 시가총액]
종목명 = self.parent.CODE_POOL[종목코드][1] # pool[종목코드] = [시장구분, 종목명, 주식수, 전일종가, 시가총액]
시장구분 = self.parent.CODE_POOL[종목코드][0]
전일종가 = self.parent.CODE_POOL[종목코드][3]
시세 = [현재가, 시가, 고가, 저가, 전일종가]
self.parent.statusbar.showMessage("[%s] %s %s %s %s" % (체결시간, 종목코드, 종목명, 현재가, 전일대비))
# 정액매도 후 포트폴리오/매도할종목에서 제거
if 종목코드 in self.매도할종목:
if self.portfolio.get(종목코드) is not None and self.주문실행중_Lock.get('S_%s' % 종목코드) is None:
# 매도 전략별 모니터링 체크
sell_check, 매도가 = self.sell_strategy(종목코드, 시세)
if sell_check == True:
(result, order) = self.정액매도(sRQName='S_%s' % 종목코드, 종목코드=종목코드, 매도가=매도가, 수량=self.portfolio[종목코드].수량)
if result == True:
self.주문실행중_Lock['S_%s' % 종목코드] = True
if 종목코드 not in self.금일매도종목: self.금일매도종목.append(종목코드)
Telegram('[StockTrader]%s 매도주문 : 종목코드=%s, 종목명=%s, 매도구간=%s, 매도가=%s, 수량=%s' % (self.sName, 종목코드, 종목명, self.portfolio[종목코드].매도구간, 현재가, self.portfolio[종목코드].수량), send='mc')
logger.info('[StockTrader]%s 매도주문 : 종목코드=%s, 종목명=%s, 매도구간=%s, 매도가=%s, 수량=%s' % (self.sName, 종목코드, 종목명, self.portfolio[종목코드].매도구간, 현재가, self.portfolio[종목코드].수량))
else:
Telegram('[StockTrader]%s 매도실패 : 종목코드=%s, 종목명=%s, 매도가=%s, 수량=%s' % (self.sName, 종목코드, 종목명, 현재가, self.portfolio[종목코드].수량), send='mc')
logger.info('[StockTrader]%s 매도실패 : 종목코드=%s, 종목명=%s, 매도가=%s, 수량=%s' % (self.sName, 종목코드, 종목명, 현재가, self.portfolio[종목코드].수량))
# 매수할 종목에 대해서 정액매수 주문하고 포트폴리오/매도할종목에 추가, 매수할종목에서 제외
if current_time <= '14:30:00':
if 종목코드 in self.매수할종목 and 종목코드 not in self.금일매도종목:
if len(self.portfolio) < self.최대포트수 and self.portfolio.get(종목코드) is None and self.주문실행중_Lock.get('B_%s' % 종목코드) is None:
buy_check = self.buy_strategy(종목코드, 시세)
if buy_check == True:
(result, order) = self.정액매수(sRQName='B_%s' % 종목코드, 종목코드=종목코드, 매수가=현재가, 매수금액=self.단위투자금)
if result == True:
self.portfolio[종목코드] = CPortStock(종목코드=종목코드, 종목명=종목명, 시장=시장구분, 매수가=현재가, 보유일=self.보유일, 매도전략 = self.익절,
매수일=datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'))
self.주문실행중_Lock['B_%s' % 종목코드] = True
Telegram('[StockTrader]%s 매수주문 : 종목코드=%s, 종목명=%s, 매수가=%s' % (self.sName, 종목코드, 종목명, 현재가), send='mc')
logger.info('[StockTrader]%s 매수주문 : 종목코드=%s, 종목명=%s, 매수가=%s' % (self.sName, 종목코드, 종목명, 현재가))
else:
Telegram('[StockTrader]%s 매수실패 : 종목코드=%s, 종목명=%s, 매수가=%s' % (self.sName, 종목코드, 종목명, 현재가), send='mc')
logger.info('[StockTrader]%s 매수실패 : 종목코드=%s, 종목명=%s, 매수가=%s' % (self.sName, 종목코드, 종목명, 현재가))
else:
if self.매수모니터링 == True:
self.parent.ConditionTick.stop()
self.매수모니터링 = False
logger.info("매수모니터링 시간 초과")
def 접수처리(self, param):
pass
# OnReceiveChejanData에서 체결처리가 되면 체결처리 호출
def 체결처리(self, param):
종목코드 = param['종목코드']
주문번호 = param['주문번호']
self.주문결과[주문번호] = param
주문수량 = int(param['주문수량'])
미체결수량 = int(param['미체결수량'])
체결가 = int(0 if (param['체결가'] is None or param['체결가'] == '') else param['체결가']) # 매입가 동일
단위체결량 = int(0 if (param['단위체결량'] is None or param['단위체결량'] == '') else param['단위체결량'])
당일매매수수료 = int(0 if (param['당일매매수수료'] is None or param['당일매매수수료'] == '') else param['당일매매수수료'])
당일매매세금 = int(0 if (param['당일매매세금'] is None or param['당일매매세금'] == '') else param['당일매매세금'])
# 매수
if param['매도수구분'] == '2':
if self.주문번호_주문_매핑.get(주문번호) is not None:
주문 = self.주문번호_주문_매핑[주문번호]
매수가 = int(주문[2:])
P = self.portfolio.get(종목코드)
if P is not None:
P.종목명 = param['종목명']
P.매수가 = 체결가 # 단위체결가
P.수량 += 단위체결량 # 추가 매수 대비해서 기존 수량에 체결된 수량 계속 더함(주문수량 - 미체결수량)
P.매수일 = datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')
else:
logger.error('ERROR 포트에 종목이 없음 !!!!')
if 미체결수량 == 0:
try:
self.주문실행중_Lock.pop(주문)
self.매수할종목.remove(종목코드)
self.매도할종목.append(종목코드)
self.save_history(종목코드, status='매수')
Telegram('[StockTrader]%s 매수체결완료_종목명:%s, 매수가:%s, 수량:%s' % (self.sName, P.종목명, P.매수가, P.수량), send='mc')
logger.info('[StockTrader]%s %s 매수 완료 : 매수/주문%s Pop, 매도 Append ' % (self.sName, 종목코드, 주문))
except Exception as e:
Telegram('[StockTrader]%s 체결처리_매수 POP에러 종목명:%s ' % (self.sName, P.종목명), send='mc')
logger.error('[StockTrader]%s 체결처리_매수 POP에러 종목명:%s ' % (self.sName, P.종목명))
# 매도
if param['매도수구분'] == '1':
if self.주문번호_주문_매핑.get(주문번호) is not None:
주문 = self.주문번호_주문_매핑[주문번호]
매도가 = int(주문[2:])
try:
if 미체결수량 == 0:
self.주문실행중_Lock.pop(주문)
P = self.portfolio.get(종목코드)
if P is not None:
P.종목명 = param['종목명']
self.portfolio[종목코드].매도가 = 체결가
self.save_history(종목코드, status='매도')
Telegram('[StockTrader]%s 매도체결완료_종목명:%s, 체결가:%s, 수량:%s' % (self.sName, param['종목명'], 체결가, 주문수량), send='mc')
logger.info('[StockTrader]%s 매도체결완료_종목명:%s, 체결가:%s, 수량:%s' % (self.sName, param['종목명'], 체결가, 주문수량))
except Exception as e:
Telegram('[StockTrader]%s 체결처리_매도 매매이력 Error : %s' % (self.sName, e), send='mc')
logger.error('[StockTrader]%s 체결처리_매도 매매이력 Error : %s' % (self.sName, e))
# 메인 화면에 반영
self.parent.RobotView()
def 잔고처리(self, param):
종목코드 = param['종목코드']
P = self.portfolio.get(종목코드)
if P is not None:
P.매수가 = int(0 if (param['매입단가'] is None or param['매입단가'] == '') else param['매입단가'])
P.수량 = int(0 if (param['보유수량'] is None or param['보유수량'] == '') else param['보유수량'])
if P.수량 == 0:
self.portfolio.pop(종목코드)
self.매도할종목.remove(종목코드)
if 종목코드 not in self.금일매도종목: self.금일매도종목.append(종목코드)
logger.info('잔고처리_포트폴리오POP %s ' % 종목코드)
# 메인 화면에 반영
self.parent.RobotView()
# MainWindow의 ConditionTick에 의해서 3분마다 실행
def ConditionCheck(self):
if '3' in self.sName:
if current_time >= "15:00:00" and self.조건검색이벤트 == False:
self.조건검색이벤트 = True
codes = self.GetCodes(self.조건식인덱스, self.조건식명, self.조건검색타입)
print(current_time, codes)
code_list=[]
for code in codes:
code_list.append(code + '_' + self.parent.CODE_POOL[code][1] + '\n')
code_list = "".join(code_list)
print(current_time, code_list)
Telegram(code_list, send='mc')
else:
pass
else:
codes = self.GetCodes(self.조건식인덱스, self.조건식명, self.조건검색타입)
print(current_time, codes)
for code in codes:
if code not in self.매수할종목 and self.portfolio.get(code) is None and code not in self.금일매도종목:
print('매수종목추가 : ', code, self.parent.CODE_POOL[code][1])
self.매수할종목.append(code)
self.실시간종목리스트.append(code)
ret = self.KiwoomSetRealReg(self.sScreenNo, ';'.join(self.실시간종목리스트) + ';') # 실시간 시세조회 종목 추가
logger.debug("[%s]실시간데이타요청 등록결과 %s %s" % (self.sName, self.실시간종목리스트, ret))
# 실시간 조검 검색 편입 종목 처리
def 실시간조건처리(self, code):
if (code not in self.매수할종목) and (self.portfolio.get(code) is None) and (code not in self.금일매도종목):
print('매수종목추가 : ', code)
self.매수할종목.append(code)
self.실시간종목리스트.append(code)
ret = self.KiwoomSetRealReg(self.sScreenNo, ';'.join(self.실시간종목리스트) + ';') # 실시간 시세조회 종목 추가
logger.debug("[%s]실시간데이타요청 등록결과 %s %s" % (self.sName, self.실시간종목리스트, ret))
def Run(self, flag=True, sAccount=None):
self.running = flag
ret = 0
codes = []
self.codeList = []
# self.manual_portfolio()
if flag == True:
print("%s ROBOT 실행" % (self.sName))
self.KiwoomConnect()
try:
logger.info("[%s]조건식 거래 로봇 실행"%(self.sName))
self.sAccount = Account
self.주문결과 = dict()
self.주문번호_주문_매핑 = dict()
self.주문실행중_Lock = dict()
self.투자총액 = floor(int(d2deposit.replace(",", "")) * (self.투자금비중 / 100))
print('D+2 예수금 : ', int(d2deposit.replace(",", "")))
print('투자금 : ', self.투자총액)
print('단위투자금 : ', self.단위투자금)
self.최대포트수 = self.포트폴리오수 # floor(self.투자총액 / self.단위투자금) + len(self.portfolio)
# print('기존포트수 : ', len(self.portfolio))
print('최대포트수 : ', self.최대포트수)
print("조건식 인덱스 : ", self.조건식인덱스, type(self.조건식인덱스))
print("조건식명 : ", self.조건식명)
if self.조건검색타입 == 0: # 3분봉 검색
self.parent.ConditionTick.start(1000)
else: # 실시간 검색
print('실시간 조건검색')
codes = self.GetCodes(self.조건식인덱스, self.조건식명, self.조건검색타입)
codes = []
self.초기조건(codes)
print("매수 : ", self.매수할종목)
print("매도 : ", self.매도할종목)
self.실시간종목리스트 = self.매도할종목 + self.매수할종목
logger.info("[%s]오늘 거래 종목 : %s" % (self.sName, ';'.join(self.실시간종목리스트) + ';'))
if len(self.실시간종목리스트) > 0:
ret = self.KiwoomSetRealReg(self.sScreenNo, ';'.join(self.실시간종목리스트) + ';') # 실시간 시세조회 등록
logger.debug("실시간데이타요청 등록결과 %s" % ret)
except Exception as e:
print('[%s]_Run Error : %s' % (self.sName,e))
Telegram('[StockTrader][%s]_Run Error : %s' % (self.sName,e), send='mc')
logger.error('[StockTrader][%s]_Run Error : %s' % (self.sName,e))
else:
if self.조건검색타입 == 0:
self.parent.ConditionTick.stop() # MainWindow 타이머 중지
else:
ret = self.sendConditionStop("0156", self.조건식명, self.조건식인덱스) # 실시간 조검 검색 중지
ret = self.KiwoomSetRealRemove(self.sScreenNo, 'ALL')
if self.portfolio is not None:
for code in list(self.portfolio.keys()):
if self.portfolio[code].수량 == 0:
self.portfolio.pop(code)
if len(self.금일매도종목) > 0:
try:
Telegram("[StockTrader]%s 금일 매도 종목 손익 Upload : %s" % (self.sName, self.금일매도종목), send='mc')
logger.info("[%s]금일 매도 종목 손익 Upload : %s" % (self.sName, self.금일매도종목))
self.parent.statusbar.showMessage("금일 매도 종목 손익 Upload")
self.DailyProfit(self.금일매도종목)
except Exception as e:
print('%s 금일매도종목 결과 업로드 Error : %s' %(self.sName, e))
finally:
                    del self.DailyProfitLoop # 금일매도결과 업데이트 시 QEventLoop 사용으로 로봇 저장 시 pickle 에러 발생하여 삭제시킴
del self.ConditionLoop
self.KiwoomDisConnect() # 로봇 클래스 내에서 일별종목별실현손익 데이터를 받고나서 연결 해제시킴
# 메인 화면에 반영
self.parent.RobotView()
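# Tiny helper sketch of the real-time registration string the robots above hand to
# KiwoomSetRealReg(): the codes are joined with ';' and terminated with a trailing ';'.
def build_realreg_codes(codes):
    """['005930', '000660'] -> '005930;000660;' ; an empty list -> ''."""
    return ';'.join(codes) + ';' if codes else ''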
class 화면_ConditionMonitoring(QDialog, Ui_TradeCondition):
def __init__(self, sScreenNo, kiwoom=None, parent=None): #
super(화면_ConditionMonitoring, self).__init__(parent)
# self.setAttribute(Qt.WA_DeleteOnClose) # 위젯이 닫힐때 내용 삭제하는 것으로 창이 닫힐때 정보를 저장해야되는 로봇 세팅 시에는 쓰면 에러남!!
self.setupUi(self)
self.setWindowTitle("ConditionMonitoring")
self.lineEdit_name.setText('ConditionMonitoring')
self.progressBar.setValue(0) # Progressbar 초기 셋팅
self.sScreenNo = sScreenNo
self.kiwoom = kiwoom #
self.parent = parent
self.model = PandasModel()
self.tableView.setModel(self.model)
self.columns = ['종목코드', '종목명', '조건식']
self.result = []
self.KiwoomConnect()
self.GetCondition()
# 저장된 조건 검색식 목록 읽음
def GetCondition(self):
try:
self.getConditionLoad()
self.df_condition = DataFrame()
self.idx = []
self.conName = []
for index in self.condition.keys(): # condition은 dictionary
# print(self.condition)
self.idx.append(str(index))
self.conName.append(self.condition[index])
# self.sendCondition("0156", self.condition[index], index, 1)
self.df_condition['Index'] = self.idx
self.df_condition['Name'] = self.conName
self.df_condition['Table'] = ">> 조건식 " + self.df_condition['Index'] + " : " + self.df_condition['Name']
self.df_condition['Index'] = self.df_condition['Index'].astype(int)
self.df_condition = self.df_condition.sort_values(by='Index').reset_index(drop=True) # 추가
print(self.df_condition) # 추가
self.comboBox_condition.clear()
self.comboBox_condition.addItems(self.df_condition['Table'].values)
except Exception as e:
print("GetCondition_Error")
print(e)
# 조건검색 해당 종목 요청 메서드
def sendCondition(self, screenNo, conditionName, conditionIndex, isRealTime):
        isRequest = self.kiwoom.dynamicCall("SendCondition(QString, QString, int, int)",
                                            screenNo, conditionName, conditionIndex, isRealTime)
# OnReceiveTrCondition() 이벤트 메서드에서 루프 종료
self.conditionLoop = QEventLoop()
self.conditionLoop.exec_()
# 조건 검색 관련 ActiveX와 On시리즈와 붙임(콜백)
def KiwoomConnect(self):
self.kiwoom.OnReceiveTrCondition[str, str, str, int, int].connect(self.OnReceiveTrCondition)
self.kiwoom.OnReceiveConditionVer[int, str].connect(self.OnReceiveConditionVer)
self.kiwoom.OnReceiveRealCondition[str, str, str, str].connect(self.OnReceiveRealCondition)
# 조건 검색 관련 ActiveX와 On시리즈 연결 해제
def KiwoomDisConnect(self):
self.kiwoom.OnReceiveTrCondition[str, str, str, int, int].disconnect(self.OnReceiveTrCondition)
self.kiwoom.OnReceiveConditionVer[int, str].disconnect(self.OnReceiveConditionVer)
self.kiwoom.OnReceiveRealCondition[str, str, str, str].disconnect(self.OnReceiveRealCondition)
# 조건식 목록 요청 메서드
def getConditionLoad(self):
self.kiwoom.dynamicCall("GetConditionLoad()")
# OnReceiveConditionVer() 이벤트 메서드에서 루프 종료
self.conditionLoop = QEventLoop()
self.conditionLoop.exec_()
# 조건식 목록 획득 메서드(조건식 목록을 딕셔너리로 리턴)
def getConditionNameList(self):
data = self.kiwoom.dynamicCall("GetConditionNameList()")
conditionList = data.split(';')
del conditionList[-1]
conditionDictionary = {}
for condition in conditionList:
key, value = condition.split('^')
conditionDictionary[int(key)] = value
return conditionDictionary
# 조건검색 세부 종목 조회 요청시 발생되는 이벤트
def OnReceiveTrCondition(self, sScrNo, strCodeList, strConditionName, nIndex, nNext):
logger.debug('main:OnReceiveTrCondition [%s] [%s] [%s] [%s] [%s]' % (
sScrNo, strCodeList, strConditionName, nIndex, nNext))
try:
if strCodeList == "":
return
self.codeList = strCodeList.split(';')
del self.codeList[-1]
# print("종목개수: ", len(self.codeList))
# print(self.codeList)
for code in self.codeList:
row = []
# code.append(c)
row.append(code)
n = self.kiwoom.dynamicCall("GetMasterCodeName(QString)", code)
# now = abs(int(self.kiwoom.dynamicCall("GetCommRealData(QString, int)", code, 10)))
# name.append(n)
row.append(n)
row.append(strConditionName)
self.result.append(row)
# self.df_con['종목코드'] = code
# self.df_con['종목명'] = name
# print(self.df_con)
            self.data = DataFrame(data=self.result, columns=self.columns)
            self.model.update(self.data)
            for i in range(len(self.columns)):
                self.tableView.resizeColumnToContents(i)
        except Exception as e:
            print("OnReceiveTrCondition Error : ", e)
        finally:
            self.conditionLoop.exit()
from slytherin.hash import hash_object
from slytherin.functions import get_function_arguments
from ravenclaw.preprocessing import Polynomial, Normalizer
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestClassifier
from pandas import DataFrame, concat
from random import randint, random, choice
from func_timeout import func_timeout, FunctionTimedOut
import matplotlib.pyplot as plt
from numpy import where
from .create_arguments import create_arguments
from .Measurement import Measurement
from ..time import get_elapsed
from ..time import get_now
from ..progress import ProgressBar
# Estimator gets a single argument function and estimates the time it takes to run the function based on the argument
# the function should accept an int larger than 0
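# Usage sketch (hypothetical function and argument values; Estimator is defined just below):
#
#   def work(n):
#       return sum(i * i for i in range(n))
#
#   est = Estimator(function=work, unit='s')
#   for n in (10_000, 100_000, 1_000_000):
#       est.measure(n=n)      # each call records a Measurement keyed by its arguments
#   print(est.data)           # DataFrame of arguments, elapsed times and error flags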
class Estimator:
def __init__(self, function, args=None, unit='s', polynomial_degree=2, timeout=20):
self._function = function
self._function_arguments = get_function_arguments(function=function)
self._unit = unit
self._measurements = {}
self._polynomial_degree = polynomial_degree
self._model = None
self._normalizer = None
self._error_model = None
self._max_x = None
self._args = args
self._timeout = timeout
self._num_errors = 0
self._num_regular_runs = 0
self._num_timeouts = 0
self._x_data_columns = {}
self._error_x_data_columns = {}
@staticmethod
def get_key(**kwargs):
return hash_object(kwargs)
def check_arguments(self, kwargs, method_name):
unknown_arguments = [key for key in kwargs.keys() if key not in self._function_arguments]
missing_arguments = [key for key in self._function_arguments if key not in kwargs]
if len(missing_arguments) == 1:
return f'{method_name}() is missing the argument "{missing_arguments[0]}"'
elif len(missing_arguments) > 1:
arguments_string = '", "'.join(missing_arguments)
return f'{method_name}() is missing arguments "{arguments_string}"'
if len(unknown_arguments) == 0:
return False
elif len(unknown_arguments) == 1:
return f'{method_name}() got an unexpected argument "{unknown_arguments[0]}"'
else:
arguments_string = '", "'.join(unknown_arguments)
return f'{method_name}() got unexpected arguments "{arguments_string}"'
def get_arguments(self, arguments, **kwargs):
if arguments is None and len(kwargs) == 0:
raise ValueError('either arguments should be provided or kwargs!')
elif arguments is not None and len(kwargs) > 0:
raise ValueError('only one of arguments and kwargs should be provided!')
elif arguments is None:
arguments = kwargs
return arguments
def measure(self, timeout=None, arguments=None, **kwargs):
"""
:type timeout: int or float
:type arguments: NoneType or dict
:rtype: Measurement
"""
kwargs = self.get_arguments(arguments=arguments, **kwargs)
if self.check_arguments(kwargs=kwargs, method_name='measure'):
raise TypeError(self.check_arguments(kwargs=kwargs, method_name='measure'))
key = self.get_key(**kwargs)
if key in self._measurements:
return self._measurements[key]
else:
start_time = get_now()
if not timeout:
try:
result = self._function(**kwargs)
timeout_error = False
other_error = False
self._num_regular_runs += 1
except Exception as e:
result = None
timeout_error = False
other_error = True
self._num_errors += 1
else:
def run_function():
return self._function(**kwargs)
try:
result = func_timeout(timeout, run_function)
timeout_error = False
other_error = False
self._num_regular_runs += 1
except FunctionTimedOut:
result = None
timeout_error = True
other_error = False
self._num_timeouts += 1
except Exception as e:
result = None
timeout_error = False
other_error = True
self._num_errors += 1
elapsed = get_elapsed(start=start_time, unit=self._unit)
measurement = Measurement(
x=kwargs, result=result, elapsed=elapsed, timeout_error=timeout_error, other_error=other_error
)
self._measurements[key] = measurement
self._model = None
self._normalizer = None
self._error_model = None
if self._max_x is None:
self._max_x = kwargs
else:
self._max_x = {key: max(value, kwargs[key]) for key, value in self._max_x.items()}
return measurement
@property
def data(self):
"""
:rtype: DataFrame
"""
return DataFrame.from_records(
[measurement.dictionary for measurement in self.measurements]
)
@property
def measurements(self):
"""
:rtype: list[Measurement]
"""
measurements = sorted(list(self._measurements.values()))
# set the weights
min_elapsed = measurements[0].elapsed_time
for measurement in measurements:
if min_elapsed > 0:
measurement._weight = 1 + (measurement.elapsed_time / min_elapsed) ** 0.5
else:
measurement._weight = 1
return measurements
@property
def num_measurements(self):
"""
:rtype: int
"""
return len(self._measurements)
@property
def num_errors(self):
return self._num_errors
@property
def num_regular_runs(self):
return self._num_regular_runs
@property
def num_timeouts(self):
return self._num_timeouts
@property
def num_runs(self):
return self.num_errors + self.num_regular_runs
def get_x_data(self, x, degree=None):
"""
:type x: DataFrame or dict or list
:type degree: NoneType or int
:rtype: DataFrame
"""
if isinstance(x, dict):
if all([isinstance(value, (list, tuple)) for value in x.values()]):
data = DataFrame(x)
else:
data = DataFrame.from_records([x])
elif isinstance(x, list) and all([isinstance(element, dict) for element in x]):
data = DataFrame.from_records(x)  # api: pandas.DataFrame.from_records
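# Illustrative sketch, separate from the Estimator class above: the two input shapes
# that get_x_data handles. A dict of lists is treated as columns, while a list of
# dicts goes through DataFrame.from_records; both produce the same frame here.
from pandas import DataFrame
as_dict_of_lists = DataFrame({"n": [1, 2], "m": [3, 4]})
as_list_of_dicts = DataFrame.from_records([{"n": 1, "m": 3}, {"n": 2, "m": 4}])
assert as_dict_of_lists.equals(as_list_of_dicts)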
import pandas as pd
from pandas import Period, offsets
from pandas.util import testing as tm
from pandas.tseries.frequencies import _period_code_map
class TestFreqConversion(tm.TestCase):
"Test frequency conversion of date objects"
def test_asfreq_corner(self):
val = Period(freq='A', year=2007)
result1 = val.asfreq('5t')
result2 = val.asfreq('t')
expected = Period('2007-12-31 23:59', freq='t')
self.assertEqual(result1.ordinal, expected.ordinal)
self.assertEqual(result1.freqstr, '5T')
self.assertEqual(result2.ordinal, expected.ordinal)
self.assertEqual(result2.freqstr, 'T')
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq='A', year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq='Q', year=2007, quarter=1)
ival_A_to_Q_end = Period(freq='Q', year=2007, quarter=4)
ival_A_to_M_start = Period(freq='M', year=2007, month=1)
ival_A_to_M_end = Period(freq='M', year=2007, month=12)
ival_A_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq='W', year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq='B', year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq='D', year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_A_to_H_end = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_A_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_A_to_T_end = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_A_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_A_to_S_end = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_AJAN_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq='D', year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq='D', year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq='D', year=2006, month=12, day=1)
self.assertEqual(ival_A.asfreq('Q', 'S'), ival_A_to_Q_start)
self.assertEqual(ival_A.asfreq('Q', 'e'), ival_A_to_Q_end)
self.assertEqual(ival_A.asfreq('M', 's'), ival_A_to_M_start)
self.assertEqual(ival_A.asfreq('M', 'E'), ival_A_to_M_end)
self.assertEqual(ival_A.asfreq('W', 'S'), ival_A_to_W_start)
self.assertEqual(ival_A.asfreq('W', 'E'), ival_A_to_W_end)
self.assertEqual(ival_A.asfreq('B', 'S'), ival_A_to_B_start)
self.assertEqual(ival_A.asfreq('B', 'E'), ival_A_to_B_end)
self.assertEqual(ival_A.asfreq('D', 'S'), ival_A_to_D_start)
self.assertEqual(ival_A.asfreq('D', 'E'), ival_A_to_D_end)
self.assertEqual(ival_A.asfreq('H', 'S'), ival_A_to_H_start)
self.assertEqual(ival_A.asfreq('H', 'E'), ival_A_to_H_end)
self.assertEqual(ival_A.asfreq('min', 'S'), ival_A_to_T_start)
self.assertEqual(ival_A.asfreq('min', 'E'), ival_A_to_T_end)
self.assertEqual(ival_A.asfreq('T', 'S'), ival_A_to_T_start)
self.assertEqual(ival_A.asfreq('T', 'E'), ival_A_to_T_end)
self.assertEqual(ival_A.asfreq('S', 'S'), ival_A_to_S_start)
self.assertEqual(ival_A.asfreq('S', 'E'), ival_A_to_S_end)
self.assertEqual(ival_AJAN.asfreq('D', 'S'), ival_AJAN_to_D_start)
self.assertEqual(ival_AJAN.asfreq('D', 'E'), ival_AJAN_to_D_end)
self.assertEqual(ival_AJUN.asfreq('D', 'S'), ival_AJUN_to_D_start)
self.assertEqual(ival_AJUN.asfreq('D', 'E'), ival_AJUN_to_D_end)
self.assertEqual(ival_ANOV.asfreq('D', 'S'), ival_ANOV_to_D_start)
self.assertEqual(ival_ANOV.asfreq('D', 'E'), ival_ANOV_to_D_end)
self.assertEqual(ival_A.asfreq('A'), ival_A)
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq='Q', year=2007, quarter=1)
ival_Q_end_of_year = Period(freq='Q', year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq='A', year=2007)
ival_Q_to_M_start = Period(freq='M', year=2007, month=1)
ival_Q_to_M_end = Period(freq='M', year=2007, month=3)
ival_Q_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq='W', year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq='B', year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq='D', year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_Q_to_H_end = Period(freq='H', year=2007, month=3, day=31, hour=23)
ival_Q_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_Q_to_T_end = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_Q_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_Q_to_S_end = Period(freq='S', year=2007, month=3, day=31, hour=23,
minute=59, second=59)
ival_QEJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq='D', year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq='D', year=2006, month=9, day=30)
self.assertEqual(ival_Q.asfreq('A'), ival_Q_to_A)
self.assertEqual(ival_Q_end_of_year.asfreq('A'), ival_Q_to_A)
self.assertEqual(ival_Q.asfreq('M', 'S'), ival_Q_to_M_start)
self.assertEqual(ival_Q.asfreq('M', 'E'), ival_Q_to_M_end)
self.assertEqual(ival_Q.asfreq('W', 'S'), ival_Q_to_W_start)
self.assertEqual(ival_Q.asfreq('W', 'E'), ival_Q_to_W_end)
self.assertEqual(ival_Q.asfreq('B', 'S'), ival_Q_to_B_start)
self.assertEqual(ival_Q.asfreq('B', 'E'), ival_Q_to_B_end)
self.assertEqual(ival_Q.asfreq('D', 'S'), ival_Q_to_D_start)
self.assertEqual(ival_Q.asfreq('D', 'E'), ival_Q_to_D_end)
self.assertEqual(ival_Q.asfreq('H', 'S'), ival_Q_to_H_start)
self.assertEqual(ival_Q.asfreq('H', 'E'), ival_Q_to_H_end)
self.assertEqual(ival_Q.asfreq('Min', 'S'), ival_Q_to_T_start)
self.assertEqual(ival_Q.asfreq('Min', 'E'), ival_Q_to_T_end)
self.assertEqual(ival_Q.asfreq('S', 'S'), ival_Q_to_S_start)
self.assertEqual(ival_Q.asfreq('S', 'E'), ival_Q_to_S_end)
self.assertEqual(ival_QEJAN.asfreq('D', 'S'), ival_QEJAN_to_D_start)
self.assertEqual(ival_QEJAN.asfreq('D', 'E'), ival_QEJAN_to_D_end)
self.assertEqual(ival_QEJUN.asfreq('D', 'S'), ival_QEJUN_to_D_start)
self.assertEqual(ival_QEJUN.asfreq('D', 'E'), ival_QEJUN_to_D_end)
self.assertEqual(ival_Q.asfreq('Q'), ival_Q)
def test_conv_monthly(self):
# frequency conversion tests: from Monthly Frequency
ival_M = Period(freq='M', year=2007, month=1)
ival_M_end_of_year = Period(freq='M', year=2007, month=12)
ival_M_end_of_quarter = Period(freq='M', year=2007, month=3)
ival_M_to_A = Period(freq='A', year=2007)
ival_M_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_M_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_M_to_W_end = Period(freq='W', year=2007, month=1, day=31)
ival_M_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_M_to_B_end = Period(freq='B', year=2007, month=1, day=31)
ival_M_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_M_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_M_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_M_to_H_end = Period(freq='H', year=2007, month=1, day=31, hour=23)
ival_M_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_M_to_T_end = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_M_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_M_to_S_end = Period(freq='S', year=2007, month=1, day=31, hour=23,
minute=59, second=59)
self.assertEqual(ival_M.asfreq('A'), ival_M_to_A)
self.assertEqual(ival_M_end_of_year.asfreq('A'), ival_M_to_A)
self.assertEqual(ival_M.asfreq('Q'), ival_M_to_Q)
self.assertEqual(ival_M_end_of_quarter.asfreq('Q'), ival_M_to_Q)
self.assertEqual(ival_M.asfreq('W', 'S'), ival_M_to_W_start)
self.assertEqual(ival_M.asfreq('W', 'E'), ival_M_to_W_end)
self.assertEqual(ival_M.asfreq('B', 'S'), ival_M_to_B_start)
self.assertEqual(ival_M.asfreq('B', 'E'), ival_M_to_B_end)
self.assertEqual(ival_M.asfreq('D', 'S'), ival_M_to_D_start)
self.assertEqual(ival_M.asfreq('D', 'E'), ival_M_to_D_end)
self.assertEqual(ival_M.asfreq('H', 'S'), ival_M_to_H_start)
self.assertEqual(ival_M.asfreq('H', 'E'), ival_M_to_H_end)
self.assertEqual(ival_M.asfreq('Min', 'S'), ival_M_to_T_start)
self.assertEqual(ival_M.asfreq('Min', 'E'), ival_M_to_T_end)
self.assertEqual(ival_M.asfreq('S', 'S'), ival_M_to_S_start)
self.assertEqual(ival_M.asfreq('S', 'E'), ival_M_to_S_end)
self.assertEqual(ival_M.asfreq('M'), ival_M)
def test_conv_weekly(self):
# frequency conversion tests: from Weekly Frequency
ival_W = Period(freq='W', year=2007, month=1, day=1)
ival_WSUN = Period(freq='W', year=2007, month=1, day=7)
ival_WSAT = Period(freq='W-SAT', year=2007, month=1, day=6)
ival_WFRI = Period(freq='W-FRI', year=2007, month=1, day=5)
ival_WTHU = Period(freq='W-THU', year=2007, month=1, day=4)
ival_WWED = Period(freq='W-WED', year=2007, month=1, day=3)
ival_WTUE = Period(freq='W-TUE', year=2007, month=1, day=2)
ival_WMON = Period(freq='W-MON', year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_WSUN_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_WSAT_to_D_start = Period(freq='D', year=2006, month=12, day=31)
ival_WSAT_to_D_end = Period(freq='D', year=2007, month=1, day=6)
ival_WFRI_to_D_start = Period(freq='D', year=2006, month=12, day=30)
ival_WFRI_to_D_end = Period(freq='D', year=2007, month=1, day=5)
ival_WTHU_to_D_start = Period(freq='D', year=2006, month=12, day=29)
ival_WTHU_to_D_end = Period(freq='D', year=2007, month=1, day=4)
ival_WWED_to_D_start = Period(freq='D', year=2006, month=12, day=28)  # api: pandas.Period
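# Illustrative sketch, separate from the test module above: Period.asfreq converts
# between frequencies using the 'S'tart or 'E'nd convention, which is what the
# assertions above exercise on many frequency pairs.
import pandas as pd
p = pd.Period(freq='A', year=2007)
print(p.asfreq('M', 'S'))  # Period('2007-01', 'M')
print(p.asfreq('M', 'E'))  # Period('2007-12', 'M')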
"""This creates Pandas dataframes containing predictions.
"""
__author__ = '<NAME>'
from typing import Callable, List, Iterable, Any
from dataclasses import dataclass, field
import logging
import sys
import itertools as it
from pathlib import Path
from frozendict import frozendict
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from zensols.persist import persisted
from zensols.deeplearn.vectorize import (
CategoryEncodableFeatureVectorizer,
FeatureVectorizerManagerSet,
)
from zensols.deeplearn.batch import Batch, BatchStash, DataPoint
from . import (
ModelResultError, ModelResult, EpochResult, ClassificationMetrics
)
logger = logging.getLogger(__name__)
@dataclass
class PredictionsDataFrameFactory(object):
"""Create a Pandas data frame containing results from a result as output from a
``ModelExecutor``. The data frame contains the feature IDs, labels,
predictions mapped back to their original value from the feature data item.
Currently only classification models are supported.
"""
METRIC_DESCRIPTIONS = frozendict(
{'wF1': 'weighted F1',
'wP': 'weighted precision',
'wR': 'weighted recall',
'mF1': 'micro F1',
'mP': 'micro precision',
'mR': 'micro recall',
'MF1': 'macro F1',
'MP': 'macro precision',
'MR': 'macro recall',
'correct': 'the number of correct classifications',
'count': 'the number of data points in the test set',
'acc': 'accuracy',
})
"""Dictionary of performance metrics column names to human readable
descriptions.
"""
ID_COL = 'id'
"""The data point ID in the generated dataframe in :obj:`dataframe` and
:obj:`metrics_dataframe`.
"""
LABEL_COL = 'label'
"""The gold label column in the generated dataframe in :obj:`dataframe` and
:obj:`metrics_dataframe`.
"""
PREDICTION_COL = 'pred'
"""The prediction column in the generated dataframe in :obj:`dataframe` and
:obj:`metrics_dataframe`.
"""
CORRECT_COL = 'correct'
"""The correct/incorrect indication column in the generated dataframe in
:obj:`dataframe` and :obj:`metrics_dataframe`.
"""
METRICS_DF_WEIGHTED_COLUMNS = tuple('wF1 wP wR'.split())
"""Weighed performance metrics columns."""
METRICS_DF_MICRO_COLUMNS = tuple('mF1 mP mR'.split())
"""Micro performance metrics columns."""
METRICS_DF_MACRO_COLUMNS = tuple('MF1 MP MR'.split())
"""Macro performance metrics columns."""
METRICS_DF_COLUMNS = tuple(('label wF1 wP wR mF1 mP mR MF1 MP MR ' +
'correct acc count').split())
"""
:see: :obj:`metrics_dataframe`
"""
source: Path = field()
"""The source file from where the results were unpickled."""
result: ModelResult = field()
"""The epoch containing the results."""
stash: BatchStash = field()
"""The batch stash used to generate the results from the
:class:`~zensols.deeplearn.model.ModelExecutor`. This is used to get the
vectorizer to reverse map the labels.
"""
column_names: List[str] = field(default=None)
"""The list of string column names for each data item the list returned from
``data_point_transform`` to be added to the results for each
label/prediction
"""
data_point_transform: Callable[[DataPoint], tuple] = field(default=None)
"""A function that returns a tuple, each with an element respective of
``column_names`` to be added to the results for each label/prediction; if
``None`` (the default), ``str`` used (see the `Iris Jupyter Notebook
<https://github.com/plandes/deeplearn/blob/master/notebook/iris.ipynb>`_
example)
"""
batch_limit: int = sys.maxsize
"""The max number of batche of results to output."""
epoch_result: EpochResult = field(default=None)
"""The epoch containing the results. If none given, take it from the test
results.
"""
label_vectorizer_name: str = field(default=None)
"""The name of the vectorizer that encodes the labels, which is used to reverse
map from integers to their original string nominal values.
"""
def __post_init__(self):
if self.column_names is None:
self.column_names = ('data',)
if self.data_point_transform is None:
self.data_point_transform = lambda dp: (str(dp),)
if self.epoch_result is None:
self.epoch_result = self.result.test.results[0]
@property
def name(self) -> str:
"""The name of the results taken from :class:`.ModelResult`."""
return self.result.name
def _transform_dataframe(self, batch: Batch, labs: List[str],
preds: List[str]):
transform: Callable = self.data_point_transform
rows = []
for dp, lab, pred in zip(batch.data_points, labs, preds):
assert dp.label == lab
row = [dp.id, lab, pred, lab == pred]
row.extend(transform(dp))
rows.append(row)
cols = [self.ID_COL, self.LABEL_COL, self.PREDICTION_COL,
self.CORRECT_COL]
cols = cols + list(self.column_names)
return pd.DataFrame(rows, columns=cols)
def _calc_len(self, batch: Batch) -> int:
return len(batch)
def _narrow_encoder(self, batch: Batch) -> LabelEncoder:
vec: CategoryEncodableFeatureVectorizer = None
if self.label_vectorizer_name is None:
vec = batch.get_label_feature_vectorizer()
while True:
if not isinstance(vec, CategoryEncodableFeatureVectorizer) \
and hasattr(vec, 'delegate'):
vec = vec.delegate
else:
break
else:
vms: FeatureVectorizerManagerSet = \
batch.batch_stash.vectorizer_manager_set
vec = vms.get_vectorizer(self.label_vectorizer_name)
if not isinstance(vec, CategoryEncodableFeatureVectorizer):
raise ModelResultError(
'Expecting a category feature vectorizer but got: ' +
f'{vec} ({vec.name if vec else "none"})')
return vec.label_encoder
def _batch_dataframe(self, inv_trans: bool) -> Iterable[pd.DataFrame]:
"""Return a data from for each batch.
"""
epoch_labs: List[np.ndarray] = self.epoch_result.labels
epoch_preds: List[np.ndarray] = self.epoch_result.predictions
start = 0
for bid in it.islice(self.epoch_result.batch_ids, self.batch_limit):
batch: Batch = self.stash[bid]
end = start + self._calc_len(batch)
preds: List[int] = epoch_preds[start:end]
labs: List[int] = epoch_labs[start:end]
if inv_trans:
le: LabelEncoder = self._narrow_encoder(batch)
inv_trans: Callable = le.inverse_transform
preds: List[str] = inv_trans(preds)
labs: List[str] = inv_trans(labs)
df = self._transform_dataframe(batch, labs, preds)
df['batch_id'] = bid
assert len(df) == len(labs)
start = end
yield df
def _create_dataframe(self, inv_trans: bool) -> pd.DataFrame:
return pd.concat(self._batch_dataframe(inv_trans), ignore_index=True)
@property
@persisted('_dataframe')
def dataframe(self) -> pd.DataFrame:
"""The predictions and labels as a dataframe. The first columns are generated
from ``data_point_tranform``, and the remaining columns are:
- id: the ID of the feature (not batch) data item
- label: the label given by the feature data item
- pred: the prediction
- correct: whether or not the prediction was correct
"""
return self._create_dataframe(True)
def _to_metric_row(self, lab: str, mets: ClassificationMetrics) -> \
List[Any]:
return [lab, mets.weighted.f1, mets.weighted.precision,
mets.weighted.recall,
mets.micro.f1, mets.micro.precision, mets.micro.recall,
mets.macro.f1, mets.macro.precision, mets.macro.recall,
mets.n_correct, mets.accuracy, mets.n_outcomes]
def _add_metric_row(self, le: LabelEncoder, df: pd.DataFrame, ann_id: str,
rows: List[Any]):
lab: str = le.inverse_transform([ann_id])[0]
data = df[self.LABEL_COL], df[self.PREDICTION_COL]
mets = ClassificationMetrics(*data, len(data[0]))
row = self._to_metric_row(lab, mets)
rows.append(row)
def metrics_to_series(self, lab: str, mets: ClassificationMetrics) -> \
pd.Series:
"""Create a single row dataframe from classification metrics."""
row = self._to_metric_row(lab, mets)
return pd.Series(row, index=self.METRICS_DF_COLUMNS)
@property
def metrics_dataframe(self) -> pd.DataFrame:
"""Performance metrics by comparing the gold label to the predictions.
"""
rows: List[Any] = []
df = self._create_dataframe(False)
dfg = df.groupby(self.LABEL_COL).agg({self.LABEL_COL: 'count'}).\
rename(columns={self.LABEL_COL: 'count'})
bids = self.epoch_result.batch_ids
batch: Batch = self.stash[bids[0]]
le: LabelEncoder = self._narrow_encoder(batch)
for ann_id, dfg in df.groupby(self.LABEL_COL):
try:
self._add_metric_row(le, dfg, ann_id, rows)
except ValueError as e:
logger.error(f'Could not create metrics for {ann_id}: {e}')
dfr = pd.DataFrame(rows, columns=self.METRICS_DF_COLUMNS)
dfr = dfr.sort_values(self.LABEL_COL).reset_index(drop=True)
return dfr
@property
def majority_label_metrics(self) -> ClassificationMetrics:
"""Compute metrics of the majority label of the test dataset.
"""
df: pd.DataFrame = self.dataframe
le = LabelEncoder()
gold: np.ndarray = le.fit_transform(df[self.ID_COL].to_list())
max_id: str = df.groupby(self.ID_COL)[self.ID_COL].agg('count').idxmax()
majlab: np.ndarray = np.repeat(le.transform([max_id])[0], gold.shape[0])
return ClassificationMetrics(gold, majlab, gold.shape[0])
@dataclass
class SequencePredictionsDataFrameFactory(PredictionsDataFrameFactory):
"""Like the super class but create predictions for sequence based models.
:see: :class:`~zensols.deeplearn.model.sequence.SequenceNetworkModule`
"""
def _calc_len(self, batch: Batch) -> int:
return sum(map(len, batch.data_points))
def _transform_dataframe(self, batch: Batch, labs: List[str],
preds: List[str]):
dfs: List[pd.DataFrame] = []
start: int = 0
transform: Callable = self.data_point_transform
for dp, lab, pred in zip(batch.data_points, labs, preds):
end = start + len(dp)
df = pd.DataFrame({
self.ID_COL: dp.id,
self.LABEL_COL: labs[start:end],
self.PREDICTION_COL: preds[start:end]})
df[list(self.column_names)] = transform(dp)
dfs.append(df)
start = end
return pd.concat(dfs)  # api: pandas.concat
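# Illustrative sketch, separate from the factory classes above: concatenating a list
# of per-item frames as _transform_dataframe does; ignore_index yields one continuous
# RangeIndex instead of repeating each part's own index.
import pandas as pd
parts = [pd.DataFrame({"label": ["a", "b"]}), pd.DataFrame({"label": ["c"]})]
stacked = pd.concat(parts)                  # index: 0, 1, 0
flat = pd.concat(parts, ignore_index=True)  # index: 0, 1, 2
print(stacked.index.tolist(), flat.index.tolist())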
#!/usr/bin/env python
import os
import sys
import sqlite3
import pandas as pd
import numpy as np
from scraper import create_data_folder, read_config
from collections import OrderedDict
def main():
"""
Mainly for debugging purposes.
"""
config_file = read_config()
# Pick a file
try:
csv_name = os.listdir(config_file["downloaded_data_path"])[0]
except:
print("Could not read csv file.. Please check you've downloaded data beforehand using scraper.py.")
exit(1)
# Read the data
df = read_data(csv_name, config_file)
# Extract information
sanitized_dataframe = extract_event_information(df)
# Save extracted information
create_data_folder(config_file["extracted_data_path"])
save_dataframe(sanitized_dataframe, "test", config_file)
def save_dataframe(df, df_root_name, config_file):
"""
Handles all the saving process into SQL and CSV formats.
@Param df: dataframe to save.
@Param df_root_name: name of the file to create without the extension.
@Param config_file: Configuration file.
"""
sqlite_read_path = os.path.join(config_file["extracted_data_path"] , f"{df_root_name}.db")
csv_save_path = os.path.join(config_file["extracted_data_path"] , f"{df_root_name}.csv")
save_dataframe_to_sqlite(df, sqlite_read_path)
save_dataframe_to_csv(sqlite_read_path, csv_save_path)
def save_dataframe_to_csv(db_path, save_path):
"""
Saves the data as csv in the given path by reading the sqlite3 database.
Makes sure to merge the values with those already existing at the same
location (event, latitude, location).
@Param db_path: path to the sqlite3 database.
@Param save_path: path to the csv file to create.
"""
# Read the SQL database
db = sqlite3.connect(db_path)
db_df = pd.read_sql_query("SELECT * FROM events", db)
# Transforming columns to make them compatible with storing multiple values
db_df["event_document"] = db_df["event_document"].apply(lambda x: [x])
db_df["event_date"] = db_df["event_date"].apply(lambda x: [x])
db_df["event_importance"] = db_df["event_importance"].apply(lambda x: [x])
db_df["event_source_name"] = db_df["event_source_name"].apply(lambda x: [x])
# merge lines with identical position and event.
db_df = db_df.groupby(["event", "event_latitude", "event_longitude"], as_index=False).aggregate({'event_document':np.sum, "event_importance": np.sum, "event_date": np.sum, "event_source_name": np.sum})
# Storing the information
db_df.to_csv(save_path, mode='w', index=False)
# Closing the database connexion
db.commit()
db.close()
def read_data(csv_name, config_file, add_root_dir=True):
"""
Reads the csv file given and returns the associated dataframe.
@Param csv_name: Name of the csv file to read.
@Param config_file: Configuration file.
@Return: Dataframe containing the csv information.
"""
print("Reading the csv file...")
csv = csv_name
if add_root_dir:
data_dir = config_file["downloaded_data_path"]
csv = os.path.join(data_dir, csv_name)
pd.set_option('display.float_format', lambda x: '%.3f' % x) # Avoid scientific notation
dataframe = pd.read_csv(csv,
delimiter = "\t",
names=["ID", "event_date", "source_identifier", "source_name", "document_id", "V1Counts_10", "V2_1Counts", "V1Themes", "V2EnhancedThemes", "V1Locations", "V2EnhancedLocations", "V1Persons",
"V2EnhancedPersons", "V1organizations", "V2EnhancedOrganizations", "V1_5tone", "V2_1EnhancedDates", "V2GCam", "V2_1SharingImage", "V2_1RelatedImages", "V2_1SocialImageEmbeds", "V2_1SocialVideoEmbeds",
"V2_1Quotations", "V2_1AllNames", "V2_1Amounts", "V2_1TranslationInfo", "V2ExtrasXML"],
encoding="ISO-8859-1")
return dataframe
def extract_event_information(dataframe):
"""
Extracts the information related to the events from the dataframe and returns a transformed dataframe.
The new dataframe contains information related to the event type, its importance and position (lat, long).
@Params dataframe: represents all the information contained in the initial csv.
@Return: dataframe containing the extracted information regarding the events.
"""
print("Extracting information from the csv file...")
events_columns = ["event", "event_importance", "event_latitude", "event_longitude"]
sanitized_dataframe = pd.DataFrame(columns=events_columns)  # api: pandas.DataFrame
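# Illustrative sketch, separate from the extractor above: starting from an empty frame
# with fixed columns and filling it; collecting rows and concatenating once is usually
# cheaper than appending inside a loop. The example row values are made up.
import pandas as pd
cols = ["event", "event_importance", "event_latitude", "event_longitude"]
empty = pd.DataFrame(columns=cols)
row = pd.DataFrame([["protest", 3, 48.85, 2.35]], columns=cols)
filled = pd.concat([empty, row], ignore_index=True)
print(filled)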
from decimal import Decimal
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
class TestDataFrameUnaryOperators:
# __pos__, __neg__, __inv__
@pytest.mark.parametrize(
"df,expected",
[
(pd.DataFrame({"a": [-1, 1]}), pd.DataFrame({"a": [1, -1]})),
(pd.DataFrame({"a": [False, True]}), pd.DataFrame({"a": [True, False]})),
(
pd.DataFrame({"a": pd.Series(pd.to_timedelta([-1, 1]))}),
pd.DataFrame({"a": pd.Series(pd.to_timedelta([1, -1]))}),
),
],
)
def test_neg_numeric(self, df, expected):
tm.assert_frame_equal(-df, expected)
tm.assert_series_equal(-df["a"], expected["a"])
@pytest.mark.parametrize(
"df, expected",
[
(np.array([1, 2], dtype=object), np.array([-1, -2], dtype=object)),
([Decimal("1.0"), Decimal("2.0")], [Decimal("-1.0"), Decimal("-2.0")]),
],
)
def test_neg_object(self, df, expected):
# GH#21380
df = pd.DataFrame({"a": df})
expected = pd.DataFrame({"a": expected})
tm.assert_frame_equal(-df, expected)
tm.assert_series_equal(-df["a"], expected["a"])
@pytest.mark.parametrize(
"df",
[
pd.DataFrame({"a": ["a", "b"]}),
pd.DataFrame({"a": pd.to_datetime(["2017-01-22", "1970-01-01"])}),
],
)
def test_neg_raises(self, df):
msg = (
"bad operand type for unary -: 'str'|"
r"Unary negative expects numeric dtype, not datetime64\[ns\]"
)
with pytest.raises(TypeError, match=msg):
(-df)
with pytest.raises(TypeError, match=msg):
(-df["a"])
def test_invert(self, float_frame):
df = float_frame
tm.assert_frame_equal(-(df < 0), ~(df < 0))
def test_invert_mixed(self):
shape = (10, 5)
df = pd.concat(
[
pd.DataFrame(np.zeros(shape, dtype="bool")),
pd.DataFrame(np.zeros(shape, dtype=int)),
],
axis=1,
ignore_index=True,
)
result = ~df
expected = pd.concat(
[
pd.DataFrame(np.ones(shape, dtype="bool")),
pd.DataFrame(-np.ones(shape, dtype=int)),
],
axis=1,
ignore_index=True,
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame({"a": [-1, 1]}),
pd.DataFrame({"a": [False, True]}),
pd.DataFrame({"a": pd.Series(pd.to_timedelta([-1, 1]))}),
],
)
def test_pos_numeric(self, df):
# GH#16073
tm.assert_frame_equal(+df, df)
tm.assert_series_equal(+df["a"], df["a"])
@pytest.mark.parametrize(
"df",
[
# numpy changing behavior in the future
pytest.param(pd.DataFrame({"a": ["a", "b"]})  # api: pandas.DataFrame
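# Illustrative sketch, separate from the test file above: the unary behaviour asserted
# by the parametrized cases, shown on tiny frames.
import pandas as pd
num = pd.DataFrame({"a": [-1, 1]})
boo = pd.DataFrame({"a": [False, True]})
print(-num)  # numeric negation: 1, -1
print(~boo)  # boolean inversion: True, False
print(+num)  # unary plus returns an equal frame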
import pandas as pd
from itertools import combinations
import random
# Accepts a csv file with two columns: "Name" and "Rating". Creating the csv is outside the scope here, but a Google Form can easily generate one.
dat = pd.read_csv('form_data.csv')
courts = 2
rounds = 5
numpairs_round=2*courts
players_rate = list(dat[["Name","Rating"]].to_records(index=False))
#players_rate=[('raga', 3), ('prav', 3), ('savb', 1), ('disc', 2), ('ricd', 3), ('abs', 1), ('bane', 3), ('cotf', 3),('bulg', 1),('zilh',1),('kih',2),('kilj',3),('yek',1),('rewl',1),('ttm',2),('fgn',3),('hjko',3)]
players=[x for (x,y) in players_rate]
dtn_cnt = dict((x,0) for x in players)
# dtn=dict((x,y) for (x,y) in players_rate)
# or below
dtn={}
for (x,y) in players_rate:
dtn.setdefault(x,y)
players_per_round = courts * 4
# rounds_per_player = (rounds * players_per_round)/len(players)
# the below picks up players_per_round players from the full list of players.
# This is a fair pick-up, meaning no player sits out more than one round unless there are more than twice as many players as the courts can hold.
# This gives a good balance when there are slightly more players than required per court, e.g. 5-6 players for each court.
all_rounds = list(combinations(players,players_per_round))
ll=[]
ll.append(all_rounds[0])
all_rounds.remove(all_rounds[0])
# Greedily pick the next round so that every player who sat out the previous round gets to play in it.
for i in range(len(all_rounds)):
[x for x in all_rounds if set(set(players).difference(set(ll[-1]))).isdisjoint(set(players).difference(set(x))) and (ll.append(x) or all_rounds.remove(x))]
if all_rounds:
ll.extend(all_rounds)
# This generates all possible pairs so that each player gets a chance to partner with every other player.
# the logic makes sure once a player is chosen for a court, he is not in any other court in the same round.
# AI: could be simplified
full_fixtures=[]
for round in ll:
r_pl_f=[x for x in players if x in round]
full_pairs = list(combinations(r_pl_f,2))
r_f_p = [(a,b,(dtn[a]+dtn[b])/2) for (a,b) in full_pairs]
#r_f_p.sort(key=lambda x: x[2])
random.shuffle(r_f_p)
r_f_p_save = r_f_p.copy()
fix_court_iter=[]
while r_f_p:
p1=r_f_p.pop()
if p1 in full_fixtures:
continue
fix_court_iter.append(p1)
r_f_p=[(a,b,c) if not any(a in e or b in e for e in fix_court_iter) else 'RE' for (a,b,c) in r_f_p]
r_f_p=list(filter(lambda a: a != 'RE', r_f_p))
if len(fix_court_iter) < numpairs_round:
r_f_p = r_f_p_save.copy()
r_f_p=[(a,b,c) if not any(a in e or b in e for e in fix_court_iter) else 'RE' for (a,b,c) in r_f_p]
r_f_p=list(filter(lambda a: a != 'RE', r_f_p))
random.shuffle(r_f_p)
while r_f_p and len(fix_court_iter) < numpairs_round:
p2 = r_f_p.pop()
if p2 not in full_fixtures:
fix_court_iter.append(p2)
if len(fix_court_iter) == numpairs_round:
full_fixtures.extend(fix_court_iter)
if full_fixtures[-1] != ('--','--',0):
full_fixtures.append(('--', '--', 0))
full_fixtures.append(('--', '--', 0))
# Pairs that did not get a chance above are scheduled here if the earlier balancing could not accommodate them.
part_fix=[(a,b) for (a,b,c) in full_fixtures if a !='--']
full_pairs = list(combinations(players,2))
diff_pairs=list(set(full_pairs).difference(set(part_fix)))
dpr=[(a,b,(dtn[a]+dtn[b])/2) for (a,b) in diff_pairs]
#dpr.sort(key=lambda x: x[2])
while dpr and len(dpr) > numpairs_round-1:
fix_court_iter=[]
while dpr and len(fix_court_iter) < numpairs_round:
p1=dpr.pop()
if p1 in full_fixtures:
continue
fix_court_iter.append(p1)
dpr=[(a,b,c) if not any(a in e or b in e for e in fix_court_iter) else 'RE' for (a,b,c) in dpr]
dpr=list(filter(lambda a: a != 'RE', dpr))
print(fix_court_iter)
if len(fix_court_iter) == numpairs_round:
full_fixtures.extend(fix_court_iter)
if full_fixtures[-1] != ('--','--',0):
full_fixtures.append(('--', '--', 0))
full_fixtures.append(('--', '--', 0))
if len(fix_court_iter) < numpairs_round:
full_fixtures.append(('--', '--', 0))
full_fixtures.append(('--', '--', 0))
full_fixtures.extend(fix_court_iter)
break
part_fix=[(a,b) for (a,b,c) in full_fixtures if a !='--']
diff_pairs=list(set(full_pairs).difference(set(part_fix)))
dpr=[(a,b,(dtn[a]+dtn[b])/2) for (a,b) in diff_pairs]
dpr.sort(key=lambda x: x[2])
it=iter(full_fixtures)
final=list(zip(it,it))
# Now output (i.e. the final fixtures) goes into another csv file "full_fixtures.csv"
df=pd.DataFrame()
# df = pd.DataFrame.from_records(final,columns=['Team1','Team2'])
df.insert(0,'Team1', [a+" , "+b if a!='--' else ' --' for ((a,b,c),(d,e,f)) in final])
df.insert(1,'Team1 avg', [c if a!='--' else ' --' for ((a,b,c),(d,e,f)) in final])
df.insert(2,'Fixtures','vs',True)
df.insert(3,'Team2', [d+" , "+e if d!='--' else ' --' for ((a,b,c),(d,e,f)) in final])
df.insert(4,'Team2 avg', [f if d!='--' else ' --' for ((a,b,c),(d,e,f)) in final])
df.insert(5,'','',True)
df.insert(6,'','',True)
df.insert(7,'players', pd.Series(players)  # api: pandas.Series
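# Illustrative sketch, separate from the fixture script above: DataFrame.insert adds a
# column in place at the given position; a shorter Series is aligned on the index and
# padded with NaN, which is what happens when there are fewer players than fixtures.
import pandas as pd
fixtures = pd.DataFrame({"Team1": ["a , b", "c , d", "e , f"]})
fixtures.insert(1, "players", pd.Series(["a", "b"]))  # only two players here
print(fixtures)  # the third 'players' entry is NaN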
"""
Functions to correct and filter data matrix from LC-MS Metabolomics data.
"""
import numpy as np
import pandas as pd
from scipy.interpolate import CubicSpline, interp1d
from statsmodels.nonparametric.smoothers_lowess import lowess
from typing import List, Callable, Union, Optional
from ._names import *
def input_na(df: pd.DataFrame, classes: pd.Series, mode: str) -> pd.DataFrame:
"""
Fill missing values.
Parameters
----------
df : pd.DataFrame
classes: ps.Series
mode : {'zero', 'mean', 'min'}
Returns
-------
filled : pd.DataFrame
"""
if mode == "zero":
return df.fillna(0)
elif mode == "mean":
return (df.groupby(classes)
.apply(lambda x: x.fillna(x.mean()))
.droplevel(0))
elif mode == "min":
return (df.groupby(classes)
.apply(lambda x: x.fillna(x.min()))
.droplevel(0))
else:
msg = "mode should be `zero`, `mean` or `min`"
raise ValueError(msg)
def average_replicates(data: pd.DataFrame, sample_id: pd.Series,
classes: pd.Series,
process_classes: List[str]) -> pd.DataFrame:
"""
Group samples by id and computes the average.
Parameters
----------
data: pd.DataFrame
sample_id: pd.Series
classes: pd.Series
process_classes: list[str]
Returns
-------
pd.DataFrame
"""
include_samples = classes[classes.isin(process_classes)].index
exclude_samples = classes[~classes.isin(process_classes)].index
mapper = sample_id[include_samples].drop_duplicates()
mapper = pd.Series(data=mapper.index, index=mapper.values)
included_data = data.loc[include_samples, :]
excluded_data = data.loc[exclude_samples, :]
averaged_data = (included_data.groupby(sample_id[include_samples])
.mean())
averaged_data.index = averaged_data.index.map(mapper)
result = pd.concat((averaged_data, excluded_data)).sort_index()
return result
def correct_blanks(df: pd.DataFrame, classes: pd.Series,
corrector_classes: List[str], process_classes: List[str],
factor: float = 1.0,
mode: Union[str, Callable] = "mean",
process_blanks: bool = True) -> pd.DataFrame:
"""
Correct samples using blanks.
Parameters
----------
df : pandas.DataFrame
Data to correct.
classes : pandas.Series
Samples class labels.
corrector_classes : list[str]
Classes to be used as blanks.
process_classes : list[str]
Classes to be used as samples
process_blanks : bool
If True apply blank correction to corrector classes.
factor : float
factor used to convert low values to zero (see notes)
mode : {'mean', 'max', 'lod', 'loq'} or function
Returns
-------
corrected : pandas.DataFrame
Data with applied correction
"""
corrector = {"max": lambda x: x.max(),
"mean": lambda x: x.mean(),
"lod": lambda x: x.mean() + 3 * x.std(),
"loq": lambda x: x.mean() + 10 * x.std()}
if hasattr(mode, "__call__"):
corrector = mode
else:
corrector = corrector[mode]
samples = df[classes.isin(process_classes)]
blanks = df[classes.isin(corrector_classes)]
correction = corrector(blanks)
corrected = samples - correction
corrected[(samples - factor * correction) < 0] = 0
df[classes.isin(process_classes)] = corrected
if process_blanks:
corrected_blanks = blanks - correction
corrected_blanks[(blanks - factor * correction) < 0] = 0
df[classes.isin(corrector_classes)] = corrected_blanks
return df
def _loocv_loess(x: pd.Series, y: pd.Series, interpolator: Callable,
frac: Optional[float] = None) -> tuple:
"""
Helper function for batch_correction. Computes loess correction with LOOCV.
Parameters
----------
x: pd.Series
y: pd.Series
frac: float, optional
fraction of sample to use in LOESS correction. If None, determines the
best value using LOOCV.
interpolator = callable
interpolator function used to predict new values.
Returns
-------
corrected: pd.Series
LOESS corrected data
"""
if frac is None:
# valid frac values, from 4/N to 1, where N is the number of corrector
# samples.
frac_list = [k / x.size for k in range(4, x.size + 1)]
rms = np.inf # initial value for root mean square error
best_frac = 1
for frac in frac_list:
curr_rms = 0
for loocv_index in x.index[1:-1]:
y_temp = y.drop(loocv_index)
x_temp = x.drop(loocv_index)
y_loess = lowess(y_temp, x_temp, return_sorted=False, frac=frac)
interp = interpolator(x_temp, y_loess)
curr_rms += (y[loocv_index] - interp(x[loocv_index])) ** 2
if rms > curr_rms:
best_frac = frac
rms = curr_rms
frac = best_frac
return lowess(y, x, return_sorted=False, frac=frac)
def _generate_batches(df: pd.DataFrame, run_order: pd.Series, batch: pd.Series,
classes: pd.Series, corrector_classes: List[str],
process_classes: List[str]):
batch_order = (pd.concat((batch, run_order), axis=1)
.sort_values([_sample_batch, _sample_order]))
grouped = batch_order.groupby(_sample_batch)
for n_batch, group in grouped:
df_batch = df.loc[group.index, :]
classes_batch = classes[group.index]
process_df = df_batch.loc[classes_batch.isin(process_classes), :]
corrector_df = df_batch.loc[classes_batch.isin(corrector_classes), :]
process_order = run_order[process_df.index]
corrector_order = run_order[corrector_df.index]
batch_order = (run_order[corrector_df.index.union(process_df.index)]
.sort_values())
corrector_df = corrector_df.set_index(corrector_order).sort_index()
process_df = process_df.set_index(process_order).sort_index()
yield corrector_df, process_df, batch_order
def get_outside_bounds_index(data: Union[pd.Series, pd.DataFrame], lb: float,
ub: float) -> pd.Index:
"""
return index of columns with values outside bounds.
Parameters
----------
data: pd.Series or pd.DataFrame
lb: float
lower bound
ub: float
upper bound
Returns
-------
"""
result = ((data < lb) | (data > ub))
if isinstance(data, pd.DataFrame):
result = result.all()
if result.empty:
return pd.Index([])
else:
return result[result].index
def batch_ext(order: pd.Series, batch: pd.Series, classes: pd.Series,
class_list: List[str], ext: str) -> pd.Series:
"""
get minimum/maximum order of samples of classes in class_list. Auxiliary
function to be used with BatchChecker / FeatureCheckerBatchCorrection
Parameters
----------
order: pandas.Series
run order
batch: pandas.Series
batch number
classes: pandas.Series
sample classes
class_list: list[str]
classes to be considered
ext: {"min", "max"}
Search for the min/max order in each batch.
Returns
-------
pd.Series with the corresponding min/max order with batch as index.
"""
func = {"min": lambda x: x.min(), "max": lambda x: x.max()}
func = func[ext]
ext_order = (order
.groupby([classes, batch])
.apply(func)
.reset_index()
.groupby(classes.name)
.filter(lambda x: x.name in class_list)
.groupby(batch.name)
.apply(func)[order.name])
return ext_order
def check_qc_prevalence(data_matrix: pd.DataFrame,
batch: pd.Series, classes: pd.Series,
qc_classes: List[str], sample_classes: List[str],
threshold: float = 0,
min_qc_dr: float = 0.9) -> pd.Index:
"""
Remove features with low detection rate in the QC samples. Also check that
each feature is detected in the first and last block (this step is necessary to
interpolate the bias contribution to biological samples).
Aux function to use in the BatchCorrector Pipeline.
Parameters
----------
data_matrix: DataFrame
batch: Series
classes: Series
qc_classes: List[str]
sample_classes: List[str]
threshold: float
min_qc_dr: float
Returns
-------
index of invalid features
"""
invalid_features = pd.Index([])  # api: pandas.Index
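# Illustrative sketch, separate from the filtering module above: accumulating invalid
# feature labels in a pandas Index, starting from the empty pd.Index([]) as the
# function above does. The feature names here are made up.
import pandas as pd
invalid = pd.Index([])
invalid = invalid.union(pd.Index(["FT005", "FT017"]))  # e.g. low QC detection rate
invalid = invalid.union(pd.Index(["FT017", "FT042"]))  # e.g. missing in an edge block
print(invalid.tolist())  # duplicates collapse: ['FT005', 'FT017', 'FT042']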
import pandas as pd
import numpy as np
import math
import pickle
from scipy import stats
import scipy.io
from scipy.spatial.distance import pdist
from scipy.linalg import cholesky
from scipy.io import loadmat
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.metrics import classification_report,roc_auc_score,recall_score,precision_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.decomposition import PCA
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
from sklearn.model_selection import StratifiedKFold
from sklearn.tree import DecisionTreeClassifier
import SMOTE
import CFS
import metrices_V2 as metrices
import platform
from os import listdir
from os.path import isfile, join
from glob import glob
from pathlib import Path
import sys
import os
import copy
import traceback
from pathlib import Path
import matplotlib.pyplot as plt
import seaborn as sns
def package_vs_file(_type):
dfs = ['RQ_cross_' + _type + '_RF_HPO.pkl', 'RQ_package_' + _type + "_RF_HPO.pkl"]
final_df = pd.DataFrame()
metrics = ['precision', 'recall', 'pf', 'auc', 'pci_20','ifa']
i = 0
for metric in metrics:
data = []
for df in dfs:
file = pd.read_pickle('results/Performance/' + df)
if metric == 'ifa':
l = [np.nanmedian(sublist)/100 for sublist in list(file[metric].values())]
else:
l = [np.nanmedian(sublist) for sublist in list(file[metric].values())]
data.append(l)
data_df = pd.DataFrame(data)  # api: pandas.DataFrame
import numpy as np
from numpy.testing import assert_allclose
import pandas as pd
from arch.bootstrap.base import optimal_block_length
def test_block_length():
rs = np.random.RandomState(0)
e = rs.standard_normal(10000 + 100)
y = e
for i in range(1, len(e)):
y[i] = 0.3 * y[i - 1] + e[i]
s = pd.Series(y[100:], name="x")
bl = optimal_block_length(s)
sb, cb = bl.loc["x"]
assert_allclose(sb, 13.635665, rtol=1e-4)
assert_allclose(cb, 15.60894, rtol=1e-4)
df = pd.DataFrame([s, s])  # api: pandas.DataFrame
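# Illustrative sketch, separate from the test above: building a DataFrame from a list
# of Series stacks them as rows, one row per Series, with the Series index as columns.
import pandas as pd
s = pd.Series([1.0, 2.0, 3.0], name="x")
wide = pd.DataFrame([s, s])
print(wide.shape)  # (2, 3): two rows labelled 'x', columns 0, 1, 2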
import pandas
from styleframe import StyleFrame, Styler
def get_all_tasks_from_excel(file: str) -> pandas.DataFrame:
dataframe = pandas.read_excel(file)  # api: pandas.read_excel
import forecastio #module to get weather data from darksky.net
import pandas as pd
import numpy as np
import json
from pandas.io.json import json_normalize
from calendar import monthrange
import os.path
import datetime
#api_key = '276d9b4ae748ec5d42ab2ababe8435cc' #apikey obtained from darksky.net
#api_key = '<KEY>' #apikey obtained from darksky.net by Yash
api_key = '<KEY>' #another apikey obtained from darksky.net by Yash
def get_met_data(start_date, numdays, api_key, lat, lng, station_id):
"Function to get weather"
#get url to retrieve weather information
date_list = [start_date + datetime.timedelta(days = x) for x in range(0, numdays)]
hist = np.arange(0, len(date_list)).tolist()
forecast = []
for n in hist:
# If this raises an error, it's probably because you exhausted the allocated calls for the
# api key currently in use. Scroll up to top, switch which key is active, or go get another.
forecast.append(forecastio.load_forecast(api_key, lat, lng, date_list[n]))
# json object
met_data = pd.DataFrame()  # api: pandas.DataFrame
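# Illustrative sketch, separate from the weather script above: growing a table from
# per-day records; collecting small frames in a list and concatenating once avoids
# repeated copies. The dates and temperatures here are made up.
import pandas as pd
met_frames = []
for day, temp in [("2020-01-01", 21.5), ("2020-01-02", 19.0)]:
    met_frames.append(pd.DataFrame({"date": [day], "temperature": [temp]}))
met_example = pd.concat(met_frames, ignore_index=True)
print(met_example)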
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
import logging
from typing import Dict, List, Type, Tuple
import kats.utils.time_series_parameter_tuning as tpt
import numpy as np
import pandas as pd
from kats.detectors import changepoint_evaluator
from kats.detectors import cusum_model
from kats.detectors.changepoint_evaluator import TuringEvaluator
from kats.detectors.detector import DetectorModel
from kats.detectors.detector_benchmark import (
decompose_params,
DetectorModelSet,
SUPPORTED_METRICS,
)
from kats.utils.time_series_parameter_tuning import TimeSeriesParameterTuning
from pymoo.factory import get_algorithm, get_crossover, get_mutation, get_sampling
from pymoo.model.problem import Problem
from pymoo.model.result import Result
from pymoo.optimize import minimize
MINIMIZE = "minimize"
MAXIMIZE = "maximize"
OPTIMIZATION_GOAL_OPTIONS = {MINIMIZE, MAXIMIZE}
class HPT_Problem(Problem):
"""
Multi-objective hyper parameter tuning problem.
You can specify the objectives that you want to optimize from the list of SUPPORTED_METRICS. For each objective you need to
provide optimization goal (minimize or maximize). For example, if you want to minimize delay and maximize F1-score you
could provide objectives_and_goals = {"f_score": "maximize", "delay": "minimize"}.
You can also provide more than two objectives if you like.
"""
def __init__(
self,
search_grid: TimeSeriesParameterTuning,
data_df: pd.DataFrame,
objectives_and_goals: Dict[str, str],
):
self._validate_objectives_and_goals(objectives_and_goals)
self.objectives_and_goals = objectives_and_goals
# Make a list so that we always calculate fitness objectives in deterministic order.
self.objectives = list(objectives_and_goals.keys())
tunable_parameters = search_grid.get_search_space().tunable_parameters
self.par_to_val = {}
for par in tunable_parameters:
self.par_to_val[par] = tunable_parameters[par].values
# Make a list of the keys (tunable parameters) so that the order is deterministic.
self.tunable_parameters = list(tunable_parameters.keys())
self.lower_limits, self.upper_limits = self.get_upper_and_lower_limits()
self.n_vars = len(tunable_parameters)
self.all_solutions = {}
self.data_df = data_df
super().__init__(
n_var=self.n_vars,
n_obj=len(self.objectives),
n_constr=0, # Currently no constraints for the fitness objectives.
xl=np.array(self.lower_limits),
xu=np.array(self.upper_limits),
# We solve an integer problem where each integer maps to a hyper parameter value.
type_var=int,
elementwise_evaluation=True,
)
self.turing_model = changepoint_evaluator.TuringEvaluator(
is_detector_model=True, detector=cusum_model.CUSUMDetectorModel
)
def _validate_objectives_and_goals(self, objectives_and_goals: Dict[str, str]):
self._check_if_all_valid(
values_to_check=list(objectives_and_goals.keys()),
expected_values=SUPPORTED_METRICS,
explanation="Objectives",
)
self._check_if_all_valid(
values_to_check=list(objectives_and_goals.values()),
expected_values=OPTIMIZATION_GOAL_OPTIONS,
explanation="Optimization goal",
)
def _check_if_all_valid(
self, values_to_check: List[str], expected_values: set, explanation: str
):
if not all(
[value_to_check in expected_values for value_to_check in values_to_check]
):
raise Exception(
f"{explanation} must be listed in {expected_values}. You provided {values_to_check}."
)
def _evaluate(self, x: np.ndarray, out: np.ndarray, *args, **kwargs):
out["F"] = self.get_fitness(x)
def get_fitness(self, x: np.ndarray):
pars = self.decode_solution(x)
params_model, threshold_low, threshold_high = decompose_params(pars)
results = self.turing_model.evaluate(
data=self.data_df,
model_params=params_model,
threshold_low=threshold_low,
threshold_high=threshold_high,
)
self.all_solutions[self.get_unique_id_for_solution(x)] = results
fitness = [0] * self.n_obj
averaged_results = np.mean(results)
for i in range(self.n_obj):
# For maximization problem, multiply the result by -1.
fitness[i] = (
averaged_results[self.objectives[i]]
if self.objectives_and_goals[self.objectives[i]] == MINIMIZE
else -averaged_results[self.objectives[i]]
)
return fitness
def get_upper_and_lower_limits(self):
upper_limits = []
"""
We assign the limits in the order of the tunable_parameters list. The order of that list will not
change, which is important so that we can match the solution vector back to the tunable parameters.
"""
for key in self.par_to_val:
upper_limits.append(len(self.par_to_val[key]) - 1)
# All tunable_parameters should have at least one option.
lower_limits = [0] * len(self.par_to_val)
return lower_limits, upper_limits
def decode_solution(self, x: np.ndarray) -> Dict[str, float]:
pars = {}
i = 0
for key in self.tunable_parameters:
pars[key] = self.par_to_val[key][x[i]]
i += 1
return pars
def get_unique_id_for_solution(self, x: np.ndarray) -> str:
return ",".join([str(x_component) for x_component in x])
class MultiObjectiveModelOptimizer(DetectorModelSet):
def __init__(
self,
model_name: str,
model: Type[DetectorModel],
parameters_space: List[Dict],
data_df: pd.DataFrame,
n_gen: int,
pop_size: int,
objectives_and_goals: Dict[str, str],
):
super().__init__(model_name, model)
self.model_name = model_name
self.model = model
self.result = {}
self.solutions = pd.DataFrame()
self.parameters = parameters_space
self.n_gen = n_gen
self.pop_size = pop_size
self.hpt_problem = HPT_Problem(
search_grid=tpt.SearchMethodFactory.create_search_method(
parameters=self.parameters
),
data_df=data_df,
objectives_and_goals=objectives_and_goals,
)
# This overrides the base method
def evaluate(
self, data_df: pd.DataFrame
) -> Tuple[Dict[str, pd.DataFrame], TuringEvaluator]:
logging.info("Creating multi-objective optimization problem.")
method = get_algorithm(
"nsga2",
pop_size=self.pop_size,
crossover=get_crossover(
"int_sbx",
prob=1.0,
eta=3.0,
prob_per_variable=(1 / self.hpt_problem.n_var),
),
mutation=get_mutation("int_pm", eta=3.0),
eliminate_duplicates=True,
sampling=get_sampling("int_random"),
)
logging.info(
"Running multi-objective optimization with pop_size {self.pop_size} and n_gen {self.n_gen}."
)
res = minimize(
self.hpt_problem,
method,
("n_gen", self.n_gen),
verbose=True,
seed=1,
save_history=False,
)
self.get_results(res)
self.get_hyper_parameters_and_results_for_non_dominated_solutions(res)
logging.info("Multi-objective optimization completed.")
return self.result, self.hpt_problem.turing_model
def get_params(self):
return self.solutions
def get_results(self, res: Result):
self.result = {}
for id in range(len(res.X)):
self.result[f"moo_solution_{id}"] = self.hpt_problem.all_solutions[
self.hpt_problem.get_unique_id_for_solution(res.X[id])
]
def get_hyper_parameters_and_results_for_non_dominated_solutions(self, res: Result):
solutions = []
for id in range(len(res.X)):
decoded_solution = self.hpt_problem.decode_solution(res.X[id])
uniq_id = self.hpt_problem.get_unique_id_for_solution(res.X[id])
curr_solution_mean = np.mean(self.hpt_problem.all_solutions[uniq_id])
decoded_solution["solution_id"] = id
for metric in SUPPORTED_METRICS:
decoded_solution[metric] = curr_solution_mean[metric]
solutions.append(decoded_solution)
self.solutions = pd.DataFrame(solutions)  # api: pandas.DataFrame
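# Illustrative sketch, separate from the optimizer above: a list of per-solution dicts
# becomes one row per dict, with the union of keys as columns. The metric values here
# are made up.
import pandas as pd
solutions = [
    {"solution_id": 0, "f_score": 0.81, "delay": 4.0},
    {"solution_id": 1, "f_score": 0.78, "delay": 2.5},
]
front = pd.DataFrame(solutions)
print(front)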
"""
Atmospheric pCO2
----------------
This module is not imported by default and has to be manually imported.
This is because it is used only in production of the SeaFlux atmospheric dataset
Contains functions for the full process:
1. Download the NOAA marine boundary layer product (done)
2. Interpolate the product onto a standard grid (done)
3. Download MSLP (ERA5), SST (AVHRR), salinity (EN4) for ATM calculations (done)
4. Convert atmospheric xCO2 to pCO2 with the data
5. Interpolate the final year (2020)
Note that there are no tests for this code, so it is very likely to break
"""
from pathlib import Path as path
base = str(path(__file__).resolve().parent.parent)
def main(
noaa_mbl_url,
download_dest="../data/raw/",
aux_catalog_name="../data/aux_data.yml",
processed_dest="../data/processed/",
output_dest="../data/output/",
):
"""to be called when creating the atmospheric pCO2"""
import xarray as xr
from fetch_data import read_catalog
from pandas import Timestamp
from .aux_vars import download_era5_slp, download_salinity, download_sst_ice
from .utils import center_time_on_15th, preprocess, save_seaflux
if path(output_dest).is_file():
return output_dest
cat = read_catalog(aux_catalog_name)
salt = download_salinity(cat["en4_g10"], f"{processed_dest}/en4_salt_temp.nc")
temp = download_sst_ice(cat["oisst_v2"], f"{processed_dest}/noaa_oisst_sst_icec.nc")
pres = download_era5_slp(
download_dest=cat["era5_mslp"]["dest"],
process_dest=f"{processed_dest}/era5_mslp_monthly.nc",
)
ds = xr.merge(
[
xr.open_dataset(salt)["salinity"].rename("saltPSU"),
xr.open_dataset(temp)["sst"].rename("tempC"),
xr.open_dataset(pres)["sp"].rename("presPa"),
]
)
noaa_mbl_xco2 = (
download_noaa_mbl(
noaa_mbl_url,
download_dest=f"{download_dest}/co2_GHGreference_surface.txt",
target_lat=ds.lat.values,
target_lon=ds.lon.values,
)
.resample(time="1MS")
.mean()
)
t0, t1 = ds.time.values[[0, -1]]
noaa_mbl_xco2 = center_time_on_15th(noaa_mbl_xco2).sel(time=slice(t0, t1))
t0, t1 = noaa_mbl_xco2.time.values[[0, -1]]
ds = ds.sel(time=slice(t0, t1))
atm_pco2 = atm_xCO2_to_pCO2(
noaa_mbl_xco2, ds.presPa.where(ds.tempC.notnull()) / 100, ds.tempC, ds.saltPSU
)
atm_pco2 = preprocess()(
xr.DataArray(
data=atm_pco2,
dims=ds.tempC.dims,
coords=ds.tempC.coords,
name="pco2atm",
attrs=dict(
long_name=(
"partial_pressure_of_carbon_dioxide_in_the_marine_boundary_layer"
),
short_name="pco2atm",
units="uatm",
description=(
"Atmospheric pCO2 for the marine boundary layer is calculated "
"from the NOAAs marine boundary layer pCO2 with: xCO2 * (Patm "
"- pH2O). Where pH2O is calculated using vapour pressure from "
"Dickson et al. (2007)"
),
history=(
getattr(noaa_mbl_xco2, "history", "").strip(";") + ";\n"
f"[SeaFlux @ {Timestamp.today():%Y-%m-%d}] "
f"pCO2 calculated from xCO2 * (Patm - pH2O), where "
f"pH2O is calculated with Dickson et al. (2007)"
),
citation=(
"<NAME> and <NAME>, NOAA/ESRL "
"(www.esrl.noaa.gov/gmd/ccgg/trends/)"
),
),
)
)
variable = "pco2atm"
pco2atm = interpolate_year(atm_pco2).to_dataset(name=variable)
sname = save_seaflux(pco2atm, output_dest, variable)
return sname
def atm_xCO2_to_pCO2(xCO2_ppm, slp_hPa, tempSW_C, salt):
"""
Convert atmospheric xCO2 to pCO2 with correction for water vapour pressure
pCO2atm = xCO2atm * (Press - pH2O)
Args:
xCO2_ppm (array): atmospheric, or marine boundary layer mole fraction of CO2 (NOAA MBL)
slp_hPa (array): sea level pressure in hecto Pascal (ERA5 recommended)
tempSW_C (array): sea water temperature in degrees C (NOAA AVHRR OISSTv2 recommended)
salt (array): sea surface salinity in PSU (EN4 salinity)
Returns:
array: note that output will be an np.ndarray regardless of input
"""
from numpy import array
from .. import check_units as check
from .. import vapour_pressure as vapress
print("[SeaFlux] Converting xCO2 to pCO2")
xCO2 = array(xCO2_ppm)
# check units and mask where outsider of range
Tsw = check.temp_K(tempSW_C + 273.15)
Ssw = check.salt(salt)
Patm = check.pres_atm(slp_hPa / 1013.25)
pH2O = vapress.dickson2007(Ssw, Tsw)
pCO2atm = xCO2 * (Patm - pH2O)
return pCO2atm
def read_noaa_mbl_url(noaa_mbl_url, dest):
"""Downloads url and reads in the MBL surface file
Args:
noaa_mbl_url (str): the address for the noaa surface file
dest (str): the destination to which the raw file will be saved
Returns:
pd.Series: multindexed series of xCO2 with (time, lat) as coords.
"""
import re
from pathlib import Path
import numpy as np
import pandas as pd
import pooch
# save to temporary location with pooch
print(
f"[SeaFlux] Downloading {noaa_mbl_url} to {dest} and reading in as pd.DataFrame"
)
dest = Path(dest)
fname = pooch.retrieve(
url=noaa_mbl_url,
known_hash=None,
path=str(dest.parent),
fname=str(dest.name),
)
# find start line
is_mbl_surface = False
for start_line, line in enumerate(open(fname)):
if re.findall("MBL.*SURFACE", line):
is_mbl_surface = True
if not line.startswith("#"):
break
if not is_mbl_surface:
raise Exception(
"The file at the provided url is not an MBL SURFACE file. "
"Please check that you have provided the surface url. "
)
# read fixed width file CO2
df = pd.read_fwf(fname, skiprows=start_line, header=None, index_col=0)  # api: pandas.read_fwf
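# Illustrative sketch, separate from the SeaFlux module above: read_fwf parses
# fixed-width text; skiprows, header and index_col behave as in read_csv. The sample
# text here is made up.
import io
import pandas as pd
fwf_text = (
    "# header line to skip\n"
    "2000  368.1  368.4\n"
    "2001  369.8  370.1\n"
)
fwf_df = pd.read_fwf(io.StringIO(fwf_text), skiprows=1, header=None, index_col=0)
print(fwf_df)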
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import os
import pandas
import numpy as np
import pyarrow
import pytest
import re
from modin.config import IsExperimental, Engine, StorageFormat
from modin.pandas.test.utils import io_ops_bad_exc
from .utils import eval_io, ForceOmnisciImport, set_execution_mode, run_and_compare
from pandas.core.dtypes.common import is_list_like
IsExperimental.put(True)
Engine.put("native")
StorageFormat.put("omnisci")
import modin.pandas as pd
from modin.pandas.test.utils import (
df_equals,
bool_arg_values,
to_pandas,
test_data_values,
test_data_keys,
generate_multiindex,
eval_general,
df_equals_with_non_stable_indices,
)
from modin.utils import try_cast_to_pandas
from modin.experimental.core.execution.native.implementations.omnisci_on_native.partitioning.partition_manager import (
OmnisciOnNativeDataframePartitionManager,
)
from modin.experimental.core.execution.native.implementations.omnisci_on_native.df_algebra import (
FrameNode,
)
@pytest.mark.usefixtures("TestReadCSVFixture")
class TestCSV:
from modin import __file__ as modin_root
root = os.path.dirname(
os.path.dirname(os.path.abspath(modin_root)) + ".."
) # root of modin repo
boston_housing_names = [
"index",
"CRIM",
"ZN",
"INDUS",
"CHAS",
"NOX",
"RM",
"AGE",
"DIS",
"RAD",
"TAX",
"PTRATIO",
"B",
"LSTAT",
"PRICE",
]
boston_housing_dtypes = {
"index": "int64",
"CRIM": "float64",
"ZN": "float64",
"INDUS": "float64",
"CHAS": "float64",
"NOX": "float64",
"RM": "float64",
"AGE": "float64",
"DIS": "float64",
"RAD": "float64",
"TAX": "float64",
"PTRATIO": "float64",
"B": "float64",
"LSTAT": "float64",
"PRICE": "float64",
}
def test_usecols_csv(self):
"""check with the following arguments: names, dtype, skiprows, delimiter"""
csv_file = os.path.join(self.root, "modin/pandas/test/data", "test_usecols.csv")
for kwargs in (
{"delimiter": ","},
{"sep": None},
{"skiprows": 1, "names": ["A", "B", "C", "D", "E"]},
{"dtype": {"a": "int32", "e": "string"}},
{"dtype": {"a": np.dtype("int32"), "b": np.dtype("int64"), "e": "string"}},
):
eval_io(
fn_name="read_csv",
md_extra_kwargs={"engine": "arrow"},
# read_csv kwargs
filepath_or_buffer=csv_file,
**kwargs,
)
def test_housing_csv(self):
csv_file = os.path.join(self.root, "examples/data/boston_housing.csv")
for kwargs in (
{
"skiprows": 1,
"names": self.boston_housing_names,
"dtype": self.boston_housing_dtypes,
},
):
eval_io(
fn_name="read_csv",
md_extra_kwargs={"engine": "arrow"},
# read_csv kwargs
filepath_or_buffer=csv_file,
**kwargs,
)
def test_time_parsing(self):
csv_file = os.path.join(
self.root, "modin/pandas/test/data", "test_time_parsing.csv"
)
for kwargs in (
{
"skiprows": 1,
"names": [
"timestamp",
"symbol",
"high",
"low",
"open",
"close",
"spread",
"volume",
],
"parse_dates": ["timestamp"],
"dtype": {"symbol": "string"},
},
):
rp = pandas.read_csv(csv_file, **kwargs)
rm = pd.read_csv(csv_file, engine="arrow", **kwargs)
with ForceOmnisciImport(rm):
rm = to_pandas(rm)
df_equals(rm["timestamp"].dt.year, rp["timestamp"].dt.year)
df_equals(rm["timestamp"].dt.month, rp["timestamp"].dt.month)
df_equals(rm["timestamp"].dt.day, rp["timestamp"].dt.day)
def test_csv_fillna(self):
csv_file = os.path.join(self.root, "examples/data/boston_housing.csv")
for kwargs in (
{
"skiprows": 1,
"names": self.boston_housing_names,
"dtype": self.boston_housing_dtypes,
},
):
eval_io(
fn_name="read_csv",
md_extra_kwargs={"engine": "arrow"},
comparator=lambda df1, df2: df_equals(
df1["CRIM"].fillna(1000), df2["CRIM"].fillna(1000)
),
# read_csv kwargs
filepath_or_buffer=csv_file,
**kwargs,
)
@pytest.mark.parametrize("null_dtype", ["category", "float64"])
def test_null_col(self, null_dtype):
csv_file = os.path.join(
self.root, "modin/pandas/test/data", "test_null_col.csv"
)
ref = pandas.read_csv(
csv_file,
names=["a", "b", "c"],
dtype={"a": "int64", "b": "int64", "c": null_dtype},
skiprows=1,
)
ref["a"] = ref["a"] + ref["b"]
exp = pd.read_csv(
csv_file,
names=["a", "b", "c"],
dtype={"a": "int64", "b": "int64", "c": null_dtype},
skiprows=1,
)
exp["a"] = exp["a"] + exp["b"]
# df_equals cannot compare empty categories
if null_dtype == "category":
ref["c"] = ref["c"].astype("string")
with ForceOmnisciImport(exp):
exp = to_pandas(exp)
exp["c"] = exp["c"].astype("string")
df_equals(ref, exp)
def test_read_and_concat(self):
csv_file = os.path.join(self.root, "modin/pandas/test/data", "test_usecols.csv")
ref1 = pandas.read_csv(csv_file)
ref2 = pandas.read_csv(csv_file)
ref = pandas.concat([ref1, ref2])
exp1 = pandas.read_csv(csv_file)
exp2 = pandas.read_csv(csv_file)
exp = pd.concat([exp1, exp2])
with ForceOmnisciImport(exp):
df_equals(ref, exp)
@pytest.mark.parametrize("names", [None, ["a", "b", "c", "d", "e"]])
@pytest.mark.parametrize("header", [None, 0])
def test_from_csv(self, header, names):
csv_file = os.path.join(self.root, "modin/pandas/test/data", "test_usecols.csv")
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=csv_file,
header=header,
names=names,
)
@pytest.mark.parametrize("kwargs", [{"sep": "|"}, {"delimiter": "|"}])
def test_sep_delimiter(self, kwargs):
csv_file = os.path.join(self.root, "modin/pandas/test/data", "test_delim.csv")
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=csv_file,
**kwargs,
)
@pytest.mark.skip(reason="https://github.com/modin-project/modin/issues/2174")
def test_float32(self):
csv_file = os.path.join(self.root, "modin/pandas/test/data", "test_usecols.csv")
kwargs = {
"dtype": {"a": "float32", "b": "float32"},
}
pandas_df = pandas.read_csv(csv_file, **kwargs)
pandas_df["a"] = pandas_df["a"] + pandas_df["b"]
modin_df = pd.read_csv(csv_file, **kwargs, engine="arrow")
modin_df["a"] = modin_df["a"] + modin_df["b"]
with ForceOmnisciImport(modin_df):
df_equals(modin_df, pandas_df)
# Datetime Handling tests
@pytest.mark.parametrize("engine", [None, "arrow"])
@pytest.mark.parametrize(
"parse_dates",
[
True,
False,
["col2"],
["c2"],
[["col2", "col3"]],
{"col23": ["col2", "col3"]},
],
)
@pytest.mark.parametrize("names", [None, [f"c{x}" for x in range(1, 7)]])
def test_read_csv_datetime(
self,
engine,
parse_dates,
names,
):
parse_dates_unsupported = isinstance(parse_dates, dict) or (
isinstance(parse_dates, list) and isinstance(parse_dates[0], list)
)
if parse_dates_unsupported and engine == "arrow" and not names:
pytest.skip(
"In these cases Modin raises `ArrowEngineException` while pandas "
"doesn't raise any exceptions that causes tests fails"
)
# In these cases Modin raises `ArrowEngineException` while pandas
# raises `ValueError`, so skipping exception type checking
skip_exc_type_check = parse_dates_unsupported and engine == "arrow"
eval_io(
fn_name="read_csv",
md_extra_kwargs={"engine": engine},
check_exception_type=not skip_exc_type_check,
raising_exceptions=None if skip_exc_type_check else io_ops_bad_exc,
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_regular"],
parse_dates=parse_dates,
names=names,
)
@pytest.mark.parametrize("engine", [None, "arrow"])
@pytest.mark.parametrize(
"usecols",
[
None,
["col1"],
["col1", "col1"],
["col1", "col2", "col6"],
["col6", "col2", "col1"],
[0],
[0, 0],
[0, 1, 5],
[5, 1, 0],
lambda x: x in ["col1", "col2"],
],
)
def test_read_csv_col_handling(
self,
engine,
usecols,
):
eval_io(
fn_name="read_csv",
check_kwargs_callable=not callable(usecols),
md_extra_kwargs={"engine": engine},
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_regular"],
usecols=usecols,
)
class TestMasks:
data = {
"a": [1, 1, 2, 2, 3],
"b": [None, None, 2, 1, 3],
"c": [3, None, None, 2, 1],
}
cols_values = ["a", ["a", "b"], ["a", "b", "c"]]
@pytest.mark.parametrize("cols", cols_values)
def test_projection(self, cols):
def projection(df, cols, **kwargs):
return df[cols]
run_and_compare(projection, data=self.data, cols=cols)
def test_drop(self):
def drop(df, **kwargs):
return df.drop(columns="a")
run_and_compare(drop, data=self.data)
def test_iloc(self):
def mask(df, **kwargs):
return df.iloc[[0, 1]]
run_and_compare(mask, data=self.data, allow_subqueries=True)
def test_empty(self):
def empty(df, **kwargs):
return df
run_and_compare(empty, data=None)
def test_filter(self):
def filter(df, **kwargs):
return df[df["a"] == 1]
run_and_compare(filter, data=self.data)
def test_filter_with_index(self):
def filter(df, **kwargs):
df = df.groupby("a").sum()
return df[df["b"] > 1]
run_and_compare(filter, data=self.data)
def test_filter_proj(self):
def filter(df, **kwargs):
df1 = df + 2
return df1[(df["a"] + df1["b"]) > 1]
run_and_compare(filter, data=self.data)
def test_filter_drop(self):
def filter(df, **kwargs):
df = df[["a", "b"]]
df = df[df["a"] != 1]
df["a"] = df["a"] * df["b"]
return df
run_and_compare(filter, data=self.data)
class TestMultiIndex:
data = {"a": np.arange(24), "b": np.arange(24)}
@pytest.mark.parametrize("names", [None, ["", ""], ["name", "name"]])
def test_dup_names(self, names):
index = pandas.MultiIndex.from_tuples(
[(i, j) for i in range(3) for j in range(8)], names=names
)
pandas_df = pandas.DataFrame(self.data, index=index) + 1
modin_df = pd.DataFrame(self.data, index=index) + 1
df_equals(pandas_df, modin_df)
@pytest.mark.parametrize(
"names",
[
None,
[None, "s", None],
["i1", "i2", "i3"],
["i1", "i1", "i3"],
["i1", "i2", "a"],
],
)
def test_reset_index(self, names):
index = pandas.MultiIndex.from_tuples(
[(i, j, k) for i in range(2) for j in range(3) for k in range(4)],
names=names,
)
def applier(lib):
df = lib.DataFrame(self.data, index=index) + 1
return df.reset_index()
eval_general(pd, pandas, applier)
@pytest.mark.parametrize("is_multiindex", [True, False])
@pytest.mark.parametrize(
"column_names", [None, ["level1", None], ["level1", "level2"]]
)
def test_reset_index_multicolumns(self, is_multiindex, column_names):
index = (
pandas.MultiIndex.from_tuples(
[(i, j, k) for i in range(2) for j in range(3) for k in range(4)],
names=["l1", "l2", "l3"],
)
if is_multiindex
else pandas.Index(np.arange(len(self.data["a"])), name="index")
)
columns = pandas.MultiIndex.from_tuples(
[("a", "b"), ("b", "c")], names=column_names
)
data = np.array(list(self.data.values())).T
def applier(df, **kwargs):
df = df + 1
return df.reset_index(drop=False)
run_and_compare(
fn=applier,
data=data,
constructor_kwargs={"index": index, "columns": columns},
)
def test_set_index_name(self):
index = pandas.Index.__new__(pandas.Index, data=[i for i in range(24)])
pandas_df = pandas.DataFrame(self.data, index=index)
pandas_df.index.name = "new_name"
modin_df = pd.DataFrame(self.data, index=index)
modin_df._query_compiler.set_index_name("new_name")
df_equals(pandas_df, modin_df)
def test_set_index_names(self):
index = pandas.MultiIndex.from_tuples(
[(i, j, k) for i in range(2) for j in range(3) for k in range(4)]
)
pandas_df = pandas.DataFrame(self.data, index=index)
pandas_df.index.names = ["new_name1", "new_name2", "new_name3"]
modin_df = pd.DataFrame(self.data, index=index)
modin_df._query_compiler.set_index_names(
["new_name1", "new_name2", "new_name3"]
)
df_equals(pandas_df, modin_df)
class TestFillna:
data = {"a": [1, 1, None], "b": [None, None, 2], "c": [3, None, None]}
values = [1, {"a": 1, "c": 3}, {"a": 1, "b": 2, "c": 3}]
@pytest.mark.parametrize("value", values)
def test_fillna_all(self, value):
def fillna(df, value, **kwargs):
return df.fillna(value)
run_and_compare(fillna, data=self.data, value=value)
def test_fillna_bool(self):
def fillna(df, **kwargs):
df["a"] = df["a"] == 1
df["a"] = df["a"].fillna(False)
return df
run_and_compare(fillna, data=self.data)
class TestConcat:
data = {
"a": [1, 2, 3],
"b": [10, 20, 30],
"d": [1000, 2000, 3000],
"e": [11, 22, 33],
}
data2 = {
"a": [4, 5, 6],
"c": [400, 500, 600],
"b": [40, 50, 60],
"f": [444, 555, 666],
}
data3 = {
"f": [2, 3, 4],
"g": [400, 500, 600],
"h": [20, 30, 40],
}
@pytest.mark.parametrize("join", ["inner", "outer"])
@pytest.mark.parametrize("sort", bool_arg_values)
@pytest.mark.parametrize("ignore_index", bool_arg_values)
def test_concat(self, join, sort, ignore_index):
def concat(lib, df1, df2, join, sort, ignore_index):
return lib.concat(
[df1, df2], join=join, sort=sort, ignore_index=ignore_index
)
run_and_compare(
concat,
data=self.data,
data2=self.data2,
join=join,
sort=sort,
ignore_index=ignore_index,
)
def test_concat_with_same_df(self):
def concat(df, **kwargs):
df["f"] = df["a"]
return df
run_and_compare(concat, data=self.data)
def test_setitem_lazy(self):
def applier(df, **kwargs):
df = df + 1
df["a"] = df["a"] + 1
df["e"] = df["a"] + 1
df["new_int8"] = np.int8(10)
df["new_int16"] = np.int16(10)
df["new_int32"] = np.int32(10)
df["new_int64"] = np.int64(10)
df["new_int"] = 10
df["new_float"] = 5.5
df["new_float64"] = np.float64(10.1)
return df
run_and_compare(applier, data=self.data)
def test_setitem_default(self):
def applier(df, lib, **kwargs):
df = df + 1
df["a"] = np.arange(3)
df["b"] = lib.Series(np.arange(3))
return df
run_and_compare(applier, data=self.data, force_lazy=False)
def test_insert_lazy(self):
def applier(df, **kwargs):
df = df + 1
df.insert(2, "new_int", 10)
df.insert(1, "new_float", 5.5)
df.insert(0, "new_a", df["a"] + 1)
return df
run_and_compare(applier, data=self.data)
def test_insert_default(self):
def applier(df, lib, **kwargs):
df = df + 1
df.insert(1, "new_range", np.arange(3))
df.insert(1, "new_series", lib.Series(np.arange(3)))
return df
run_and_compare(applier, data=self.data, force_lazy=False)
def test_concat_many(self):
def concat(df1, df2, lib, **kwargs):
df3 = df1.copy()
df4 = df2.copy()
return lib.concat([df1, df2, df3, df4])
def sort_comparator(df1, df2):
"""Sort and verify equality of the passed frames."""
# We sort values because order of rows in the 'union all' result is inconsistent in OmniSci
df1, df2 = (
try_cast_to_pandas(df).sort_values(df.columns[0]) for df in (df1, df2)
)
return df_equals(df1, df2)
run_and_compare(
concat, data=self.data, data2=self.data2, comparator=sort_comparator
)
def test_concat_agg(self):
def concat(lib, df1, df2):
df1 = df1.groupby("a", as_index=False).agg(
{"b": "sum", "d": "sum", "e": "sum"}
)
df2 = df2.groupby("a", as_index=False).agg(
{"c": "sum", "b": "sum", "f": "sum"}
)
return lib.concat([df1, df2])
run_and_compare(concat, data=self.data, data2=self.data2, allow_subqueries=True)
@pytest.mark.parametrize("join", ["inner", "outer"])
@pytest.mark.parametrize("sort", bool_arg_values)
@pytest.mark.parametrize("ignore_index", bool_arg_values)
def test_concat_single(self, join, sort, ignore_index):
def concat(lib, df, join, sort, ignore_index):
return lib.concat([df], join=join, sort=sort, ignore_index=ignore_index)
run_and_compare(
concat,
data=self.data,
join=join,
sort=sort,
ignore_index=ignore_index,
)
def test_groupby_concat_single(self):
def concat(lib, df):
df = lib.concat([df])
return df.groupby("a").agg({"b": "min"})
run_and_compare(
concat,
data=self.data,
)
@pytest.mark.parametrize("join", ["inner"])
@pytest.mark.parametrize("sort", bool_arg_values)
@pytest.mark.parametrize("ignore_index", bool_arg_values)
def test_concat_join(self, join, sort, ignore_index):
def concat(lib, df1, df2, join, sort, ignore_index, **kwargs):
return lib.concat(
[df1, df2], axis=1, join=join, sort=sort, ignore_index=ignore_index
)
run_and_compare(
concat,
data=self.data,
data2=self.data3,
join=join,
sort=sort,
ignore_index=ignore_index,
)
def test_concat_index_name(self):
df1 = pandas.DataFrame(self.data)
df1 = df1.set_index("a")
df2 = pandas.DataFrame(self.data3)
df2 = df2.set_index("f")
ref = pandas.concat([df1, df2], axis=1, join="inner")
exp = pd.concat([df1, df2], axis=1, join="inner")
df_equals(ref, exp)
df2.index.name = "a"
ref = pandas.concat([df1, df2], axis=1, join="inner")
exp = pd.concat([df1, df2], axis=1, join="inner")
df_equals(ref, exp)
def test_concat_index_names(self):
df1 = pandas.DataFrame(self.data)
df1 = df1.set_index(["a", "b"])
df2 = pandas.DataFrame(self.data3)
df2 = df2.set_index(["f", "h"])
ref = pandas.concat([df1, df2], axis=1, join="inner")
exp = pd.concat([df1, df2], axis=1, join="inner")
df_equals(ref, exp)
df2.index.names = ["a", "b"]
ref = pandas.concat([df1, df2], axis=1, join="inner")
exp = pd.concat([df1, df2], axis=1, join="inner")
df_equals(ref, exp)
class TestGroupby:
data = {
"a": [1, 1, 2, 2, 2, 1],
"b": [11, 21, 12, 22, 32, 11],
"c": [101, 201, 202, 202, 302, 302],
}
cols_value = ["a", ["a", "b"]]
@pytest.mark.parametrize("cols", cols_value)
@pytest.mark.parametrize("as_index", bool_arg_values)
def test_groupby_sum(self, cols, as_index):
def groupby_sum(df, cols, as_index, **kwargs):
return df.groupby(cols, as_index=as_index).sum()
run_and_compare(groupby_sum, data=self.data, cols=cols, as_index=as_index)
@pytest.mark.parametrize("cols", cols_value)
@pytest.mark.parametrize("as_index", bool_arg_values)
def test_groupby_count(self, cols, as_index):
def groupby_count(df, cols, as_index, **kwargs):
return df.groupby(cols, as_index=as_index).count()
run_and_compare(groupby_count, data=self.data, cols=cols, as_index=as_index)
@pytest.mark.xfail(
reason="Currently mean() passes a lambda into query compiler which cannot be executed on OmniSci engine"
)
@pytest.mark.parametrize("cols", cols_value)
@pytest.mark.parametrize("as_index", bool_arg_values)
def test_groupby_mean(self, cols, as_index):
def groupby_mean(df, cols, as_index, **kwargs):
return df.groupby(cols, as_index=as_index).mean()
run_and_compare(groupby_mean, data=self.data, cols=cols, as_index=as_index)
@pytest.mark.parametrize("cols", cols_value)
@pytest.mark.parametrize("as_index", bool_arg_values)
def test_groupby_proj_sum(self, cols, as_index):
def groupby_sum(df, cols, as_index, **kwargs):
return df.groupby(cols, as_index=as_index).c.sum()
run_and_compare(
groupby_sum, data=self.data, cols=cols, as_index=as_index, force_lazy=False
)
@pytest.mark.parametrize("agg", ["count", "size", "nunique"])
def test_groupby_agg(self, agg):
def groupby(df, agg, **kwargs):
return df.groupby("a").agg({"b": agg})
run_and_compare(groupby, data=self.data, agg=agg)
def test_groupby_agg_default_to_pandas(self):
def lambda_func(df, **kwargs):
return df.groupby("a").agg(lambda df: (df.mean() - df.sum()) // 2)
run_and_compare(lambda_func, data=self.data, force_lazy=False)
def not_implemented_func(df, **kwargs):
return df.groupby("a").agg("cumprod")
run_and_compare(lambda_func, data=self.data, force_lazy=False)
@pytest.mark.xfail(
reason="Function specified as a string should be passed into query compiler API, but currently it is transformed into a lambda"
)
@pytest.mark.parametrize("cols", cols_value)
@pytest.mark.parametrize("as_index", bool_arg_values)
def test_groupby_agg_mean(self, cols, as_index):
def groupby_mean(df, cols, as_index, **kwargs):
return df.groupby(cols, as_index=as_index).agg("mean")
run_and_compare(groupby_mean, data=self.data, cols=cols, as_index=as_index)
def test_groupby_lazy_multiindex(self):
index = generate_multiindex(len(self.data["a"]))
def groupby(df, *args, **kwargs):
df = df + 1
return df.groupby("a").agg({"b": "size"})
run_and_compare(groupby, data=self.data, constructor_kwargs={"index": index})
def test_groupby_lazy_squeeze(self):
def applier(df, **kwargs):
return df.groupby("a").sum().squeeze(axis=1)
run_and_compare(
applier,
data=self.data,
constructor_kwargs={"columns": ["a", "b"]},
force_lazy=True,
)
@pytest.mark.parametrize("method", ["sum", "size"])
def test_groupby_series(self, method):
def groupby(df, **kwargs):
ser = df[df.columns[0]]
return getattr(ser.groupby(ser), method)()
run_and_compare(groupby, data=self.data)
def test_groupby_size(self):
def groupby(df, **kwargs):
return df.groupby("a").size()
run_and_compare(groupby, data=self.data)
@pytest.mark.parametrize("by", [["a"], ["a", "b", "c"]])
@pytest.mark.parametrize("agg", ["sum", "size"])
@pytest.mark.parametrize("as_index", [True, False])
def test_groupby_agg_by_col(self, by, agg, as_index):
def simple_agg(df, **kwargs):
return df.groupby(by, as_index=as_index).agg(agg)
run_and_compare(simple_agg, data=self.data)
def dict_agg(df, **kwargs):
return df.groupby(by, as_index=as_index).agg({by[0]: agg})
run_and_compare(dict_agg, data=self.data)
def dict_agg_all_cols(df, **kwargs):
return df.groupby(by, as_index=as_index).agg({col: agg for col in by})
run_and_compare(dict_agg_all_cols, data=self.data)
# modin-issue#3461
def test_groupby_pure_by(self):
data = [1, 1, 2, 2]
# Test when 'by' is a 'TransformNode'
run_and_compare(lambda df: df.groupby(df).sum(), data=data, force_lazy=True)
# Test when 'by' is a 'FrameNode'
md_ser, pd_ser = pd.Series(data), pandas.Series(data)
md_ser._query_compiler._modin_frame._execute()
assert isinstance(
md_ser._query_compiler._modin_frame._op, FrameNode
), "Triggering execution of the Modin frame supposed to set 'FrameNode' as a frame's op"
set_execution_mode(md_ser, "lazy")
md_res = md_ser.groupby(md_ser).sum()
set_execution_mode(md_res, None)
pd_res = pd_ser.groupby(pd_ser).sum()
df_equals(md_res, pd_res)
taxi_data = {
"a": [1, 1, 2, 2],
"b": [11, 21, 12, 11],
"c": pandas.to_datetime(
["20190902", "20180913", "20190921", "20180903"], format="%Y%m%d"
),
"d": [11.5, 21.2, 12.8, 13.4],
}
# TODO: emulate taxi queries with group by category types when we have loading
# using arrow
# Another way of doing taxi q1 is
# res = df.groupby("cab_type").size() - this should be tested later as well
def test_taxi_q1(self):
def taxi_q1(df, **kwargs):
return df.groupby("a").size()
run_and_compare(taxi_q1, data=self.taxi_data)
def test_taxi_q2(self):
def taxi_q2(df, **kwargs):
return df.groupby("a").agg({"b": "mean"})
run_and_compare(taxi_q2, data=self.taxi_data)
@pytest.mark.parametrize("as_index", bool_arg_values)
def test_taxi_q3(self, as_index):
def taxi_q3(df, as_index, **kwargs):
return df.groupby(["b", df["c"].dt.year], as_index=as_index).size()
run_and_compare(taxi_q3, data=self.taxi_data, as_index=as_index)
def test_groupby_expr_col(self):
def groupby(df, **kwargs):
df = df.loc[:, ["b", "c"]]
df["year"] = df["c"].dt.year
df["month"] = df["c"].dt.month
df["id1"] = df["year"] * 12 + df["month"]
df["id2"] = (df["id1"] - 24000) // 12
df = df.groupby(["id1", "id2"], as_index=False).agg({"b": "max"})
return df
run_and_compare(groupby, data=self.taxi_data)
def test_series_astype(self):
def series_astype(df, **kwargs):
return df["d"].astype("int")
run_and_compare(series_astype, data=self.taxi_data)
def test_df_astype(self):
def df_astype(df, **kwargs):
return df.astype({"b": "float", "d": "int"})
run_and_compare(df_astype, data=self.taxi_data)
def test_df_indexed_astype(self):
def df_astype(df, **kwargs):
df = df.groupby("a").agg({"b": "sum"})
return df.astype({"b": "float"})
run_and_compare(df_astype, data=self.taxi_data)
@pytest.mark.parametrize("as_index", bool_arg_values)
def test_taxi_q4(self, as_index):
def taxi_q4(df, **kwargs):
df["c"] = df["c"].dt.year
df["d"] = df["d"].astype("int64")
df = df.groupby(["b", "c", "d"], sort=True, as_index=as_index).size()
if as_index:
df = df.reset_index()
return df.sort_values(
by=["c", 0 if as_index else "size"],
ignore_index=True,
ascending=[True, False],
)
run_and_compare(taxi_q4, data=self.taxi_data)
h2o_data = {
"id1": ["id1", "id2", "id3", "id1", "id2", "id3", "id1", "id2", "id3", "id1"],
"id2": ["id1", "id2", "id1", "id2", "id1", "id2", "id1", "id2", "id1", "id2"],
"id3": ["id4", "id5", "id6", "id4", "id5", "id6", "id4", "id5", "id6", "id4"],
"id4": [4, 5, 4, 5, 4, 5, 4, 5, 4, 5],
"id5": [7, 8, 9, 7, 8, 9, 7, 8, 9, 7],
"id6": [7, 8, 7, 8, 7, 8, 7, 8, 7, 8],
"v1": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
"v2": [1, 3, 5, 7, 9, 10, 8, 6, 4, 2],
"v3": [1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.0],
}
def _get_h2o_df(self):
df = pandas.DataFrame(self.h2o_data)
df["id1"] = df["id1"].astype("category")
df["id2"] = df["id2"].astype("category")
df["id3"] = df["id3"].astype("category")
return df
def test_h2o_q1(self):
df = self._get_h2o_df()
ref = df.groupby(["id1"], observed=True).agg({"v1": "sum"})
ref.reset_index(inplace=True)
modin_df = pd.DataFrame(df)
set_execution_mode(modin_df, "lazy")
modin_df = modin_df.groupby(["id1"], observed=True, as_index=False).agg(
{"v1": "sum"}
)
set_execution_mode(modin_df, None)
exp = to_pandas(modin_df)
exp["id1"] = exp["id1"].astype("category")
df_equals(ref, exp)
def test_h2o_q2(self):
df = self._get_h2o_df()
ref = df.groupby(["id1", "id2"], observed=True).agg({"v1": "sum"})
ref.reset_index(inplace=True)
modin_df = pd.DataFrame(df)
set_execution_mode(modin_df, "lazy")
modin_df = modin_df.groupby(["id1", "id2"], observed=True, as_index=False).agg(
{"v1": "sum"}
)
set_execution_mode(modin_df, None)
exp = to_pandas(modin_df)
exp["id1"] = exp["id1"].astype("category")
exp["id2"] = exp["id2"].astype("category")
df_equals(ref, exp)
def test_h2o_q3(self):
df = self._get_h2o_df()
ref = df.groupby(["id3"], observed=True).agg({"v1": "sum", "v3": "mean"})
ref.reset_index(inplace=True)
modin_df = pd.DataFrame(df)
set_execution_mode(modin_df, "lazy")
modin_df = modin_df.groupby(["id3"], observed=True, as_index=False).agg(
{"v1": "sum", "v3": "mean"}
)
set_execution_mode(modin_df, None)
exp = to_pandas(modin_df)
exp["id3"] = exp["id3"].astype("category")
df_equals(ref, exp)
def test_h2o_q4(self):
df = self._get_h2o_df()
ref = df.groupby(["id4"], observed=True).agg(
{"v1": "mean", "v2": "mean", "v3": "mean"}
)
ref.reset_index(inplace=True)
modin_df = pd.DataFrame(df)
set_execution_mode(modin_df, "lazy")
modin_df = modin_df.groupby(["id4"], observed=True, as_index=False).agg(
{"v1": "mean", "v2": "mean", "v3": "mean"}
)
set_execution_mode(modin_df, None)
exp = to_pandas(modin_df)
df_equals(ref, exp)
def test_h2o_q5(self):
df = self._get_h2o_df()
ref = df.groupby(["id6"], observed=True).agg(
{"v1": "sum", "v2": "sum", "v3": "sum"}
)
ref.reset_index(inplace=True)
modin_df = pd.DataFrame(df)
set_execution_mode(modin_df, "lazy")
modin_df = modin_df.groupby(["id6"], observed=True, as_index=False).agg(
{"v1": "sum", "v2": "sum", "v3": "sum"}
)
set_execution_mode(modin_df, None)
exp = to_pandas(modin_df)
df_equals(ref, exp)
def test_h2o_q7(self):
df = self._get_h2o_df()
ref = (
df.groupby(["id3"], observed=True)
.agg({"v1": "max", "v2": "min"})
.assign(range_v1_v2=lambda x: x["v1"] - x["v2"])[["range_v1_v2"]]
)
ref.reset_index(inplace=True)
modin_df = pd.DataFrame(df)
set_execution_mode(modin_df, "lazy")
modin_df = modin_df.groupby(["id3"], observed=True).agg(
{"v1": "max", "v2": "min"}
)
modin_df["range_v1_v2"] = modin_df["v1"] - modin_df["v2"]
modin_df = modin_df[["range_v1_v2"]]
modin_df.reset_index(inplace=True)
set_execution_mode(modin_df, None)
exp = to_pandas(modin_df)
exp["id3"] = exp["id3"].astype("category")
df_equals(ref, exp)
def test_h2o_q10(self):
df = self._get_h2o_df()
ref = df.groupby(["id1", "id2", "id3", "id4", "id5", "id6"], observed=True).agg(
{"v3": "sum", "v1": "count"}
)
ref.reset_index(inplace=True)
modin_df = pd.DataFrame(df)
modin_df = modin_df.groupby(
["id1", "id2", "id3", "id4", "id5", "id6"], observed=True
).agg({"v3": "sum", "v1": "count"})
modin_df.reset_index(inplace=True)
exp = to_pandas(modin_df)
exp["id1"] = exp["id1"].astype("category")
exp["id2"] = exp["id2"].astype("category")
exp["id3"] = exp["id3"].astype("category")
df_equals(ref, exp)
std_data = {
"a": [1, 2, 1, 1, 1, 2, 2, 2, 1, 2],
"b": [4, 3, 1, 6, 9, 8, 0, 9, 5, 13],
"c": [12.8, 45.6, 23.5, 12.4, 11.2, None, 56.4, 12.5, 1, 55],
}
def test_agg_std(self):
def std(df, **kwargs):
df = df.groupby("a").agg({"b": "std", "c": "std"})
if not isinstance(df, pandas.DataFrame):
df = to_pandas(df)
df["b"] = df["b"].apply(lambda x: round(x, 10))
df["c"] = df["c"].apply(lambda x: round(x, 10))
return df
run_and_compare(std, data=self.std_data, force_lazy=False)
skew_data = {
"a": [1, 2, 1, 1, 1, 2, 2, 2, 1, 2, 3, 4, 4],
"b": [4, 3, 1, 6, 9, 8, 0, 9, 5, 13, 12, 44, 6],
"c": [12.8, 45.6, 23.5, 12.4, 11.2, None, 56.4, 12.5, 1, 55, 4.5, 7.8, 9.4],
}
def test_agg_skew(self):
def std(df, **kwargs):
df = df.groupby("a").agg({"b": "skew", "c": "skew"})
if not isinstance(df, pandas.DataFrame):
df = to_pandas(df)
df["b"] = df["b"].apply(lambda x: round(x, 10))
df["c"] = df["c"].apply(lambda x: round(x, 10))
return df
run_and_compare(std, data=self.skew_data, force_lazy=False)
def test_multilevel(self):
def groupby(df, **kwargs):
return df.groupby("a").agg({"b": "min", "c": ["min", "max", "sum", "skew"]})
run_and_compare(groupby, data=self.data)
class TestAgg:
data = {
"a": [1, 2, None, None, 1, None],
"b": [10, 20, None, 20, 10, None],
"c": [None, 200, None, 400, 500, 600],
"d": [11, 22, 33, 22, 33, 22],
}
int_data = pandas.DataFrame(data).fillna(0).astype("int").to_dict()
@pytest.mark.parametrize("agg", ["max", "min", "sum", "mean"])
@pytest.mark.parametrize("skipna", bool_arg_values)
def test_simple_agg(self, agg, skipna):
def apply(df, agg, skipna, **kwargs):
return getattr(df, agg)(skipna=skipna)
run_and_compare(apply, data=self.data, agg=agg, skipna=skipna, force_lazy=False)
def test_count_agg(self):
def apply(df, **kwargs):
return df.count()
run_and_compare(apply, data=self.data, force_lazy=False)
@pytest.mark.parametrize("data", [data, int_data], ids=["nan_data", "int_data"])
@pytest.mark.parametrize("cols", ["a", "d", ["a", "d"]])
@pytest.mark.parametrize("dropna", [True, False])
@pytest.mark.parametrize("sort", [True])
@pytest.mark.parametrize("ascending", [True, False])
def test_value_counts(self, data, cols, dropna, sort, ascending):
def value_counts(df, cols, dropna, sort, ascending, **kwargs):
return df[cols].value_counts(dropna=dropna, sort=sort, ascending=ascending)
if dropna and pandas.DataFrame(
data, columns=cols if is_list_like(cols) else [cols]
).isna().any(axis=None):
pytest.xfail(
reason="'dropna' parameter is forcibly disabled in OmniSci's GroupBy"
"due to performance issues, you can track this problem at:"
"https://github.com/modin-project/modin/issues/2896"
)
# A custom comparator is required because pandas is inconsistent about
# the order of equal values, and we can't match this behaviour. For more details:
# https://github.com/modin-project/modin/issues/1650
run_and_compare(
value_counts,
data=data,
cols=cols,
dropna=dropna,
sort=sort,
ascending=ascending,
comparator=df_equals_with_non_stable_indices,
)
@pytest.mark.parametrize(
"method", ["sum", "mean", "max", "min", "count", "nunique"]
)
def test_simple_agg_no_default(self, method):
def applier(df, **kwargs):
if isinstance(df, pd.DataFrame):
# At the end of the reduction function it does an inevitable `transpose`, which
# defaults to pandas. The following logic checks that `transpose` is the only
# function that falls back to pandas in the reduction operation flow.
with pytest.warns(UserWarning) as warns:
res = getattr(df, method)()
assert (
len(warns) == 1
), f"More than one warning were arisen: len(warns) != 1 ({len(warns)} != 1)"
message = warns[0].message.args[0]
assert (
re.match(r".*transpose.*defaulting to pandas", message) is not None
), f"Expected DataFrame.transpose defaulting to pandas warning, got: {message}"
else:
res = getattr(df, method)()
return res
run_and_compare(applier, data=self.data, force_lazy=False)
@pytest.mark.parametrize("data", [data, int_data])
@pytest.mark.parametrize("dropna", bool_arg_values)
def test_nunique(self, data, dropna):
def applier(df, **kwargs):
return df.nunique(dropna=dropna)
run_and_compare(applier, data=data, force_lazy=False)
class TestMerge:
data = {
"a": [1, 2, 3, 6, 5, 4],
"b": [10, 20, 30, 60, 50, 40],
"e": [11, 22, 33, 66, 55, 44],
}
data2 = {
"a": [4, 2, 3, 7, 1, 5],
"b": [40, 20, 30, 70, 10, 50],
"d": [4000, 2000, 3000, 7000, 1000, 5000],
}
on_values = ["a", ["a"], ["a", "b"], ["b", "a"], None]
how_values = ["inner", "left"]
@pytest.mark.parametrize("on", on_values)
@pytest.mark.parametrize("how", how_values)
@pytest.mark.parametrize("sort", [True, False])
def test_merge(self, on, how, sort):
def merge(lib, df1, df2, on, how, sort, **kwargs):
return df1.merge(df2, on=on, how=how, sort=sort)
run_and_compare(
merge, data=self.data, data2=self.data2, on=on, how=how, sort=sort
)
def test_merge_non_str_column_name(self):
def merge(lib, df1, df2, on, **kwargs):
return df1.merge(df2, on=on, how="inner")
run_and_compare(merge, data=[[1, 2], [3, 4]], data2=[[1, 2], [3, 4]], on=1)
h2o_data = {
"id1": ["id1", "id10", "id100", "id1000"],
"id2": ["id2", "id20", "id200", "id2000"],
"id3": ["id3", "id30", "id300", "id3000"],
"id4": [4, 40, 400, 4000],
"id5": [5, 50, 500, 5000],
"id6": [6, 60, 600, 6000],
"v1": [3.3, 4.4, 7.7, 8.8],
}
h2o_data_small = {
"id1": ["id10", "id100", "id1000", "id10000"],
"id4": [40, 400, 4000, 40000],
"v2": [30.3, 40.4, 70.7, 80.8],
}
h2o_data_medium = {
"id1": ["id10", "id100", "id1000", "id10000"],
"id2": ["id20", "id200", "id2000", "id20000"],
"id4": [40, 400, 4000, 40000],
"id5": [50, 500, 5000, 50000],
"v2": [30.3, 40.4, 70.7, 80.8],
}
h2o_data_big = {
"id1": ["id10", "id100", "id1000", "id10000"],
"id2": ["id20", "id200", "id2000", "id20000"],
"id3": ["id30", "id300", "id3000", "id30000"],
"id4": [40, 400, 4000, 40000],
"id5": [50, 500, 5000, 50000],
"id6": [60, 600, 6000, 60000],
"v2": [30.3, 40.4, 70.7, 80.8],
}
def _get_h2o_df(self, data):
df = pandas.DataFrame(data)
if "id1" in data:
df["id1"] = df["id1"].astype("category")
if "id2" in data:
df["id2"] = df["id2"].astype("category")
if "id3" in data:
df["id3"] = df["id3"].astype("category")
return df
# Currently OmniSci returns categories as string columns,
# so when cast back to category they only contain values
# present in the actual data, whereas in pandas the category
# would keep the old values as well. Simply casting category
# to string for comparison doesn't work because None cast to
# category and back to string becomes "nan". So we cast
# everything to category and then to string.
def _fix_category_cols(self, df):
if "id1" in df.columns:
df["id1"] = df["id1"].astype("category")
df["id1"] = df["id1"].astype(str)
if "id1_x" in df.columns:
df["id1_x"] = df["id1_x"].astype("category")
df["id1_x"] = df["id1_x"].astype(str)
if "id1_y" in df.columns:
df["id1_y"] = df["id1_y"].astype("category")
df["id1_y"] = df["id1_y"].astype(str)
if "id2" in df.columns:
df["id2"] = df["id2"].astype("category")
df["id2"] = df["id2"].astype(str)
if "id2_x" in df.columns:
df["id2_x"] = df["id2_x"].astype("category")
df["id2_x"] = df["id2_x"].astype(str)
if "id2_y" in df.columns:
df["id2_y"] = df["id2_y"].astype("category")
df["id2_y"] = df["id2_y"].astype(str)
if "id3" in df.columns:
df["id3"] = df["id3"].astype("category")
df["id3"] = df["id3"].astype(str)
def test_h2o_q1(self):
lhs = self._get_h2o_df(self.h2o_data)
rhs = self._get_h2o_df(self.h2o_data_small)
ref = lhs.merge(rhs, on="id1")
self._fix_category_cols(ref)
modin_lhs = pd.DataFrame(lhs)
modin_rhs = pd.DataFrame(rhs)
modin_res = modin_lhs.merge(modin_rhs, on="id1")
exp = to_pandas(modin_res)
self._fix_category_cols(exp)
df_equals(ref, exp)
def test_h2o_q2(self):
lhs = self._get_h2o_df(self.h2o_data)
rhs = self._get_h2o_df(self.h2o_data_medium)
ref = lhs.merge(rhs, on="id2")
self._fix_category_cols(ref)
modin_lhs = pd.DataFrame(lhs)
modin_rhs = pd.DataFrame(rhs)
modin_res = modin_lhs.merge(modin_rhs, on="id2")
exp = to_pandas(modin_res)
self._fix_category_cols(exp)
df_equals(ref, exp)
def test_h2o_q3(self):
lhs = self._get_h2o_df(self.h2o_data)
rhs = self._get_h2o_df(self.h2o_data_medium)
ref = lhs.merge(rhs, how="left", on="id2")
self._fix_category_cols(ref)
modin_lhs = pd.DataFrame(lhs)
modin_rhs = pd.DataFrame(rhs)
modin_res = modin_lhs.merge(modin_rhs, how="left", on="id2")
exp = to_pandas(modin_res)
self._fix_category_cols(exp)
df_equals(ref, exp)
def test_h2o_q4(self):
lhs = self._get_h2o_df(self.h2o_data)
rhs = self._get_h2o_df(self.h2o_data_medium)
ref = lhs.merge(rhs, on="id5")
self._fix_category_cols(ref)
modin_lhs = pd.DataFrame(lhs)
modin_rhs = pd.DataFrame(rhs)
modin_res = modin_lhs.merge(modin_rhs, on="id5")
exp = to_pandas(modin_res)
self._fix_category_cols(exp)
df_equals(ref, exp)
def test_h2o_q5(self):
lhs = self._get_h2o_df(self.h2o_data)
rhs = self._get_h2o_df(self.h2o_data_big)
ref = lhs.merge(rhs, on="id3")
self._fix_category_cols(ref)
modin_lhs = pd.DataFrame(lhs)
modin_rhs = pd.DataFrame(rhs)
modin_res = modin_lhs.merge(modin_rhs, on="id3")
exp = to_pandas(modin_res)
self._fix_category_cols(exp)
df_equals(ref, exp)
dt_data1 = {
"id": [1, 2],
"timestamp": pandas.to_datetime(["20000101", "20000201"], format="%Y%m%d"),
}
dt_data2 = {"id": [1, 2], "timestamp_year": [2000, 2000]}
def test_merge_dt(self):
def merge(df1, df2, **kwargs):
df1["timestamp_year"] = df1["timestamp"].dt.year
res = df1.merge(df2, how="left", on=["id", "timestamp_year"])
res["timestamp_year"] = res["timestamp_year"].fillna(np.int64(-1))
return res
run_and_compare(merge, data=self.dt_data1, data2=self.dt_data2)
left_data = {"a": [1, 2, 3, 4], "b": [10, 20, 30, 40], "c": [11, 12, 13, 14]}
right_data = {"c": [1, 2, 3, 4], "b": [10, 20, 30, 40], "d": [100, 200, 300, 400]}
@pytest.mark.parametrize("how", how_values)
@pytest.mark.parametrize(
"left_on, right_on", [["a", "c"], [["a", "b"], ["c", "b"]]]
)
def test_merge_left_right_on(self, how, left_on, right_on):
def merge(df1, df2, how, left_on, right_on, **kwargs):
return df1.merge(df2, how=how, left_on=left_on, right_on=right_on)
run_and_compare(
merge,
data=self.left_data,
data2=self.right_data,
how=how,
left_on=left_on,
right_on=right_on,
)
run_and_compare(
merge,
data=self.right_data,
data2=self.left_data,
how=how,
left_on=right_on,
right_on=left_on,
)
class TestBinaryOp:
data = {
"a": [1, 1, 1, 1, 1],
"b": [10, 10, 10, 10, 10],
"c": [100, 100, 100, 100, 100],
"d": [1000, 1000, 1000, 1000, 1000],
}
data2 = {
"a": [1, 1, 1, 1, 1],
"f": [2, 2, 2, 2, 2],
"b": [3, 3, 3, 3, 3],
"d": [4, 4, 4, 4, 4],
}
fill_values = [None, 1]
def test_binary_level(self):
def applier(df1, df2, **kwargs):
df2.index = generate_multiindex(len(df2))
return df1.add(df2, level=1)
# setting `force_lazy=False`, because we're expecting to fall back
# to pandas in that case, which is not supported in lazy mode
run_and_compare(applier, data=self.data, data2=self.data, force_lazy=False)
def test_add_cst(self):
def add(lib, df):
return df + 1
run_and_compare(add, data=self.data)
def test_add_list(self):
def add(lib, df):
return df + [1, 2, 3, 4]
run_and_compare(add, data=self.data)
@pytest.mark.parametrize("fill_value", fill_values)
def test_add_method_columns(self, fill_value):
def add1(lib, df, fill_value):
return df["a"].add(df["b"], fill_value=fill_value)
def add2(lib, df, fill_value):
return df[["a", "c"]].add(df[["b", "a"]], fill_value=fill_value)
run_and_compare(add1, data=self.data, fill_value=fill_value)
run_and_compare(add2, data=self.data, fill_value=fill_value)
def test_add_columns(self):
def add1(lib, df):
return df["a"] + df["b"]
def add2(lib, df):
return df[["a", "c"]] + df[["b", "a"]]
run_and_compare(add1, data=self.data)
run_and_compare(add2, data=self.data)
def test_add_columns_and_assign(self):
def add(lib, df):
df["sum"] = df["a"] + df["b"]
return df
run_and_compare(add, data=self.data)
def test_add_columns_and_assign_to_existing(self):
def add(lib, df):
df["a"] = df["a"] + df["b"]
return df
run_and_compare(add, data=self.data)
def test_mul_cst(self):
def mul(lib, df):
return df * 2
run_and_compare(mul, data=self.data)
def test_mul_list(self):
def mul(lib, df):
return df * [2, 3, 4, 5]
run_and_compare(mul, data=self.data)
@pytest.mark.parametrize("fill_value", fill_values)
def test_mul_method_columns(self, fill_value):
def mul1(lib, df, fill_value):
return df["a"].mul(df["b"], fill_value=fill_value)
def mul2(lib, df, fill_value):
return df[["a", "c"]].mul(df[["b", "a"]], fill_value=fill_value)
run_and_compare(mul1, data=self.data, fill_value=fill_value)
run_and_compare(mul2, data=self.data, fill_value=fill_value)
def test_mul_columns(self):
def mul1(lib, df):
return df["a"] * df["b"]
def mul2(lib, df):
return df[["a", "c"]] * df[["b", "a"]]
run_and_compare(mul1, data=self.data)
run_and_compare(mul2, data=self.data)
def test_mod_cst(self):
def mod(lib, df):
return df % 2
run_and_compare(mod, data=self.data)
def test_mod_list(self):
def mod(lib, df):
return df % [2, 3, 4, 5]
run_and_compare(mod, data=self.data)
@pytest.mark.parametrize("fill_value", fill_values)
def test_mod_method_columns(self, fill_value):
def mod1(lib, df, fill_value):
return df["a"].mod(df["b"], fill_value=fill_value)
def mod2(lib, df, fill_value):
return df[["a", "c"]].mod(df[["b", "a"]], fill_value=fill_value)
run_and_compare(mod1, data=self.data, fill_value=fill_value)
run_and_compare(mod2, data=self.data, fill_value=fill_value)
def test_mod_columns(self):
def mod1(lib, df):
return df["a"] % df["b"]
def mod2(lib, df):
return df[["a", "c"]] % df[["b", "a"]]
run_and_compare(mod1, data=self.data)
run_and_compare(mod2, data=self.data)
def test_truediv_cst(self):
def truediv(lib, df):
return df / 2
run_and_compare(truediv, data=self.data)
def test_truediv_list(self):
def truediv(lib, df):
return df / [1, 0.5, 0.2, 2.0]
run_and_compare(truediv, data=self.data)
@pytest.mark.parametrize("fill_value", fill_values)
def test_truediv_method_columns(self, fill_value):
def truediv1(lib, df, fill_value):
return df["a"].truediv(df["b"], fill_value=fill_value)
def truediv2(lib, df, fill_value):
return df[["a", "c"]].truediv(df[["b", "a"]], fill_value=fill_value)
run_and_compare(truediv1, data=self.data, fill_value=fill_value)
run_and_compare(truediv2, data=self.data, fill_value=fill_value)
def test_truediv_columns(self):
def truediv1(lib, df):
return df["a"] / df["b"]
def truediv2(lib, df):
return df[["a", "c"]] / df[["b", "a"]]
run_and_compare(truediv1, data=self.data)
run_and_compare(truediv2, data=self.data)
def test_floordiv_cst(self):
def floordiv(lib, df):
return df // 2
run_and_compare(floordiv, data=self.data)
def test_floordiv_list(self):
def floordiv(lib, df):
return df // [1, 0.54, 0.24, 2.01]
run_and_compare(floordiv, data=self.data)
@pytest.mark.parametrize("fill_value", fill_values)
def test_floordiv_method_columns(self, fill_value):
def floordiv1(lib, df, fill_value):
return df["a"].floordiv(df["b"], fill_value=fill_value)
def floordiv2(lib, df, fill_value):
return df[["a", "c"]].floordiv(df[["b", "a"]], fill_value=fill_value)
run_and_compare(floordiv1, data=self.data, fill_value=fill_value)
run_and_compare(floordiv2, data=self.data, fill_value=fill_value)
def test_floordiv_columns(self):
def floordiv1(lib, df):
return df["a"] // df["b"]
def floordiv2(lib, df):
return df[["a", "c"]] // df[["b", "a"]]
run_and_compare(floordiv1, data=self.data)
run_and_compare(floordiv2, data=self.data)
cmp_data = {
"a": [1, 2, 3, 4, 5],
"b": [10, 20, 30, 40, 50],
"c": [50.0, 40.0, 30.1, 20.0, 10.0],
}
cmp_fn_values = ["eq", "ne", "le", "lt", "ge", "gt"]
@pytest.mark.parametrize("cmp_fn", cmp_fn_values)
def test_cmp_cst(self, cmp_fn):
def cmp1(df, cmp_fn, **kwargs):
return getattr(df["a"], cmp_fn)(3)
def cmp2(df, cmp_fn, **kwargs):
return getattr(df, cmp_fn)(30)
run_and_compare(cmp1, data=self.cmp_data, cmp_fn=cmp_fn)
run_and_compare(cmp2, data=self.cmp_data, cmp_fn=cmp_fn)
@pytest.mark.parametrize("cmp_fn", cmp_fn_values)
def test_cmp_list(self, cmp_fn):
def cmp(df, cmp_fn, **kwargs):
return getattr(df, cmp_fn)([3, 30, 30.1])
run_and_compare(cmp, data=self.cmp_data, cmp_fn=cmp_fn)
@pytest.mark.parametrize("cmp_fn", cmp_fn_values)
def test_cmp_cols(self, cmp_fn):
def cmp1(df, cmp_fn, **kwargs):
return getattr(df["b"], cmp_fn)(df["c"])
def cmp2(df, cmp_fn, **kwargs):
return getattr(df[["b", "c"]], cmp_fn)(df[["a", "b"]])
run_and_compare(cmp1, data=self.cmp_data, cmp_fn=cmp_fn)
run_and_compare(cmp2, data=self.cmp_data, cmp_fn=cmp_fn)
@pytest.mark.parametrize("cmp_fn", cmp_fn_values)
@pytest.mark.parametrize("value", [2, 2.2, "a"])
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_cmp_mixed_types(self, cmp_fn, value, data):
def cmp(df, cmp_fn, value, **kwargs):
return getattr(df, cmp_fn)(value)
run_and_compare(cmp, data=data, cmp_fn=cmp_fn, value=value)
def test_filter_dtypes(self):
def filter(df, **kwargs):
return df[df.a < 4].dtypes
run_and_compare(filter, data=self.cmp_data)
@pytest.mark.xfail(
reason="Requires fix in OmniSci: https://github.com/intel-ai/omniscidb/pull/178"
)
def test_filter_empty_result(self):
def filter(df, **kwargs):
return df[df.a < 0]
run_and_compare(filter, data=self.cmp_data)
def test_complex_filter(self):
def filter_and(df, **kwargs):
return df[(df.a < 5) & (df.b > 20)]
def filter_or(df, **kwargs):
return df[(df.a < 3) | (df.b > 40)]
run_and_compare(filter_and, data=self.cmp_data)
run_and_compare(filter_or, data=self.cmp_data)
class TestDateTime:
datetime_data = {
"a": [1, 1, 2, 2],
"b": [11, 21, 12, 11],
"c": pandas.to_datetime(
["20190902", "20180913", "20190921", "20180903"], format="%Y%m%d"
),
}
def test_dt_year(self):
def dt_year(df, **kwargs):
return df["c"].dt.year
run_and_compare(dt_year, data=self.datetime_data)
def test_dt_month(self):
def dt_month(df, **kwargs):
return df["c"].dt.month
run_and_compare(dt_month, data=self.datetime_data)
def test_dt_day(self):
def dt_day(df, **kwargs):
return df["c"].dt.day
run_and_compare(dt_day, data=self.datetime_data)
class TestCategory:
data = {
"a": ["str1", "str2", "str1", "str3", "str2", None],
}
def test_cat_codes(self):
pandas_df = pandas.DataFrame(self.data)
pandas_df["a"] = pandas_df["a"].astype("category")
modin_df = pd.DataFrame(pandas_df)
modin_df["a"] = modin_df["a"].cat.codes
exp = to_pandas(modin_df)
pandas_df["a"] = pandas_df["a"].cat.codes
df_equals(pandas_df, exp)
class TestSort:
data = {
"a": [1, 2, 5, 2, 5, 4, 4, 5, 2],
"b": [1, 2, 3, 6, 5, 1, 4, 5, 3],
"c": [5, 4, 2, 3, 1, 1, 4, 5, 6],
"d": ["1", "4", "3", "2", "1", "6", "7", "5", "0"],
}
data_nulls = {
"a": [1, 2, 5, 2, 5, 4, 4, None, 2],
"b": [1, 2, 3, 6, 5, None, 4, 5, 3],
"c": [None, 4, 2, 3, 1, 1, 4, 5, 6],
}
data_multiple_nulls = {
"a": [1, 2, None, 2, 5, 4, 4, None, 2],
"b": [1, 2, 3, 6, 5, None, 4, 5, None],
"c": [None, 4, 2, None, 1, 1, 4, 5, 6],
}
cols_values = ["a", ["a", "b"], ["b", "a"], ["c", "a", "b"]]
index_cols_values = [None, "a", ["a", "b"]]
ascending_values = [True, False]
ascending_list_values = [[True, False], [False, True]]
na_position_values = ["first", "last"]
@pytest.mark.parametrize("cols", cols_values)
@pytest.mark.parametrize("ignore_index", bool_arg_values)
@pytest.mark.parametrize("ascending", ascending_values)
@pytest.mark.parametrize("index_cols", index_cols_values)
def test_sort_cols(self, cols, ignore_index, index_cols, ascending):
def sort(df, cols, ignore_index, index_cols, ascending, **kwargs):
if index_cols:
df = df.set_index(index_cols)
return df.sort_values(cols, ignore_index=ignore_index, ascending=ascending)
run_and_compare(
sort,
data=self.data,
cols=cols,
ignore_index=ignore_index,
index_cols=index_cols,
ascending=ascending,
# we're expecting to fall back to pandas in that case,
# which is not supported in lazy mode
force_lazy=(index_cols is None),
)
@pytest.mark.parametrize("ascending", ascending_list_values)
def test_sort_cols_asc_list(self, ascending):
def sort(df, ascending, **kwargs):
return df.sort_values(["a", "b"], ascending=ascending)
run_and_compare(
sort,
data=self.data,
ascending=ascending,
)
@pytest.mark.parametrize("ascending", ascending_values)
def test_sort_cols_str(self, ascending):
def sort(df, ascending, **kwargs):
return df.sort_values("d", ascending=ascending)
run_and_compare(
sort,
data=self.data,
ascending=ascending,
)
@pytest.mark.parametrize("cols", cols_values)
@pytest.mark.parametrize("ascending", ascending_values)
@pytest.mark.parametrize("na_position", na_position_values)
def test_sort_cols_nulls(self, cols, ascending, na_position):
def sort(df, cols, ascending, na_position, **kwargs):
return df.sort_values(cols, ascending=ascending, na_position=na_position)
run_and_compare(
sort,
data=self.data_nulls,
cols=cols,
ascending=ascending,
na_position=na_position,
)
# Issue #1767 - rows order is not preserved for NULL keys
# @pytest.mark.parametrize("cols", cols_values)
# @pytest.mark.parametrize("ascending", ascending_values)
# @pytest.mark.parametrize("na_position", na_position_values)
# def test_sort_cols_multiple_nulls(self, cols, ascending, na_position):
# def sort(df, cols, ascending, na_position, **kwargs):
# return df.sort_values(cols, ascending=ascending, na_position=na_position)
#
# run_and_compare(
# sort,
# data=self.data_multiple_nulls,
# cols=cols,
# ascending=ascending,
# na_position=na_position,
# )
class TestBadData:
bad_for_arrow = {
"a": ["a", [[1, 2], [3]], [3, 4]],
"b": ["b", [1, 2], [3, 4]],
"c": ["1", "2", 3],
}
bad_for_omnisci = {
"b": [[1, 2], [3, 4], [5, 6]],
"c": ["1", "2", "3"],
}
ok_data = {"d": np.arange(3), "e": np.arange(3), "f": np.arange(3)}
def _get_pyarrow_table(self, obj):
if not isinstance(obj, (pandas.DataFrame, pandas.Series)):
obj = pandas.DataFrame(obj)
return pyarrow.Table.from_pandas(obj)
@pytest.mark.parametrize("data", [bad_for_arrow, bad_for_omnisci])
def test_construct(self, data):
def applier(df, *args, **kwargs):
return repr(df)
run_and_compare(applier, data=data, force_lazy=False)
def test_from_arrow(self):
at = self._get_pyarrow_table(self.bad_for_omnisci)
pd_df = pandas.DataFrame(self.bad_for_omnisci)
md_df = pd.utils.from_arrow(at)
# force materialization
repr(md_df)
df_equals(md_df, pd_df)
@pytest.mark.parametrize("data", [bad_for_arrow, bad_for_omnisci])
def test_methods(self, data):
def applier(df, *args, **kwargs):
return df.T.drop(columns=[0])
run_and_compare(applier, data=data, force_lazy=False)
def test_with_normal_frame(self):
def applier(df1, df2, *args, **kwargs):
return df2.join(df1)
run_and_compare(
applier, data=self.bad_for_omnisci, data2=self.ok_data, force_lazy=False
)
def test_heterogenous_fillna(self):
def fillna(df, **kwargs):
return df["d"].fillna("a")
run_and_compare(fillna, data=self.ok_data, force_lazy=False)
class TestDropna:
data = {
"col1": [1, 2, None, 2, 1],
"col2": [None, 3, None, 2, 1],
"col3": [2, 3, 4, None, 5],
"col4": [1, 2, 3, 4, 5],
}
@pytest.mark.parametrize("subset", [None, ["col1", "col2"]])
@pytest.mark.parametrize("how", ["all", "any"])
def test_dropna(self, subset, how):
def applier(df, *args, **kwargs):
return df.dropna(subset=subset, how=how)
run_and_compare(applier, data=self.data)
def test_dropna_multiindex(self):
index = generate_multiindex(len(self.data["col1"]))
md_df = pd.DataFrame(self.data, index=index)
pd_df = pandas.DataFrame(self.data, index=index)
md_res = md_df.dropna()._to_pandas()
pd_res = pd_df.dropna()
# HACK: all strings in OmniSci are considered to be categories, which breaks
# equality checks against pandas; this line discards the category dtype
md_res.index = pandas.MultiIndex.from_tuples(
md_res.index.values, names=md_res.index.names
)
df_equals(md_res, pd_res)
@pytest.mark.skip("Dropna logic for GroupBy is disabled for now")
@pytest.mark.parametrize("by", ["col1", ["col1", "col2"], ["col1", "col4"]])
@pytest.mark.parametrize("dropna", [True, False])
def test_dropna_groupby(self, by, dropna):
def applier(df, *args, **kwargs):
# The OmniSci engine preserves NaNs in the result of groupby,
# so we replace NaNs with 0 to match pandas.
# https://github.com/modin-project/modin/issues/2878
return df.groupby(by=by, dropna=dropna).sum().fillna(0)
run_and_compare(applier, data=self.data)
class TestUnsupportedColumns:
@pytest.mark.parametrize(
"data,is_good",
[
[["1", "2", None, "2", "1"], True],
[[None, "3", None, "2", "1"], True],
[[1, "2", None, "2", "1"], False],
[[None, 3, None, "2", "1"], False],
],
)
def test_unsupported_columns(self, data, is_good):
pandas_df = pandas.DataFrame({"col": data})
obj, bad_cols = OmnisciOnNativeDataframePartitionManager._get_unsupported_cols(
pandas_df
)
if is_good:
assert obj and not bad_cols
else:
assert not obj and bad_cols == ["col"]
class TestConstructor:
@pytest.mark.parametrize(
"index",
[
None,
pandas.Index([1, 2, 3]),
|
pandas.MultiIndex.from_tuples([(1, 1), (2, 2), (3, 3)])
|
pandas.MultiIndex.from_tuples
|
import matplotlib
import matplotlib.pylab as plt
import os
import seaborn as sns
import pandas as pd
import itertools
import numpy as np
def plot_graph(data, baseline, plot_name, figsize, legend):
"""
Plot the input data and save it as a .png (the LaTeX-compatible .pgf export is commented out).
"""
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)
# sns.set()
sns.set_context("paper")
# sns.set(rc={'figure.figsize':figsize})
palette = 'summer'  # alternatives: 'copper_r', 'BuPu', 'afmhot_r', 'cool_r'; see https://medium.com/@morganjonesartist/color-guide-to-seaborn-palettes-da849406d44f
sns.set_theme(style="whitegrid")
g = sns.catplot(data=data, kind="bar", x='model', y='score', hue="Metric", ci='sd', palette=palette, legend=legend, legend_out=True, height=figsize[1], aspect=figsize[0]/figsize[1])
g.despine(left=True)
g.set(ylim=(0, .1))
g.map(plt.axhline, y=baseline, color='purple', linestyle='dotted')
# plt.legend(loc='upper right', title='Metric')
plt.xlabel('')
plt.ylabel('Score')
# plt.title(t_name.replace('_', ' ').title())
folder = os.path.dirname(os.path.abspath(__file__)) + '/plots/'
if not os.path.isdir(folder):
os.makedirs(folder)
# plt.savefig(folder + '{}.pgf'.format(plot_name))
plt.savefig(folder + '{}{}.png'.format(plot_name, '' if legend else '_wol'), bbox_inches='tight')
if __name__ == "__main__":
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)
col_names = ['hits@1','hits@10','hits@3','mrr']
legend = True
for ds in ['fb', 'wn']:
temp_names =['']
df_list = list()
df_plot = pd.DataFrame()
baseline = 0.006
if ds == 'wn':
legend = False
baseline = 0.0015
plot_name = 'lp_{}'.format(ds)
textwidth_in = 6.69423
figsize = [textwidth_in * 0.8, textwidth_in * .5]
for model in ['GVAE', 'GCVAE']:
df_1 = pd.read_csv('graphs/data/LP/lp_{}_{}.csv'.format(model, ds))
df_temp = df_1[[col for col in df_1.columns if '_temp' in col ]]
df_temp.drop([col for col in df_temp.columns if ('__MIN' in col or '__MAX' in col)], axis=1, inplace=True)
std = df_temp.std(axis=0).to_list()
std = np.array(std[-1:]+std[:-1])
df_1.drop([col for col in df_1.columns if ('__MIN' in col or '__MAX' in col or '_temp' in col)], axis=1, inplace=True)
df_1.drop(['Step'], axis=1, inplace=True)
df_1 = df_1.rename(columns=dict(zip(df_1.columns, col_names)))
scale = .5 if ds == 'fb' else .6
for n in [0, scale,-scale]:
df_plot['score'] = np.array(df_1.stack([0]).to_list()) + n*std
df_plot['model'] = ['RGVAE' if model=='GVAE' else 'cRGVAE'] * len(df_plot)
df_plot['Metric'] = col_names
df_list.append(df_plot.copy())
# df_1.fillna(method='bfill', inplace=True)
# df_list.append(df_1.loc([metric]) for metric in col_names)
df_plot =
|
pd.concat(df_list, axis=0)
|
pandas.concat
|
"""
Functions to correct and filter data matrix from LC-MS Metabolomics data.
"""
import numpy as np
import pandas as pd
from scipy.interpolate import CubicSpline, interp1d
from statsmodels.nonparametric.smoothers_lowess import lowess
from typing import List, Callable, Union, Optional
from ._names import *
def input_na(df: pd.DataFrame, classes: pd.Series, mode: str) -> pd.DataFrame:
"""
Fill missing values.
Parameters
----------
df : pd.DataFrame
classes: pd.Series
mode : {'zero', 'mean', 'min'}
Returns
-------
filled : pd.DataFrame
"""
if mode == "zero":
return df.fillna(0)
elif mode == "mean":
return (df.groupby(classes)
.apply(lambda x: x.fillna(x.mean()))
.droplevel(0))
elif mode == "min":
return (df.groupby(classes)
.apply(lambda x: x.fillna(x.min()))
.droplevel(0))
else:
msg = "mode should be `zero`, `mean` or `min`"
raise ValueError(msg)
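# Usage sketch (hypothetical data, not shipped with this module):
#   df = pd.DataFrame({"ft1": [1.0, None, 3.0, None]})
#   classes = pd.Series(["QC", "QC", "sample", "sample"], index=df.index)
#   input_na(df, classes, mode="mean")
# fills each NaN with the mean of its own class ("QC" -> 1.0, "sample" -> 3.0).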
def average_replicates(data: pd.DataFrame, sample_id: pd.Series,
classes: pd.Series,
process_classes: List[str]) -> pd.DataFrame:
"""
Group samples by id and compute the average.
Parameters
----------
data: pd.DataFrame
sample_id: pd.Series
classes: pd.Series
process_classes: list[str]
Returns
-------
pd.DataFrame
"""
include_samples = classes[classes.isin(process_classes)].index
exclude_samples = classes[~classes.isin(process_classes)].index
mapper = sample_id[include_samples].drop_duplicates()
mapper = pd.Series(data=mapper.index, index=mapper.values)
included_data = data.loc[include_samples, :]
excluded_data = data.loc[exclude_samples, :]
averaged_data = (included_data.groupby(sample_id[include_samples])
.mean())
averaged_data.index = averaged_data.index.map(mapper)
result =
|
pd.concat((averaged_data, excluded_data))
|
pandas.concat
|
""" configuration run result """
import pandas
from datetime import datetime
from decimal import Decimal
from .connection import get_connection
def _get_connection():
_cnxn = get_connection()
return _cnxn
def insert(result):
_cnxn = _get_connection()
cursor = _cnxn.cursor()
with cursor.execute("""
INSERT INTO [dbo].[wsrt_run_result]
(
[TotalNetProfit],
[GrossProfit],
[GrossLoss],
[ProfitFactor],
[ExpectedPayoff],
[AbsoluteDrawdown],
[MaximalDrawdown],
[RelativeDrawdown],
[TotalTrades],
[RunFinishDateTimeUtc]
)
VALUES
(
?,?,?,?,?,?,?,?,?,?
)
""",
# key names below are assumed to mirror the column order in the INSERT above
result['TotalNetProfit'],
result['GrossProfit'],
result['GrossLoss'],
result['ProfitFactor'],
result['ExpectedPayoff'],
result['AbsoluteDrawdown'],
result['MaximalDrawdown'],
result['RelativeDrawdown'],
result['TotalTrades'],
datetime.utcnow()):
pass
_cnxn.commit()
_cnxn.close()
def mark_as_processing(id):
_cnxn = _get_connection()
cursor = _cnxn.cursor()
tsql = "UPDATE [dbo].[wsrt_run_result] SET [RunStartDateTimeUtc] = ? WHERE [ResultId] = ?"
with cursor.execute(tsql, datetime.utcnow(), id):
pass
_cnxn.commit()
_cnxn.close()
def get_for_processing_by_run_id(run_id):
run_result = None
_cnxn = _get_connection()
cursor = _cnxn.cursor()
tsql = """
SELECT TOP 1
[ResultId],
[RunId],
[OptionId]
FROM
[dbo].[wsrt_run_result]
WHERE
[RunId] = ?
AND [RunStartDateTimeUtc] IS NULL
"""
with cursor.execute(tsql, run_id):
row = cursor.fetchone()
if row:
run_result = dict(zip([column[0] for column in cursor.description], row))
if run_result:
# mark result as being processed, so other terminals not to pick it
process_tsql = """
UPDATE [dbo].[wsrt_run_result] SET [RunStartDateTimeUtc] = ?
WHERE [ResultId] = ?
"""
with cursor.execute(process_tsql, datetime.utcnow(), run_result['ResultId']):
_cnxn.commit()
_cnxn.close()
return run_result
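# Usage sketch (hypothetical run id, assumptions noted inline): a worker polls for
# the next unprocessed result of a run; the row is stamped with RunStartDateTimeUtc
# inside get_for_processing_by_run_id so concurrent terminals will skip it.
# pending = get_for_processing_by_run_id(42)   # 42 is a made-up RunId
# if pending is not None:
#     run_backtest(pending['OptionId'])        # run_backtest is hypothetical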
def get_completed_run_results_by_configuration_id(configuration_id):
tsql = """
SELECT rr.* from dbo.wsrt_run_result rr
WHERE
rr.RunId
IN (SELECT r.RunId FROM dbo.wsrt_run r WHERE r.ConfigurationId = ?)
AND rr.RunFinishDateTimeUtc IS NOT NULL
"""
cnxn = _get_connection()
cursor = cnxn.cursor()
cursor.execute(tsql, configuration_id)
rows = [dict(zip([column[0] for column in cursor.description], row)) for row in cursor.fetchall()]
cursor.close()
del cursor
cnxn.close()
return rows
def update_run_result_with_report(report):
""" """
_cnxn = _get_connection()
cursor = _cnxn.cursor()
tsql = """
UPDATE [dbo].[wsrt_run_result] SET
[TotalNetProfit] = ?,
[GrossProfit] = ?,
[GrossLoss] = ?,
[ProfitFactor] = ?,
[ExpectedPayoff] = ?,
[AbsoluteDrawdown] = ?,
[MaximalDrawdown] = ?,
[TotalTrades] = ?,
[RunFinishDateTimeUtc] = ?
WHERE
[ResultId] = ?
"""
cursor.execute(tsql,
Decimal(report['TotalNetProfit']) if report['TotalNetProfit'] is not None else None,
Decimal(report['GrossProfit']) if report['GrossProfit'] is not None else None,
Decimal(report['GrossLoss']) if report['GrossLoss'] is not None else None,
Decimal(report['ProfitFactor']) if report['ProfitFactor'] is not None else None,
Decimal(report['ExpectedPayoff']) if report['ExpectedPayoff'] is not None else None,
Decimal(report['AbsoluteDrawdown']) if report['AbsoluteDrawdown'] is not None else None,
Decimal(report['MaximalDrawdown']) if report['MaximalDrawdown'] is not None else None,
int(report['TotalTrades']) if report['TotalTrades'] is not None else None,
datetime.utcnow(),
report['ResultId'])
for trade in report['Trades']:
add_run_result_trade(cursor, report['ResultId'], trade)
cursor.close()
del cursor
_cnxn.commit()
_cnxn.close()
def add_run_result_trade(cursor, result_id, trade):
tsql = """
INSERT INTO [dbo].[wsrt_run_result_trade]
(
[ResultId],
[OpenTime],
[Type],
[CloseTime],
[Profit]
)
VALUES
(
?,?,?,?,?
)
"""
cursor.execute(tsql, result_id, trade['OpenTime'], trade['Type'], trade['CloseTime'], trade['Profit'])
def get_run_result_trades_by_result_id(result_id):
tsql = """
SELECT
rrt.*
FROM
dbo.wsrt_run_result_trade rrt
WHERE
rrt.ResultId = ?
ORDER BY
rrt.CloseTime ASC
"""
cnxn = _get_connection()
cursor = cnxn.cursor()
cursor.execute(tsql, result_id)
rows = [dict(zip([column[0] for column in cursor.description], row)) for row in cursor.fetchall()]
df =
|
pandas.DataFrame(rows)
|
pandas.DataFrame
|
import logging
import random
from fastapi import APIRouter
from fastapi.encoders import jsonable_encoder
from fastapi.responses import JSONResponse
import pandas as pd
from pydantic import BaseModel, Field, validator
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from sklearn.neighbors import NearestNeighbors
import json
from joblib import load
model=load('knn_final.joblib')
df = pd.read_csv("https://raw.githubusercontent.com/BW-pilot/MachineLearning/master/spotify_final.csv")
spotify = df.drop(columns = ['track_id'])
scaler = StandardScaler()
spotify_scaled = scaler.fit_transform(spotify)
log = logging.getLogger(__name__)
router = APIRouter()
def knn_predictor(audio_feats, k=20):
"""
differences_df = knn_predictor(audio_features)
"""
audio_feats_scaled = scaler.transform([audio_feats])
##Nearest Neighbors model
knn = model
# make prediction
prediction = knn.kneighbors(audio_feats_scaled)
# create an index for similar songs
similar_songs_index = prediction[1][0][:k].tolist()
# Create an empty list to store similar song names
similar_song_ids = []
similar_song_names = []
# loop over the indexes and append song names to empty list above
for i in similar_songs_index:
song_id = df['track_id'].iloc[i]
similar_song_ids.append(song_id)
#################################################
column_names = spotify.columns.tolist()
# put scaled audio features into a dataframe
audio_feats_scaled_df = pd.DataFrame(audio_feats_scaled, columns=column_names)
# create empty list of similar songs' features
similar_songs_features = []
# loop through the indexes of similar songs to get audio features for each similar song
for index in similar_songs_index:
list_of_feats = spotify.iloc[index].tolist()
similar_songs_features.append(list_of_feats)
# scale the features and turn them into a dataframe
similar_feats_scaled = scaler.transform(similar_songs_features)
similar_feats_scaled_df =
|
pd.DataFrame(similar_feats_scaled, columns=column_names)
|
pandas.DataFrame
|
"""Time series feature generator functions."""
from typing import Dict, List, Optional, Union
import holidays
import numpy as np
import pandas as pd
from tsfeast.utils import to_list
def get_busdays_in_month(dt: pd.Timestamp) -> int:
"""
Get the number of business days in a month period, using US holidays.
Parameters
----------
dt: pd.Timestamp
Desired month.
Returns
-------
int
Number of business days in the month.
"""
chooser: Dict[bool, pd.Timestamp] = {
True: dt,
False: dt - pd.tseries.offsets.MonthBegin()
}
month_begin = chooser[dt.is_month_start]
month_end = dt + pd.tseries.offsets.MonthBegin(1) # np.busday_count end date is exclusive
us_holidays = list(holidays.US(years=dt.year).keys())
return np.busday_count(month_begin.date(), month_end.date(), holidays=us_holidays) # type: ignore #pylint: disable=line-too-long # noqa
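# Worked example (sketch): January 2021 has 21 weekdays, two of which are US
# holidays (2021-01-01 New Year's Day, 2021-01-18 MLK Day), so
# get_busdays_in_month(pd.Timestamp("2021-01-01")) returns 19 with the standard
# `holidays` US calendar.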
def get_datetime_features(
data: Union[pd.DataFrame, pd.Series], date_col: Optional[str] = None,
dt_format: Optional[str] = None, freq: Optional[str] = None
) -> pd.DataFrame:
"""
Get features based on datetime index, including year, month, week, weekday, quarter, days in
month, business days in month and leap year.
Parameters
----------
data: pd.DataFrame, pd.Series
Original data.
date_col: Optional[str]
Column name containing date/timestamp.
dt_format: Optional[str]
Date/timestamp format, e.g. `%Y-%m-%d` for `2020-01-31`.
freq: Optional[str]
Date frequency.
Returns
-------
pd.DataFrame
Date features.
"""
if isinstance(data, pd.DataFrame):
if date_col is None:
raise ValueError('`date_col` cannot be none when passing a DataFrame.')
dates = data[date_col]
elif isinstance(data, pd.Series):
dates = data
else:
raise ValueError('`data` must be a DataFrame or Series.')
if not freq:
freq = pd.infer_freq(pd.DatetimeIndex(pd.to_datetime(dates, format=dt_format)))
X_dt = pd.DatetimeIndex(pd.to_datetime(dates, format=dt_format)) # enforce DatetimeIndex
dt_features = pd.DataFrame()
dt_features['year'] = X_dt.year # pylint: disable=no-member
dt_features['quarter'] = X_dt.quarter # pylint: disable=no-member
dt_features['month'] = X_dt.month # pylint: disable=no-member
if freq == 'D':
dt_features['week'] = X_dt.week # pylint: disable=no-member
dt_features['weekday'] = X_dt.weekday # pylint: disable=no-member
if freq and 'M' in freq:
dt_features['days_in_month'] = X_dt.days_in_month # pylint: disable=no-member
dt_features['bdays_in_month'] = pd.Series(X_dt).apply(get_busdays_in_month)
dt_features['leap_year'] = X_dt.is_leap_year.astype(int) # pylint: disable=no-member
dt_features.index = data.index
return dt_features
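# Usage sketch (hypothetical monthly series): for month-end data the inferred
# frequency contains "M", so days_in_month, bdays_in_month and leap_year are added,
# while week/weekday only appear for daily data.
# dates = pd.Series(pd.date_range("2020-01-31", periods=3, freq="M"))
# get_datetime_features(dates)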
def get_lag_features(data: pd.DataFrame, n_lags: int) -> pd.DataFrame:
"""
Get n-lagged features for data.
Parameters
----------
data: pd.DataFrame
Original data.
n_lags: int
Number of lags to generate.
Returns
-------
pd.DataFrame
Lagged values of specified dataset.
"""
lags = []
for n in range(1, n_lags+1):
df = data.copy().shift(n)
df.columns = [f'{x}_lag_{n}' for x in data.columns]
lags.append(df)
return pd.concat(lags, axis=1)
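# Usage sketch (hypothetical data): two lags of a single column "y" yield columns
# "y_lag_1" and "y_lag_2", with NaN padding in the first rows.
# df = pd.DataFrame({"y": [1, 2, 3, 4]})
# get_lag_features(df, n_lags=2)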
def get_rolling_features(data: pd.DataFrame, window_lengths: List[int]) -> pd.DataFrame:
"""
Get rolling metrics (mean, std, min, max) for each specified window length.
Parameters
----------
data: pd.DataFrame
Original data.
window_lengths: List[int]
List of window lengths to generate.
Returns
-------
pd.DataFrame
Rolling mean, std, min and max for each specified window length.
"""
window_lengths = to_list(window_lengths)
df = data.copy()
windows = []
metrics = ['sum', 'mean', 'std', 'min', 'max']
for win in window_lengths:
for m in metrics:
windows.append(
pd.DataFrame(
df.rolling(win).agg(m).values,
columns=[f'{c}_{win}_pd_{m}' for c in df.columns],
index=df.index
)
)
return
|
pd.concat(windows, axis=1)
|
pandas.concat
|
import natsort
import numpy as np
import pandas as pd
import plotly.io as pio
import plotly.express as px
import plotly.graph_objects as go
import plotly.figure_factory as ff
import re
import traceback
from io import BytesIO
from sklearn.decomposition import PCA
from sklearn.metrics import pairwise as pw
import json
import statistics
import matplotlib.pyplot as plt
import matplotlib_venn as venn
from matplotlib_venn import venn2, venn3, venn3_circles
from PIL import Image
from upsetplot import from_memberships
from upsetplot import plot as upplot
import pkg_resources
def natsort_index_keys(x):
order = natsort.natsorted(np.unique(x.values))
return pd.Index([order.index(el) for el in x], name=x.name)
def natsort_list_keys(x):
order = natsort.natsorted(np.unique(x))
return [order.index(el) for el in x]
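# Example (sketch): natsort_list_keys(["10K", "03K", "80K"]) returns [1, 0, 2],
# i.e. the rank of each fraction label in natural sort order, which is what the
# sort_index/sort_values calls with key=... below rely on.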
class SpatialDataSet:
regex = {
"imported_columns": "^[Rr]atio H/L (?!normalized|type|is.*|variability|count)[^ ]+|^Ratio H/L variability.... .+|^Ratio H/L count .+|id$|[Mm][Ss].*[cC]ount.+$|[Ll][Ff][Qq].*|.*[nN]ames.*|.*[Pp][rR]otein.[Ii][Dd]s.*|[Pp]otential.[cC]ontaminant|[Oo]nly.[iI]dentified.[bB]y.[sS]ite|[Rr]everse|[Ss]core|[Qq]-[Vv]alue|R.Condition|PG.Genes|PG.ProteinGroups|PG.Cscore|PG.Qvalue|PG.RunEvidenceCount|PG.Quantity|^Proteins$|^Sequence$"
}
acquisition_set_dict = {
"LFQ6 - Spectronaut" : ["LFQ intensity", "MS/MS count"],
"LFQ5 - Spectronaut" : ["LFQ intensity", "MS/MS count"],
"LFQ5 - MQ" : ["[Ll][Ff][Qq].[Ii]ntensity", "[Mm][Ss]/[Mm][Ss].[cC]ount", "[Ii]ntensity"],
"LFQ6 - MQ" : ["[Ll][Ff][Qq].[Ii]ntensity", "[Mm][Ss]/[Mm][Ss].[cC]ount", "[Ii]ntensity"],
"SILAC - MQ" : [ "[Rr]atio.[Hh]/[Ll](?!.[Vv]aria|.[Cc]ount)","[Rr]atio.[Hh]/[Ll].[Vv]ariability.\[%\]", "[Rr]atio.[Hh]/[Ll].[cC]ount"],
"Custom": ["(?!Protein IDs|Gene names)"]
}
Spectronaut_columnRenaming = {
"R.Condition": "Map", "PG.Genes" : "Gene names", "PG.Qvalue": "Q-value", "PG.Cscore":"C-Score",
"PG.ProteinGroups" : "Protein IDs", "PG.RunEvidenceCount" : "MS/MS count", "PG.Quantity" : "LFQ intensity"
}
css_color = ["#b2df8a", "#6a3d9a", "#e31a1c", "#b15928", "#fdbf6f", "#ff7f00", "#cab2d6", "#fb9a99", "#1f78b4", "#ffff99", "#a6cee3",
"#33a02c", "blue", "orange", "goldenrod", "lightcoral", "magenta", "brown", "lightpink", "red", "turquoise",
"khaki", "darkgoldenrod","darkturquoise", "darkviolet", "greenyellow", "darksalmon", "hotpink", "indianred", "indigo","darkolivegreen",
"coral", "aqua", "beige", "bisque", "black", "blanchedalmond", "blueviolet", "burlywood", "cadetblue", "yellowgreen", "chartreuse",
"chocolate", "cornflowerblue", "cornsilk", "darkblue", "darkcyan", "darkgray", "darkgrey", "darkgreen", "darkkhaki", "darkmagenta",
"darkorange", "darkorchid", "darkred", "darkseagreen", "darkslateblue", "snow", "springgreen", "darkslategrey", "mediumpurple", "oldlace",
"olive", "lightseagreen", "deeppink", "deepskyblue", "dimgray", "dimgrey", "dodgerblue", "firebrick", "floralwhite", "forestgreen",
"fuchsia", "gainsboro", "ghostwhite", "gold", "gray", "ivory", "lavenderblush", "lawngreen", "lemonchiffon", "lightblue", "lightcyan",
"fuchsia", "gainsboro", "ghostwhite", "gold", "gray", "ivory", "lavenderblush", "lawngreen", "lemonchiffon", "lightblue", "lightcyan",
"lightgoldenrodyellow", "lightgray", "lightgrey", "lightgreen", "lightsalmon", "lightskyblue", "lightslategray", "lightslategrey",
"lightsteelblue", "lightyellow", "lime", "limegreen", "linen", "maroon", "mediumaquamarine", "mediumblue", "mediumseagreen",
"mediumslateblue", "mediumspringgreen", "mediumturquoise", "mediumvioletred", "midnightblue", "mintcream", "mistyrose", "moccasin",
"olivedrab", "orangered", "orchid", "palegoldenrod", "palegreen", "paleturquoise", "palevioletred", "papayawhip", "peachpuff", "peru",
"pink", "plum", "powderblue", "rosybrown", "royalblue", "saddlebrown", "salmon", "sandybrown", "seagreen", "seashell", "sienna", "silver",
"skyblue", "slateblue", "steelblue", "teal", "thistle", "tomato", "violet", "wheat", "white", "whitesmoke", "slategray", "slategrey",
"aquamarine", "azure","crimson", "cyan", "darkslategray", "grey","mediumorchid","navajowhite", "navy"]
analysed_datasets_dict = {}
df_organellarMarkerSet = pd.read_csv(pkg_resources.resource_stream(__name__, 'annotations/organellemarkers/{}.csv'.format("Homo sapiens - Uniprot")),
usecols=lambda x: bool(re.match("Gene name|Compartment", x)))
df_organellarMarkerSet = df_organellarMarkerSet.rename(columns={"Gene name":"Gene names"})
df_organellarMarkerSet = df_organellarMarkerSet.astype({"Gene names": "str"})
def __init__(self, filename, expname, acquisition, comment, name_pattern="e.g.:.* (?P<cond>.*)_(?P<rep>.*)_(?P<frac>.*)", reannotate_genes=False, **kwargs):
self.filename = filename
self.expname = expname
self.acquisition = acquisition
self.name_pattern = name_pattern
self.comment = comment
self.imported_columns = self.regex["imported_columns"]
self.fractions, self.map_names = [], []
self.df_01_stacked, self.df_log_stacked = pd.DataFrame(), pd.DataFrame()
if acquisition == "SILAC - MQ":
if "RatioHLcount" not in kwargs.keys():
self.RatioHLcount = 2
else:
self.RatioHLcount = kwargs["RatioHLcount"]
del kwargs["RatioHLcount"]
if "RatioVariability" not in kwargs.keys():
self.RatioVariability = 30
else:
self.RatioVariability = kwargs["RatioVariability"]
del kwargs["RatioVariability"]
elif acquisition == "Custom":
self.custom_columns = kwargs["custom_columns"]
self.custom_normalized = kwargs["custom_normalized"]
self.imported_columns = "^"+"$|^".join(["$|^".join(el) if type(el) == list else el for el in self.custom_columns.values() if el not in [[], None, ""]])+"$"
#elif acquisition == "LFQ5 - MQ" or acquisition == "LFQ6 - MQ" or acquisition == "LFQ6 - Spectronaut" or acquisition == "LFQ5 - Spectronaut":
else:
if "summed_MSMS_counts" not in kwargs.keys():
self.summed_MSMS_counts = 2
else:
self.summed_MSMS_counts = kwargs["summed_MSMS_counts"]
del kwargs["summed_MSMS_counts"]
if "consecutiveLFQi" not in kwargs.keys():
self.consecutiveLFQi = 4
else:
self.consecutiveLFQi = kwargs["consecutiveLFQi"]
del kwargs["consecutiveLFQi"]
#self.markerset_or_cluster = False if "markerset_or_cluster" not in kwargs.keys() else kwargs["markerset_or_cluster"]
if "organism" not in kwargs.keys():
marker_table = pd.read_csv(pkg_resources.resource_stream(__name__, 'annotations/complexes/{}.csv'.format("Homo sapiens - Uniprot")))
self.markerproteins = {k: v.replace(" ", "").split(",") for k,v in zip(marker_table["Cluster"], marker_table["Members - Gene names"])}
else:
assert kwargs["organism"]+".csv" in pkg_resources.resource_listdir(__name__, "annotations/complexes")
marker_table = pd.read_csv(pkg_resources.resource_stream(__name__, 'annotations/complexes/{}.csv'.format(kwargs["organism"])))
self.markerproteins = {k: v.replace(" ", "").split(",") for k,v in zip(marker_table["Cluster"], marker_table["Members - Gene names"])}
self.organism = kwargs["organism"]
del kwargs["organism"]
self.analysed_datasets_dict = {}
self.analysis_summary_dict = {}
def data_reading(self, filename=None, content=None):
"""
Data import. Can read the df_original from a file or buffer.
df_original contains all information of the raw file; tab separated file is imported,
Args:
self:
filename: string
imported_columns : dictionry; columns that correspond to this regular expression will be imported
filename: default None, to use the class attribute. Otherwise overwrites the class attribute upon success.
content: default None, to use the filename. Any valid input to pd.read_csv can be provided, e.g. a StringIO buffer.
Returns:
self.df_orginal: raw, unprocessed dataframe, single level column index
"""
# use instance attribute if no filename is provided
if filename is None:
filename = self.filename
# if no buffer is provided for the content read straight from the file
if content is None:
content = filename
if filename.endswith("xls") or filename.endswith("txt"):
self.df_original = pd.read_csv(content, sep="\t", comment="#", usecols=lambda x: bool(re.match(self.imported_columns, x)), low_memory = True)
else: #assuming csv file
self.df_original = pd.read_csv(content, sep=",", comment="#", usecols=lambda x: bool(re.match(self.imported_columns, x)), low_memory = True)
assert self.df_original.shape[0]>10 and self.df_original.shape[1]>5
self.filename = filename
return self.df_original
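# Usage sketch (file name and name_pattern are hypothetical):
# ds = SpatialDataSet("proteinGroups.txt", "Experiment 1", "LFQ6 - MQ", comment="",
#                     name_pattern=".* (?P<rep>.*)_(?P<frac>.*)")
# ds.data_reading()   # populates ds.df_original from the tab-separated file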
def processingdf(self, name_pattern=None, summed_MSMS_counts=None, consecutiveLFQi=None, RatioHLcount=None, RatioVariability=None, custom_columns=None, custom_normalized=None):
"""
Analysis of the SILAC/LFQ-MQ/LFQ-Spectronaut data will be performed. The dataframe will be filtered, normalized, and converted into a dataframe,
characterized by a flat column index. These tasks are performed by the following functions:
indexingdf(df_original, acquisition_set_dict, acquisition, fraction_dict, name_pattern)
spectronaut_LFQ_indexingdf(df_original, Spectronaut_columnRenaming, acquisition_set_dict, acquisition, fraction_dict, name_pattern)
stringency_silac(df_index)
normalization_01_silac(df_stringency_mapfracstacked):
logarithmization_silac(df_stringency_mapfracstacked):
stringency_lfq(df_index):
normalization_01_lfq(df_stringency_mapfracstacked):
logarithmization_lfq(df_stringency_mapfracstacked):
Args:
self.acquisition: string, "LFQ6 - Spectronaut", "LFQ5 - Spectronaut", "LFQ5 - MQ", "LFQ6 - MQ", "SILAC - MQ"
additional arguments can be used to override the value set by the class init function
Returns:
self:
map_names: list of Map names
df_01_stacked: df; 0-1 normalized data with "normalized profile" as column name
df_log_stacked: df; log transformed data
analysis_summary_dict["0/1 normalized data - mean"] : 0/1 normalized data across all maps by calculating the mean
["changes in shape after filtering"]
["Unique Proteins"] : unique proteins, derived from the first entry of Protein IDs, seperated by a ";"
["Analysis parameters"] : {"acquisition" : ...,
"filename" : ...,
#SILAC#
"Ratio H/L count 1 (>=X)" : ...,
"Ratio H/L count 2 (>=Y, var<Z)" : ...,
"Ratio variability (<Z, count>=Y)" : ...
#LFQ#
"consecutive data points" : ...,
"summed MS/MS counts" : ...
}
"""
if name_pattern is None:
name_pattern = self.name_pattern
if self.acquisition == "SILAC - MQ":
if RatioHLcount is None:
RatioHLcount = self.RatioHLcount
if RatioVariability is None:
RatioVariability = self.RatioVariability
elif self.acquisition == "Custom":
if custom_columns is None:
custom_columns = self.custom_columns
if custom_normalized is None:
custom_normalized = self.custom_normalized
else:
if summed_MSMS_counts is None:
summed_MSMS_counts = self.summed_MSMS_counts
if consecutiveLFQi is None:
consecutiveLFQi = self.consecutiveLFQi
shape_dict = {}
def indexingdf():
"""
For data output from MaxQuant, all columns - except of "MS/MS count" and "LFQ intensity" (LFQ) | "Ratio H/L count", "Ratio H/L variability [%]"
(SILAC) - will be set as index. A multiindex will be generated, containing "Set" ("MS/MS count", "LFQ intensity"| "Ratio H/L count", "Ratio H/L
variability [%]"), "Fraction" (= defined via "name_pattern") and "Map" (= defined via "name_pattern") as level names, allowing the stacking and
unstacking of the dataframe. The dataframe will be filtered by removing matches to the reverse database, matches only identified by site, and
potential contaminants.
Args:
self:
df_original: dataframe, columns defined through self.imported_columns
acquisition_set_dict: dictionary, all columns will be set as index, except of those that are listed in acquisition_set_dict
acquisition: string, one of "LFQ6 - Spectronaut", "LFQ5 - Spectronaut", "LFQ5 - MQ", "LFQ6 - MQ", "SILAC - MQ"
fraction_dict: "Fraction" is part of the multiindex; fraction_dict allows the renaming of the fractions e.g. 3K -> 03K
name_pattern: regular expression, to identify Map-Fraction-(Replicate)
Returns:
self:
df_index: multiindex dataframe, which contains 3 level labels: Map, Fraction, Type
shape_dict["Original size"] of df_original
shape_dict["Shape after categorical filtering"] of df_index
fractions: list of fractions e.g. ["01K", "03K", ...]
"""
df_original = self.df_original.copy()
df_original.rename({"Proteins": "Protein IDs"}, axis=1, inplace=True)
df_original = df_original.set_index([col for col in df_original.columns
if any([re.match(s, col) for s in self.acquisition_set_dict[self.acquisition]]) == False])
# multindex will be generated, by extracting the information about the Map, Fraction and Type from each individual column name
multiindex = pd.MultiIndex.from_arrays(
arrays=[
[[re.findall(s, col)[0] for s in self.acquisition_set_dict[self.acquisition] if re.match(s,col)][0]
for col in df_original.columns],
[re.match(self.name_pattern, col).group("rep") for col in df_original.columns] if not "<cond>" in self.name_pattern
else ["_".join(re.match(self.name_pattern, col).group("cond", "rep")) for col in df_original.columns],
[re.match(self.name_pattern, col).group("frac") for col in df_original.columns],
],
names=["Set", "Map", "Fraction"]
)
df_original.columns = multiindex
df_original.sort_index(1, inplace=True)
shape_dict["Original size"] = df_original.shape
try:
df_index = df_original.xs(
np.nan, 0, "Reverse")
except:
pass
try:
df_index = df_index.xs(
np.nan, 0, "Potential contaminant")
except:
pass
try:
df_index = df_index.xs(
np.nan, 0, "Only identified by site")
except:
pass
df_index.replace(0, np.nan, inplace=True)
shape_dict["Shape after categorical filtering"] = df_index.shape
df_index.rename(columns={"MS/MS Count":"MS/MS count"}, inplace=True)
fraction_wCyt = list(df_index.columns.get_level_values("Fraction").unique())
############## Cyt should only be removed if it is not an NMC split
if "Cyt" in fraction_wCyt and len(fraction_wCyt) >= 4:
df_index.drop("Cyt", axis=1, level="Fraction", inplace=True)
try:
if self.acquisition == "LFQ5 - MQ":
df_index.drop("01K", axis=1, level="Fraction", inplace=True)
except:
pass
self.fractions = natsort.natsorted(list(df_index.columns.get_level_values("Fraction").unique()))
self.df_index = df_index
return df_index
def custom_indexing_and_normalization():
df_original = self.df_original.copy()
df_original.rename({custom_columns["ids"]: "Protein IDs", custom_columns["genes"]: "Gene names"}, axis=1, inplace=True)
df_original = df_original.set_index([col for col in df_original.columns
if any([re.match(s, col) for s in self.acquisition_set_dict[self.acquisition]]) == False])
# multindex will be generated, by extracting the information about the Map, Fraction and Type from each individual column name
multiindex = pd.MultiIndex.from_arrays(
arrays=[
["normalized profile" for col in df_original.columns],
[re.match(self.name_pattern, col).group("rep") for col in df_original.columns] if not "<cond>" in self.name_pattern
else ["_".join(re.match(self.name_pattern, col).group("cond", "rep")) for col in df_original.columns],
[re.match(self.name_pattern, col).group("frac") for col in df_original.columns],
],
names=["Set", "Map", "Fraction"]
)
df_original.columns = multiindex
df_original.sort_index(1, inplace=True)
shape_dict["Original size"] = df_original.shape
# for custom upload assume full normalization for now. this should be extended to valid value filtering and 0-1 normalization later
df_index = df_original.copy()
self.fractions = natsort.natsorted(list(df_index.columns.get_level_values("Fraction").unique()))
self.df_index = df_index
return df_index
def spectronaut_LFQ_indexingdf():
"""
For data generated from the Spectronaut software, columns will be renamed such that they fit the scheme of MaxQuant output data. Subsequently, all
columns - except of "MS/MS count" and "LFQ intensity" will be set as index. A multiindex will be generated, containing "Set" ("MS/MS count" and
"LFQ intensity"), Fraction" and "Map" (= defined via "name_pattern"; both based on the column name R.condition - equivalent to the column name "Map"
in df_renamed["Map"]) as level labels.
!!!
!!!It is very important to define R.Fraction, R.condition already during the setup of Spectronaut!!!
!!!
Args:
self:
df_original: dataframe, columns defined through self.imported_columns
Spectronaut_columnRenaming
acquisition_set_dict: dictionary, all columns will be set as index, except of those that are listed in acquisition_set_dict
acquisition: string, "LFQ6 - Spectronaut", "LFQ5 - Spectronaut"
fraction_dict: "Fraction" is part of the multiindex; fraction_dict allows the renaming of the fractions e.g. 3K -> 03K
name_pattern: regular expression, to identify Map-Fraction-(Replicate)
Returns:
self:
df_index: multiindex dataframe, which contains 3 level labels: Map, Fraction, Type
shape_dict["Original size"] of df_index
fractions: list of fractions e.g. ["01K", "03K", ...]
"""
df_original = self.df_original.copy()
df_renamed = df_original.rename(columns=self.Spectronaut_columnRenaming)
df_renamed["Fraction"] = [re.match(self.name_pattern, i).group("frac") for i in df_renamed["Map"]]
df_renamed["Map"] = [re.match(self.name_pattern, i).group("rep") for i in df_renamed["Map"]] if not "<cond>" in self.name_pattern else ["_".join(
re.match(self.name_pattern, i).group("cond", "rep")) for i in df_renamed["Map"]]
df_index = df_renamed.set_index([col for col in df_renamed.columns if any([re.match(s, col) for s in self.acquisition_set_dict[self.acquisition]])==False])
df_index.columns.names = ["Set"]
# In case fractionated data was used this needs to be catched and aggregated
try:
df_index = df_index.unstack(["Map", "Fraction"])
except ValueError:
df_index = df_index.groupby(by=df_index.index.names).agg(np.nansum, axis=0)
df_index = df_index.unstack(["Map", "Fraction"])
df_index.replace(0, np.nan, inplace=True)
shape_dict["Original size"]=df_index.shape
fraction_wCyt = list(df_index.columns.get_level_values("Fraction").unique())
#Cyt is removed only if it is not an NMC split
if "Cyt" in fraction_wCyt and len(fraction_wCyt) >= 4:
df_index.drop("Cyt", axis=1, level="Fraction", inplace=True)
try:
if self.acquisition == "LFQ5 - Spectronaut":
df_index.drop("01K", axis=1, level="Fraction", inplace=True)
except:
pass
self.fractions = natsort.natsorted(list(df_index.columns.get_level_values("Fraction").unique()))
self.df_index = df_index
return df_index
def stringency_silac(df_index):
"""
The multiindex dataframe is subjected to stringency filtering. Only proteins with complete profiles are considered (a set of e.g. 5 SILAC ratios
in case you have 5 fractions / any proteins with missing values were rejected). Proteins were retained with 3 or more quantifications in each
subfraction (=count). Furthermore, proteins with only 2 quantification events in one or more subfraction were retained, if their ratio variability for
ratios obtained with 2 quantification events was below 30% (=var). SILAC ratios were linearly normalized by division through the fraction median.
Subsequently, normalization to SILAC loading was performed. Data is annotated based on the specified marker set, e.g. eLife.
Args:
df_index: multiindex dataframe, which contains 3 level labels: MAP, Fraction, Type
RatioHLcount: int, 2
RatioVariability: int, 30
df_organellarMarkerSet: df, columns: "Gene names", "Compartment", no index
fractions: list of fractions e.g. ["01K", "03K", ...]
Returns:
df_stringency_mapfracstacked: dataframe, in which "MAP" and "Fraction" are stacked;
columns "Ratio H/L count", "Ratio H/L variability [%]", and "Ratio H/L" stored as single level indices
shape_dict["Shape after Ratio H/L count (>=3)/var (count>=2, var<30) filtering"] of df_countvarfiltered_stacked
shape_dict["Shape after filtering for complete profiles"] of df_stringency_mapfracstacked
"""
# Fraction and Map will be stacked
df_stack = df_index.stack(["Fraction", "Map"])
# filtering for sufficient number of quantifications (count in "Ratio H/L count"), taken variability (var in Ratio H/L variability [%]) into account
# zip: allows direct comparison of count and var
# only if the filtering parameters are fulfilled the data will be introduced into df_countvarfiltered_stacked
#default setting: RatioHLcount = 2 ; RatioVariability = 30
df_countvarfiltered_stacked = df_stack.loc[[count>RatioHLcount or (count==RatioHLcount and var<RatioVariability)
for var, count in zip(df_stack["Ratio H/L variability [%]"], df_stack["Ratio H/L count"])]]
shape_dict["Shape after Ratio H/L count (>=3)/var (count==2, var<30) filtering"] = df_countvarfiltered_stacked.unstack(["Fraction", "Map"]).shape
# "Ratio H/L":normalization to SILAC loading, each individual experiment (FractionXMap) will be divided by its median
# np.median([...]): only entries, that are not NANs are considered
df_normsilac_stacked = df_countvarfiltered_stacked["Ratio H/L"]\
.unstack(["Fraction", "Map"])\
.apply(lambda x: x/np.nanmedian(x), axis=0)\
.stack(["Map", "Fraction"])
df_stringency_mapfracstacked = df_countvarfiltered_stacked[["Ratio H/L count", "Ratio H/L variability [%]"]].join(
pd.DataFrame(df_normsilac_stacked, columns=["Ratio H/L"]))
# dataframe is grouped (Map, id), that allows the filtering for complete profiles
df_stringency_mapfracstacked = df_stringency_mapfracstacked.groupby(["Map", "id"]).filter(lambda x: len(x)>=len(self.fractions))
shape_dict["Shape after filtering for complete profiles"]=df_stringency_mapfracstacked.unstack(["Fraction", "Map"]).shape
# Ratio H/L is converted into Ratio L/H
df_stringency_mapfracstacked["Ratio H/L"] = df_stringency_mapfracstacked["Ratio H/L"].transform(lambda x: 1/x)
#Annotation with marker genes
df_organellarMarkerSet = self.df_organellarMarkerSet
df_stringency_mapfracstacked.reset_index(inplace=True)
df_stringency_mapfracstacked = df_stringency_mapfracstacked.merge(df_organellarMarkerSet, how="left", on="Gene names")
df_stringency_mapfracstacked.set_index([c for c in df_stringency_mapfracstacked.columns
if c not in ["Ratio H/L count","Ratio H/L variability [%]","Ratio H/L"]], inplace=True)
df_stringency_mapfracstacked.rename(index={np.nan:"undefined"}, level="Compartment", inplace=True)
return df_stringency_mapfracstacked
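# Worked example of the filter above (sketch): with the defaults a value is kept
# if its Ratio H/L count is >2, or ==2 with a Ratio H/L variability below 30%;
# afterwards only proteins quantified in every fraction of a map (complete
# profiles) are retained.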
def normalization_01_silac(df_stringency_mapfracstacked):
"""
The multiindex dataframe, that was subjected to stringency filtering, is 0-1 normalized ("Ratio H/L").
Args:
df_stringency_mapfracstacked: dataframe, in which "Map" and "Fraction" are stacked;
columns "Ratio H/L count", "Ratio H/L variability [%]", and "Ratio H/L" stored as single level indices
self:
fractions: list of fractions e.g. ["01K", "03K", ...]
data_completeness: series, for each individual map, as well as combined maps: 1 - (percentage of NANs)
Returns:
df_01_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "Ratio H/L" is 0-1 normalized and renamed to "normalized
profile"; the columns "Ratio H/L count", "Ratio H/L variability [%]", and "normalized profile" stored as single level indices;
plotting is possible now
self:
analysis_summary_dict["Data/Profile Completeness"] : df, with information about Data/Profile Completeness
column: "Experiment", "Map", "Data completeness", "Profile completeness"
no row index
"""
df_01norm_unstacked = df_stringency_mapfracstacked["Ratio H/L"].unstack("Fraction")
# 0:1 normalization of Ratio L/H
df_01norm_unstacked = df_01norm_unstacked.div(df_01norm_unstacked.sum(axis=1), axis=0)
df_01_stacked = df_stringency_mapfracstacked[["Ratio H/L count", "Ratio H/L variability [%]"]].join(pd.DataFrame
(df_01norm_unstacked.stack("Fraction"),columns=["Ratio H/L"]))
# "Ratio H/L" will be renamed to "normalized profile"
df_01_stacked.columns = [col if col!="Ratio H/L" else "normalized profile" for col in df_01_stacked.columns]
return df_01_stacked
def logarithmization_silac(df_stringency_mapfracstacked):
"""
The multiindex dataframe, that was subjected to stringency filtering, is logarithmized ("Ratio H/L").
Args:
df_stringency_mapfracstacked: dataframe, in which "MAP" and "Fraction" are stacked; the columns "Ratio H/L count", "Ratio H/L variability [%]",
and "Ratio H/L" stored as single level indices
Returns:
df_log_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "log profile" originates from logarithmized "Ratio H/L"
data; the columns "Ratio H/L count", "Ratio H/L variability [%]" and "log profile" are stored as single level indices;
PCA is possible now
"""
# logarithmizing, basis of 2
df_lognorm_ratio_stacked = df_stringency_mapfracstacked["Ratio H/L"].transform(np.log2)
df_log_stacked = df_stringency_mapfracstacked[["Ratio H/L count", "Ratio H/L variability [%]"]].join(
pd.DataFrame(df_lognorm_ratio_stacked, columns=["Ratio H/L"]))
# "Ratio H/L" will be renamed to "log profile"
df_log_stacked.columns = [col if col !="Ratio H/L" else "log profile" for col in df_log_stacked.columns]
return df_log_stacked
def stringency_lfq(df_index):
"""
The multiindex dataframe is subjected to stringency filtering. Only Proteins which were identified with
at least [4] consecutive data points regarding the "LFQ intensity", and if summed MS/MS counts >= n(fractions)*[2]
(LFQ5: min 10 and LFQ6: min 12, respectively; coverage filtering) were included.
Data is annotated based on specified marker set e.g. eLife.
Args:
df_index: multiindex dataframe, which contains 3 level labels: MAP, Fraction, Type
self:
df_organellarMarkerSet: df, columns: "Gene names", "Compartment", no index
fractions: list of fractions e.g. ["01K", "03K", ...]
summed_MSMS_counts: int, 2
consecutiveLFQi: int, 4
Returns:
df_stringency_mapfracstacked: dataframe, in which "Map" and "Fraction" is stacked; "LFQ intensity" and "MS/MS count" define a
single-level column index
self:
shape_dict["Shape after MS/MS value filtering"] of df_mscount_mapstacked
shape_dict["Shape after consecutive value filtering"] of df_stringency_mapfracstacked
"""
df_index = df_index.stack("Map")
# sorting the level 0, in order to have LFQ intensity - MS/MS count instead of continuous alternation
df_index.sort_index(axis=1, level=0, inplace=True)
# "MS/MS count"-column: take the sum over the fractions; if the sum is larger than n[fraction]*2, it will be stored in the new dataframe
minms = (len(self.fractions) * self.summed_MSMS_counts)
if minms > 0:
df_mscount_mapstacked = df_index.loc[df_index[("MS/MS count")].apply(np.sum, axis=1) >= minms]
shape_dict["Shape after MS/MS value filtering"]=df_mscount_mapstacked.unstack("Map").shape
df_stringency_mapfracstacked = df_mscount_mapstacked.copy()
else:
df_stringency_mapfracstacked = df_index.copy()
# a Series, not a DataFrame, is generated; if there are at least e.g. 4 consecutive non-NaNs, the data will be retained
df_stringency_mapfracstacked.sort_index(level="Fraction", axis=1, key=natsort_index_keys, inplace=True)
df_stringency_mapfracstacked = df_stringency_mapfracstacked.loc[
df_stringency_mapfracstacked[("LFQ intensity")]\
.apply(lambda x: np.isfinite(x), axis=0)\
.apply(lambda x: sum(x) >= self.consecutiveLFQi and any(x.rolling(window=self.consecutiveLFQi).sum() >= self.consecutiveLFQi), axis=1)]
shape_dict["Shape after consecutive value filtering"]=df_stringency_mapfracstacked.unstack("Map").shape
df_stringency_mapfracstacked = df_stringency_mapfracstacked.copy().stack("Fraction")
#Annotation with marker genes
df_organellarMarkerSet = self.df_organellarMarkerSet
df_stringency_mapfracstacked.reset_index(inplace=True)
df_stringency_mapfracstacked = df_stringency_mapfracstacked.merge(df_organellarMarkerSet, how="left", on="Gene names")
df_stringency_mapfracstacked.set_index([c for c in df_stringency_mapfracstacked.columns
if c!="MS/MS count" and c!="LFQ intensity"], inplace=True)
df_stringency_mapfracstacked.rename(index={np.nan : "undefined"}, level="Compartment", inplace=True)
return df_stringency_mapfracstacked
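# Worked example of the coverage filter above (sketch): with 5 fractions and
# summed_MSMS_counts=2 a protein group needs summed MS/MS counts >= 10 per map,
# plus at least consecutiveLFQi=4 consecutive finite LFQ intensities, to be kept.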
def normalization_01_lfq(df_stringency_mapfracstacked):
"""
The multiindex dataframe, that was subjected to stringency filtering, is 0-1 normalized ("LFQ intensity").
Args:
df_stringency_mapfracstacked: dataframe, in which "Map" and "Fraction" is stacked, "LFQ intensity" and "MS/MS count" define a
single-level column index
self:
fractions: list of fractions e.g. ["01K", "03K", ...]
Returns:
df_01_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "LFQ intensity" is 0-1 normalized and renamed to
"normalized profile"; the columns "normalized profile" and "MS/MS count" are stored as single level indices; plotting is possible now
"""
df_01norm_mapstacked = df_stringency_mapfracstacked["LFQ intensity"].unstack("Fraction")
# 0-1 normalization of LFQ intensity
df_01norm_unstacked = df_01norm_mapstacked.div(df_01norm_mapstacked.sum(axis=1), axis=0)
df_rest = df_stringency_mapfracstacked.drop("LFQ intensity", axis=1)
df_01_stacked = df_rest.join(pd.DataFrame(df_01norm_unstacked.stack(
"Fraction"),columns=["LFQ intensity"]))
# rename columns: "LFQ intensity" into "normalized profile"
df_01_stacked.columns = [col if col!="LFQ intensity" else "normalized profile" for col in
df_01_stacked.columns]
#imputation
df_01_stacked = df_01_stacked.unstack("Fraction").replace(np.NaN, 0).stack("Fraction")
df_01_stacked = df_01_stacked.sort_index()
return df_01_stacked
def logarithmization_lfq(df_stringency_mapfracstacked):
"""The multiindex dataframe, that was subjected to stringency filtering, is logarithmized ("LFQ intensity").
Args:
df_stringency_mapfracstacked: dataframe, in which "Map" and "Fraction" is stacked; "LFQ intensity" and "MS/MS count" define a
single-level column index
Returns:
df_log_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "log profile" originates from logarithmized
"LFQ intensity"; the columns "log profile" and "MS/MS count" are stored as single level indices; PCA is possible now
"""
df_lognorm_ratio_stacked = df_stringency_mapfracstacked["LFQ intensity"].transform(np.log2)
df_rest = df_stringency_mapfracstacked.drop("LFQ intensity", axis=1)
df_log_stacked = df_rest.join(pd.DataFrame(df_lognorm_ratio_stacked, columns=["LFQ intensity"]))
# "LFQ intensity" will be renamed to "log profile"
df_log_stacked.columns = [col if col!="LFQ intensity" else "log profile" for col in df_log_stacked.columns]
return df_log_stacked
def split_ids_uniprot(el):
"""
This finds the primary canoncial protein ID in the protein group. If no canonical ID is present it selects the first isoform ID.
"""
p1 = el.split(";")[0]
if "-" not in p1:
return p1
else:
p = p1.split("-")[0]
if p in el.split(";"):
return p
else:
return p1
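# Examples for split_ids_uniprot (hypothetical protein groups):
# "P10000-2;P10000;Q20000" -> "P10000"   (canonical ID present in the group)
# "P10000-2;Q20000-3"      -> "P10000-2" (no canonical ID, first isoform kept)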
if self.acquisition == "SILAC - MQ":
# Index data
df_index = indexingdf()
map_names = df_index.columns.get_level_values("Map").unique()
self.map_names = map_names
# Run stringency filtering and normalization
df_stringency_mapfracstacked = stringency_silac(df_index)
self.df_stringencyFiltered = df_stringency_mapfracstacked
self.df_01_stacked = normalization_01_silac(df_stringency_mapfracstacked)
self.df_log_stacked = logarithmization_silac(df_stringency_mapfracstacked)
# format and reduce 0-1 normalized data for comparison with other experiments
df_01_comparison = self.df_01_stacked.copy()
comp_ids = pd.Series([split_ids_uniprot(el) for el in df_01_comparison.index.get_level_values("Protein IDs")], name="Protein IDs")
df_01_comparison.index = df_01_comparison.index.droplevel("Protein IDs")
df_01_comparison.set_index(comp_ids, append=True, inplace=True)
df_01_comparison.drop(["Ratio H/L count", "Ratio H/L variability [%]"], inplace=True, axis=1)
df_01_comparison = df_01_comparison.unstack(["Map", "Fraction"])
df_01_comparison.columns = ["?".join(el) for el in df_01_comparison.columns.values]
df_01_comparison = df_01_comparison.copy().reset_index().drop(["C-Score", "Q-value", "Score", "Majority protein IDs", "Protein names", "id"], axis=1, errors="ignore")
# populate analysis summary dictionary with (meta)data
unique_proteins = [split_ids_uniprot(i) for i in set(self.df_01_stacked.index.get_level_values("Protein IDs"))]
unique_proteins.sort()
self.analysis_summary_dict["0/1 normalized data"] = df_01_comparison.to_json()
self.analysis_summary_dict["Unique Proteins"] = unique_proteins
self.analysis_summary_dict["changes in shape after filtering"] = shape_dict.copy()
analysis_parameters = {"acquisition" : self.acquisition,
"filename" : self.filename,
"comment" : self.comment,
"Ratio H/L count" : self.RatioHLcount,
"Ratio variability" : self.RatioVariability,
"organism" : self.organism,
}
self.analysis_summary_dict["Analysis parameters"] = analysis_parameters.copy()
# TODO this line needs to be removed.
self.analysed_datasets_dict[self.expname] = self.analysis_summary_dict.copy()
elif self.acquisition == "LFQ5 - MQ" or self.acquisition == "LFQ6 - MQ" or self.acquisition == "LFQ5 - Spectronaut" or self.acquisition == "LFQ6 - Spectronaut":
#if not summed_MS_counts:
# summed_MS_counts = self.summed_MS_counts
#if not consecutiveLFQi:
# consecutiveLFQi = self.consecutiveLFQi
if self.acquisition == "LFQ5 - MQ" or self.acquisition == "LFQ6 - MQ":
df_index = indexingdf()
elif self.acquisition == "LFQ5 - Spectronaut" or self.acquisition == "LFQ6 - Spectronaut":
df_index = spectronaut_LFQ_indexingdf()
map_names = df_index.columns.get_level_values("Map").unique()
self.map_names = map_names
df_stringency_mapfracstacked = stringency_lfq(df_index)
self.df_stringencyFiltered = df_stringency_mapfracstacked
self.df_log_stacked = logarithmization_lfq(df_stringency_mapfracstacked)
self.df_01_stacked = normalization_01_lfq(df_stringency_mapfracstacked)
df_01_comparison = self.df_01_stacked.copy()
comp_ids = pd.Series([split_ids_uniprot(el) for el in df_01_comparison.index.get_level_values("Protein IDs")], name="Protein IDs")
df_01_comparison.index = df_01_comparison.index.droplevel("Protein IDs")
df_01_comparison.set_index(comp_ids, append=True, inplace=True)
df_01_comparison.drop("MS/MS count", inplace=True, axis=1, errors="ignore")
df_01_comparison = df_01_comparison.unstack(["Map", "Fraction"])
df_01_comparison.columns = ["?".join(el) for el in df_01_comparison.columns.values]
df_01_comparison = df_01_comparison.copy().reset_index().drop(["C-Score", "Q-value", "Score", "Majority protein IDs", "Protein names", "id"], axis=1, errors="ignore")
self.analysis_summary_dict["0/1 normalized data"] = df_01_comparison.to_json()#double_precision=4) #.reset_index()
unique_proteins = [split_ids_uniprot(i) for i in set(self.df_01_stacked.index.get_level_values("Protein IDs"))]
unique_proteins.sort()
self.analysis_summary_dict["Unique Proteins"] = unique_proteins
self.analysis_summary_dict["changes in shape after filtering"] = shape_dict.copy()
analysis_parameters = {"acquisition" : self.acquisition,
"filename" : self.filename,
"comment" : self.comment,
"consecutive data points" : self.consecutiveLFQi,
"summed MS/MS counts" : self.summed_MSMS_counts,
"organism" : self.organism,
}
self.analysis_summary_dict["Analysis parameters"] = analysis_parameters.copy()
self.analysed_datasets_dict[self.expname] = self.analysis_summary_dict.copy()
#return self.df_01_stacked
elif self.acquisition == "Custom":
df_index = custom_indexing_and_normalization()
map_names = df_index.columns.get_level_values("Map").unique()
self.map_names = map_names
df_01_stacked = df_index.stack(["Map", "Fraction"])
df_01_stacked = df_01_stacked.reset_index().merge(self.df_organellarMarkerSet, how="left", on="Gene names")
df_01_stacked.set_index([c for c in df_01_stacked.columns if c not in ["normalized profile"]], inplace=True)
df_01_stacked.rename(index={np.nan:"undefined"}, level="Compartment", inplace=True)
self.df_01_stacked = df_01_stacked
df_01_comparison = self.df_01_stacked.copy()
comp_ids = pd.Series([split_ids_uniprot(el) for el in df_01_comparison.index.get_level_values("Protein IDs")], name="Protein IDs")
df_01_comparison.index = df_01_comparison.index.droplevel("Protein IDs")
df_01_comparison.set_index(comp_ids, append=True, inplace=True)
df_01_comparison.drop("MS/MS count", inplace=True, axis=1, errors="ignore")
df_01_comparison = df_01_comparison.unstack(["Map", "Fraction"])
df_01_comparison.columns = ["?".join(el) for el in df_01_comparison.columns.values]
df_01_comparison = df_01_comparison.copy().reset_index().drop(["C-Score", "Q-value", "Score", "Majority protein IDs", "Protein names", "id"], axis=1, errors="ignore")
self.analysis_summary_dict["0/1 normalized data"] = df_01_comparison.to_json()#double_precision=4) #.reset_index()
unique_proteins = [split_ids_uniprot(i) for i in set(self.df_01_stacked.index.get_level_values("Protein IDs"))]
unique_proteins.sort()
self.analysis_summary_dict["Unique Proteins"] = unique_proteins
self.analysis_summary_dict["changes in shape after filtering"] = shape_dict.copy()
analysis_parameters = {"acquisition" : self.acquisition,
"filename" : self.filename,
"comment" : self.comment,
"organism" : self.organism,
}
self.analysis_summary_dict["Analysis parameters"] = analysis_parameters.copy()
self.analysed_datasets_dict[self.expname] = self.analysis_summary_dict.copy()
else:
return "I do not know this"
def plot_log_data(self):
"""
Args:
self.df_log_stacked
Returns:
log_histogram: Histogram of log transformed data
"""
log_histogram = px.histogram(self.df_log_stacked.reset_index().sort_values(["Map", "Fraction"], key=natsort_list_keys),
x="log profile",
facet_col="Fraction",
facet_row="Map",
template="simple_white",
labels={"log profile": "log tranformed data ({})".format("LFQ intenisty" if self.acquisition != "SILAC - MQ" else "Ratio H/L")}
)
log_histogram.for_each_xaxis(lambda axis: axis.update(title={"text":""}))
log_histogram.for_each_yaxis(lambda axis: axis.update(title={"text":""}))
log_histogram.add_annotation(x=0.5, y=0, yshift=-50, xref="paper",showarrow=False, yref="paper",
text="log2(LFQ intensity)")
log_histogram.add_annotation(x=0, y=0.5, textangle=270, xref="paper",showarrow=False, yref="paper", xshift=-50,
text="count")
log_histogram.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
return log_histogram
def quantity_profiles_proteinGroups(self):
"""
The number of profiles and protein groups per experiment, and the data completeness of profiles (total quantity, intersection) are calculated.
Args:
self:
acquisition: string, "LFQ6 - Spectronaut", "LFQ5 - Spectronaut", "LFQ5 - MQ", "LFQ6 - MQ", "SILAC - MQ"
df_index: multiindex dataframe, which contains 3 level labels: MAP, Fraction, Typ
df_01_stacked: df; 0-1 normalized data with "normalized profile" as column name
Returns:
self:
df_quantity_pr_pg: df; no index, columns: "filtering", "type", "npg", "npr", "npr_dc"; containing the following information:
npg_t: protein groups per experiment total quantity
npgf_t = groups with valid profiles per experiment total quantity
npr_t: profiles with any valid values
nprf_t = total number of valid profiles
npg_i: protein groups per experiment intersection
npgf_i = groups with valid profiles per experiment intersection
npr_i: profiles with any valid values in the intersection
nprf_i = total number of valid profiles in the intersection
npr_t_dc: profiles, % values != nan
nprf_t_dc = profiles, total, filtered, % values != nan
npr_i_dc: profiles, intersection, % values != nan
nprf_i_dc = profiles, intersection, filtered, % values != nan
df_npg | df_npgf: index: maps e.g. "Map1", "Map2",..., columns: fractions e.g. "03K", "06K", ...
npg_f = protein groups, per fraction
or npgf_f = protein groups, filtered, per fraction
df_npg_dc | df_npgf_dc: index: maps e.g. "Map1", "Map2",..., columns: fractions e.g. "03K", "06K", ...
npg_f_dc = protein groups, per fraction, % values != nan
or npgf_f_dc = protein groups, filtered, per fraction, % values != nan
"""
if self.acquisition == "SILAC - MQ":
df_index = self.df_index["Ratio H/L"]
df_01_stacked = self.df_01_stacked["normalized profile"]
elif self.acquisition.startswith("LFQ"):
df_index = self.df_index["LFQ intensity"]
df_01_stacked = self.df_01_stacked["normalized profile"].replace(0, np.nan)
elif self.acquisition == "Custom":
df_index = self.df_index["normalized profile"]
df_01_stacked = self.df_01_stacked["normalized profile"].replace(0, np.nan)
#unfiltered
npg_t = df_index.shape[0]
df_index_MapStacked = df_index.stack("Map")
npr_t = df_index_MapStacked.shape[0]/len(self.map_names)
npr_t_dc = 1-df_index_MapStacked.isna().sum().sum()/np.prod(df_index_MapStacked.shape)
#filtered
npgf_t = df_01_stacked.unstack(["Map", "Fraction"]).shape[0]
df_01_MapStacked = df_01_stacked.unstack("Fraction")
nprf_t = df_01_MapStacked.shape[0]/len(self.map_names)
nprf_t_dc = 1-df_01_MapStacked.isna().sum().sum()/np.prod(df_01_MapStacked.shape)
#unfiltered intersection
try:
df_index_intersection = df_index_MapStacked.groupby(level="Sequence").filter(lambda x : len(x)==len(self.map_names))
except:
df_index_intersection = df_index_MapStacked.groupby(level="Protein IDs").filter(lambda x : len(x)==len(self.map_names))
npr_i = df_index_intersection.shape[0]/len(self.map_names)
npr_i_dc = 1-df_index_intersection.isna().sum().sum()/np.prod(df_index_intersection.shape)
npg_i = df_index_intersection.unstack("Map").shape[0]
#filtered intersection
try:
df_01_intersection = df_01_MapStacked.groupby(level = "Sequence").filter(lambda x : len(x)==len(self.map_names))
except:
df_01_intersection = df_01_MapStacked.groupby(level = "Protein IDs").filter(lambda x : len(x)==len(self.map_names))
nprf_i = df_01_intersection.shape[0]/len(self.map_names)
nprf_i_dc = 1-df_01_intersection.isna().sum().sum()/np.prod(df_01_intersection.shape)
npgf_i = df_01_intersection.unstack("Map").shape[0]
# summarize in dataframe and save to attribute
df_quantity_pr_pg = pd.DataFrame(
{
"filtering": pd.Series(["before filtering", "before filtering", "after filtering", "after filtering"], dtype=np.dtype("O")),
"type": pd.Series(["total", "intersection", "total", "intersection"], dtype=np.dtype("O")),
"number of protein groups": pd.Series([npg_t, npg_i, npgf_t, npgf_i], dtype=np.dtype("float")),
"number of profiles": pd.Series([npr_t, npr_i, nprf_t, nprf_i], dtype=np.dtype("float")),
"data completeness of profiles": pd.Series([npr_t_dc, npr_i_dc, nprf_t_dc, nprf_i_dc], dtype=np.dtype("float"))})
self.df_quantity_pr_pg = df_quantity_pr_pg.reset_index()
self.analysis_summary_dict["quantity: profiles/protein groups"] = self.df_quantity_pr_pg.to_json()
#additional depth assessment per fraction
dict_npgf = {}
dict_npg = {}
list_npg_dc = []
list_npgf_dc = []
for df_intersection in [df_index_intersection, df_01_intersection]:
for fraction in self.fractions:
df_intersection_frac = df_intersection[fraction]
npgF_f_dc = 1-df_intersection_frac.isna().sum()/len(df_intersection_frac)
npgF_f = df_intersection_frac.unstack("Map").isnull().sum(axis=1).value_counts()
if fraction not in dict_npg.keys():
dict_npg[fraction] = npgF_f
list_npg_dc.append(npgF_f_dc)
else:
dict_npgf[fraction] = npgF_f
list_npgf_dc.append(npgF_f_dc)
df_npg = pd.DataFrame(dict_npg)
df_npg.index.name = "Protein Groups present in:"
df_npg.rename_axis("Fraction", axis=1, inplace=True)
df_npg = df_npg.stack("Fraction").reset_index()
df_npg = df_npg.rename({0: "Protein Groups"}, axis=1)
df_npg.sort_values(["Fraction", "Protein Groups present in:"], inplace=True, key=natsort_list_keys)
df_npgf = pd.DataFrame(dict_npgf)
df_npgf.index.name = "Protein Groups present in:"
df_npgf.rename_axis("Fraction", axis=1, inplace=True)
df_npgf = df_npgf.stack("Fraction").reset_index()
df_npgf = df_npgf.rename({0: "Protein Groups"}, axis=1)
df_npgf.sort_values(["Fraction", "Protein Groups present in:"], inplace=True, key=natsort_list_keys)
max_df_npg = df_npg["Protein Groups present in:"].max()
min_df_npg = df_npg["Protein Groups present in:"].min()
rename_numOFnans = {}
for x, y in zip(range(max_df_npg,min_df_npg-1, -1), range(max_df_npg+1)):
if y == 1:
rename_numOFnans[x] = "{} Map".format(y)
elif y == 0:
rename_numOFnans[x] = "PG not identified".format(y)
else:
rename_numOFnans[x] = "{} Maps".format(y)
for keys in rename_numOFnans.keys():
df_npg.loc[df_npg["Protein Groups present in:"] ==keys, "Protein Groups present in:"] = rename_numOFnans[keys]
df_npgf.loc[df_npgf["Protein Groups present in:"] ==keys, "Protein Groups present in:"] = rename_numOFnans[keys]
# summarize in dataframe and save to attributes
self.df_npg_dc = pd.DataFrame(
{
"Fraction" : pd.Series(self.fractions),
"Data completeness before filtering": pd.Series(list_npg_dc),
"Data completeness after filtering": pd.Series(list_npgf_dc),
})
self.df_npg = df_npg
self.df_npgf = df_npgf
def plot_quantity_profiles_proteinGroups(self):
"""
Args:
self:
df_quantity_pr_pg: df; no index, columns: "filtering", "type", "npg", "npr", "npr_dc"; further information: see above
Returns:
"""
df_quantity_pr_pg = self.df_quantity_pr_pg
layout = go.Layout(barmode="overlay",
xaxis_tickangle=90,
autosize=False,
width=300,
height=500,
xaxis=go.layout.XAxis(linecolor="black",
linewidth=1,
#title="Map",
mirror=True),
yaxis=go.layout.YAxis(linecolor="black",
linewidth=1,
mirror=True),
template="simple_white")
fig_npg = go.Figure()
for t in df_quantity_pr_pg["type"].unique():
plot_df = df_quantity_pr_pg[df_quantity_pr_pg["type"] == t]
fig_npg.add_trace(go.Bar(
x=plot_df["filtering"],
y=plot_df["number of protein groups"],
name=t))
fig_npg.update_layout(layout, title="Number of Protein Groups", yaxis=go.layout.YAxis(title="Protein Groups"))
fig_npr = go.Figure()
for t in df_quantity_pr_pg["type"].unique():
plot_df = df_quantity_pr_pg[df_quantity_pr_pg["type"] == t]
fig_npr.add_trace(go.Bar(
x=plot_df["filtering"],
y=plot_df["number of profiles"],
name=t))
fig_npr.update_layout(layout, title="Number of Profiles")
df_quantity_pr_pg = df_quantity_pr_pg.sort_values("filtering")
fig_npr_dc = go.Figure()
for t in df_quantity_pr_pg["filtering"].unique():
plot_df = df_quantity_pr_pg[df_quantity_pr_pg["filtering"] == t]
fig_npr_dc.add_trace(go.Bar(
x=plot_df["type"],
y=plot_df["data completeness of profiles"],
name=t))
fig_npr_dc.update_layout(layout, title="Coverage", yaxis=go.layout.YAxis(title="Data completness"))
#fig_npr_dc.update_xaxes(tickangle=30)
fig_npg_F = px.bar(self.df_npg,
x="Fraction",
y="Protein Groups",
color="Protein Groups present in:",
template="simple_white",
title = "Protein groups per fraction - before filtering",
width=500)
fig_npgf_F = px.bar(self.df_npgf,
x="Fraction",
y="Protein Groups",
color="Protein Groups present in:",
template="simple_white",
title = "Protein groups per fraction - after filtering",
width=500)
fig_npg_F_dc = go.Figure()
for data_type in ["Data completeness after filtering", "Data completeness before filtering"]:
fig_npg_F_dc.add_trace(go.Bar(
x=self.df_npg_dc["Fraction"],
y=self.df_npg_dc[data_type],
name=data_type))
fig_npg_F_dc.update_layout(layout, barmode="overlay", title="Data completeness per fraction", yaxis=go.layout.YAxis(title=""), height=450, width=600)
return fig_npg, fig_npr, fig_npr_dc, fig_npg_F, fig_npgf_F, fig_npg_F_dc
def perform_pca(self):
"""
PCA will be performed, using logarithmized data.
Args:
self:
markerproteins: dictionary, key: cluster name, value: gene names (e.g. {"Proteasome" : ["PSMA1", "PSMA2", ...], "V-type proton ATPase" : [...], ...})
df_log_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "log profile" originates from logarithmized "LFQ intensity"
and "Ratio H/L", respectively; additionally the columns "MS/MS count" and "Ratio H/L count|Ratio H/L variability [%]" are stored
as single level indices
df_01_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "LFQ intensity" is 0-1 normalized and renamed to "normalized
profile"; the columns "normalized profile"" and "MS/MS count" are stored as single level indices; plotting is possible now
Returns:
self:
df_pca: df, PCA was performed, while keeping the information of the Maps
columns: "PC1", "PC2", "PC3"
index: "Protein IDs", "Majority protein IDs", "Protein names", "Gene names", "Q-value", "Score", "id", "Map" "Compartment"
df_pca_combined: df, PCA was performed across the Maps
columns: "PC1", "PC2", "PC3"
index: "Protein IDs", "Majority protein IDs", "Protein names", "Gene names", "Q-value", "Score", "id", "Compartment"
df_pca_all_marker_cluster_maps: PCA processed dataframe, containing the columns "PC1", "PC2", "PC3", filtered for marker genes, that are consistent
throughout all maps / coverage filtering.
"""
markerproteins = self.markerproteins
if self.acquisition == "SILAC - MQ":
df_01orlog_fracunstacked = self.df_log_stacked["log profile"].unstack("Fraction").dropna()
df_01orlog_MapFracUnstacked = self.df_log_stacked["log profile"].unstack(["Fraction", "Map"]).dropna()
elif self.acquisition.startswith("LFQ") or self.acquisition == "Custom":
df_01orlog_fracunstacked = self.df_01_stacked["normalized profile"].unstack("Fraction").dropna()
df_01orlog_MapFracUnstacked = self.df_01_stacked["normalized profile"].unstack(["Fraction", "Map"]).dropna()
pca = PCA(n_components=3)
# df_pca: PCA processed dataframe, containing the columns "PC1", "PC2", "PC3"
df_pca = pd.DataFrame(pca.fit_transform(df_01orlog_fracunstacked))
df_pca.columns = ["PC1", "PC2", "PC3"]
df_pca.index = df_01orlog_fracunstacked.index
self.df_pca = df_pca.sort_index(level=["Gene names", "Compartment"])
# df_pca: PCA processed dataframe, containing the columns "PC1", "PC2", "PC3"
df_pca_combined = pd.DataFrame(pca.fit_transform(df_01orlog_MapFracUnstacked))
df_pca_combined.columns = ["PC1", "PC2", "PC3"]
df_pca_combined.index = df_01orlog_MapFracUnstacked.index
self.df_pca_combined = df_pca_combined.sort_index(level=["Gene names", "Compartment"])
map_names = self.map_names
df_pca_all_marker_cluster_maps = pd.DataFrame()
df_pca_filtered = df_pca.unstack("Map").dropna()
for clusters in markerproteins:
for marker in markerproteins[clusters]:
try:
plot_try_pca = df_pca_filtered.xs(marker, level="Gene names", drop_level=False)
except KeyError:
continue
df_pca_all_marker_cluster_maps = df_pca_all_marker_cluster_maps.append(
plot_try_pca)
if len(df_pca_all_marker_cluster_maps) == 0:
df_pca_all_marker_cluster_maps = df_pca_filtered.stack("Map")
else:
df_pca_all_marker_cluster_maps = df_pca_all_marker_cluster_maps.stack("Map")
self.df_pca_all_marker_cluster_maps = df_pca_all_marker_cluster_maps.sort_index(level=["Gene names", "Compartment"])
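    # Hedged sketch (not part of the original class): the essence of perform_pca above is
    # "unstack the fractions into feature columns, then reduce to three components".
    # A standalone equivalent, assuming a Series of 0-1 normalized profiles whose index
    # contains a "Fraction" level, would be:
    @staticmethod
    def _pca_profile_sketch(profiles_stacked):
        unstacked = profiles_stacked.unstack("Fraction").dropna()
        pca = PCA(n_components=3)
        return pd.DataFrame(pca.fit_transform(unstacked),
                            index=unstacked.index, columns=["PC1", "PC2", "PC3"])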
def plot_global_pca(self, map_of_interest="Map1", cluster_of_interest="Proteasome", x_PCA="PC1", y_PCA="PC3", collapse_maps=False):
""""
PCA plot will be generated
Args:
self:
df_organellarMarkerSet: df, columns: "Gene names", "Compartment", no index
df_pca: PCA processed dataframe, containing the columns "PC1", "PC2", "PC3",
index: "Gene names", "Protein IDs", "C-Score", "Q-value", "Map", "Compartment",
Returns:
pca_figure: global PCA plot
"""
if collapse_maps == False:
df_global_pca = self.df_pca.unstack("Map").swaplevel(0,1, axis=1)[map_of_interest].reset_index()
else:
df_global_pca = self.df_pca_combined.reset_index()
for i in self.markerproteins[cluster_of_interest]:
df_global_pca.loc[df_global_pca["Gene names"] == i, "Compartment"] = "Selection"
compartments = self.df_organellarMarkerSet["Compartment"].unique()
compartment_color = dict(zip(compartments, self.css_color))
compartment_color["Selection"] = "black"
compartment_color["undefined"] = "lightgrey"
fig_global_pca = px.scatter(data_frame=df_global_pca,
x=x_PCA,
y=y_PCA,
color="Compartment",
color_discrete_map=compartment_color,
title= "Protein subcellular localization by PCA for {}".format(map_of_interest)
if collapse_maps == False else "Protein subcellular localization by PCA of combined maps",
hover_data=["Protein IDs", "Gene names", "Compartment"],
template="simple_white",
opacity=0.9
)
return fig_global_pca
def plot_cluster_pca(self, cluster_of_interest="Proteasome"):
"""
PCA plot will be generated
Args:
self:
markerproteins: dictionary, key: cluster name, value: gene names (e.g. {"Proteasome" : ["PSMA1", "PSMA2",...], ...}
df_pca_all_marker_cluster_maps: PCA processed dataframe, containing the columns "PC1", "PC2", "PC3", filtered for marker genes, that are
consistent throughout all maps / coverage filtering.
Returns:
pca_figure: PCA plot, for one protein cluster all maps are plotted
"""
df_pca_all_marker_cluster_maps = self.df_pca_all_marker_cluster_maps
map_names = self.map_names
markerproteins = self.markerproteins
try:
for maps in map_names:
df_setofproteins_PCA = pd.DataFrame()
for marker in markerproteins[cluster_of_interest]:
try:
plot_try_pca = df_pca_all_marker_cluster_maps.xs((marker, maps), level=["Gene names", "Map"],
drop_level=False)
except KeyError:
continue
df_setofproteins_PCA = df_setofproteins_PCA.append(plot_try_pca)
df_setofproteins_PCA.reset_index(inplace=True)
if maps == map_names[0]:
pca_figure = go.Figure(
data=[go.Scatter3d(x=df_setofproteins_PCA.PC1,
y=df_setofproteins_PCA.PC2,
z=df_setofproteins_PCA.PC3,
hovertext=df_setofproteins_PCA["Gene names"],
mode="markers",
name=maps
)])
else:
pca_figure.add_trace(go.Scatter3d(x=df_setofproteins_PCA.PC1,
y=df_setofproteins_PCA.PC2,
z=df_setofproteins_PCA.PC3,
hovertext=df_setofproteins_PCA["Gene names"],
mode="markers",
name=maps
))
pca_figure.update_layout(autosize=False, width=500, height=500,
title="PCA plot for <br>the protein cluster: {}".format(cluster_of_interest),
template="simple_white")
return pca_figure
except:
return "This protein cluster was not quantified"
def calc_biological_precision(self):
"""
This function calculates the biological precision of all quantified protein clusters. It provides access to the data slice for all marker proteins, the distance profiles and the aggregated distances. It repeatedly applies the methods get_marker_proteins_unfiltered and calc_cluster_distances.
TODO: integrate optional arguments for calc_cluster_distances: complex_profile, distance_measure.
        TODO: replace compatibility attributes with function return values and adjust attribute usage in downstream plotting functions.
Args:
self attributes:
markerproteins: dict, contains marker protein assignments
                df_01_stacked: df, contains 0-1 normalized data, required for execution of get_marker_proteins_unfiltered
Returns:
df_alldistances_individual_mapfracunstacked: df, distance profiles, fully unstacked
df_alldistances_aggregated_mapunstacked: df, profile distances (manhattan distance by default), fully unstacked
df_allclusters_01_unfiltered_mapfracunstacked: df, collected marker protein data
self attributes:
df_distance_noindex: compatibility version of df_alldistances_aggregated_mapunstacked
df_allclusters_01_unfiltered_mapfracunstacked
df_allclusters_clusterdist_fracunstacked_unfiltered: compatibility version of df_allclusters_01_unfiltered_mapfracunstacked (only used by quantificaiton_overview)
df_allclusters_clusterdist_fracunstacked: compatibility version of df_alldistances_individual_mapfracunstacked
genenames_sortedout_list = list of gene names with incomplete coverage
analysis_summary_dict entries:
"Manhattan distances" = df_distance_noindex
"Distances to the median profile": df_allclusters_clusterdist_fracunstacked, sorted and melted
"""
df_alldistances_individual_mapfracunstacked =
|
pd.DataFrame()
|
pandas.DataFrame
|
import sys
import os
import os.path, time
import glob
import datetime
import pandas as pd
import numpy as np
import csv
import featuretools as ft
import pyasx
import pyasx.data.companies
def get_holdings(file):
'''
holdings can come from export or data feed (simple)
'''
simple_csv = False
with open(file, encoding="utf8") as csvfile:
hold = csv.reader(csvfile, delimiter=',', quotechar='|')
line_count = 0
for row in hold:
if line_count == 1:
break
if 'Code' in row:
simple_csv = True
line_count += 1
if simple_csv:
holdings = pd.read_csv(file, header=0)
else:
holdings = pd.read_csv(file, skiprows=[0, 1, 3], header=0)
cols = [9, 10]
holdings.drop(holdings.columns[cols], axis=1, inplace=True)
holdings = holdings[:-4]
holdings = holdings.rename(columns={
'Purchase($)': 'Purchase $',
'Last($)': 'Last $',
'Mkt Value($)': 'Mkt Value $',
'Profit / Loss($)': 'Profit/Loss $',
'Profit / Loss(%)': 'P/L %',
'Change($)': 'Change $',
'Chg Value($)':'Value Chg $'
})
return holdings
def get_holdings_frame(data_path):
''' get holdings along a time series '''
pkl = data_path + 'Holdings.pkl'
df = None
holdings_files = [f for f in glob.glob(data_path + 'Holdings*.csv')]
for f in holdings_files:
modified = time.ctime(os.path.getmtime(f))
mod = datetime.datetime.strptime(modified, "%a %b %d %H:%M:%S %Y")
#print(f)
df = get_holdings(f)
df['Date'] = mod.date()
if 'Code' in df.columns:
df = df.rename(columns={'Code': 'Tick'})
df = df[df['Avail Units'].notnull()]
existing = pd.read_pickle(pkl) if os.path.isfile(pkl) else pd.DataFrame()
try:
has_holdings = existing[existing['Date'] == mod.date()] if not existing.empty else None
if not existing.empty and has_holdings.Tick.count() > 0:
continue
df = df.append(existing, ignore_index=True)
df.to_pickle(pkl)
except Exception as ex:
print('no pkl exists', str(ex))
df = df.sort_values(by=['Date', 'Tick'], ascending=False)
return df
def holdings(data_path, latest=True):
''' get the pickled holding data set '''
holding = pd.read_pickle(f'{data_path}Holdings.pkl')
holding['index'] = holding.index
return holding if not latest else holding[holding.Date == holding.Date.max()]
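# Hedged usage sketch (not part of the original module): refresh the pickled holdings
# history from any exported CSVs, then pull only the most recent snapshot.
# 'data/' is an assumed example directory.
def _example_refresh_and_query_holdings(data_path='data/'):
    get_holdings_frame(data_path)             # folds new Holdings*.csv files into Holdings.pkl
    latest = holdings(data_path, latest=True)
    return latest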
def get_transactions(file):
trans = pd.read_csv(file)
trans = trans.rename(columns={
'Detail': 'Details',
'Credit ($)':'Credit($)',
'Debit ($)':'Debit($)',
'Balance ($)':'Balance($)'
})
trans = trans.loc[trans.Details.str.startswith('B', na=False) | trans.Details.str.startswith('S', na=False)]
#del trans['Type']
trans.drop(trans.columns[-1], axis=1)
trans["Qty"] = trans["Details"].str.split(' ').str[1]
trans["Tick"] = trans["Details"].str.split(' ').str[2]
trans["Price"] = trans["Details"].str.split(' ').str[4]
trans["Type"] = trans.apply(lambda x: 'Sell' if str.startswith(x["Details"], 'S') else "Buy", axis=1)
trans['Date'] = pd.to_datetime(trans['Date'], format='%d/%m/%Y')
trans.drop(trans.columns[5], axis=1, inplace=True)
trans.sort_index(ascending=False, inplace=True)
trans.sort_values('Date', ascending=False)
return trans
def get_transaction_frame(data_path):
''' build a data frame from all transaction files'''
pkl = f'{data_path}Transactions.pkl'
df = None
files = [f for f in glob.glob(data_path + 'Transactions*.csv')]
for f in files:
modified = time.ctime(os.path.getmtime(f))
mod = datetime.datetime.strptime(modified, "%a %b %d %H:%M:%S %Y")
#print(mod.date())
df = get_transactions(f)
df = df.loc[:,~df.columns.duplicated()]
try:
existing = pd.read_pickle(pkl)
existing = existing.rename(columns={
'Detail': 'Details',
'Credit ($)':'Credit($)',
'Debit ($)':'Debit($)',
'Balance ($)':'Balance($)'
})
drop_index = []
for index, row in df.iterrows():
has_existing = existing[existing['Reference'] == row['Reference']]
if has_existing.empty:
continue
drop_index.append(index)
if len(drop_index) > 0:
#print(drop_index)
df.drop(drop_index, inplace=True)
df = df.append(existing, ignore_index=True)
#print(len(df.index))
except Exception as e:
print('no pkl exists', str(e))
df.to_pickle(pkl)
df = df.sort_values(by=['Date'], ascending=False)
df['index'] = df.index
return df
def get_dividends(df):
''' get dividends from transaction frame '''
div = df[df['Details'].str.contains('Direct Credit') | df['Details'].str.contains('Credit Interest')]
div = div[div['Details'].str.contains('COMMONWEALTH')==False]
div["Symbol"] = div["Details"].str.split(' ').str[3]
    div['Date'] = div['Date'].astype('datetime64[ns]')
    div = div.reindex(index=div.index[::-1])
    div['Sum'] = div['Amount'].cumsum()
return div
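# Hedged sketch (illustrative, not in the original module): get_dividends expects a frame
# whose 'Details' column still carries the credit lines, which matches the shape of the
# account export handled below; feeding it the account frame is an assumption here.
def _example_dividends(data_path='data/'):
    account = get_account_frame(data_path)
    return get_dividends(account)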
def get_account_transactions(file):
columns = ['Date', 'Amount', 'Details', 'Balance']
account = pd.read_csv(file, header=None, names=columns, dayfirst=True, parse_dates=['Date'])
return account
def get_account_frame(data_path):
files = [f for f in glob.glob(f'{data_path}Account*.csv')]
df = None
for f in files:
modified = time.ctime(os.path.getmtime(f))
mod = datetime.datetime.strptime(modified, "%a %b %d %H:%M:%S %Y")
#print(mod.date())
df = get_account_transactions(f)
pkl = f'{data_path}Account.pkl'
try:
existing = pd.read_pickle(pkl)
drop_index = []
for index, row in df.iterrows():
has_existing = existing[(existing['Date'] == row['Date']) & (existing['Amount'] == row['Amount'])]
if has_existing.empty:
continue
drop_index.append(index)
if len(drop_index) > 0:
df.drop(drop_index, inplace=True)
df = df.append(existing, ignore_index=True)
except Exception as e:
print('no pkl exists', str(e))
df.to_pickle(pkl)
df = df.sort_values(by=['Date'], ascending=False)
return df
def get_price_frame(data_path):
''' get price time-series ticker data ohlc '''
price_data = pd.read_pickle(f'{data_path}Prices.pkl')
price_data.drop_duplicates(subset=['Date', 'Tick'], keep='first', inplace=True)
price_data['index'] = price_data.index
return price_data
def get_companies_frame(data_path):
''' company details data frame '''
etf_data = f'{data_path}etf.json'
etf = pd.read_json(etf_data)
etf = etf.loc[1:]
etf_codes = etf['ASX Code'].tolist()
company_pkl = f'{data_path}Companies.pkl'
trans_pkl = f'{data_path}Transactions.pkl'
trans =
|
pd.read_pickle(trans_pkl)
|
pandas.read_pickle
|
#!/usr/bin/env python
from joshua.intervaltree import Interval, IntervalTree
from joshua.intervalforest import IntervalForest
import pandas as pd
import pyprind
class DataFrameInterval(Interval):
def __init__(self, idx, df, *args, **kwargs):
Interval.__init__(self, *args, **kwargs)
self.idx = idx
self.df = df
@property
def value(self):
return self.df.loc[self.idx]
def __str__(self):
return 'DataFrameInterval({s}, {e}, idx={idx}, '\
'chrom={chrom}, strand={strand})'.format(s=self.start, e=self.end, idx=self.idx,
chrom=self.chrom, strand=self.strand)
def get_gtf_coords(row):
'''
Extract the coordinates and Interval information from a GTF series
'''
return row.start, row.end, row.contig_id, row.strand
def get_blast_subject_coords(row):
'''
Extract the coordinates and Interval information for the subject in a BLAST hit
'''
return row.sstart, row.send, row.qseqid, row.sstrand
def get_blast_query_coords(row):
'''
Extract the coordinates and Interval information for the query in a BLAST hit
'''
return row.qstart, row.qend, row.sseqid, row.qstrand
def build_tree_from_group(group, ref_dataframe, coord_func, bar=None):
'''
Build an IntervalTree from a sub-DataFrame, for groupby() operations
'''
tree = IntervalTree()
for idx, row in group.iterrows():
if bar is not None:
bar.update()
start, end, chrom, strand = coord_func(row)
tree.insert_interval(DataFrameInterval(idx, ref_dataframe, start, end,
chrom=chrom, strand=strand))
return tree
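# Hedged sketch (not part of the original module): building a tree from a toy GTF-like
# frame and querying an overlap window. Column names follow get_gtf_coords above;
# the coordinates are arbitrary.
def _example_build_and_query_tree():
    toy = pd.DataFrame({'start': [100, 400],
                        'end': [250, 600],
                        'contig_id': ['chr1', 'chr1'],
                        'strand': ['+', '-']})
    tree = build_tree_from_group(toy, toy, get_gtf_coords)
    # every DataFrameInterval overlapping [200, 450)
    return tree.find(200, 450)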
def build_forest_from_groups(grouped_df, reference_df, coord_func, bar=None):
forest = IntervalForest()
for key, group in grouped_df:
tree = IntervalTree()
for idx, row in group.iterrows():
start, end, chrom, strand = coord_func(row)
tree.insert_interval(DataFrameInterval(idx, reference_df, start, end,
chrom=chrom, strand=strand))
if bar is not None:
bar.update()
forest.add_tree(key, tree)
return forest
def tree_intersect(tree_A, tree_B, cutoff=0.9):
'''
Find all overlaps of Intervals in A by B and return a `dict` of the results,
where keys are (annotation_id, alignment_id) tuples (with the id being the
interger index in the original DataFrames) and the values are the number
of bases overlapped in A.
'''
if type(tree_A) is not IntervalTree:
raise TypeError('tree_A must be a valid IntervalTree (got {t})'.format(t=type(tree_A)))
overlaps = []
if type(tree_B) is not IntervalTree:
def fn(node):
overlaps.append((node.interval.idx, None, None))
tree_A.traverse(fn)
else:
def overlap_fn(node):
iv = node.interval
res = tree_B.find(iv.start, iv.end)
if res:
for ov in res:
ov_len = calc_bases_overlapped(iv, [ov])
if (float(ov_len) / len(iv)) >= cutoff:
overlaps.append((iv.idx, ov.idx, ov_len))
else:
overlaps.append((iv.idx, ov.idx, None))
else:
overlaps.append((iv.idx, None, None))
tree_A.traverse(overlap_fn)
return overlaps
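# Hedged sketch: interpreting tree_intersect output. Each tuple is
# (ann_id, aln_id, overlapped_bases) when the overlap clears the coverage cutoff,
# (ann_id, aln_id, None) when it does not, and (ann_id, None, None) when there is no hit.
def _example_tree_intersect():
    ann = pd.DataFrame({'start': [0], 'end': [100], 'contig_id': ['chr1'], 'strand': ['+']})
    aln = pd.DataFrame({'start': [5], 'end': [100], 'contig_id': ['chr1'], 'strand': ['+']})
    tree_a = build_tree_from_group(ann, ann, get_gtf_coords)
    tree_b = build_tree_from_group(aln, aln, get_gtf_coords)
    return tree_intersect(tree_a, tree_b, cutoff=0.9)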
def tree_coverage_intersect(tree_A, tree_B, cutoff=0.9):
'''
    Like `tree_intersect`, but merges overlapping intervals in `tree_B` to
calculate the overlap length.
'''
overlaps = []
if type(tree_A) is not IntervalTree:
raise TypeError('tree_A must be a valid IntervalTree (got {t})'.format(t=type(tree_A)))
if type(tree_B) is not IntervalTree:
def fn(node):
overlaps.append((node.interval.idx, None))
tree_A.traverse(fn)
else:
def overlap_fn(node):
iv = node.interval
ov_list = tree_B.find(iv.start, iv.end)
if ov_list:
ov_len = calc_bases_overlapped(iv, ov_list)
#assert ov_len <= (iv.end - iv.start)
if (float(ov_len) / len(iv)) >= cutoff:
overlaps.append((iv.idx, ov_len))
else:
overlaps.append((iv.idx, None))
else:
overlaps.append((iv.idx, None))
tree_A.traverse(overlap_fn)
return overlaps
def get_ann_aln_overlap_df(forest_df, cutoff=0.9, merge=False, bar=None):
'''
Perform the tree intersect between all pairs of (annotation, alignment) IntervalTrees
in the given DataFrame
'''
data = []
if merge:
for contig_id, ann_tree, aln_tree in forest_df.itertuples():
if type(ann_tree) is IntervalTree: # as opposed to NaN
d = tree_coverage_intersect(ann_tree, aln_tree, cutoff=cutoff)
data.append(pd.DataFrame(d, columns=['ann_id', 'overlap_len']))
if bar:
bar.update()
else:
for contig_id, ann_tree, aln_tree in forest_df.itertuples():
if type(ann_tree) is IntervalTree:
d = tree_intersect(ann_tree, aln_tree, cutoff=cutoff)
data.append(pd.DataFrame(d, columns=['ann_id', 'aln_id', 'overlap_len']))
if bar is not None:
bar.update()
return pd.concat(data, axis=0)
def get_aln_ann_overlap_df(forest_df, cutoff=0.9, merge=False, bar=None):
'''
Perform the tree intersect between all pairs of (alignment, annotation) IntervalTrees
in the given DataFrame
'''
data = []
if merge:
for contig_id, ann_tree, aln_tree in forest_df.itertuples():
if type(aln_tree) is IntervalTree: # as opposed to NaN
d = tree_coverage_intersect(aln_tree, ann_tree, cutoff=cutoff)
data.append(
|
pd.DataFrame(d, columns=['aln_id', 'overlap_len'])
|
pandas.DataFrame
|
"""
Testing model.py module
"""
from unittest import TestCase
import numpy as np
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_index_equal, \
assert_frame_equal
from forecast_box.model import *
# TODO: Check forward steps <= 0
class ExampleModel(Model):
def __init__(self, forward_steps, ar_order, **kwargs):
Model.__init__(self, forward_steps, ar_order, **kwargs)
def _train_once(self, y_train, X_train):
return {'theta': 123}
def _predict_once(self, X_test, forward_step):
return pd.Series(data=[9, 9, 9],
index=pd.date_range('2000-01-03', periods=3))
class TestModel(TestCase):
def setUp(self):
self.time_series = pd.Series(data=[10, 9, 8, 7, 6],
index=pd.date_range('2000-01-01',
periods=5))
self.model = ExampleModel(forward_steps=[2, 3], ar_order=1)
fake_metric_fun = lambda x, y: 555
self.model.train(self.time_series, metric_fun=fake_metric_fun)
def test_create_valid_input(self):
for name, type in [('last_value', LastValue),
('mean', Mean),
('linear_regression', LinearRegression),
('random_forest', RandomForest)]:
model = Model.create(name, {'forward_steps': [2, 3],
'ar_order': 1,
'add_day_of_week': True})
self.assertIsInstance(model, type)
self.assertListEqual(model.fixed_params['forward_steps'], [2, 3])
self.assertEqual(model.fixed_params['ar_order'], 1)
self.assertEqual(model.name, name)
self.assertEqual(model.fixed_params['min_size'], 4)
self.assertEqual(model.fixed_params['add_day_of_week'], True)
def test_create_invalid_input(self):
self.assertRaises(Exception, Model.create, 'blabla',
{'forward_steps': [1, 2, 3], 'ar_order': 1})
def test_kwargs_default(self):
model = ExampleModel(forward_steps=[2, 3], ar_order=1)
self.assertEqual(model.fixed_params['add_day_of_week'], False)
def test_train_trained_params(self):
self.assertDictEqual(self.model.trained_params, {2: {'theta': 123},
3: {'theta': 123}})
def test_train_fitted_values(self):
expected = pd.Series(data=[9, 9, 9],
index=pd.date_range('2000-01-03', periods=3))
assert_frame_equal(self.model.fitted_values,
pd.DataFrame({2: expected,
3: expected}))
def test_train_residuals(self):
expected_2 = pd.Series(data=[-1, -2, -3],
index=pd.date_range('2000-01-03', periods=3))
expected_3 = pd.Series(data=[np.nan, -2.0, -3.0],
index=pd.date_range('2000-01-03', periods=3))
assert_frame_equal(self.model.residuals,
pd.DataFrame({2: expected_2, 3: expected_3}))
def test_train_metric(self):
self.assertDictEqual(self.model.metric, {2: 555, 3: 555})
def test_train_small_data(self):
time_series = pd.Series(data=[10],
index=pd.date_range('2000-01-01', periods=1))
model = ExampleModel(forward_steps=[2, 3], ar_order=1)
self.assertRaises(Exception, model.train, time_series)
def test_forecast_results(self):
expected = pd.Series(data=[9, 9, 9, 9, 9, 9],
index=pd.date_range('2000-01-03',
periods=3).append(
pd.date_range('2000-01-03', periods=3)))
assert_series_equal(self.model.forecast(self.time_series), expected)
def test_forecast_small_data(self):
time_series = pd.Series(data=[10],
index=pd.date_range('2000-01-01', periods=1))
model = ExampleModel(forward_steps=[2, 3], ar_order=1)
self.assertRaises(Exception, model.forecast, time_series)
def test_summarize(self):
pass
def test_plot(self):
pass
class TestLastValue(TestCase):
def setUp(self):
self.time_series = pd.Series(data=[1, 2, 3, 4, 5],
index=pd.date_range('2000-01-01',
periods=5))
self.model = LastValue(forward_steps=[1], ar_order=2)
self.model.train(self.time_series)
self.forecasted_values = self.model.forecast(self.time_series)
def test_predicted_values(self):
self.assertListEqual(self.model.fitted_values[1].values.tolist(),
[2.0, 3.0, 4.0])
assert_index_equal(self.model.fitted_values[1].index,
|
pd.date_range('2000-01-03', periods=3)
|
pandas.date_range
|
"""Failure analysis of national-scale networks
For transport modes at national scale:
- rail
- Can do raod as well
Input data requirements
-----------------------
1. Correct paths to all files and correct input parameters
2. csv sheets with results of flow mapping based on MIN-MAX generalised costs estimates:
- origin - String node ID of Origin
- destination - String node ID of Destination
- origin_province - String name of Province of Origin node ID
- destination_province - String name of Province of Destination node ID
- min_edge_path - List of string of edge ID's for paths with minimum generalised cost flows
- max_edge_path - List of string of edge ID's for paths with maximum generalised cost flows
- min_distance - Float values of estimated distance for paths with minimum generalised cost flows
- max_distance - Float values of estimated distance for paths with maximum generalised cost flows
- min_time - Float values of estimated time for paths with minimum generalised cost flows
- max_time - Float values of estimated time for paths with maximum generalised cost flows
- min_gcost - Float values of estimated generalised cost for paths with minimum generalised cost flows
- max_gcost - Float values of estimated generalised cost for paths with maximum generalised cost flows
- industry_columns - All daily tonnages of industry columns given in the OD matrix data
3. Shapefiles
- edge_id - String/Integer/Float Edge ID
- geometry - Shapely LineString geomtry of edges
Results
-------
Csv sheets with results of failure analysis:
1. All failure scenarios
- edge_id - String name or list of failed edges
- origin - String node ID of Origin of disrupted OD flow
- destination - String node ID of Destination of disrupted OD flow
- origin_province - String name of Province of Origin node ID of disrupted OD flow
- destination_province - String name of Province of Destination node ID of disrupted OD flow
- no_access - Boolean 1 (no reroutng) or 0 (rerouting)
- min/max_distance - Float value of estimated distance of OD journey before disruption
- min/max_time - Float value of estimated time of OD journey before disruption
- min/max_gcost - Float value of estimated travel cost of OD journey before disruption
- new_cost - Float value of estimated cost of OD journey after disruption
- new_distance - Float value of estimated distance of OD journey after disruption
- new_path - List of string edge ID's of estimated new route of OD journey after disruption
- new_time - Float value of estimated time of OD journey after disruption
- dist_diff - Float value of Post disruption minus per-disruption distance
- time_diff - Float value Post disruption minus per-disruption timee
- min/max_tr_loss - Float value of estimated change in rerouting cost
- min/max_tons - Float values of total daily tonnages along disrupted OD pairs
- industry_columns - Float values of all daily tonnages of industry columns along disrupted OD pairs
2. Isolated OD scenarios - OD flows with no rerouting options
- edge_id - String name or list of failed edges
- origin_province - String name of Province of Origin node ID of disrupted OD flow
- destination_province - String name of Province of Destination node ID of disrupted OD flow
- industry_columns - Float values of all daily tonnages of industry columns along disrupted OD pairs
- min/max_tons - Float values of total daily tonnages along disrupted OD pairs
3. Rerouting scenarios - OD flows with rerouting options
- edge_id - String name or list of failed edges
- origin_province - String name of Province of Origin node ID of disrupted OD flow
- destination_province - String name of Province of Destination node ID of disrupted OD flow
- min/max_tr_loss - Float value of change in rerouting cost
- min/max_tons - Float values of total daily tonnages along disrupted OD pairs
4. Min-max combined scenarios - Combined min-max results along each edge
- edge_id - String name or list of failed edges
- no_access - Boolean 1 (no reroutng) or 0 (rerouting)
- min/max_tr_loss - Float values of change in rerouting cost
- min/max_tons - Float values of total daily tonnages affted by disrupted edge
"""
import ast
import copy
import csv
import itertools
import math
import operator
import os
import sys
import igraph as ig
import networkx as nx
import numpy as np
import pandas as pd
from atra.utils import *
from atra.transport_flow_and_failure_functions import *
def main():
"""Estimate failures
Specify the paths from where you want to read and write:
1. Input data
2. Intermediate calcuations data
3. Output results
Supply input data and parameters
1. Names of modes
String
2. Names of min-max tons columns in sector data
List of string types
3. Min-max names of names of different types of attributes - paths, distance, time, cost, tons
List of string types
4. Names of commodity/industry columns for which min-max tonnage column names already exist
List of string types
5. Percentage of OD flows that are assumed disrupted
List of float type
6. Condition on whether analysis is single failure or multiple failure
Boolean condition True or False
Give the paths to the input data files:
1. Network edges csv and shapefiles
2. OD flows csv file
3. Failure scenarios csv file
Specify the output files and paths to be created
"""
data_path, calc_path, output_path = load_config()['paths']['data'], load_config()[
'paths']['calc'], load_config()['paths']['output']
# Supply input data and parameters
modes = [
{
'sector':'rail',
'min_tons_column':'min_total_tons',
'max_tons_column':'max_total_tons',
}
]
types = ['min', 'max']
path_types = ['min_edge_path', 'max_edge_path']
dist_types = ['min_distance', 'max_distance']
time_types = ['min_time', 'max_time']
cost_types = ['min_gcost', 'max_gcost']
index_cols = ['origin_id', 'destination_id', 'origin_province', 'destination_province']
percentage = [100.0]
single_edge = True
# Give the paths to the input data files
network_data_path = os.path.join(data_path,'network')
flow_paths_data = os.path.join(output_path, 'flow_mapping_paths')
fail_scenarios_data = os.path.join(
output_path, 'hazard_scenarios')
# Specify the output files and paths to be created
shp_output_path = os.path.join(output_path, 'failure_shapefiles')
if os.path.exists(shp_output_path) == False:
os.mkdir(shp_output_path)
fail_output_path = os.path.join(output_path, 'failure_results')
if os.path.exists(fail_output_path) == False:
os.mkdir(fail_output_path)
all_fail_scenarios = os.path.join(fail_output_path,'all_fail_scenarios')
if os.path.exists(all_fail_scenarios) == False:
os.mkdir(all_fail_scenarios)
isolated_ods = os.path.join(fail_output_path,'isolated_od_scenarios')
if os.path.exists(isolated_ods) == False:
os.mkdir(isolated_ods)
isolated_ods = os.path.join(fail_output_path,'isolated_od_scenarios','multi_mode')
if os.path.exists(isolated_ods) == False:
os.mkdir(isolated_ods)
rerouting = os.path.join(fail_output_path,'rerouting_scenarios')
if os.path.exists(rerouting) == False:
os.mkdir(rerouting)
minmax_combine = os.path.join(fail_output_path,'minmax_combined_scenarios')
if os.path.exists(minmax_combine) == False:
os.mkdir(minmax_combine)
# Create the multi-modal networks
print ('* Creating multi-modal networks')
mds = ['road', 'rail', 'port', 'multi']
G_multi_df = []
for m in range(len(mds)):
# Load mode igraph network and GeoDataFrame
print ('* Loading {} igraph network and GeoDataFrame'.format(mds[m]))
G_df = pd.read_csv(os.path.join(network_data_path,'{}_edges.csv'.format(mds[m])),encoding='utf-8-sig').fillna(0)
if mds[m] == 'rail':
e_flow = pd.read_csv(os.path.join(output_path,'flow_mapping_combined','weighted_flows_{}_100_percent.csv'.format(mds[m])))[['edge_id','max_total_tons']]
G_df =
|
pd.merge(G_df,e_flow[['edge_id','max_total_tons']],how='left',on=['edge_id'])
|
pandas.merge
|
#!/usr/bin/env python3
#
# get_jma_jp
# 日本周辺のひまわり画像を最大解像度(ズームレベル6)で表示する
# 2021/03/29 初版 山下陽介(国立環境研究所)
# 2021/07/09 ズームレベル6対応
#
# Google Colaboratoryで動作するバージョンは、次のリンクから取得可能
# https://colab.research.google.com/drive/1NtZSQR-JREDH1PnL7T-eInR-CW046iKK
#
import os
import sys
import time
import cv2
import json
import numpy as np
import pandas as pd
import itertools
import urllib.request
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
# 最新の画像だけを取得するかどうか
opt_latest = False # 取得開始する時刻以降の全時刻の画像を使用する場合(注意)
#opt_latest = True # 最新の画像だけを取得する場合
# 取得開始する時刻(UTC)
start_time_UTC = "20210827 22:00:00"
opt_map = False # 地図を重ねるかどうか
#opt_map = True # 地図を重ねるかどうか
# 画像の種類
#mtype = "l" # 赤外
#mtypef = "l" # 赤外
mtype = "tc" # 可視トゥルーカラー再現画像
mtypef = "t" # 可視トゥルーカラー再現画像
#mtype = "ct" # 雲頂強調
#mtypef = "m" # 雲頂強調
#
# 取得するタイル座標の設定
opt_jp = True # Trueではズームレベル6を取得
#opt_jp = False # Falseではズームレベル3〜5を取得
z = 6 # ズームレベル(全球:3〜5、日本域最大:6, 6ではopt_jp=Trueが必要)
x = 55 # x方向の開始位置
y = 26 # y方向
nmax = 4 # タイルの数
# 画像ファイルを保存するかどうか
opt_filesave = True # ファイルに保存(opt_latest = Falseの場合はデータサイズに注意)
#opt_filesave = False # 画面に表示(opt_latest = Falseの場合は全部表示されるので注意)
def os_mkdir(path_to_dir):
"""ディレクトリを作成する
Parameters:
----------
path_to_dir: str
作成するディレクトリ名
----------
"""
if not os.path.exists(path_to_dir):
os.makedirs(path_to_dir)
def get_fileinfo(opt_jp=False):
"""時刻データダウンロード
Parameters:
----------
opt_jp: bool
日本付近の最高解像度データを取得するかどうか
----------
Returns:
----------
basetimes: pandas.Series(str, str, ...)
時刻データから取得したbasetime
validtimes: pandas.Series(str, str, ...)
時刻データから取得したvalidtime
----------
"""
# ダウンロード
if opt_jp: # 日本付近のデータ
url = "https://www.jma.go.jp/bosai/himawari/data/satimg/targetTimes_jp.json"
else: # 全球画像データ
url = "https://www.jma.go.jp/bosai/himawari/data/satimg/targetTimes_fd.json"
urllib.request.urlretrieve(url, "targetTimes.json")
#
# JSON形式読み込み
with open("targetTimes.json", 'rt') as fin:
data = fin.read()
df = pd.DataFrame(json.loads(data))
print(df)
basetimes = df.loc[:, 'basetime']
validtimes = df.loc[:, 'validtime']
return basetimes, validtimes
def get_jpg(basetime=None,
validtime=None,
mtype="l",
tile="3/7/3",
opt_jp=False):
"""ひまわり画像の取得
Parameters:
----------
basetime: str
時刻データから取得したもの
validtime: str
時刻データから取得したもの
mtype: str
赤外画像("l")、可視画像("s")、水蒸気画像("v")、
トゥルーカラー再現画像("tc")、雲頂強調画像("ct")
tile: str
タイル番号
(確認ページ、https://maps.gsi.go.jp/development/tileCoordCheck.html)
opt_jp: bool
日本付近の最高解像度データを取得するかどうか
----------
Returns:
----------
im: PIL.PngImagePlugin.PngImageFile
ダウンロードした画像データ
----------
"""
if basetime is None or validtime is None:
raise ValueError("basetime and validtime are needed")
#
urlbase = "https://www.jma.go.jp/bosai/himawari/data/satimg/"
if mtype == "l": # 赤外画像
band_prod = "B13/TBB"
elif mtype == "s": # 可視画像
band_prod = "B03/ALBD"
elif mtype == "v": # 水蒸気画像
band_prod = "B08/TBB"
elif mtype == "tc": # トゥルーカラー再現画像
band_prod = "REP/ETC"
elif mtype == "ct": # 雲頂強調画像
band_prod = "SND/ETC"
else:
raise ValueError("Invalid mtyp")
# URL
if opt_jp: # 日本付近のデータ
url = urlbase + basetime + "/jp/" + validtime + "/" + band_prod + "/" + tile + ".jpg"
else: # 全球画像データ
url = urlbase + basetime + "/fd/" + validtime + "/" + band_prod + "/" + tile + ".jpg"
print(url)
im = Image.open(urllib.request.urlopen(url))
return im
def get_tile(tile="3/7/3", mtype="std"):
"""地理院タイル画像の取
Parameters:
----------
tile: str
タイル番号
(確認ページ、https://maps.gsi.go.jp/development/tileCoordCheck.html)
mtype: str
地図のタイプ
(std:標準地図、pale:淡色地図、blank:白地図(5-)、seamlessphoto:写真)
----------
Returns:
----------
im: PIL.PngImagePlugin.PngImageFile
ダウンロードした画像データ
----------
"""
urlbase = "https://cyberjapandata.gsi.go.jp/xyz/"
#"https://cyberjapandata.gsi.go.jp/xyz/std/6/57/23.png"
    if mtype not in ("std", "pale", "blank", "seamlessphoto"):
        raise ValueError("Invalid mtype")
# URL
url = urlbase + mtype + "/" + tile + ".png"
print(url)
im = Image.open(urllib.request.urlopen(url))
return im
def pil2cv(im):
"""PIL型 -> OpenCV型"""
im = np.array(im, dtype=np.uint8)
if im.ndim == 2: # モノクロ
pass
elif im.shape[2] == 3: # カラー
im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
elif im.shape[2] == 4: # 透過
im = cv2.cvtColor(im, cv2.COLOR_RGBA2BGRA)
return im
def map_blend(im1, im2, alpha=0.5, beta=0.5, gamma=0.0):
"""画像のブレンド
Parameters:
----------
im1: numpy.ndarray
画像データ1
im2: numpy.ndarray
画像データ2
alpha: float
ブレンドする比率(画像データ1)
beta: float
ブレンドする比率(画像データ2)
gamma: float
全体に足す値
----------
Returns:
----------
im: numpy.ndarray
ブレンドした画像データ
im = im1 * alpha + im2 * beta + gamma
----------
"""
# ブレンド
im = cv2.addWeighted(im1, alpha, im2, beta, gamma)
return im
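# Hedged sketch (illustrative only): blending two small synthetic single-channel images
# with map_blend; any two arrays of identical shape and dtype behave the same way.
def _example_map_blend():
    base = np.full((4, 4), 200, dtype=np.uint8)   # bright background
    overlay = np.zeros((4, 4), dtype=np.uint8)    # dark overlay
    # each pixel becomes base * 0.7 + overlay * 0.3
    return map_blend(base, overlay, alpha=0.7, beta=0.3)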
def check_tile(z, x, y):
"""有効なタイルかどうかをチェックする
(404エラー回避のため)
Parameters:
----------
z: int
ズームレベル
x: int
経度方向のタイル座標
y: int
緯度方向のタイル座標
----------
"""
valid = False
if z == 6:
if y >= 21 and y <= 29 and x >= 51 and x <= 60:
valid = True
elif z == 5:
if y >= 0 and y <= 31 and x >= 19 and x <= 31:
valid = True
elif z == 4:
if y >= 0 and y <= 15 and x >= 9 and x <= 15:
valid = True
elif z == 3:
if y >= 0 and y <= 7 and x >= 4 and x <= 7:
valid = True
return valid
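# Hedged sketch: check_tile only brackets the Himawari tile coverage per zoom level,
# so the default Japan-area request in this script (z=6, x=55.., y=26..) passes,
# while a far-off tile does not. The values below are arbitrary examples.
def _example_check_tile():
    inside = check_tile(6, 55, 26)    # within the zoom-6 box -> True
    outside = check_tile(6, 10, 10)   # outside the box -> False
    return inside, outside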
def draw_tile(z=5, y=12, x=27, nmax=4, file_path=None, mtype="blank"):
"""地理院タイルの作成
Parameters:
----------
z: int
ズームレベル
x: int
経度方向のタイル座標:開始座標(左上)タイルの値
y: int
緯度方向のタイル座標:開始座標(左上)タイルの値
nmax: int
タイルの数
file_path: str
保存するファイルのパス(Noneの場合は、ファイル保存しない)
mtype: str
取得する画像の種類
----------
"""
nx = max(int(np.sqrt(nmax)), 1)
ny = max(int(np.sqrt(nmax)), 1)
x_size = 9
y_size = 9
if nx * ny < nmax:
nx = nx + 1
y_size = 6
if nx * ny < nmax:
ny = ny + 1
y_size = 9
# プロットエリアの定義
fig = plt.figure(figsize=(8, 8))
# 軸の追加
xx = x
yy = y
for n in np.arange(nmax):
if xx == x + nx:
xx = x
yy = yy + 1
ax = fig.add_subplot(ny, nx, n + 1)
ax.xaxis.set_major_locator(ticker.NullLocator())
ax.yaxis.set_major_locator(ticker.NullLocator())
# 軸を表示しない
plt.axis('off')
plt.gca().spines['top'].set_visible(False)
plt.gca().spines['bottom'].set_visible(False)
plt.gca().spines['left'].set_visible(False)
plt.gca().spines['right'].set_visible(False)
#
tile = str(z) + "/" + str(xx) + "/" + str(yy)
opt_draw = False
if check_tile(z, xx, yy):
try:
# 地理院タイルの取得
im = get_tile(tile, mtype=mtype)
time.sleep(1.0) # 1秒間待つ
opt_draw = True
except:
raise Exception("not found")
else:
raise ValueError("invalid tile")
# 画像を表示
if opt_draw:
ax.imshow(im, aspect='equal')
xx = xx + 1
# プロット範囲の調整
plt.subplots_adjust(top=1.0,
bottom=0.0,
left=0.0,
right=1.0,
wspace=0.0,
hspace=0.0)
# ファイル書き出し
if file_path is not None:
plt.savefig(file_path, dpi=150, bbox_inches='tight')
def draw_sat(z=5,
y=12,
x=27,
nmax=4,
basetime=None,
validtime=None,
file_path=None,
opt_jp=False,
mtype="tc"):
"""衛星画像を取得し結合する
Parameters:
----------
z: int
ズームレベル
x: int
経度方向のタイル座標:開始座標(左上)タイルの値
y: int
緯度方向のタイル座標:開始座標(左上)タイルの値
nmax: int
タイルの数
basetime: str
時刻データから取得したもの
validtime: str
時刻データから取得したもの
file_path: str
保存するファイルのパス(Noneの場合は、ファイル保存しない)
opt_jp: bool
日本付近の最高解像度データを取得するかどうか
mtype: str
取得する画像の種類
----------
"""
nx = max(int(np.sqrt(nmax)), 1)
ny = max(int(np.sqrt(nmax)), 1)
x_size = 9
y_size = 9
if nx * ny < nmax:
nx = nx + 1
y_size = 6
if nx * ny < nmax:
ny = ny + 1
y_size = 9
if basetime is None or validtime is None:
raise ValueError('basetime & validtime are required')
# プロットエリアの定義
fig = plt.figure(figsize=(x_size, y_size))
# 軸の追加
xx = x
yy = y
for n in np.arange(nmax):
if xx == x + nx:
xx = x
yy = yy + 1
ax = fig.add_subplot(ny, nx, n + 1)
ax.xaxis.set_major_locator(ticker.NullLocator())
ax.yaxis.set_major_locator(ticker.NullLocator())
# 軸を表示しない
plt.axis('off')
plt.gca().spines['top'].set_visible(False)
plt.gca().spines['bottom'].set_visible(False)
plt.gca().spines['left'].set_visible(False)
plt.gca().spines['right'].set_visible(False)
#
tile = str(z) + "/" + str(xx) + "/" + str(yy)
print(tile)
opt_draw = False
if check_tile(z, xx, yy):
try:
# 画像の取得
im = get_jpg(basetime,
validtime,
mtype=mtype,
tile=tile,
opt_jp=opt_jp)
time.sleep(1.0) # 1秒間待つ
opt_draw = True
except:
raise Exception("not found")
else:
raise ValueError("invalid tile")
# 画像を表示
if opt_draw:
ax.imshow(im, aspect='equal')
xx = xx + 1
# プロット範囲の調整
plt.subplots_adjust(top=1.0,
bottom=0.0,
left=0.0,
right=1.0,
wspace=0.0,
hspace=0.0)
# ファイル書き出し
if file_path is not None:
plt.savefig(file_path, dpi=150, bbox_inches='tight')
plt.close()
def draw_jp(z=5,
y=12,
x=27,
nmax=4,
basetime=None,
validtime=None,
title=None,
file_path=None,
opt_jp=False,
opt_map=False,
mtype="tc"):
"""衛星画像の作図
Parameters:
----------
z: int
ズームレベル
x: int
経度方向のタイル座標:開始座標(左上)タイルの値
y: int
緯度方向のタイル座標:開始座標(左上)タイルの値
nmax: int
タイルの数
basetime: str
時刻データから取得したもの
validtime: str
時刻データから取得したもの
title: str
図のタイトル(Noneの場合はタイトルを付けない)
file_path: str
保存するファイルのパス(Noneの場合は、ファイル保存しない)
opt_jp: bool
日本付近の最高解像度データを取得するかどうか
opt_map: bool
地図を重ねるかどうか
mtype: str
取得する画像の種類
----------
"""
if basetime is None or validtime is None:
raise ValueError('basetime & validtime are required')
# 地理院タイルの作成
if opt_map:
if not os.path.exists("map_tile.jpg"):
draw_tile(z=z,
y=y,
x=x,
nmax=nmax,
file_path="map_tile.jpg",
mtype="blank")
# 衛星画像の作成
draw_sat(z=z,
y=y,
x=x,
nmax=nmax,
basetime=basetime,
validtime=validtime,
file_path="map_sat.jpg",
opt_jp=opt_jp,
mtype=mtype)
# プロットエリアの定義
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(1, 1, 1)
ax.xaxis.set_major_locator(ticker.NullLocator())
ax.yaxis.set_major_locator(ticker.NullLocator())
# 軸を表示しない
plt.axis('off')
plt.gca().spines['top'].set_visible(False)
plt.gca().spines['bottom'].set_visible(False)
plt.gca().spines['left'].set_visible(False)
plt.gca().spines['right'].set_visible(False)
if opt_map:
# 一時ファイルの読み込み
src1 = cv2.imread("map_tile.jpg")
src2 = cv2.imread("map_sat.jpg")
# 画像変換
#src1 = pil2cv(src1) #
src1 = cv2.bitwise_not(src1) # 白黒反転
src2 = pil2cv(src2) # cv2のRGB値へ変換
src2 = cv2.resize(src2, dsize=(src1.shape[0], src1.shape[1]))
# 画像をブレンド
im = map_blend(src1, src2, 0.4, 1.0)
#im = map_blend(src1, src2, 0.2, 0.8)
else:
src = cv2.imread("map_sat.jpg")
im = pil2cv(src) # cv2のRGB値へ変換
# 画像を表示
ax.imshow(im, aspect='equal')
# タイトル
if title is not None:
ax.set_title(title, fontsize=20, color='k')
# プロット範囲の調整
plt.subplots_adjust(top=1.0,
bottom=0.0,
left=0.0,
right=1.0,
wspace=0.0,
hspace=0.0)
# ファイル書き出し
if file_path is not None:
plt.savefig(file_path, dpi=150, bbox_inches='tight')
else:
plt.show()
plt.close()
if __name__ == '__main__':
# 時刻データの読み込み
basetimes, validtimes = get_fileinfo(opt_jp=False)
if opt_latest:
basetimes = pd.Series(basetimes.iloc[-1])
validtimes = pd.Series(validtimes.iloc[-1])
if start_time_UTC is not None:
time_start = pd.to_datetime(start_time_UTC)
else:
time_start = pd.to_datetime(basetimes.iloc[0])
#
for basetime, validtime in zip(basetimes, validtimes):
print(basetime, validtime)
# 時刻
time_UTC =
|
pd.to_datetime(basetime)
|
pandas.to_datetime
|
#!/usr/bin/env python3
# python script to get financial data and store it in a .csv file
import yfinance as yf
import pandas as pd
import datetime
import sys
def get_financial_data(ticker, days_back, time_interval):
today = datetime.datetime.today()
start_date = today + datetime.timedelta(days = -1*days_back)
data = yf.download(ticker, start=start_date, end=today, interval=time_interval)
return data
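# Hedged addition (not in the original script): the header comment says the data should
# end up in a .csv file; a minimal helper for that step could look like this.
# The output filename convention is an assumption.
def save_financial_data(data, ticker, time_interval):
    data.to_csv("{}_{}.csv".format(ticker, time_interval))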
symbol = sys.argv[1]
interval = sys.argv[2]
data = get_financial_data(symbol, 180, interval)
|
pd.DataFrame(data)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 13 08:58:04 2019
@author: <NAME>
"""
####################################################################
#Federal Columbia River Power System Model developed from HYSSR
#This version operates on a daily time step.
####################################################################
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def simulate(sim_years):
    def ismember(A, B):
        # membership test collapsed to 1/0; A is expected to be a scalar here
        return 1 if np.in1d(A, B).any() else 0
#Data input - select flows september 1 (244 julian date)
#d=pd.read_excel('Synthetic_streamflows/BPA_hist_streamflow.xlsx',usecols=range(3,58), header=None, names=np.arange(0,55))
d=pd.read_csv('Synthetic_streamflows/synthetic_streamflows_FCRPS.csv',header=None)
d = d.iloc[0:(sim_years+3)*365,:]
d = d.iloc[243:len(d)-122,:]
d= d.reset_index(drop=True)
[r, c]= d.shape
for i in range(0,r):
for j in range(0,c):
if np.isnan(d.iloc[i,j]) == True:
d.iloc[i,j] = 0
no_days = int(len(d))
no_years = int(no_days/365)
calender=pd.read_excel('PNW_hydro/FCRPS/daily_streamflows.xlsx','Calender',header=None)
c=np.zeros((no_days,4))
for i in range(0,no_years):
c[i*365:i*365+365,0] = calender.iloc[:,0]
c[i*365:i*365+365,1] = calender.iloc[:,1]
c[i*365:i*365+365,2] = calender.iloc[:,2]
c[i*365:i*365+365,3] = calender.iloc[:,3]+i
#month, day, year of simulation data
months = c[:,0]
days = c[:,1]
julians = c[:,2]
no_days = len(c)
years = c[:,3]
#Simulated Runoff ("local" flow accretions defined by USACE and BPA)
local = d
#%Project indices (consistent with HYSSR)
#% No. Name HYSSR ID
#% 0 MICA 1
#% 1 ARROW 2
#% 2 LIBBY 3
#% 3 DUNCAN 5
#% 4 <NAME> 6
#% 5 HUNGRY HORSE 10
#% 6 KERR 11
#% 7 <NAME> 16
#% 8 POST FALLS 18
#% 9 <NAME> 19
#% 10 CHELAN 20
#% 11 BROWNLEE 21
#% 12 DWORSHAK 31
#% 13 NOXON 38
#% 14 ROUND BUTTE 40
#% 15 REVELSTOKE 41
#% 16 SEVEN MILE 46
#% 17 BRILLIANT 50
#% 18 <NAME> 54
#% 19 CABINET GRGE 56
#% 20 BOX CANYON 57
#% 21 BOUNDARY 58
#% 22 WANETA 59
#% 23 UPPER FALLS 61
#% 24 MONROE ST 62
#% 25 NINE MILE 63
#% 26 LONG LAKE 64
#% 27 LITTLE FALLS 65
#% 28 <NAME> 66
#% 29 WELLS 67
#% 30 ROCKY REACH 68
#% 31 ROCK ISLAND 69
#% 32 WANAPUM 70
#% 33 PRIE<NAME> 71
#% 34 OXBOW 72
#% 35 LOWER GRANITE 76
#% 36 LITTLE GOOSE 77
#% 37 LOWER MONUMENTAL 78
#% 38 <NAME> 79
#% 39 MCNARY 80
#% 40 <NAME> 81
#% 41 DALLES 82
#% 42 BONNEVILLE 83
#% 43 <NAME> 84
#% 44 PELTON 95
#% 45 <NAME> 146
#% 46 <NAME> 400
#%Simulated unregulated flows for The Dalles. These flows are used to
#%adjust rule curves for storage reservoirs. In reality, ESP FORECASTS are
#%used-- but for now, the model assumes that BPA/USACE gets the forecast
#%exactly right.
TDA_unreg = d.iloc[:,47]
############
#%Additional input to fix the model
#
#%Fix No.1 Kerr Dam lack of input from CFM
CFM5L= d.iloc[:,48]
#%add to Kerr
#
#%Fix No.2 Lower Granite lack of input from 5 sources
#%Following will be add ti LWG
ORF5H= d.iloc[:,49]
SPD5L= d.iloc[:,50]
ANA5L= d.iloc[:,51]
LIM5L= d.iloc[:,52]
WHB5H= d.iloc[:,53]
#%
#%Fix No.3 lack of input McNary
#%
YAK5H= d.iloc[:,54]
##############################################
##############################################
#%Flood control curves
MCD_fc = pd.read_excel('PNW_hydro/FCRPS/res_specs2.xlsx',sheet_name='Mica_Daily',usecols='B:M',skiprows=3,header=None)
ARD_fc = pd.read_excel('PNW_hydro/FCRPS/res_specs2.xlsx',sheet_name='Arrow_Daily',usecols='B:G',skiprows=3,header=None)
LIB_fc =
|
pd.read_excel('PNW_hydro/FCRPS/res_specs2.xlsx',sheet_name='Libby_Daily',usecols='B:F',skiprows=3,header=None)
|
pandas.read_excel
|
import numpy
import pandas as pd
from sklearn import svm, datasets
from sklearn.model_selection import train_test_split
total_list = [];
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import time
fig = plt.figure()
ax1 = fig.add_subplot(1,1,1)
def animate(i):
global total_list
xar = []
yar = []
for index, row in total_list.iterrows():
if len(row)>1:
x = row['ItemId']
y = row['CLen']/row['RLen']
xar.append(int(x))
yar.append(int(y))
ax1.clear()
ax1.plot(xar,yar)
def init_metrics():
global total_list;
global_lists = [];
total_list = pd.DataFrame(global_lists,columns = ['SessionID', 'ItemId', 'RLen', 'CLen']);
def calc_metrics(rec_tmp, cdata):
global total_list
if (len(total_list) >0):
if (len(total_list[total_list['SessionID'].isin(cdata['SessionID'])]) > 0):
index = total_list[total_list['SessionID'].isin(cdata['SessionID'])]['ItemId'].index.tolist()
for i in index:
if (total_list.iloc[i]['ItemId'] == cdata.iloc[0]['ItemId']):
total_list.loc[i, 'CLen'] += 1
#If a recommendation is present
if (len(rec_tmp) > 0):
tmp_lsts = [];
# Append Recommendation data to empty list
tmp_lst = pd.DataFrame(tmp_lsts,columns = ['SessionID','ItemId','RLen', 'CLen']);
ts = pd.Series(rec_tmp['SessionID'])
tmp_lst['SessionID'] = ts.values
ts =
|
pd.Series(rec_tmp['ItemId'])
|
pandas.Series
|
from .static.Qua_config import *
from .Qua_assisFunc import *
import pandas as pd
import numpy as np
from .main_match import main_match
# function 1
def GetAllIdType(StudentList):
StudentList = pd.DataFrame(StudentList[1:], columns=StudentList[0])
id_col_name = ['身分別1','身分別2','身分別3','特殊身份別']
column = [str(x) for i in range(len(id_col_name)) for x in StudentList[id_col_name[i]].tolist() if str(x)!='None']
return sorted(list(set(column)))
# function 2
def DivideDF(ordered_IdList, StudentList, DormList):
StudentList = pd.DataFrame(StudentList[1:], columns=StudentList[0])
StudentList['學號'] = [str(i) for i in range(len(StudentList))] # TODO: remove!
DormList = pd.DataFrame(DormList[1:], columns=DormList[0])
StudentList = StudentList.drop(columns = Ori_ColumnToBeDrop)
BedNumDict = countBedNum(DormList)
# get get_str2int
id_dict = get_id_dict(ordered_IdList)
# StudentList = get_str2int(id_dict, StudentList) # string contain id & willingness
StudentList = get_id2int(id_dict, StudentList)
StudentList = get_willing2int(StudentList)
# divide in-out campus
StudentList = StudentList.sort_values(by = '校內外意願').reset_index(drop = True)
InCamNum = len(StudentList)-StudentList.groupby('校內外意願').count()['性別'][3]
InCam_df = StudentList.iloc[:InCamNum,:]
InCam_df = InCam_df.sort_values(by = '性別').reset_index(drop = True)
InCam_df['資格'] = [2 if (row['id_index']==1 and row['是否需要安排身障房間']=='是') else 0 for index,row in InCam_df.iterrows()]
# incampus divide boy-girl
GirlInCamNum = InCam_df.groupby(['性別']).size()['女性']
GirlInCam = InCam_df.iloc[:GirlInCamNum,:].sort_values(by='id_index').reset_index(drop=True)
BoyInCam = InCam_df.iloc[GirlInCamNum:,:].sort_values(by='id_index').reset_index(drop=True)
# WaitDF
WaitDF = StudentList.iloc[InCamNum:,:]
# get qualification of boy&girl df
GirlInCam = dealWithPreference(assign_qualificaiton(GirlInCam,BedNumDict))
BoyInCam = dealWithPreference(assign_qualificaiton(BoyInCam,BedNumDict))
GirlInCam = GirlInCam.sort_values(by='資格').reset_index(drop=True)
BoyInCam = BoyInCam.sort_values(by='資格').reset_index(drop=True)
# All-Wait DF
QuaGirlGroup = GirlInCam.groupby('資格').count()
NoQuaGirlNum = QuaGirlGroup['性別'][0]
QuaBoyGroup = BoyInCam.groupby('資格').count()
NoQuaBoyNum = QuaBoyGroup['性別'][0]
WaitAllDf = [GirlInCam.iloc[:NoQuaGirlNum,:],BoyInCam.iloc[:NoQuaBoyNum],WaitDF]
WaitDF = pd.concat(WaitAllDf)
# Output Girl&Boy df
GirlInCam = GirlInCam.iloc[NoQuaGirlNum:,:].drop(columns = AlgorithmNeedDrop).sort_values(by='id_index').reset_index(drop=True)
BoyInCam = BoyInCam.iloc[NoQuaBoyNum:,:].drop(columns = AlgorithmNeedDrop).sort_values(by='id_index').reset_index(drop=True)
GirlInCam['永久地址'] = Address2Nationality(GirlInCam['永久地址'],countryDict)
BoyInCam['永久地址'] = Address2Nationality(BoyInCam['永久地址'],countryDict)
# organize Wait df
WaitDF = WaitDF.drop(columns=Wait_Drop)
return BoyInCam, GirlInCam, WaitDF
def list2df(beds):
    columns = beds[0]
    data = beds[1:]
    df = pd.DataFrame(data, columns=columns)
    return df
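# Hedged sketch: the list-of-lists convention used throughout this module is
# "header row first, data rows after"; list2df is simply that convention as a helper.
# The toy values below are arbitrary.
def _example_list2df():
    beds = [["col_a", "col_b"],   # header row
            [1, "x"],
            [2, "y"]]
    return list2df(beds)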
def Match(BoyInQua, GirlInQua, beds):
beds_df = list2df(beds)
BoyInQua, GirlInQua = main_match(BoyInQua, GirlInQua, beds_df)
return BoyInQua, GirlInQua
# function4
def GetOutputDF(id_orderList, BoyQua, GirlQua, StudentList, WaitDF):
# BoyQua = pd.DataFrame(BoyQua[1:], columns=BoyQua[0])
# GirlQua = pd.DataFrame(GirlQua[1:], columns=GirlQua[0])
StudentList = pd.DataFrame(StudentList[1:], columns=StudentList[0])
StudentList['學號'] = [str(i) for i in range(len(StudentList))] # TODO: remove!
# WaitDF = pd.DataFrame(WaitDF[1:], columns=WaitDF[0])
# Divide WaitDF => campus,BOT
WaitDF = WaitDF.sort_values('校內外意願')
WillGroupNum = WaitDF.groupby('校內外意願')
CampusNum = WillGroupNum.count()['性別'][1] + WillGroupNum.count()['性別'][2]
NotBotNum = len(WaitDF) - WillGroupNum.count()['性別'][2] - WillGroupNum.count()['性別'][3]
Campus = WaitDF.iloc[:CampusNum,:].drop(columns = CampusWait_Drop_AsQua).sort_values('性別')
Bot = WaitDF.iloc[NotBotNum:,:]
# organize Campus
Campus['資格'] = [0 for i in range(len(Campus))]
CampusGirlNum = Campus.groupby('性別')['性別'].count().tolist()[0]
CampusBoy = OrderAssign(Campus.iloc[CampusGirlNum:])
CampusGirl = OrderAssign(Campus.iloc[:CampusGirlNum])
BoyQua['順位序號'] = [0 for i in range(len(BoyQua))]
GirlQua['順位序號'] = [0 for i in range(len(GirlQua))]
CampusBoy = pd.concat([BoyQua,CampusBoy]).sort_values(by='順位序號')
CampusGirl = pd.concat([GirlQua,CampusGirl]).sort_values(by='順位序號')
# get get_id2int
id_dict = get_id_dict(id_orderList)
# audit_dict = get_audit_dict(id_dict)
StudentList = get_id2int(id_dict, StudentList)
# drop and merge
CampusBoy = CampusBoy.drop(columns=Qua_Drop)
CampusGirl = CampusGirl.drop(columns=Qua_Drop)
StudentListMergeWithCampus = StudentList.drop(columns = Ori_DfDropForICampus)
CampusBoy = pd.merge(CampusBoy,StudentListMergeWithCampus,on=['學號']).reset_index(drop=True)
CampusGirl =
|
pd.merge(CampusGirl,StudentListMergeWithCampus,on=['學號'])
|
pandas.merge
|
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from sklearn import preprocessing
from . import utils
from . import timeseries
from . import pos
from . import txn
from .utils import APPROX_BDAYS_PER_MONTH
from functools import wraps
def plotting_context(func):
"""Decorator to set plotting context during function call."""
@wraps(func)
def call_w_context(*args, **kwargs):
set_context = kwargs.pop('set_context', True)
if set_context:
with context():
return func(*args, **kwargs)
else:
return func(*args, **kwargs)
return call_w_context
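# Hedged usage sketch (the decorated function below is illustrative, not part of pyfolio):
# any plotting helper wrapped with @plotting_context accepts set_context=False to skip
# the seaborn styling applied by context().
def _example_plotting_context_usage(returns):
    @plotting_context
    def _demo_plot(returns, ax=None):
        if ax is None:
            ax = plt.gca()
        timeseries.cum_returns(returns, starting_value=1.0).plot(ax=ax)
        return ax

    _demo_plot(returns)                             # styled via context()
    return _demo_plot(returns, set_context=False)   # caller keeps the default style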
def context(context='notebook', font_scale=1.5, rc=None):
"""Create pyfolio default plotting style context.
Under the hood, calls and returns seaborn.plotting_context() with
    some custom settings. Usually you would use it inside a with-statement.
Parameters
----------
context : str, optional
Name of seaborn context.
font_scale : float, optional
Scale font by factor font_scale.
rc : dict, optional
Config flags.
By default, {'lines.linewidth': 1.5,
'axes.facecolor': '0.995',
'figure.facecolor': '0.97'}
is being used and will be added to any
rc passed in, unless explicitly overriden.
Returns
-------
seaborn plotting context
Example
-------
>>> with pyfolio.plotting.context(font_scale=2):
>>> pyfolio.create_full_tear_sheet()
See also
--------
For more information, see seaborn.plotting_context().
"""
if rc is None:
rc = {}
rc_default = {'lines.linewidth': 1.5,
'axes.facecolor': '0.995',
'figure.facecolor': '0.97'}
# Add defaults if they do not exist
for name, val in rc_default.items():
rc.setdefault(name, val)
return sns.plotting_context(context=context, font_scale=font_scale,
rc=rc)
def plot_rolling_fama_french(
returns,
factor_returns=None,
rolling_window=APPROX_BDAYS_PER_MONTH * 6,
legend_loc='best',
ax=None, **kwargs):
"""Plots rolling Fama-French single factor betas.
Specifically, plots SMB, HML, and UMD vs. date with a legend.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
factor_returns : pd.DataFrame, optional
data set containing the Fama-French risk factors. See
utils.load_portfolio_risk_factors.
rolling_window : int, optional
The days window over which to compute the beta.
legend_loc : matplotlib.loc, optional
The location of the legend on the plot.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
num_months_str = '%.0f' % (rolling_window / APPROX_BDAYS_PER_MONTH)
ax.set_title(
"Rolling Fama-French Single Factor Betas (" +
num_months_str +
'-month)')
ax.set_ylabel('beta')
rolling_beta = timeseries.rolling_fama_french(
returns,
factor_returns=factor_returns,
rolling_window=rolling_window)
rolling_beta.plot(alpha=0.7, ax=ax, **kwargs)
ax.axhline(0.0, color='black')
ax.legend(['Small-Caps (SMB)',
'High-Growth (HML)',
'Momentum (UMD)'],
loc=legend_loc)
ax.set_ylim((-2.0, 2.0))
y_axis_formatter = FuncFormatter(utils.one_dec_places)
ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))
ax.axhline(0.0, color='black')
ax.set_xlabel('')
return ax
def plot_monthly_returns_heatmap(returns, ax=None, **kwargs):
"""
Plots a heatmap of returns by month.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to seaborn plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
monthly_ret_table = timeseries.aggregate_returns(returns,
'monthly')
monthly_ret_table = monthly_ret_table.unstack()
monthly_ret_table = np.round(monthly_ret_table, 3)
sns.heatmap(
monthly_ret_table.fillna(0) *
100.0,
annot=True,
annot_kws={
"size": 9},
alpha=1.0,
center=0.0,
cbar=False,
cmap=matplotlib.cm.RdYlGn,
ax=ax, **kwargs)
ax.set_ylabel('Year')
ax.set_xlabel('Month')
ax.set_title("Monthly Returns (%)")
return ax
def plot_annual_returns(returns, ax=None, **kwargs):
"""
Plots a bar graph of returns by year.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
x_axis_formatter = FuncFormatter(utils.percentage)
ax.xaxis.set_major_formatter(FuncFormatter(x_axis_formatter))
ax.tick_params(axis='x', which='major', labelsize=10)
ann_ret_df = pd.DataFrame(
timeseries.aggregate_returns(
returns,
'yearly'))
ax.axvline(
100 *
ann_ret_df.values.mean(),
color='steelblue',
linestyle='--',
lw=4,
alpha=0.7)
(100 * ann_ret_df.sort_index(ascending=False)
).plot(ax=ax, kind='barh', alpha=0.70, **kwargs)
ax.axvline(0.0, color='black', linestyle='-', lw=3)
ax.set_ylabel('Year')
ax.set_xlabel('Returns')
ax.set_title("Annual Returns")
ax.legend(['mean'])
return ax
def plot_monthly_returns_dist(returns, ax=None, **kwargs):
"""
Plots a distribution of monthly returns.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
x_axis_formatter = FuncFormatter(utils.percentage)
ax.xaxis.set_major_formatter(FuncFormatter(x_axis_formatter))
ax.tick_params(axis='x', which='major', labelsize=10)
monthly_ret_table = timeseries.aggregate_returns(returns, 'monthly')
ax.hist(
100 * monthly_ret_table,
color='orangered',
alpha=0.80,
bins=20,
**kwargs)
ax.axvline(
100 * monthly_ret_table.mean(),
color='gold',
linestyle='--',
lw=4,
alpha=1.0)
ax.axvline(0.0, color='black', linestyle='-', lw=3, alpha=0.75)
ax.legend(['mean'])
ax.set_ylabel('Number of months')
ax.set_xlabel('Returns')
ax.set_title("Distribution of Monthly Returns")
return ax
def plot_holdings(returns, positions, legend_loc='best', ax=None, **kwargs):
"""Plots total amount of stocks with an active position, either short
or long.
Displays daily total, daily average per month, and all-time daily
average.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
positions : pd.DataFrame, optional
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
legend_loc : matplotlib.loc, optional
The location of the legend on the plot.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
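    # Count non-cash instruments with a nonzero position each day; also show the monthly average.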
positions = positions.copy().drop('cash', axis='columns')
df_holdings = positions.apply(lambda x: np.sum(x != 0), axis='columns')
    df_holdings_by_month = df_holdings.resample('1M').mean()
df_holdings.plot(color='steelblue', alpha=0.6, lw=0.5, ax=ax, **kwargs)
df_holdings_by_month.plot(
color='orangered',
alpha=0.5,
lw=2,
ax=ax,
**kwargs)
ax.axhline(
df_holdings.values.mean(),
color='steelblue',
ls='--',
lw=3,
alpha=1.0)
ax.set_xlim((returns.index[0], returns.index[-1]))
ax.legend(['Daily holdings',
'Average daily holdings, by month',
'Average daily holdings, net'],
loc=legend_loc)
ax.set_title('Holdings per Day')
ax.set_ylabel('Amount of holdings per day')
ax.set_xlabel('')
return ax
def plot_drawdown_periods(returns, top=10, ax=None, **kwargs):
"""
Plots cumulative returns highlighting top drawdown periods.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
top : int, optional
        Number of top drawdown periods to plot (default 10).
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
y_axis_formatter = FuncFormatter(utils.one_dec_places)
ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))
df_cum_rets = timeseries.cum_returns(returns, starting_value=1.0)
df_drawdowns = timeseries.gen_drawdown_table(returns, top=top)
df_cum_rets.plot(ax=ax, **kwargs)
lim = ax.get_ylim()
colors = sns.cubehelix_palette(len(df_drawdowns))[::-1]
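    # Shade each top drawdown from its peak date to its recovery date over the cumulative-returns curve.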
for i, (peak, recovery) in df_drawdowns[
['peak date', 'recovery date']].iterrows():
if pd.isnull(recovery):
recovery = returns.index[-1]
ax.fill_between((peak, recovery),
lim[0],
lim[1],
alpha=.4,
color=colors[i])
ax.set_title('Top %i Drawdown Periods' % top)
ax.set_ylabel('Cumulative returns')
    ax.legend(['Portfolio'], loc='upper left')
ax.set_xlabel('')
return ax
def plot_drawdown_underwater(returns, ax=None, **kwargs):
"""Plots how far underwaterr returns are over time, or plots current
drawdown vs. date.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
y_axis_formatter = FuncFormatter(utils.percentage)
ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))
df_cum_rets = timeseries.cum_returns(returns, starting_value=1.0)
running_max = np.maximum.accumulate(df_cum_rets)
underwater = -100 * ((running_max - df_cum_rets) / running_max)
(underwater).plot(ax=ax, kind='area', color='coral', alpha=0.7, **kwargs)
ax.set_ylabel('Drawdown')
ax.set_title('Underwater Plot')
ax.set_xlabel('')
return ax
def show_perf_stats(returns, factor_returns, live_start_date=None):
"""Prints some performance metrics of the strategy.
- Shows amount of time the strategy has been run in backtest and
out-of-sample (in live trading).
- Shows Omega ratio, max drawdown, Calmar ratio, annual return,
stability, Sharpe ratio, annual volatility, alpha, and beta.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
live_start_date : datetime, optional
The point in time when the strategy began live trading, after
its backtest period.
factor_returns : pd.Series
Daily noncumulative returns of the benchmark.
- This is in the same style as returns.
"""
if live_start_date is not None:
live_start_date = utils.get_utc_timestamp(live_start_date)
returns_backtest = returns[returns.index < live_start_date]
returns_live = returns[returns.index > live_start_date]
perf_stats_live = np.round(timeseries.perf_stats(
returns_live, returns_style='arithmetic'), 2)
perf_stats_live_ab = np.round(
timeseries.calc_alpha_beta(returns_live, factor_returns), 2)
perf_stats_live.loc['alpha'] = perf_stats_live_ab[0]
perf_stats_live.loc['beta'] = perf_stats_live_ab[1]
perf_stats_live.columns = ['Out_of_Sample']
perf_stats_all = np.round(timeseries.perf_stats(
returns, returns_style='arithmetic'), 2)
perf_stats_all_ab = np.round(
timeseries.calc_alpha_beta(returns, factor_returns), 2)
perf_stats_all.loc['alpha'] = perf_stats_all_ab[0]
perf_stats_all.loc['beta'] = perf_stats_all_ab[1]
perf_stats_all.columns = ['All_History']
print('Out-of-Sample Months: ' + str(int(len(returns_live) / 21)))
else:
returns_backtest = returns
print('Backtest Months: ' + str(int(len(returns_backtest) / 21)))
perf_stats = np.round(timeseries.perf_stats(
returns_backtest, returns_style='arithmetic'), 2)
perf_stats_ab = np.round(
timeseries.calc_alpha_beta(returns_backtest, factor_returns), 2)
perf_stats.loc['alpha'] = perf_stats_ab[0]
perf_stats.loc['beta'] = perf_stats_ab[1]
perf_stats.columns = ['Backtest']
if live_start_date is not None:
perf_stats = perf_stats.join(perf_stats_live,
how='inner')
perf_stats = perf_stats.join(perf_stats_all,
how='inner')
print(perf_stats)
def plot_rolling_returns(
returns,
factor_returns=None,
live_start_date=None,
cone_std=None,
legend_loc='best',
volatility_match=False,
ax=None, **kwargs):
"""Plots cumulative rolling returns versus some benchmarks'.
Backtest returns are in green, and out-of-sample (live trading)
returns are in red.
Additionally, a linear cone plot may be added to the out-of-sample
returns region.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
factor_returns : pd.Series, optional
Daily noncumulative returns of a risk factor.
- This is in the same style as returns.
live_start_date : datetime, optional
The point in time when the strategy began live trading, after
its backtest period.
cone_std : float, or tuple, optional
        If float, the standard deviation to use for the cone plots.
        If tuple, the standard deviation values to use for the cone plots.
- The cone is a normal distribution with this standard deviation
centered around a linear regression.
legend_loc : matplotlib.loc, optional
The location of the legend on the plot.
volatility_match : bool, optional
Whether to normalize the volatility of the returns to those of the
benchmark returns. This helps compare strategies with different
volatilities. Requires passing of benchmark_rets.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
def draw_cone(returns, num_stdev, live_start_date, ax):
cone_df = timeseries.cone_rolling(
returns,
num_stdev=num_stdev,
cone_fit_end_date=live_start_date)
cone_in_sample = cone_df[cone_df.index < live_start_date]
cone_out_of_sample = cone_df[cone_df.index > live_start_date]
cone_out_of_sample = cone_out_of_sample[
cone_out_of_sample.index < returns.index[-1]]
ax.fill_between(cone_out_of_sample.index,
cone_out_of_sample.sd_down,
cone_out_of_sample.sd_up,
color='steelblue', alpha=0.25)
return cone_in_sample, cone_out_of_sample
if ax is None:
ax = plt.gca()
if volatility_match and factor_returns is None:
        raise ValueError('volatility_match requires passing of '
                         'factor_returns.')
elif volatility_match and factor_returns is not None:
bmark_vol = factor_returns.loc[returns.index].std()
returns = (returns / returns.std()) * bmark_vol
df_cum_rets = timeseries.cum_returns(returns, 1.0)
y_axis_formatter = FuncFormatter(utils.one_dec_places)
ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))
if factor_returns is not None:
timeseries.cum_returns(factor_returns[df_cum_rets.index], 1.0).plot(
lw=2, color='gray', label=factor_returns.name, alpha=0.60, ax=ax, **kwargs)
if live_start_date is not None:
live_start_date = utils.get_utc_timestamp(live_start_date)
if (live_start_date is None) or (df_cum_rets.index[-1] <=
live_start_date):
df_cum_rets.plot(lw=3, color='forestgreen', alpha=0.6,
label='Backtest', ax=ax, **kwargs)
else:
df_cum_rets[:live_start_date].plot(
lw=3, color='forestgreen', alpha=0.6,
label='Backtest', ax=ax, **kwargs)
df_cum_rets[live_start_date:].plot(
lw=4, color='red', alpha=0.6,
label='Live', ax=ax, **kwargs)
if cone_std is not None:
# check to see if cone_std was passed as a single value and,
# if so, just convert to list automatically
if isinstance(cone_std, float):
cone_std = [cone_std]
for cone_i in cone_std:
cone_in_sample, cone_out_of_sample = draw_cone(
returns,
cone_i,
live_start_date,
ax)
cone_in_sample['line'].plot(
ax=ax,
ls='--',
label='Backtest trend',
lw=2,
color='forestgreen',
alpha=0.7,
**kwargs)
cone_out_of_sample['line'].plot(
ax=ax,
ls='--',
label='Predicted trend',
lw=2,
color='red',
alpha=0.7,
**kwargs)
ax.axhline(1.0, linestyle='--', color='black', lw=2)
ax.set_ylabel('Cumulative returns')
ax.set_title('Cumulative Returns')
ax.legend(loc=legend_loc)
ax.set_xlabel('')
return ax
def plot_rolling_beta(returns, factor_returns, legend_loc='best',
ax=None, **kwargs):
"""
Plots the rolling 6-month and 12-month beta versus date.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
factor_returns : pd.Series, optional
Daily noncumulative returns of the benchmark.
- This is in the same style as returns.
legend_loc : matplotlib.loc, optional
The location of the legend on the plot.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
y_axis_formatter = FuncFormatter(utils.one_dec_places)
ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))
ax.set_title("Rolling Portfolio Beta to " + factor_returns.name)
ax.set_ylabel('Beta')
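    # Overlay the 6-month and 12-month rolling betas of the strategy against the benchmark.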
rb_1 = timeseries.rolling_beta(
returns, factor_returns, rolling_window=APPROX_BDAYS_PER_MONTH * 6)
rb_1.plot(color='steelblue', lw=3, alpha=0.6, ax=ax, **kwargs)
rb_2 = timeseries.rolling_beta(
returns, factor_returns, rolling_window=APPROX_BDAYS_PER_MONTH * 12)
rb_2.plot(color='grey', lw=3, alpha=0.4, ax=ax, **kwargs)
ax.set_ylim((-2.5, 2.5))
ax.axhline(rb_1.mean(), color='steelblue', linestyle='--', lw=3)
ax.axhline(0.0, color='black', linestyle='-', lw=2)
ax.set_xlabel('')
ax.legend(['6-mo',
'12-mo'],
loc=legend_loc)
return ax
def plot_rolling_sharpe(returns, rolling_window=APPROX_BDAYS_PER_MONTH * 6,
legend_loc='best', ax=None, **kwargs):
"""
Plots the rolling Sharpe ratio versus date.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
rolling_window : int, optional
The days window over which to compute the sharpe ratio.
legend_loc : matplotlib.loc, optional
The location of the legend on the plot.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
y_axis_formatter = FuncFormatter(utils.one_dec_places)
ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))
rolling_sharpe_ts = timeseries.rolling_sharpe(
returns, rolling_window)
rolling_sharpe_ts.plot(alpha=.7, lw=3, color='orangered', ax=ax,
**kwargs)
ax.set_title('Rolling Sharpe ratio (6-month)')
ax.axhline(
rolling_sharpe_ts.mean(),
color='steelblue',
linestyle='--',
lw=3)
ax.axhline(0.0, color='black', linestyle='-', lw=3)
ax.set_ylim((-3.0, 6.0))
ax.set_ylabel('Sharpe ratio')
ax.set_xlabel('')
ax.legend(['Sharpe', 'Average'],
loc=legend_loc)
return ax
def plot_gross_leverage(returns, gross_lev, ax=None, **kwargs):
"""Plots gross leverage versus date.
Gross leverage is the sum of long and short exposure per share
divided by net asset value.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
gross_lev : pd.Series, optional
The leverage of a strategy.
- See full explanation in tears.create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
gross_lev.plot(alpha=0.8, lw=0.5, color='g', legend=False, ax=ax,
**kwargs)
ax.axhline(gross_lev.mean(), color='g', linestyle='--', lw=3,
alpha=1.0)
ax.set_title('Gross Leverage')
ax.set_ylabel('Gross Leverage')
ax.set_xlabel('')
return ax
def plot_exposures(returns, positions_alloc, ax=None, **kwargs):
"""Plots a cake chart of the long and short exposure.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
positions_alloc : pd.DataFrame
Portfolio allocation of positions. See
pos.get_percent_alloc.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
df_long_short = pos.get_long_short_pos(positions_alloc)
df_long_short.plot(
kind='area', color=['lightblue', 'green'], alpha=1.0,
ax=ax, **kwargs)
df_cum_rets = timeseries.cum_returns(returns, starting_value=1)
ax.set_xlim((df_cum_rets.index[0], df_cum_rets.index[-1]))
ax.set_title("Long/Short Exposure")
ax.set_ylabel('Exposure')
ax.set_xlabel('')
return ax
def show_and_plot_top_positions(returns, positions_alloc,
show_and_plot=2, hide_positions=False,
legend_loc='real_best', ax=None,
**kwargs):
"""Prints and/or plots the exposures of the top 10 held positions of
all time.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
positions_alloc : pd.DataFrame
Portfolio allocation of positions. See pos.get_percent_alloc.
show_and_plot : int, optional
By default, this is 2, and both prints and plots.
If this is 0, it will only plot; if 1, it will only print.
hide_positions : bool, optional
If True, will not output any symbol names.
legend_loc : matplotlib.loc, optional
The location of the legend on the plot.
By default, the legend will display below the plot.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes, conditional
The axes that were plotted on.
"""
df_top_long, df_top_short, df_top_abs = pos.get_top_long_short_abs(
positions_alloc)
if show_and_plot == 1 or show_and_plot == 2:
print("\n")
print('Top 10 long positions of all time (and max%)')
print(pd.DataFrame(df_top_long).index.values)
print(np.round(pd.DataFrame(df_top_long)[0].values, 3))
print("\n")
print('Top 10 short positions of all time (and max%)')
print(pd.DataFrame(df_top_short).index.values)
print(np.round(pd.DataFrame(df_top_short)[0].values, 3))
print("\n")
print('Top 10 positions of all time (and max%)')
print(
|
pd.DataFrame(df_top_abs)
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
import requests
import shutil
from typing import *
import custom_paths
import utils
from data import DataInfo
import openml
import mat4py
def download_if_not_exists(url: str, dest: str):
"""
    Simple function for downloading a file from a URL if no file exists at the destination path.
:param url: URL of the file to download.
:param dest: Path where to save the downloaded file.
"""
# following https://dzone.com/articles/simple-examples-of-downloading-files-using-python
utils.ensureDir(dest)
if not utils.existsFile(dest):
print('Downloading ' + url, flush=True)
# file = requests.get(url)
# open(dest, 'wb').write(file.content)
r = requests.get(url, stream=True)
with open(dest, 'wb') as f:
print('Progress (dot = 1 MB): ', end='', flush=True)
for ch in r.iter_content(chunk_size=1024**2):
print('.', end='', flush=True)
f.write(ch)
print(flush=True)
class PandasTask:
"""
This class represents a task (data set with indicated target variable) given by Pandas DataFrames.
Additionally, a dedicated train-test split can be specified
and the name of the data set needs to be specified for saving.
This class provides a variety of methods for altering the task by different preprocessing methods.
"""
def __init__(self, x_df: pd.DataFrame, y_df: pd.Series, ds_name: str, cat_indicator: Optional[List[bool]] = None,
train_test_split: Optional[int] = None):
"""
:param x_df: DataFrame containing the inputs (covariates).
:param y_df: pd.Series containing the targets.
:param ds_name: Name for saving the data set.
:param cat_indicator: Optional.
One may specify a list of booleans which indicate whether each column of x is a category (True) or not (False).
Otherwise, the column types in x_df will be used to decide whether a column is categorical or not.
:param train_test_split: Optional. An integer can be specified as the index of the first test sample,
if the data set has a dedicated test set part at the end.
"""
if cat_indicator is None:
cat_indicator = [not
|
pd.api.types.is_numeric_dtype(x_df[x_df.columns[i]])
|
pandas.api.types.is_numeric_dtype
|
import streamlit as st
import streamlit.components.v1 as stc
import time
from random import random
import numpy as np
import pandas as pd
import altair as alt
from altair import Chart, X, Y, Axis, SortField, OpacityValue
# 2020-10-25 edit: was `from st.annotated_text import annotated_text`
from annotated_text import annotated_text
import st_state
def main():
st.beta_set_page_config(
page_title="AB Testing", # String or None. Strings get appended with "• Streamlit".
page_icon="🎲", # String, anything supported by st.image, or None.
layout="centered", # Can be "centered" or "wide". In the future also "dashboard", etc.
initial_sidebar_state="auto") # Can be "auto", "expanded", "collapsed"
# load state object
state = st_state._get_state()
# ==================== Nav Bar ==================== #
if state.nav is None: state.nav = 0
nav = state.nav
part1, part2, part3 = st.beta_columns([1, 1, 1])
pages = ['⚪ Part I: Probability ',
'⚪ Part II: Error ',
'⚪ Part III: P-values ']
pages[nav] = '🔴 ' + pages[nav][2:]
with part1:
if st.button(pages[0]): state.nav = 0
with part2:
if st.button(pages[1]): state.nav = 1
with part3:
if st.button(pages[2]): state.nav = 2
st.markdown('---')
if nav == 0: ############ PART I ############
st.header('👩🔬 Exploring Intuitions Around AB Testing')
        st.write('In AB testing we want to know how often an event occurs and compare it against a competing design. In practice, however, we can only observe the measured outcome of an event, not the true conversion rate behind it.')
st.write('For example, we may observe that 2/10 visitors click a button. So how many clicks would we expect if we had 100 visitors? By generating random numbers we can simulate behaviour on our website.')
st.header('🎲 Random Click Generator')
conversion_rate = st.number_input('True Conversion Rate', value=0.2)
n_samples = st.number_input('Sample size (people)', value=100)
# ============== Setup placeholder chart =============== #
res = []
df = pd.DataFrame()
df['A'] = pd.Series(res)
df['conv'] = (df['A']>(1-conversion_rate)).astype(int)
df = df.sort_values('conv')
df['converted'] = df['conv'].map({1:'Yes', 0:'No'})
scatter = alt.Chart(df).mark_circle(size=60).encode(
x=alt.X('converted'),
y=alt.Y('A')
).properties(width=300, height=300)
hist = alt.Chart(df).mark_bar(size=40).encode(
alt.X('count()'),
alt.Y("conv"),
).properties(width=300, height=300)
scatter_plot = st.altair_chart(scatter | hist, use_container_width=True)
if st.button('🔴 Run'):
st.code(f'if the random number is > {1-conversion_rate:0.2f}: \n\tthen the user clicks the button')
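            # Simulate each visitor as a uniform random draw; a draw above (1 - rate) counts as a click.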
for i in range(n_samples):
res.append(random())
df = pd.DataFrame()
df['A'] = pd.Series(res)
df['conv'] = (df['A']>(1-conversion_rate)).astype(int)
df = df.sort_values('conv')
df['converted'] = df['conv'].map({1:'Yes', 0:'No'})
scatter = alt.Chart(df.reset_index()).mark_circle(size=60).encode(
x=alt.X('index'),
y=alt.Y('A'),
color=alt.Color('converted', title='', legend=None)
).properties(width=300, height=300)
x_max = max(df.converted.value_counts().values)
hist = alt.Chart(df).mark_bar(size=40).encode(
alt.X('count()', scale=alt.Scale(domain=[0, x_max], clamp=True)),
alt.Y("conv"),
color=alt.Color('converted', title='', legend=None)
).properties(width=300, height=300)
text = hist.mark_text(
align='left', fontSize=12,
baseline='middle',
dx = 3,
color='black' # Nudges text to right so it doesn't appear on top of the bar
).encode(
x='count():Q',
text=alt.Text('count()', format='.0f')
)
scatter_plot.altair_chart(scatter | (hist + text), use_container_width=False)
if n_samples < 20: wait_period = 0.20
else: wait_period = 1 / n_samples
time.sleep(wait_period)
results_yes = df[df.converted=='Yes']
results_no = df[df.converted=='No']
result_text_1 = f'Observed Conversion Rate = {df.conv.mean():0.2f}'
st.info(f'True Conversion rate = {conversion_rate:0.2f}')
if df.shape[0] >1:
annotated_text("Simulation 👩🔬 ",
(result_text_1, f"{len(results_yes)}/{df.shape[0]} (random numbers > {1-conversion_rate:0.2f})", "#fea"))
elif nav == 1: ############ PART II ############
st.header("👩🔬 Testing with Variations")
st.write('In an AB test, we wish to compare the performance of two design variations with the same function.')
# st.image("img/ab_traffic_bw.JPG", width = 700)
st.write('When we measure the click-through rate of these two variations, we can calculate the observed conversion.')
st.write('When we simulate the true conversion rates, how frequently does the outcome represent the truth? By running the experiment numerous times, we see how many false positives occur.\n\n')
# ================== AB Test Sliders ================== #
col1, col2 = st.beta_columns([1, 1]) # first column 1x the size of second
with col1:
st.header("📺 Variation A")
a_conversion = st.slider('True Conversion Rate', 0., 1., 0.32)
with col2:
st.header("📺 Variation B")
b_conversion = st.slider('True Conversion Rate', 0., 1., 0.36)
st.write('')
st.write('')
# ============== Setup placeholder chart =============== #
dx = pd.DataFrame([[a_conversion, b_conversion] for x in range(10)], columns=["A_Conv", "B_Conv"])
dx.index.name = "x"
y_max = max([a_conversion,b_conversion])+0.1
y_min = max(0, min([a_conversion,b_conversion])-0.15)
data = dx.reset_index().melt('x')
lines = alt.Chart(data).mark_line().encode(
x=alt.X('x', title='Iteration', axis=alt.Axis(tickMinStep=1)),
y=alt.Y('value', title='Conversion', scale=alt.Scale(domain=[y_min, y_max])),
color=alt.Color('variable', title=''))
labels = lines.mark_text(align='left', baseline='middle', dx=3).encode(
alt.X('x:Q', aggregate='max'),
text='value:Q')
line_plot = st.altair_chart(lines+labels, use_container_width=True)
# ==================== User inputs ==================== #
n_samples = st.number_input('Samples (i.e. number of customers)', min_value=0, max_value=5001, value=500)
n_experiments = st.number_input('Iterations (how many times to run the experiment?)', min_value=0, max_value=1000, value=50)
res_a, res_b = [], []
if st.button('🔴 Run'):
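            # Repeat the experiment n_experiments times, recording each arm's observed conversion rate.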
for i in range(n_experiments):
A = [random() for x in range(n_samples)]
B = [random() for x in range(n_samples)]
df = pd.DataFrame()
df['A'] = pd.Series(A)
df['A_conv'] = (df['A']>(1-a_conversion)).astype(int)
df['B'] = pd.Series(B)
df['B_conv'] = (df['B']>(1-b_conversion)).astype(int)
res_a.append(df.A_conv.mean())
res_b.append(df.B_conv.mean())
dx = pd.DataFrame()
dx[f'A_Conv'] = pd.Series(res_a)
dx[f'B_Conv'] = pd.Series(res_b)
d_res = dx.copy()
dx.index.name = "x"
dx = dx.reset_index().melt('x') # nice shape for altair
base = alt.Chart(dx)
lines = alt.Chart(dx).mark_line().encode(
x=alt.X('x', title='Iterations', axis=alt.Axis(tickMinStep=1)),
y=alt.Y('value', title='Conversion', scale=alt.Scale(domain=[y_min, y_max])),
color=alt.Color('variable', title=''),
tooltip = [alt.Tooltip('x:N'), alt.Tooltip('value:N')]
)
rule = base.mark_rule(strokeDash=[5,3]).encode(
y='average(value)',
color=alt.Color('variable'),
opacity=alt.value(0.4),
size=alt.value(2)
)
hover = alt.selection_single(
fields=["x"],
nearest=True,
on="mouseover",
empty="none",
clear="mouseout"
)
tooltips = alt.Chart(dx).transform_pivot(
"x", "value", groupby=["x"]
).mark_rule().encode(
x='x:Q',
opacity=alt.condition(hover, alt.value(0.3), alt.value(0)),
tooltip=["x:Q", "value"]
).add_selection(hover)
labels = lines.mark_text(align='left', baseline='middle', dx=3).encode()
line_plot.altair_chart(lines + rule + labels + tooltips, use_container_width=True)
if n_experiments < 20: wait_period = 0.05
else: wait_period = 1 / n_experiments
time.sleep(wait_period)
results_text_2 = f"{d_res[d_res['B_Conv'] < d_res['A_Conv']].shape[0]}/{n_experiments}"
if df.shape[0] >1:
annotated_text("Experiment Results 👨🔬 ",
(results_text_2,
"False positives", "#fea"))
st.text(f"Simulation failures: {d_res[d_res['B_Conv'] < d_res['A_Conv']].shape[0]}/{n_experiments} (false positives)")
elif nav == 2: ######## PART III ############
st.header('Part 3: Statistical Significance')
st.write('P-value calculations assume that the null hypothesis is true, and uses that assumption to determine the likelihood of obtaining your observed sample data.')
st.warning("Given the null hypothesis (no difference except random variation), what is the likelihood that we would see these results?")
        st.write('In simpler terms, the P-value measures how often random chance alone would produce a difference at least this large.')
        st.info('👉 A small P-value means a difference this large would rarely appear by chance alone.')
st.write('So in our generated simulation, how many times do false positives occur?')
# ================== AB Test Sliders ================== #
col1, col2 = st.beta_columns([1, 1])
with col1:
st.header("📺 Variation A")
a_conversion = st.slider('True Conversion Rate',0., 1., 0.32)
with col2:
st.header("📺 Variation B")
b_conversion = st.slider('True Conversion Rate',0., 1., 0.36)
# Simulate outcomes to find false positive rate
        n_samples = st.number_input('Samples (i.e. number of customers)', value=1000)
n_experiments = st.number_input('Iterations (how many times to run the experiment?)', min_value=0, max_value=1000, value=100)
simulations = st.number_input('Simulations (n × iterations)', min_value=0, max_value=1000, value=20)
x = []
res_a, res_b = [], []
if st.button('🔴 Run'):
with st.spinner('Calculating...'):
for i in range(simulations):
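                    # Each simulation repeats the experiment loop to estimate how often a false positive occurs.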
res_a, res_b = [], []
for j in range(n_experiments):
A = [random() for x in range(n_samples)]
B = [random() for x in range(n_samples)]
df =
|
pd.DataFrame()
|
pandas.DataFrame
|