prompt (string, lengths 19–1.03M) | completion (string, lengths 4–2.12k) | api (string, lengths 8–90) |
---|---|---|
import sys
import time
import numpy as np
import pandas as pd
import math
import warnings
from transformUtils import transformFUnion
# Make numpy values easier to read.
np.set_printoptions(precision=3, suppress=True)
warnings.filterwarnings('ignore') #cleaner, but not recommended
def readNprep():
# Read, isolate the target, and combine training and test data
train = pd.read_csv("~/datasets/homeCreditTrain.csv", delimiter=",", header=None)
test = pd.read_csv("~/datasets/homeCreditTest.csv", delimiter=",", header=None)
train = train.iloc[1:,:] #remove header
train.drop(1, axis=1, inplace=True) #remove target
train.columns = [*range(0,121)] #rename header from 0 to 120
test = test.iloc[1:,:]
home =
| pd.concat([train, test]) | pandas.concat |
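For readability, here is a minimal, self-contained sketch of the `pandas.concat` call that the completion cell supplies; the toy frames and the `ignore_index=True` flag are illustrative assumptions and are not part of the original row, whose completion is simply `pd.concat([train, test])`.
import pandas as pd
# Two small stand-in frames for the train/test splits read in readNprep()
train = pd.DataFrame({0: [1, 2], 1: ["a", "b"]})
test = pd.DataFrame({0: [3, 4], 1: ["c", "d"]})
# pd.concat stacks the frames row-wise; ignore_index=True renumbers the rows
home = pd.concat([train, test], ignore_index=True)
print(home.shape)  # (4, 2)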
# pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
from pandas.compat import range, lrange, lzip, u, zip
import operator
import re
import nose
import warnings
import os
import numpy as np
from numpy.testing import assert_array_equal
from pandas import period_range, date_range
from pandas.core.index import (Index, Float64Index, Int64Index, MultiIndex,
InvalidIndexError, NumericIndex)
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.period import PeriodIndex
from pandas.core.series import Series
from pandas.util.testing import (assert_almost_equal, assertRaisesRegexp,
assert_copy)
from pandas import compat
from pandas.compat import long
import pandas.util.testing as tm
import pandas.core.config as cf
from pandas.tseries.index import _to_m8
import pandas.tseries.offsets as offsets
import pandas as pd
from pandas.lib import Timestamp
class Base(object):
""" base class for index sub-class tests """
_holder = None
_compat_props = ['shape', 'ndim', 'size', 'itemsize', 'nbytes']
def verify_pickle(self,index):
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_pickle_compat_construction(self):
# this is testing for pickle compat
if self._holder is None:
return
# need an object to create with
self.assertRaises(TypeError, self._holder)
def test_numeric_compat(self):
idx = self.create_index()
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : idx * 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : 1 * idx)
div_err = "cannot perform __truediv__" if compat.PY3 else "cannot perform __div__"
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : idx / 1)
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : 1 / idx)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : idx // 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : 1 // idx)
def test_boolean_context_compat(self):
# boolean context compat
idx = self.create_index()
def f():
if idx:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_ndarray_compat_properties(self):
idx = self.create_index()
self.assertTrue(idx.T.equals(idx))
self.assertTrue(idx.transpose().equals(idx))
values = idx.values
for prop in self._compat_props:
self.assertEqual(getattr(idx, prop), getattr(values, prop))
# test for validity
idx.nbytes
idx.values.nbytes
class TestIndex(Base, tm.TestCase):
_holder = Index
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(
unicodeIndex = tm.makeUnicodeIndex(100),
strIndex = tm.makeStringIndex(100),
dateIndex = tm.makeDateIndex(100),
intIndex = tm.makeIntIndex(100),
floatIndex = tm.makeFloatIndex(100),
boolIndex = Index([True,False]),
empty = Index([]),
tuples = MultiIndex.from_tuples(lzip(['foo', 'bar', 'baz'],
[1, 2, 3]))
)
for name, ind in self.indices.items():
setattr(self, name, ind)
def create_index(self):
return Index(list('abcde'))
def test_wrong_number_names(self):
def testit(ind):
ind.names = ["apple", "banana", "carrot"]
for ind in self.indices.values():
assertRaisesRegexp(ValueError, "^Length", testit, ind)
def test_set_name_methods(self):
new_name = "This is the new name for this index"
indices = (self.dateIndex, self.intIndex, self.unicodeIndex,
self.empty)
for ind in indices:
original_name = ind.name
new_ind = ind.set_names([new_name])
self.assertEqual(new_ind.name, new_name)
self.assertEqual(ind.name, original_name)
res = ind.rename(new_name, inplace=True)
# should return None
self.assertIsNone(res)
self.assertEqual(ind.name, new_name)
self.assertEqual(ind.names, [new_name])
#with assertRaisesRegexp(TypeError, "list-like"):
# # should still fail even if it would be the right length
# ind.set_names("a")
with assertRaisesRegexp(ValueError, "Level must be None"):
ind.set_names("a", level=0)
# rename in place just leaves tuples and other containers alone
name = ('A', 'B')
ind = self.intIndex
ind.rename(name, inplace=True)
self.assertEqual(ind.name, name)
self.assertEqual(ind.names, [name])
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.strIndex).__name__):
hash(self.strIndex)
def test_new_axis(self):
new_index = self.dateIndex[None, :]
self.assertEqual(new_index.ndim, 2)
tm.assert_isinstance(new_index, np.ndarray)
def test_copy_and_deepcopy(self):
from copy import copy, deepcopy
for func in (copy, deepcopy):
idx_copy = func(self.strIndex)
self.assertIsNot(idx_copy, self.strIndex)
self.assertTrue(idx_copy.equals(self.strIndex))
new_copy = self.strIndex.copy(deep=True, name="banana")
self.assertEqual(new_copy.name, "banana")
new_copy2 = self.intIndex.copy(dtype=int)
self.assertEqual(new_copy2.dtype.kind, 'i')
def test_duplicates(self):
idx = Index([0, 0, 0])
self.assertFalse(idx.is_unique)
def test_sort(self):
self.assertRaises(TypeError, self.strIndex.sort)
def test_mutability(self):
self.assertRaises(TypeError, self.strIndex.__setitem__, 0, 'foo')
def test_constructor(self):
# regular instance creation
tm.assert_contains_all(self.strIndex, self.strIndex)
tm.assert_contains_all(self.dateIndex, self.dateIndex)
# casting
arr = np.array(self.strIndex)
index = Index(arr)
tm.assert_contains_all(arr, index)
self.assert_numpy_array_equal(self.strIndex, index)
# copy
arr = np.array(self.strIndex)
index = Index(arr, copy=True, name='name')
tm.assert_isinstance(index, Index)
self.assertEqual(index.name, 'name')
assert_array_equal(arr, index)
arr[0] = "SOMEBIGLONGSTRING"
self.assertNotEqual(index[0], "SOMEBIGLONGSTRING")
# what to do here?
# arr = np.array(5.)
# self.assertRaises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
self.assertRaises(TypeError, Index, 0)
def test_constructor_from_series(self):
expected = DatetimeIndex([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
s = Series([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
result = Index(s)
self.assertTrue(result.equals(expected))
result = DatetimeIndex(s)
self.assertTrue(result.equals(expected))
# GH 6273
# create from a series, passing a freq
s = Series(pd.to_datetime(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']))
result = DatetimeIndex(s, freq='MS')
expected = DatetimeIndex(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990'],freq='MS')
self.assertTrue(result.equals(expected))
df = pd.DataFrame(np.random.rand(5,3))
df['date'] = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']
result = DatetimeIndex(df['date'], freq='MS')
# GH 6274
# infer freq of same
result = pd.infer_freq(df['date'])
self.assertEqual(result,'MS')
def test_constructor_ndarray_like(self):
# GH 5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
class ArrayLike(object):
def __init__(self, array):
self.array = array
def __array__(self, dtype=None):
return self.array
for array in [np.arange(5),
np.array(['a', 'b', 'c']),
date_range('2000-01-01', periods=3).values]:
expected = pd.Index(array)
result = pd.Index(ArrayLike(array))
self.assertTrue(result.equals(expected))
def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
rs = Index(xp)
assert_array_equal(rs, xp)
tm.assert_isinstance(rs, PeriodIndex)
def test_constructor_simple_new(self):
idx = Index([1, 2, 3, 4, 5], name='int')
result = idx._simple_new(idx, 'int')
self.assertTrue(result.equals(idx))
idx = Index([1.1, np.nan, 2.2, 3.0], name='float')
result = idx._simple_new(idx, 'float')
self.assertTrue(result.equals(idx))
idx = Index(['A', 'B', 'C', np.nan], name='obj')
result = idx._simple_new(idx, 'obj')
self.assertTrue(result.equals(idx))
def test_copy(self):
i = Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_legacy_pickle_identity(self):
# GH 8431
pth = tm.get_data_path()
s1 = pd.read_pickle(os.path.join(pth,'s1-0.12.0.pickle'))
s2 = pd.read_pickle(os.path.join(pth,'s2-0.12.0.pickle'))
self.assertFalse(s1.index.identical(s2.index))
self.assertFalse(s1.index.equals(s2.index))
def test_astype(self):
casted = self.intIndex.astype('i8')
# it works!
casted.get_loc(5)
# pass on name
self.intIndex.name = 'foobar'
casted = self.intIndex.astype('i8')
self.assertEqual(casted.name, 'foobar')
def test_compat(self):
self.strIndex.tolist()
def test_equals(self):
# same
self.assertTrue(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])))
# different length
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b'])))
# same length, different values
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd'])))
# Must also be an Index
self.assertFalse(Index(['a', 'b', 'c']).equals(['a', 'b', 'c']))
def test_insert(self):
# GH 7256
# validate neg/pos inserts
result = Index(['b', 'c', 'd'])
#test 0th element
self.assertTrue(Index(['a', 'b', 'c', 'd']).equals(
result.insert(0, 'a')))
#test Nth element that follows Python list behavior
self.assertTrue(Index(['b', 'c', 'e', 'd']).equals(
result.insert(-1, 'e')))
#test loc +/- neq (0, -1)
self.assertTrue(result.insert(1, 'z').equals(
result.insert(-2, 'z')))
#test empty
null_index = Index([])
self.assertTrue(Index(['a']).equals(
null_index.insert(0, 'a')))
def test_delete(self):
idx = Index(['a', 'b', 'c', 'd'], name='idx')
expected = Index(['b', 'c', 'd'], name='idx')
result = idx.delete(0)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
expected = Index(['a', 'b', 'c'], name='idx')
result = idx.delete(-1)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
with tm.assertRaises((IndexError, ValueError)):
# either, depending on numpy version
result = idx.delete(5)
def test_identical(self):
# index
i1 = Index(['a', 'b', 'c'])
i2 = Index(['a', 'b', 'c'])
self.assertTrue(i1.identical(i2))
i1 = i1.rename('foo')
self.assertTrue(i1.equals(i2))
self.assertFalse(i1.identical(i2))
i2 = i2.rename('foo')
self.assertTrue(i1.identical(i2))
i3 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')])
i4 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')], tupleize_cols=False)
self.assertFalse(i3.identical(i4))
def test_is_(self):
ind = Index(range(10))
self.assertTrue(ind.is_(ind))
self.assertTrue(ind.is_(ind.view().view().view().view()))
self.assertFalse(ind.is_(Index(range(10))))
self.assertFalse(ind.is_(ind.copy()))
self.assertFalse(ind.is_(ind.copy(deep=False)))
self.assertFalse(ind.is_(ind[:]))
self.assertFalse(ind.is_(ind.view(np.ndarray).view(Index)))
self.assertFalse(ind.is_(np.array(range(10))))
# quasi-implementation dependent
self.assertTrue(ind.is_(ind.view()))
ind2 = ind.view()
ind2.name = 'bob'
self.assertTrue(ind.is_(ind2))
self.assertTrue(ind2.is_(ind))
# doesn't matter if Indices are *actually* views of underlying data,
self.assertFalse(ind.is_(Index(ind.values)))
arr = np.array(range(1, 11))
ind1 = Index(arr, copy=False)
ind2 = Index(arr, copy=False)
self.assertFalse(ind1.is_(ind2))
def test_asof(self):
d = self.dateIndex[0]
self.assertIs(self.dateIndex.asof(d), d)
self.assertTrue(np.isnan(self.dateIndex.asof(d - timedelta(1))))
d = self.dateIndex[-1]
self.assertEqual(self.dateIndex.asof(d + timedelta(1)), d)
d = self.dateIndex[0].to_datetime()
tm.assert_isinstance(self.dateIndex.asof(d), Timestamp)
def test_asof_datetime_partial(self):
idx = pd.date_range('2010-01-01', periods=2, freq='m')
expected = Timestamp('2010-01-31')
result = idx.asof('2010-02')
self.assertEqual(result, expected)
def test_nanosecond_index_access(self):
s = Series([Timestamp('20130101')]).values.view('i8')[0]
r = DatetimeIndex([s + 50 + i for i in range(100)])
x = Series(np.random.randn(100), index=r)
first_value = x.asof(x.index[0])
# this does not yet work, as parsing strings is done via dateutil
#self.assertEqual(first_value, x['2013-01-01 00:00:00.000000050+0000'])
self.assertEqual(first_value, x[Timestamp(np.datetime64('2013-01-01 00:00:00.000000050+0000', 'ns'))])
def test_argsort(self):
result = self.strIndex.argsort()
expected = np.array(self.strIndex).argsort()
self.assert_numpy_array_equal(result, expected)
def test_comparators(self):
index = self.dateIndex
element = index[len(index) // 2]
element = _to_m8(element)
arr = np.array(index)
def _check(op):
arr_result = op(arr, element)
index_result = op(index, element)
self.assertIsInstance(index_result, np.ndarray)
self.assert_numpy_array_equal(arr_result, index_result)
_check(operator.eq)
_check(operator.ne)
_check(operator.gt)
_check(operator.lt)
_check(operator.ge)
_check(operator.le)
def test_booleanindex(self):
boolIdx = np.repeat(True, len(self.strIndex)).astype(bool)
boolIdx[5:30:2] = False
subIndex = self.strIndex[boolIdx]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
subIndex = self.strIndex[list(boolIdx)]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
def test_fancy(self):
sl = self.strIndex[[1, 2, 3]]
for i in sl:
self.assertEqual(i, sl[sl.get_loc(i)])
def test_empty_fancy(self):
empty_farr = np.array([], dtype=np.float_)
empty_iarr = np.array([], dtype=np.int_)
empty_barr = np.array([], dtype=np.bool_)
# pd.DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
for idx in [self.strIndex, self.intIndex, self.floatIndex]:
empty_idx = idx.__class__([])
values = idx.values
self.assertTrue(idx[[]].identical(empty_idx))
self.assertTrue(idx[empty_iarr].identical(empty_idx))
self.assertTrue(idx[empty_barr].identical(empty_idx))
# np.ndarray only accepts ndarray of int & bool dtypes, so should
# Index.
self.assertRaises(IndexError, idx.__getitem__, empty_farr)
def test_getitem(self):
arr = np.array(self.dateIndex)
exp = self.dateIndex[5]
exp = _to_m8(exp)
self.assertEqual(exp, arr[5])
def test_shift(self):
shifted = self.dateIndex.shift(0, timedelta(1))
self.assertIs(shifted, self.dateIndex)
shifted = self.dateIndex.shift(5, timedelta(1))
self.assert_numpy_array_equal(shifted, self.dateIndex + timedelta(5))
shifted = self.dateIndex.shift(1, 'B')
self.assert_numpy_array_equal(shifted, self.dateIndex + offsets.BDay())
shifted.name = 'shifted'
self.assertEqual(shifted.name, shifted.shift(1, 'D').name)
def test_intersection(self):
first = self.strIndex[:20]
second = self.strIndex[:10]
intersect = first.intersection(second)
self.assertTrue(tm.equalContents(intersect, second))
# Corner cases
inter = first.intersection(first)
self.assertIs(inter, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.intersection, 0.5)
idx1 = Index([1, 2, 3, 4, 5], name='idx')
# if target has the same name, it is preserved
idx2 = Index([3, 4, 5, 6, 7], name='idx')
expected2 = Index([3, 4, 5], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(result2.equals(expected2))
self.assertEqual(result2.name, expected2.name)
# if target name is different, it will be reset
idx3 = Index([3, 4, 5, 6, 7], name='other')
expected3 = Index([3, 4, 5], name=None)
result3 = idx1.intersection(idx3)
self.assertTrue(result3.equals(expected3))
self.assertEqual(result3.name, expected3.name)
# non monotonic
idx1 = Index([5, 3, 2, 4, 1], name='idx')
idx2 = Index([4, 7, 6, 5, 3], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(tm.equalContents(result2, expected2))
self.assertEqual(result2.name, expected2.name)
idx3 = Index([4, 7, 6, 5, 3], name='other')
result3 = idx1.intersection(idx3)
self.assertTrue(tm.equalContents(result3, expected3))
self.assertEqual(result3.name, expected3.name)
# non-monotonic non-unique
idx1 = Index(['A','B','A','C'])
idx2 = Index(['B','D'])
expected = Index(['B'], dtype='object')
result = idx1.intersection(idx2)
self.assertTrue(result.equals(expected))
def test_union(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
everything = self.strIndex[:20]
union = first.union(second)
self.assertTrue(tm.equalContents(union, everything))
# Corner cases
union = first.union(first)
self.assertIs(union, first)
union = first.union([])
self.assertIs(union, first)
union = Index([]).union(first)
self.assertIs(union, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.union, 0.5)
# preserve names
first.name = 'A'
second.name = 'A'
union = first.union(second)
self.assertEqual(union.name, 'A')
second.name = 'B'
union = first.union(second)
self.assertIsNone(union.name)
def test_add(self):
# - API change GH 8226
with tm.assert_produces_warning():
self.strIndex + self.strIndex
firstCat = self.strIndex.union(self.dateIndex)
secondCat = self.strIndex.union(self.strIndex)
if self.dateIndex.dtype == np.object_:
appended = np.append(self.strIndex, self.dateIndex)
else:
appended = np.append(self.strIndex, self.dateIndex.astype('O'))
self.assertTrue(tm.equalContents(firstCat, appended))
self.assertTrue(tm.equalContents(secondCat, self.strIndex))
tm.assert_contains_all(self.strIndex, firstCat)
tm.assert_contains_all(self.strIndex, secondCat)
tm.assert_contains_all(self.dateIndex, firstCat)
def test_append_multiple(self):
index = Index(['a', 'b', 'c', 'd', 'e', 'f'])
foos = [index[:2], index[2:4], index[4:]]
result = foos[0].append(foos[1:])
self.assertTrue(result.equals(index))
# empty
result = index.append([])
self.assertTrue(result.equals(index))
def test_append_empty_preserve_name(self):
left = Index([], name='foo')
right = Index([1, 2, 3], name='foo')
result = left.append(right)
self.assertEqual(result.name, 'foo')
left = Index([], name='foo')
right = Index([1, 2, 3], name='bar')
result = left.append(right)
self.assertIsNone(result.name)
def test_add_string(self):
# from bug report
index = Index(['a', 'b', 'c'])
index2 = index + 'foo'
self.assertNotIn('a', index2)
self.assertIn('afoo', index2)
def test_iadd_string(self):
index = pd.Index(['a', 'b', 'c'])
# doesn't fail test unless there is a check before `+=`
self.assertIn('a', index)
index += '_x'
self.assertIn('a_x', index)
def test_difference(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
answer = self.strIndex[10:20]
first.name = 'name'
# different names
result = first.difference(second)
self.assertTrue(tm.equalContents(result, answer))
self.assertEqual(result.name, None)
# same names
second.name = 'name'
result = first.difference(second)
self.assertEqual(result.name, 'name')
# with empty
result = first.difference([])
self.assertTrue(tm.equalContents(result, first))
self.assertEqual(result.name, first.name)
# with everything
result = first.difference(first)
self.assertEqual(len(result), 0)
self.assertEqual(result.name, first.name)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.diff, 0.5)
def test_symmetric_diff(self):
# smoke
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = Index([2, 3, 4, 5])
result = idx1.sym_diff(idx2)
expected = Index([1, 5])
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# __xor__ syntax
expected = idx1 ^ idx2
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# multiIndex
idx1 = MultiIndex.from_tuples(self.tuples)
idx2 = MultiIndex.from_tuples([('foo', 1), ('bar', 3)])
result = idx1.sym_diff(idx2)
expected = MultiIndex.from_tuples([('bar', 2), ('baz', 3), ('bar', 3)])
self.assertTrue(tm.equalContents(result, expected))
# nans:
# GH #6444, sorting of nans. Make sure the number of nans is right
# and the correct non-nan values are there. punt on sorting.
idx1 = Index([1, 2, 3, np.nan])
idx2 = Index([0, 1, np.nan])
result = idx1.sym_diff(idx2)
# expected = Index([0.0, np.nan, 2.0, 3.0, np.nan])
nans = pd.isnull(result)
self.assertEqual(nans.sum(), 2)
self.assertEqual((~nans).sum(), 3)
[self.assertIn(x, result) for x in [0.0, 2.0, 3.0]]
# other not an Index:
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = np.array([2, 3, 4, 5])
expected = Index([1, 5])
result = idx1.sym_diff(idx2)
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'idx1')
result = idx1.sym_diff(idx2, result_name='new_name')
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'new_name')
# other isn't iterable
with tm.assertRaises(TypeError):
Index(idx1,dtype='object') - 1
def test_pickle(self):
self.verify_pickle(self.strIndex)
self.strIndex.name = 'foo'
self.verify_pickle(self.strIndex)
self.verify_pickle(self.dateIndex)
def test_is_numeric(self):
self.assertFalse(self.dateIndex.is_numeric())
self.assertFalse(self.strIndex.is_numeric())
self.assertTrue(self.intIndex.is_numeric())
self.assertTrue(self.floatIndex.is_numeric())
def test_is_object(self):
self.assertTrue(self.strIndex.is_object())
self.assertTrue(self.boolIndex.is_object())
self.assertFalse(self.intIndex.is_object())
self.assertFalse(self.dateIndex.is_object())
self.assertFalse(self.floatIndex.is_object())
def test_is_all_dates(self):
self.assertTrue(self.dateIndex.is_all_dates)
self.assertFalse(self.strIndex.is_all_dates)
self.assertFalse(self.intIndex.is_all_dates)
def test_summary(self):
self._check_method_works(Index.summary)
# GH3869
ind = Index(['{other}%s', "~:{range}:0"], name='A')
result = ind.summary()
# shouldn't be formatted accidentally.
self.assertIn('~:{range}:0', result)
self.assertIn('{other}%s', result)
def test_format(self):
self._check_method_works(Index.format)
index = Index([datetime.now()])
formatted = index.format()
expected = [str(index[0])]
self.assertEqual(formatted, expected)
# 2845
index = Index([1, 2.0+3.0j, np.nan])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
# is this really allowed?
index = Index([1, 2.0+3.0j, None])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
self.strIndex[:0].format()
def test_format_with_name_time_info(self):
# bug I fixed 12/20/2011
inc = timedelta(hours=4)
dates = Index([dt + inc for dt in self.dateIndex], name='something')
formatted = dates.format(name=True)
self.assertEqual(formatted[0], 'something')
def test_format_datetime_with_time(self):
t = Index([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)])
result = t.format()
expected = ['2012-02-07 00:00:00', '2012-02-07 23:00:00']
self.assertEqual(len(result), 2)
self.assertEqual(result, expected)
def test_format_none(self):
values = ['a', 'b', 'c', None]
idx = Index(values)
idx.format()
self.assertIsNone(idx[3])
def test_take(self):
indexer = [4, 3, 0, 2]
result = self.dateIndex.take(indexer)
expected = self.dateIndex[indexer]
self.assertTrue(result.equals(expected))
def _check_method_works(self, method):
method(self.empty)
method(self.dateIndex)
method(self.unicodeIndex)
method(self.strIndex)
method(self.intIndex)
method(self.tuples)
def test_get_indexer(self):
idx1 = Index([1, 2, 3, 4, 5])
idx2 = Index([2, 4, 6])
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, [1, 3, -1])
r1 = idx2.get_indexer(idx1, method='pad')
assert_almost_equal(r1, [-1, 0, 0, 1, 1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
assert_almost_equal(r1, [0, 0, 1, 1, 2])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
def test_slice_locs(self):
for dtype in [int, float]:
idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype))
n = len(idx)
self.assertEqual(idx.slice_locs(start=2), (2, n))
self.assertEqual(idx.slice_locs(start=3), (3, n))
self.assertEqual(idx.slice_locs(3, 8), (3, 6))
self.assertEqual(idx.slice_locs(5, 10), (3, n))
self.assertEqual(idx.slice_locs(5.0, 10.0), (3, n))
self.assertEqual(idx.slice_locs(4.5, 10.5), (3, 8))
self.assertEqual(idx.slice_locs(end=8), (0, 6))
self.assertEqual(idx.slice_locs(end=9), (0, 7))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(8, 2), (2, 6))
self.assertEqual(idx2.slice_locs(8.5, 1.5), (2, 6))
self.assertEqual(idx2.slice_locs(7, 3), (2, 5))
self.assertEqual(idx2.slice_locs(10.5, -1), (0, n))
def test_slice_locs_dup(self):
idx = Index(['a', 'a', 'b', 'c', 'd', 'd'])
self.assertEqual(idx.slice_locs('a', 'd'), (0, 6))
self.assertEqual(idx.slice_locs(end='d'), (0, 6))
self.assertEqual(idx.slice_locs('a', 'c'), (0, 4))
self.assertEqual(idx.slice_locs('b', 'd'), (2, 6))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs('d', 'a'), (0, 6))
self.assertEqual(idx2.slice_locs(end='a'), (0, 6))
self.assertEqual(idx2.slice_locs('d', 'b'), (0, 4))
self.assertEqual(idx2.slice_locs('c', 'a'), (2, 6))
for dtype in [int, float]:
idx = Index(np.array([10, 12, 12, 14], dtype=dtype))
self.assertEqual(idx.slice_locs(12, 12), (1, 3))
self.assertEqual(idx.slice_locs(11, 13), (1, 3))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(12, 12), (1, 3))
self.assertEqual(idx2.slice_locs(13, 11), (1, 3))
def test_slice_locs_na(self):
idx = Index([np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, start=1.5)
self.assertRaises(KeyError, idx.slice_locs, end=1.5)
self.assertEqual(idx.slice_locs(1), (1, 3))
self.assertEqual(idx.slice_locs(np.nan), (0, 3))
idx = Index([np.nan, np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, np.nan)
def test_drop(self):
n = len(self.strIndex)
dropped = self.strIndex.drop(self.strIndex[lrange(5, 10)])
expected = self.strIndex[lrange(5) + lrange(10, n)]
self.assertTrue(dropped.equals(expected))
self.assertRaises(ValueError, self.strIndex.drop, ['foo', 'bar'])
dropped = self.strIndex.drop(self.strIndex[0])
expected = self.strIndex[1:]
self.assertTrue(dropped.equals(expected))
ser = Index([1, 2, 3])
dropped = ser.drop(1)
expected = Index([2, 3])
self.assertTrue(dropped.equals(expected))
def test_tuple_union_bug(self):
import pandas
import numpy as np
aidx1 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],
dtype=[('num', int), ('let', 'a1')])
aidx2 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B'), (1, 'C'), (2,
'C')], dtype=[('num', int), ('let', 'a1')])
idx1 = pandas.Index(aidx1)
idx2 = pandas.Index(aidx2)
# intersection broken?
int_idx = idx1.intersection(idx2)
# needs to be 1d like idx1 and idx2
expected = idx1[:4] # pandas.Index(sorted(set(idx1) & set(idx2)))
self.assertEqual(int_idx.ndim, 1)
self.assertTrue(int_idx.equals(expected))
# union broken
union_idx = idx1.union(idx2)
expected = idx2
self.assertEqual(union_idx.ndim, 1)
self.assertTrue(union_idx.equals(expected))
def test_is_monotonic_incomparable(self):
index = Index([5, datetime.now(), 7])
self.assertFalse(index.is_monotonic)
self.assertFalse(index.is_monotonic_decreasing)
def test_get_set_value(self):
values = np.random.randn(100)
date = self.dateIndex[67]
assert_almost_equal(self.dateIndex.get_value(values, date),
values[67])
self.dateIndex.set_value(values, date, 10)
self.assertEqual(values[67], 10)
def test_isin(self):
values = ['foo', 'bar', 'quux']
idx = Index(['qux', 'baz', 'foo', 'bar'])
result = idx.isin(values)
expected = np.array([False, False, True, True])
self.assert_numpy_array_equal(result, expected)
# empty, return dtype bool
idx = Index([])
result = idx.isin(values)
self.assertEqual(len(result), 0)
self.assertEqual(result.dtype, np.bool_)
def test_isin_nan(self):
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([np.nan]), [False, True])
self.assert_numpy_array_equal(
Index(['a', pd.NaT]).isin([pd.NaT]), [False, True])
self.assert_numpy_array_equal(
| Index(['a', np.nan]) | pandas.core.index.Index |
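As a quick illustration of the `Index.isin` NaN semantics that `test_isin_nan` exercises above, a hedged standalone sketch (written against the modern `pd.Index` entry point rather than the old `pandas.core.index` path named in the api cell):
import numpy as np
import pandas as pd
idx = pd.Index(['a', np.nan])
# A NaN in the lookup values matches the NaN element of the Index
print(idx.isin([np.nan]))  # [False  True]
print(idx.isin(['a']))     # [ True False]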
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = tm.box_expected([False, True, True], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[2]
expected = tm.box_expected([True, True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, True, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[0]
expected = tm.box_expected([True, False, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[2]
expected = tm.box_expected([False, False, False], xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"datetimelike",
[
Timestamp("20130101"),
datetime(2013, 1, 1),
np.datetime64("2013-01-01T00:00", "ns"),
],
)
@pytest.mark.parametrize(
"op,expected",
[
(operator.lt, [True, False, False, False]),
(operator.le, [True, True, False, False]),
(operator.eq, [False, True, False, False]),
(operator.gt, [False, False, False, True]),
],
)
def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
# GH#17965, test for ability to compare datetime64[ns] columns
# to datetimelike
ser = Series(
[
Timestamp("20120101"),
Timestamp("20130101"),
np.nan,
Timestamp("20130103"),
],
name="A",
)
result = op(ser, datetimelike)
expected = Series(expected, name="A")
tm.assert_series_equal(result, expected)
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = comparison_op(arr, element)
index_result = comparison_op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
xbox = get_upcast_box(left, right, True)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == NaT, expected)
tm.assert_equal(NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != NaT, expected)
tm.assert_equal(NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < NaT, expected)
tm.assert_equal(NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
didx2 = DatetimeIndex(
["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np.datetime64("2014-02-01 00:00"),
np.datetime64("2014-03-01 00:00"),
np.datetime64("nat"),
np.datetime64("nat"),
np.datetime64("2014-06-01 00:00"),
np.datetime64("2014-07-01 00:00"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
# Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
# Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
box = box_with_array
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box)
dz = tm.box_expected(dz, box)
if box is pd.DataFrame:
tolist = lambda x: x.astype(object).values.tolist()[0]
else:
tolist = list
if op not in [operator.eq, operator.ne]:
msg = (
r"Invalid comparison between dtype=datetime64\[ns.*\] "
"and (Timestamp|DatetimeArray|list|ndarray)"
)
with pytest.raises(TypeError, match=msg):
op(dr, dz)
with pytest.raises(TypeError, match=msg):
op(dr, tolist(dz))
with pytest.raises(TypeError, match=msg):
op(dr, np.array(tolist(dz), dtype=object))
with pytest.raises(TypeError, match=msg):
op(dz, dr)
with pytest.raises(TypeError, match=msg):
op(dz, tolist(dr))
with pytest.raises(TypeError, match=msg):
op(dz, np.array(tolist(dr), dtype=object))
# The aware==aware and naive==naive comparisons should *not* raise
assert np.all(dr == dr)
assert np.all(dr == tolist(dr))
assert np.all(tolist(dr) == dr)
assert np.all(np.array(tolist(dr), dtype=object) == dr)
assert np.all(dr == np.array(tolist(dr), dtype=object))
assert np.all(dz == dz)
assert np.all(dz == tolist(dz))
assert np.all(tolist(dz) == dz)
assert np.all(np.array(tolist(dz), dtype=object) == dz)
assert np.all(dz == np.array(tolist(dz), dtype=object))
def test_comparison_tzawareness_compat_scalars(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box_with_array)
dz = tm.box_expected(dz, box_with_array)
# Check comparisons against scalar Timestamps
ts = Timestamp("2000-03-14 01:59")
ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
assert np.all(dr > ts)
msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
assert np.all(dz > ts_tz)
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dz, ts)
if op not in [operator.eq, operator.ne]:
# GH#12601: Check comparison against Timestamps and DatetimeIndex
with pytest.raises(TypeError, match=msg):
op(ts, dz)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
# Bug in NumPy? https://github.com/numpy/numpy/issues/13841
# Raising in __eq__ will fallback to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
self, comparison_op, other, tz_aware_fixture, box_with_array
):
op = comparison_op
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
xbox = get_upcast_box(dtarr, other, True)
if op in [operator.eq, operator.ne]:
exbool = op is operator.ne
expected = np.array([exbool, exbool], dtype=bool)
expected = tm.box_expected(expected, xbox)
result = op(dtarr, other)
tm.assert_equal(result, expected)
result = op(other, dtarr)
tm.assert_equal(result, expected)
else:
msg = (
r"Invalid comparison between dtype=datetime64\[ns, .*\] "
f"and {type(other).__name__}"
)
with pytest.raises(TypeError, match=msg):
op(dtarr, other)
with pytest.raises(TypeError, match=msg):
op(other, dtarr)
def test_nat_comparison_tzawareness(self, comparison_op):
# GH#19276
# tzaware DatetimeIndex should not raise when compared to NaT
op = comparison_op
dti = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
expected = np.array([op == operator.ne] * len(dti))
result = op(dti, NaT)
tm.assert_numpy_array_equal(result, expected)
result = op(dti.tz_localize("US/Pacific"), NaT)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
# GH#22074
# regardless of tz, we expect these comparisons are valid
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
pd.timedelta_range("1D", periods=10),
pd.timedelta_range("1D", periods=10).to_series(),
pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
],
ids=lambda x: type(x).__name__,
)
def test_dti_cmp_tdi_tzawareness(self, other):
# GH#22074
# regression test that we _don't_ call _assert_tzawareness_compat
# when comparing against TimedeltaIndex
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
result = dti == other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = dti != other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
msg = "Invalid comparison between"
with pytest.raises(TypeError, match=msg):
dti < other
with pytest.raises(TypeError, match=msg):
dti <= other
with pytest.raises(TypeError, match=msg):
dti > other
with pytest.raises(TypeError, match=msg):
dti >= other
def test_dti_cmp_object_dtype(self):
# GH#22074
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
other = dti.astype("O")
result = dti == other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
other = dti.tz_localize(None)
result = dti != other
tm.assert_numpy_array_equal(result, expected)
other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
result = dti == other
expected = np.array([True] * 5 + [False] * 5)
tm.assert_numpy_array_equal(result, expected)
msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
# This class is intended for "finished" tests that are fully parametrized
# over DataFrame/Series/Index/DatetimeArray
# -------------------------------------------------------------
# Addition/Subtraction of timedelta-like
@pytest.mark.arm_slow
def test_dt64arr_add_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
# GH#22005, GH#22163 check DataFrame doesn't raise TypeError
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng + two_hours
tm.assert_equal(result, expected)
rng += two_hours
tm.assert_equal(rng, expected)
def test_dt64arr_sub_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng - two_hours
tm.assert_equal(result, expected)
rng -= two_hours
tm.assert_equal(rng, expected)
# TODO: redundant with test_dt64arr_add_timedeltalike_scalar
def test_dt64arr_add_td64_scalar(self, box_with_array):
# scalar timedeltas/np.timedelta64 objects
# operate with np.timedelta64 correctly
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
)
dtarr = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(1, "s")
tm.assert_equal(result, expected)
result = np.timedelta64(1, "s") + dtarr
tm.assert_equal(result, expected)
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(5, "ms")
tm.assert_equal(result, expected)
result = np.timedelta64(5, "ms") + dtarr
tm.assert_equal(result, expected)
def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
# GH#23320 special handling for timedelta64("NaT")
tz = tz_naive_fixture
dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS")
other = np.timedelta64("NaT")
expected = DatetimeIndex(["NaT"] * 9, tz=tz)
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
other - obj
def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + tdarr
tm.assert_equal(result, expected)
result = tdarr + dtarr
tm.assert_equal(result, expected)
expected = date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)
expected = tm.box_expected(expected, box_with_array)
result = dtarr - tdarr
tm.assert_equal(result, expected)
msg = "cannot subtract|(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
tdarr - dtarr
# -----------------------------------------------------------------
# Subtraction of datetime-like scalars
@pytest.mark.parametrize(
"ts",
[
Timestamp("2013-01-01"),
Timestamp("2013-01-01").to_pydatetime(),
Timestamp("2013-01-01").to_datetime64(),
],
)
def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
# GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
idx = date_range("2013-01-01", periods=3)._with_freq(None)
idx = tm.box_expected(idx, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = idx - ts
tm.assert_equal(result, expected)
def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
# GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
# for DataFrame operation
dt64 = np.datetime64("2013-01-01")
assert dt64.dtype == "datetime64[D]"
dti = date_range("20130101", periods=3)._with_freq(None)
dtarr = tm.box_expected(dti, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = dtarr - dt64
tm.assert_equal(result, expected)
result = dt64 - dtarr
tm.assert_equal(result, -expected)
def test_dt64arr_sub_timestamp(self, box_with_array):
ser = date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
ser = ser._with_freq(None)
ts = ser[0]
ser = tm.box_expected(ser, box_with_array)
delta_series = Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
expected = tm.box_expected(delta_series, box_with_array)
tm.assert_equal(ser - ts, expected)
tm.assert_equal(ts - ser, -expected)
def test_dt64arr_sub_NaT(self, box_with_array):
# GH#18808
dti = DatetimeIndex([NaT, Timestamp("19900315")])
ser = tm.box_expected(dti, box_with_array)
result = ser - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
dti_tz = dti.tz_localize("Asia/Tokyo")
ser_tz = tm.box_expected(dti_tz, box_with_array)
result = ser_tz - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
# -------------------------------------------------------------
# Subtraction of datetime-like array-like
def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
dti = date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
expected = dti - dti
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
result = obj - obj.astype(object)
tm.assert_equal(result, expected)
def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
dti = date_range("2016-01-01", periods=3, tz=None)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
expected = dtarr - dtarr
result = dtarr - dt64vals
tm.assert_equal(result, expected)
result = dt64vals - dtarr
tm.assert_equal(result, expected)
def test_dt64arr_aware_sub_dt64ndarray_raises(
self, tz_aware_fixture, box_with_array
):
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
msg = "subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dtarr - dt64vals
with pytest.raises(TypeError, match=msg):
dt64vals - dtarr
# -------------------------------------------------------------
# Addition of datetime-like others (invalid)
def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
assert_cannot_add(dtarr, dt64vals)
def test_dt64arr_add_timestamp_raises(self, box_with_array):
# GH#22163 ensure DataFrame doesn't cast Timestamp to i8
idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
ts = idx[0]
idx = tm.box_expected(idx, box_with_array)
assert_cannot_add(idx, ts)
# -------------------------------------------------------------
# Other Invalid Addition/Subtraction
@pytest.mark.parametrize(
"other",
[
3.14,
np.array([2.0, 3.0]),
# GH#13078 datetime +/- Period is invalid
Period("2011-01-01", freq="D"),
# https://github.com/pandas-dev/pandas/issues/10329
time(1, 2, 3),
],
)
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
dtarr = tm.box_expected(dti, box_with_array)
msg = "|".join(
[
"unsupported operand type",
"cannot (add|subtract)",
"cannot use operands with types",
"ufunc '?(add|subtract)'? cannot use operands with types",
"Concatenation operation is not implemented for NumPy arrays",
]
)
assert_invalid_addsub_type(dtarr, other, msg)
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_parr(
self, dti_freq, pi_freq, box_with_array, box_with_array2
):
# GH#20049 subtracting PeriodIndex should raise TypeError
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
pi = dti.to_period(pi_freq)
dtarr = tm.box_expected(dti, box_with_array)
parr = tm.box_expected(pi, box_with_array2)
msg = "|".join(
[
"cannot (add|subtract)",
"unsupported operand",
"descriptor.*requires",
"ufunc.*cannot use operands",
]
)
assert_invalid_addsub_type(dtarr, parr, msg)
def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):
# https://github.com/pandas-dev/pandas/issues/10329
tz = tz_naive_fixture
obj1 = date_range("2012-01-01", periods=3, tz=tz)
obj2 = [time(i, i, i) for i in range(3)]
obj1 = tm.box_expected(obj1, box_with_array)
obj2 = tm.box_expected(obj2, box_with_array)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
# If `x + y` raises, then `y + x` should raise here as well
msg = (
r"unsupported operand type\(s\) for -: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 - obj2
msg = "|".join(
[
"cannot subtract DatetimeArray from ndarray",
"ufunc (subtract|'subtract') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 - obj1
msg = (
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 + obj2
msg = "|".join(
[
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'",
"ufunc (add|'add') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 + obj1
class TestDatetime64DateOffsetArithmetic:
# -------------------------------------------------------------
# Tick DateOffsets
# TODO: parametrize over timezone?
def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser + pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser - pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = -pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
pd.offsets.Second(5) - ser
@pytest.mark.parametrize(
"cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
)
def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):
# GH#4532
# smoke tests for valid DateOffsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
ser = tm.box_expected(ser, box_with_array)
offset_cls = getattr(pd.offsets, cls_name)
ser + offset_cls(5)
offset_cls(5) + ser
ser - offset_cls(5)
def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
# GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
tz = tz_aware_fixture
if tz == "US/Pacific":
dates = date_range("2012-11-01", periods=3, tz=tz)
offset = dates + pd.offsets.Hour(5)
assert dates[0] + pd.offsets.Hour(5) == offset[0]
dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="H")
expected = DatetimeIndex(
["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],
freq="H",
tz=tz,
)
dates = tm.box_expected(dates, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# TODO: sub?
for scalar in [pd.offsets.Hour(5), np.timedelta64(5, "h"), timedelta(hours=5)]:
offset = dates + scalar
tm.assert_equal(offset, expected)
offset = scalar + dates
tm.assert_equal(offset, expected)
# -------------------------------------------------------------
# RelativeDelta DateOffsets
def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
# GH#10699
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
# DateOffset relativedelta fastpath
relative_kwargs = [
("years", 2),
("months", 5),
("days", 3),
("hours", 5),
("minutes", 10),
("seconds", 2),
("microseconds", 5),
]
for i, (unit, value) in enumerate(relative_kwargs):
off = DateOffset(**{unit: value})
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
off = DateOffset(**dict(relative_kwargs[: i + 1]))
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
off - vec
# -------------------------------------------------------------
# Non-Tick, Non-RelativeDelta DateOffsets
# TODO: redundant with test_dt64arr_add_sub_DateOffset? that includes
# tz-aware cases which this does not
@pytest.mark.parametrize(
"cls_and_kwargs",
[
"YearBegin",
("YearBegin", {"month": 5}),
"YearEnd",
("YearEnd", {"month": 5}),
"MonthBegin",
"MonthEnd",
"SemiMonthEnd",
"SemiMonthBegin",
"Week",
("Week", {"weekday": 3}),
"Week",
("Week", {"weekday": 6}),
"BusinessDay",
"BDay",
"QuarterEnd",
"QuarterBegin",
"CustomBusinessDay",
"CDay",
"CBMonthEnd",
"CBMonthBegin",
"BMonthBegin",
"BMonthEnd",
"BusinessHour",
"BYearBegin",
"BYearEnd",
"BQuarterBegin",
("LastWeekOfMonth", {"weekday": 2}),
(
"FY5253Quarter",
{
"qtr_with_extra_week": 1,
"startingMonth": 1,
"weekday": 2,
"variation": "nearest",
},
),
("FY5253", {"weekday": 0, "startingMonth": 2, "variation": "nearest"}),
("WeekOfMonth", {"weekday": 2, "week": 2}),
"Easter",
("DateOffset", {"day": 4}),
("DateOffset", {"month": 5}),
],
)
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("n", [0, 5])
def test_dt64arr_add_sub_DateOffsets(
self, box_with_array, n, normalize, cls_and_kwargs
):
# GH#10699
# assert vectorized operation matches pointwise operations
if isinstance(cls_and_kwargs, tuple):
# If cls_name param is a tuple, then 2nd entry is kwargs for
# the offset constructor
cls_name, kwargs = cls_and_kwargs
else:
cls_name = cls_and_kwargs
kwargs = {}
if n == 0 and cls_name in [
"WeekOfMonth",
"LastWeekOfMonth",
"FY5253Quarter",
"FY5253",
]:
# passing n = 0 is invalid for these offset classes
return
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
offset_cls = getattr(pd.offsets, cls_name)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
offset = offset_cls(n, normalize=normalize, **kwargs)
expected = DatetimeIndex([x + offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + offset)
expected = DatetimeIndex([x - offset for x in vec_items])
expected =
|
tm.box_expected(expected, box_with_array)
|
pandas._testing.box_expected
|
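For orientation only: tm.box_expected is an internal pandas test helper that wraps an expected Index in whichever container (Index, Series, DataFrame, or array) the parametrized test operates on; a rough public-API sketch of the Series case, with made-up dates, is:
import pandas as pd
# Sketch only: wrap an expected DatetimeIndex in a Series "box", which is what
# the helper does when the test is parametrized over Series.
expected_index = pd.date_range("2016-01-02", periods=3)
expected_series = pd.Series(expected_index)
print(expected_series.dtype)  # datetime64[ns]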
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
        # mixed floating and integer coexist in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values('a', axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float('nan')])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([('a', value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = (DataFrame(data)
.sort_values((11, 21))
.sort_values(('a', value), axis=1))
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
        # higher-dimensional input raises an exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(3, 1\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(2, 2\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
msg = ("If using all scalar "
"values, you must pass "
"an index")
with pytest.raises(ValueError, match=msg):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame({col: dict(compat.iteritems(val))
for col, val in compat.iteritems(data)})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame['B'].dtype == np.float64
assert frame['A'].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame['B'].dtype == np.object_
assert frame['A'].dtype == np.float64
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame['A'].dtype == np.object_
assert frame['B'].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
assert isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(result, expected, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
# list of periods
df = pd.DataFrame({'a': a.astype(object).tolist(),
'b': b.astype(object).tolist()})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
def test_nested_dict_frame_constructor(self):
rng = pd.period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
        # mat: 2d matrix with shape (2, 3) to input. empty - makes sized
        # objects
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
assert len(frame.index) == 3
assert len(frame.columns) == 1
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
        # higher-dimensional input raises an exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, index=[1, 2])
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
assert len(frame.index) == 0
frame = DataFrame(empty((3, 0)))
assert len(frame.columns) == 0
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert 1.0 == frame['A'][1]
assert 2.0 == frame['C'][2]
# what is this even checking??
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
assert frame.values.dtype == np.float64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'][1]
assert 2 == frame['C'][2]
# masked np.datetime64 stays (use NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert isna(frame).values.all()
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'].view('i8')[1]
assert 2 == frame['C'].view('i8')[2]
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
assert frame.values.dtype == object
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert frame['A'][1] is True
assert frame['C'][2] is False
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill the comb
comb = {k: (v.filled() if hasattr(v, 'filled') else v)
for k, v in comb}
expected = DataFrame(comb, columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result, expected)
# specify columns
expected = DataFrame(comb, columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
expected = DataFrame(comb, columns=names, index=[1, 2])
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
@pytest.mark.parametrize("data, index, columns, dtype, expected", [
(None, lrange(10), ['a', 'b'], object, np.object_),
(None, None, ['a', 'b'], 'int64', np.dtype('int64')),
(None, lrange(10), ['a', 'b'], int, np.dtype('float64')),
({}, None, ['foo', 'bar'], None, np.object_),
({'b': 1}, lrange(10), list('abc'), int, np.dtype('float64'))
])
def test_constructor_dtype(self, data, index, columns, dtype, expected):
df = DataFrame(data, index, columns, dtype)
assert df.values.dtype == expected
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
assert df['int'].dtype == np.int64
assert df['bool'].dtype == np.bool_
assert df['float'].dtype == np.float64
assert df['complex'].dtype == np.complex128
assert df['object'].dtype == np.object_
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
tm.assert_frame_equal(df, exp)
with pytest.raises(ValueError, match='must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
tm.assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
assert dm.values.ndim == 2
arr = randn(0)
dm = DataFrame(arr)
assert dm.values.ndim == 2
assert dm.values.ndim == 2
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
assert dm.values.shape == (10, 2)
dm = DataFrame(columns=['A', 'B'])
assert dm.values.shape == (0, 2)
dm = DataFrame(index=np.arange(10))
assert dm.values.shape == (10, 0)
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with pytest.raises(ValueError, match='cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
'B': np.ones(10, dtype=np.float64)},
index=np.arange(10))
assert len(dm.columns) == 2
assert dm.values.dtype == np.float64
def test_constructor_empty_list(self):
df = DataFrame([], index=[])
expected = DataFrame(index=[])
tm.assert_frame_equal(df, expected)
# GH 9939
df = DataFrame([], columns=['A', 'B'])
expected = DataFrame({}, columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
# Empty generator: list(empty_gen()) == []
def empty_gen():
return
yield
df = DataFrame(empty_gen(), columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
df = DataFrame(data=[[1, 'a'], [2, 'b']], columns=["num", "str"])
assert is_integer_dtype(df['num'])
assert df['str'].dtype == np.object_
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({0: np.arange(10)})
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
        # collections.Sequence like
class DummyContainer(compat.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
            def __len__(self):
                return self._lst.__len__()
lst_containers = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]
columns = ["num", "str"]
result = DataFrame(lst_containers, columns=columns)
expected = DataFrame([[1, 'a'], [2, 'b']], columns=columns)
tm.assert_frame_equal(result, expected, check_dtype=False)
# GH 4297
# support Array
import array
result = DataFrame({'A': array.array('i', range(10))})
expected = DataFrame({'A': list(range(10))})
tm.assert_frame_equal(result, expected, check_dtype=False)
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([array.array('i', range(10)),
array.array('i', range(10))])
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_iterable(self):
# GH 21987
class Iter():
def __iter__(self):
for i in range(10):
yield [1, 2, 3]
expected = DataFrame([[1, 2, 3]] * 10)
result = DataFrame(Iter())
tm.assert_frame_equal(result, expected)
def test_constructor_iterator(self):
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([range(10), range(10)])
tm.assert_frame_equal(result, expected)
def test_constructor_generator(self):
# related #2305
gen1 = (i for i in range(10))
gen2 = (i for i in range(10))
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([gen1, gen2])
tm.assert_frame_equal(result, expected)
gen = ([i, 'a'] for i in range(10))
result = DataFrame(gen)
expected = DataFrame({0: range(10), 1: 'a'})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_list_of_dicts(self):
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
result = DataFrame(data)
expected = DataFrame.from_dict(dict(zip(range(len(data)), data)),
orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result = DataFrame([{}])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_preserve_order(self):
# see gh-13304
expected = DataFrame([[2, 1]], columns=['b', 'a'])
data = OrderedDict()
data['b'] = [2]
data['a'] = [1]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
data = OrderedDict()
data['b'] = 2
data['a'] = 1
result = DataFrame([data])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_conflicting_orders(self):
# the first dict element sets the ordering for the DataFrame,
# even if there are conflicting orders from subsequent ones
row_one = OrderedDict()
row_one['b'] = 2
row_one['a'] = 1
row_two = OrderedDict()
row_two['a'] = 1
row_two['b'] = 2
row_three = {'b': 2, 'a': 1}
expected = DataFrame([[2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two])
tm.assert_frame_equal(result, expected)
expected = DataFrame([[2, 1], [2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two, row_three])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series(self):
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(['x', 'y'], data))
idx =
|
Index(['a', 'b', 'c'])
|
pandas.Index
|
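As a quick illustration of the pandas.Index call named above (the labels here are arbitrary):
import pandas as pd
# Build an index of row labels and use it to align a Series, mirroring the
# expected index in the test above.
idx = pd.Index(['a', 'b', 'c'])
s = pd.Series([1.5, 3.0, 4.0], index=idx)
print(s.index.equals(idx))  # True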
import random
import pandas as pd
from scipy.spatial.distance import cosine
from tqdm import tqdm
from preprocessing.duration_matrix import DurationSparseMatrix
DATA = 'data/'
POSTPROCESSING = 'postprocessing/'
def get_history_by_user(user_id: int) -> list:
df =
|
pd.read_csv(f'{DATA}{POSTPROCESSING}watch_history.csv', index_col=0)
|
pandas.read_csv
|
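A minimal, self-contained sketch of the pandas.read_csv pattern used in the completion above; the inline CSV is a stand-in, since the real watch_history.csv path and columns are not shown here.
import io
import pandas as pd
# Toy CSV standing in for the real file; index_col=0 treats the first column
# as the row index, as in the completion above.
csv_text = ",user_id,movie_id\n0,1,10\n1,1,11\n2,2,12\n"
df = pd.read_csv(io.StringIO(csv_text), index_col=0)
print(df[df["user_id"] == 1]["movie_id"].tolist())  # [10, 11]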
import pandas as pd
import numpy as np
import datetime as dt
from data_utils import Config
import gc
def merge_data():
"""
A function where I do feature extraction
while in the same time merging those features
with new data sources """
config = Config()
filename_train, filename_test = "../data/train.csv", "../data/test.csv"
# create datasets
train, test = config.load_data(filename_train, filename_test, print_EDA=False)
    # 1. datetime features
    # diff between weekday and day?
    # weekday - Return the day of the week as an integer, where Monday is 0 and Sunday is 6.
    # day - Between 1 and the number of days in the given month of the given year.
train['pickup_hour'] = train.pickup_datetime.dt.hour.astype('uint8')
train['pickup_day'] = train.pickup_datetime.dt.day.astype('uint8')
train['pickup_weekday'] = train.pickup_datetime.dt.weekday.astype('uint8')
train['pickup_minute'] = train.pickup_datetime.dt.minute.astype('uint8')
train['pickup_month'] = train.pickup_datetime.dt.month.astype('uint8')
train['pickup_hour_weekofyear'] = train['pickup_datetime'].dt.weekofyear
train['pickup_weekday_hour'] = train['pickup_weekday']*24 + train['pickup_hour']
test['pickup_hour'] = test.pickup_datetime.dt.hour.astype('uint8')
test['pickup_day'] = test.pickup_datetime.dt.day.astype('uint8')
test['pickup_weekday'] = test.pickup_datetime.dt.weekday.astype('uint8')
test['pickup_minute'] = test.pickup_datetime.dt.minute.astype('uint8')
test['pickup_month'] = test.pickup_datetime.dt.month.astype('uint8')
test['pickup_hour_weekofyear'] = test['pickup_datetime'].dt.weekofyear
test['pickup_weekday_hour'] = test['pickup_weekday']*24 + test['pickup_hour']
# 2. Location features
def haversine(lon1, lat1, lon2, lat2):
lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])
dlon = lon2 - lon1
dlat = lat2 - lat1
a = np.sin(dlat/2.0)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2.0)**2
c = 2 * np.arcsin(np.sqrt(a))
km = 6367 * c # AVG_EARTH_RADIUS=6367
miles = km * 0.621371
return miles
# def dummy_manhattan_distance(lat1, lng1, lat2, lng2):
# a = haversine_array(lat1, lng1, lat1, lng2)
# b = haversine_array(lat1, lng1, lat2, lng1)
# return a + b
# def bearing_array(lat1, lng1, lat2, lng2):
# AVG_EARTH_RADIUS = 6371 # in km
# lng_delta_rad = np.radians(lng2 - lng1)
# lat1, lng1, lat2, lng2 = map(np.radians, (lat1, lng1, lat2, lng2))
# y = np.sin(lng_delta_rad) * np.cos(lat2)
# x = np.cos(lat1) * np.sin(lat2) - np.sin(lat1) * np.cos(lat2) * np.cos(lng_delta_rad)
# return np.degrees(np.arctan2(y, x))
train['distance'] = haversine(train.pickup_longitude, train.pickup_latitude,
train.dropoff_longitude, train.dropoff_latitude)
test['distance'] = haversine(test.pickup_longitude, test.pickup_latitude,
test.dropoff_longitude, test.dropoff_latitude)
# 3. Use outsource data
weatherdata_filename = "../data/outsource_data/weather_data_nyc_centralpark_2016.csv"
fastestroute_data_train = "../data/outsource_data/fastest_train.csv"
fastestroute_data_test = "../data/outsource_data/fastest_routes_test.csv"
wd = pd.read_csv(weatherdata_filename, header=0)
wd['date'] =
|
pd.to_datetime(wd.date, format="%d-%m-%Y")
|
pandas.to_datetime
|
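For reference, a small sketch of the pandas.to_datetime call with an explicit day-month-year format, matching the completion above (the sample dates are made up):
import pandas as pd
# Parse day-month-year strings; format="%d-%m-%Y" mirrors the weather-date
# parsing in the completion above.
dates = pd.Series(["01-01-2016", "15-06-2016"])
parsed = pd.to_datetime(dates, format="%d-%m-%Y")
print(parsed.dt.month.tolist())  # [1, 6]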
from typing import List, Text, Dict
from dataclasses import dataclass
import ssl
import urllib.request
from io import BytesIO
from zipfile import ZipFile
from urllib.parse import urljoin
from logging import exception
import os
from re import findall
from datetime import datetime, timedelta
import lxml.html as LH
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
import pandas as pd
import time
from selenium.webdriver.support.ui import WebDriverWait
import warnings
import string
import re
from bs4 import BeautifulSoup
import requests
import glob
import time
import os
from fake_useragent import UserAgent
import brFinance.utils as utils
import pickle
ssl._create_default_https_context = ssl._create_unverified_context
warnings.simplefilter(action='ignore', category=FutureWarning)
@dataclass
class SearchENET:
"""
Perform webscraping on the page https://www.rad.cvm.gov.br/ENET/frmConsultaExternaCVM.aspx according to the input parameters
"""
def __init__(self, cod_cvm: int = None, category: int = None, driver: utils.webdriver = None):
self.driver = driver
# self.cod_cvm_dataframe = self.cod_cvm_list()
self.cod_cvm = cod_cvm
if cod_cvm is not None:
self.check_cod_cvm_exist(self.cod_cvm)
self.category = category
if category is not None:
self.check_category_exist(self.category)
def cod_cvm_list(self) -> pd.DataFrame:
"""
        Returns a dataframe of all CVM codes and company names available at https://www.rad.cvm.gov.br/ENET/frmConsultaExternaCVM.aspx
"""
if self.driver is None:
driver = utils.Browser.run_chromedriver()
else:
driver=self.driver
driver.get(f"https://www.rad.cvm.gov.br/ENET/frmConsultaExternaCVM.aspx")
#wait_pageload()
for retrie in range(50):
try:
html = str(driver.find_element_by_id('hdnEmpresas').get_attribute("value"))
listCodCVM = re.findall("(?<=\_)(.*?)(?=\')", html)
listNomeEmp = re.findall("(?<=\-)(.*?)(?=\')", html)
codigos_cvm = pd.DataFrame(list(zip(listCodCVM, listNomeEmp)),
columns=['codCVM', 'nome_empresa'])
codigos_cvm['codCVM'] = pd.to_numeric(codigos_cvm['codCVM'])
if len(codigos_cvm.index) > 0:
break
else:
time.sleep(1)
except:
time.sleep(1)
if self.driver is None:
driver.quit()
return codigos_cvm
def check_cod_cvm_exist(self, cod_cvm) -> bool:
codigos_cvm_available = self.cod_cvm_list()
cod_cvm_exists = str(cod_cvm) in [str(cod_cvm_aux) for cod_cvm_aux in codigos_cvm_available['codCVM'].values]
if cod_cvm_exists:
return True
else:
            raise ValueError('The CVM code provided was not found.')
def check_category_exist(self, category) -> bool:
search_categories_list = [21, 39]
if category in search_categories_list:
return True
else:
raise ValueError('Invalid category value. Available categories are:', search_categories_list)
@property
def search(self) -> pd.DataFrame:
"""
Returns dataframe of search results including cod_cvm, report's url, etc.
"""
dataInicial = '01012010'
dataFinal = datetime.today().strftime('%d%m%Y')
option_text = str(self.category)
if self.driver is None:
driver = utils.Browser.run_chromedriver()
else:
driver=self.driver
driver.get(f"https://www.rad.cvm.gov.br/ENET/frmConsultaExternaCVM.aspx?codigoCVM={str(self.cod_cvm)}")
# Wait and click cboCategorias_chosen
for errors in range(10):
try:
driver.find_element_by_id('cboCategorias_chosen').click()
break
except:
time.sleep(1)
# Wait and click
for errors in range(10):
try:
driver.find_element_by_xpath(
f"//html/body/form[1]/div[3]/div/fieldset/div[5]/div[1]/div/div/ul/li[@data-option-array-index='{option_text}']").click()
break
except:
time.sleep(1)
# Wait and click
for errors in range(10):
try:
driver.find_element_by_xpath("//html/body/form[1]/div[3]/div/fieldset/div[4]/div[1]/label[4]").click()
break
except:
time.sleep(1)
# Wait and send keys txtDataIni
for errors in range(10):
try:
driver.find_element_by_id('txtDataIni').send_keys(dataInicial)
break
except:
time.sleep(1)
# Wait and send keys txtDataFim
for errors in range(10):
try:
driver.find_element_by_id('txtDataFim').send_keys(dataFinal)
break
except:
time.sleep(1)
# Wait and click btnConsulta
for errors in range(10):
try:
driver.find_element_by_id('btnConsulta').click()
break
except:
time.sleep(1)
        # Wait for the html table to load the results (grdDocumentos)
for errors in range(10):
try:
table_html = pd.read_html(str(driver.find_element_by_id('grdDocumentos').get_attribute("outerHTML")))[-1]
if len(table_html.index) > 0:
break
else:
time.sleep(1)
except:
time.sleep(1)
table_html = str(driver.find_element_by_id('grdDocumentos').get_attribute("outerHTML"))
table = LH.fromstring(table_html)
results =
|
pd.read_html(table_html)
|
pandas.read_html
|
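A minimal sketch of pandas.read_html, which returns one DataFrame per <table> found; the toy table below is hypothetical and much simpler than the real grdDocumentos table.
import io
import pandas as pd
# read_html parses every <table> in the HTML and returns a list of DataFrames;
# taking [-1] picks the last one, as the scraper above does.
html = ("<table><tr><th>doc</th><th>date</th></tr>"
        "<tr><td>ITR</td><td>2020-01-01</td></tr></table>")
tables = pd.read_html(io.StringIO(html))
print(tables[-1])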
'''
Portfolio Analysis : Volatility
'''
# %% set system path
import sys,os
sys.path.append(os.path.abspath(".."))
# %% import data
import pandas as pd
month_return = pd.read_hdf('.\\data\\month_return.h5', key='month_return')
company_data = pd.read_hdf('.\\data\\last_filter_pe.h5', key='data')
trade_data =
|
pd.read_hdf('.\\data\\mean_filter_trade.h5', key='data')
|
pandas.read_hdf
|
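For context, a self-contained sketch of the HDF5 round trip behind pandas.read_hdf; it needs the optional PyTables dependency, and the file name and key below are placeholders.
import pandas as pd
# Write a small frame to an HDF5 store and read it back; requires the
# optional 'tables' (PyTables) package.
df = pd.DataFrame({"ret": [0.01, -0.02, 0.03]})
df.to_hdf("example_store.h5", key="data", mode="w")
loaded = pd.read_hdf("example_store.h5", key="data")
print(loaded.equals(df))  # True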
import nlu
from nlu.discovery import Discoverer
from nlu.pipe.utils.storage_ref_utils import StorageRefUtils
from typing import List, Tuple, Optional, Dict, Union
import streamlit as st
from nlu.utils.modelhub.modelhub_utils import ModelHubUtils
import numpy as np
import pandas as pd
from nlu.pipe.viz.streamlit_viz.streamlit_utils_OS import StreamlitUtilsOS
from nlu.pipe.viz.streamlit_viz.gen_streamlit_code import get_code_for_viz
from nlu.pipe.viz.streamlit_viz.styles import _set_block_container_style
import random
from nlu.pipe.viz.streamlit_viz.streamlit_viz_tracker import StreamlitVizTracker
class WordEmbeddingManifoldStreamlitBlock():
@staticmethod
def viz_streamlit_word_embed_manifold(
pipe, # nlu pipe
default_texts: List[str] = ("<NAME> likes to party!", "<NAME> likes to party!", 'Peter HATES TO PARTTY!!!! :('),
title: Optional[str] = "Lower dimensional Manifold visualization for word embeddings",
sub_title: Optional[str] = "Apply any of the 11 `Manifold` or `Matrix Decomposition` algorithms to reduce the dimensionality of `Word Embeddings` to `1-D`, `2-D` and `3-D` ",
write_raw_pandas : bool = False ,
default_algos_to_apply : List[str] = ("TSNE", "PCA"),#,'LLE','Spectral Embedding','MDS','ISOMAP','SVD aka LSA','DictionaryLearning','FactorAnalysis','FastICA','KernelPCA',), # LatentDirichletAllocation 'NMF',
target_dimensions : List[int] = (1,2,3),
show_algo_select : bool = True,
show_embed_select : bool = True,
show_color_select: bool = True,
MAX_DISPLAY_NUM:int=100,
display_embed_information:bool=True,
set_wide_layout_CSS:bool=True,
num_cols: int = 3,
model_select_position:str = 'side', # side or main
key:str = "NLU_streamlit",
additional_classifiers_for_coloring:List[str]=['pos', 'sentiment.imdb'],
generate_code_sample:bool = False,
show_infos:bool = True,
show_logo:bool = True,
n_jobs: Optional[int] = 3, # False
):
from nlu.pipe.viz.streamlit_viz.streamlit_utils_OS import StreamlitUtilsOS
StreamlitVizTracker.footer_displayed=False
try :
import plotly.express as px
from sklearn.metrics.pairwise import distance_metrics
        except :st.error("You need the sklearn and plotly packages installed in your Python environment for similarity visualizations. Run <pip install sklearn plotly>")
if len(default_texts) > MAX_DISPLAY_NUM : default_texts = default_texts[:MAX_DISPLAY_NUM]
if show_logo :StreamlitVizTracker.show_logo()
if set_wide_layout_CSS : _set_block_container_style()
if title:st.header(title)
if sub_title:st.subheader(sub_title)
# if show_logo :VizUtilsStreamlitOS.show_logo()
# VizUtilsStreamlitOS.loaded_word_embeding_pipes = []
        data = st.text_area('Enter N texts, separated by new lines to visualize Word Embeddings for ','\n'.join(default_texts))
if len(data) > MAX_DISPLAY_NUM : data = data[:MAX_DISPLAY_NUM]
data_split = data.split("\n")
while '' in data_split : data_split.remove('')
data = data_split.copy()
while '' in data : data.remove('')
if len(data)<=1:
st.error("Please enter more than 2 lines of text, seperated by new lines (hit <ENTER>)")
return
# TODO dynamic color inference for plotting
if show_color_select:
if model_select_position == 'side' : feature_to_color_by = st.sidebar.selectbox('Pick a feature to color points in manifold by ',['pos','sentiment',],0)
else:feature_to_color_by = st.selectbox('Feature to color plots by ',['pos','sentiment',],0)
text_col = 'token'
embed_algos_to_load = []
new_embed_pipes = []
e_coms = StreamlitUtilsOS.find_all_embed_components(pipe)
if show_algo_select :
exp = st.beta_expander("Select additional manifold and dimension reduction techniques to apply")
algos = exp.multiselect(
"Reduce embedding dimensionality to something visualizable",
options=("TSNE", "ISOMAP",'LLE','Spectral Embedding','MDS','PCA','SVD aka LSA','DictionaryLearning','FactorAnalysis','FastICA','KernelPCA',),default=default_algos_to_apply,)
emb_components_usable = [e for e in Discoverer.get_components('embed',True, include_aliases=True) if 'chunk' not in e and 'sentence' not in e]
loaded_embed_nlu_refs = []
loaded_classifier_nlu_refs = []
loaded_storage_refs = []
for c in e_coms :
if not hasattr(c.info,'nlu_ref'): continue
r = c.info.nlu_ref
if 'en.' not in r and 'embed.' not in r and 'ner' not in r : loaded_embed_nlu_refs.append('en.embed.' + r)
elif 'en.' in r and 'embed.' not in r and 'ner' not in r:
r = r.split('en.')[0]
loaded_embed_nlu_refs.append('en.embed.' + r)
else :
loaded_embed_nlu_refs.append(StorageRefUtils.extract_storage_ref(c))
loaded_storage_refs.append(StorageRefUtils.extract_storage_ref(c))
for p in StreamlitVizTracker.loaded_word_embeding_pipes :
if p != pipe : loaded_embed_nlu_refs.append(p.nlu_ref)
loaded_embed_nlu_refs = list(set(loaded_embed_nlu_refs))
for l in loaded_embed_nlu_refs:
if l not in emb_components_usable : emb_components_usable.append(l)
emb_components_usable.sort()
loaded_embed_nlu_refs.sort()
if model_select_position =='side':
embed_algo_selection = st.sidebar.multiselect("Pick additional Word Embeddings for the Dimension Reduction",options=emb_components_usable,default=loaded_embed_nlu_refs,key = key)
embed_algo_selection=[embed_algo_selection[-1]]
else :
exp = st.beta_expander("Pick additional Word Embeddings")
embed_algo_selection = exp.multiselect("Pick additional Word Embeddings for the Dimension Reduction",options=emb_components_usable,default=loaded_embed_nlu_refs,key = key)
embed_algo_selection=[embed_algo_selection[-1]]
embed_algos_to_load = list(set(embed_algo_selection) - set(loaded_embed_nlu_refs))
for embedder in embed_algos_to_load:new_embed_pipes.append(nlu.load(embedder))# + f' {" ".join(additional_classifiers_for_coloring)}'))
StreamlitVizTracker.loaded_word_embeding_pipes+=new_embed_pipes
if pipe not in StreamlitVizTracker.loaded_word_embeding_pipes: StreamlitVizTracker.loaded_word_embeding_pipes.append(pipe)
for nlu_ref in additional_classifiers_for_coloring :
already_loaded=False
if 'pos' in nlu_ref : continue
# for p in VizUtilsStreamlitOS.loaded_document_classifier_pipes:
# if p.nlu_ref == nlu_ref : already_loaded = True
# if not already_loaded : VizUtilsStreamlitOS.loaded_token_level_classifiers.append(nlu.load(nlu_ref))
else :
for p in StreamlitVizTracker.loaded_document_classifier_pipes:
if p.nlu_ref == nlu_ref : already_loaded = True
if not already_loaded :
already_loaded=True
StreamlitVizTracker.loaded_document_classifier_pipes.append(nlu.load(nlu_ref))
col_index = 0
cols = st.beta_columns(num_cols)
def are_cols_full(): return col_index == num_cols
token_feature_pipe = StreamlitUtilsOS.get_pipe('pos')
        # not all pipes have sentiment/pos etc. models loaded for hue coloring
## Lets FIRST predict with the classifiers/Token level feature generators and THEN apply embed pipe
for p in StreamlitVizTracker.loaded_word_embeding_pipes :
data = data_split.copy()
classifier_cols = []
for class_p in StreamlitVizTracker.loaded_document_classifier_pipes:
data = class_p.predict(data, output_level='document',multithread=False).dropna()
classifier_cols.append(StreamlitUtilsOS.get_classifier_cols(class_p))
data['text'] = data_split
            # drop embeddings of classifier outputs because of bad conversion
for c in data.columns :
if 'embedding' in c : data.drop(c, inplace=True,axis=1)
# data['text']
# =data['document']
data['text'] = data_split
for c in data.columns :
if 'sentence_embedding' in c : data.drop(c,inplace=True,axis=1)
p = StreamlitUtilsOS.merge_token_classifiers_with_embed_pipe(p, token_feature_pipe)
# if'token' in data.columns : data.drop(['token','pos'],inplace=True,)
if'pos' in data.columns : data.drop('pos',inplace=True,axis=1)
predictions = p.predict(data,output_level='token',multithread=False).dropna()
e_col = StreamlitUtilsOS.find_embed_col(predictions)
e_com = StreamlitUtilsOS.find_embed_component(p)
e_com_storage_ref = StorageRefUtils.extract_storage_ref(e_com, True)
# embedder_name = StreamlitUtilsOS.extract_name(e_com)
emb = predictions[e_col]
mat = np.array([x for x in emb])
for algo in algos :
if len(mat.shape)>2 : mat = mat.reshape(len(emb),mat.shape[-1])
hover_data = ['token','text','sentiment', 'pos'] # TODO DEDUCT
# calc reduced dimensionality with every algo
if 1 in target_dimensions:
low_dim_data = StreamlitUtilsOS.get_manifold_algo(algo,1,n_jobs).fit_transform(mat)
x = low_dim_data[:,0]
y = np.zeros(low_dim_data[:,0].shape)
tsne_df =
|
pd.DataFrame({'x':x,'y':y, 'text':predictions[text_col], 'pos':predictions.pos, 'sentiment' : predictions.sentiment,'token':predictions.token})
|
pandas.DataFrame
|
# Import python modules
import os, sys
# data handling libraries
import pandas as pd
import numpy as np
import pickle
import json
import dask
from multiprocessing import Pool
# graphical control libraries
import matplotlib as mpl
mpl.use('agg')
import matplotlib.pyplot as plt
# shape and layer libraries
import fiona
from shapely.geometry import MultiPolygon, shape, point, box
from descartes import PolygonPatch
from matplotlib.collections import PatchCollection
from mpl_toolkits.basemap import Basemap
from mpl_toolkits.axes_grid1 import make_axes_locatable
import geopandas as gpd
# data wrangling libraries
import ftplib, urllib.request, wget, bz2
from bs4 import BeautifulSoup as bs
class ogh_meta:
"""
The json file that describes the Gridded climate data products
"""
def __init__(self):
self.__meta_data = dict(json.load(open('ogh_meta.json','rb')))
# key-value retrieval
def __getitem__(self, key):
return(self.__meta_data[key])
# key list
def keys(self):
return(self.__meta_data.keys())
# value list
def values(self):
return(self.__meta_data.values())
# print('Version '+datetime.fromtimestamp(os.path.getmtime('ogh.py')).strftime('%Y-%m-%d %H:%M:%S')+' jp')
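# A minimal usage sketch for ogh_meta (assumes an 'ogh_meta.json' file sits in the working
# directory; the key below is illustrative only -- inspect keys() for the real entries):
# meta = ogh_meta()
# print(list(meta.keys()))                # available gridded-product descriptors
# params = meta['dailymet_livneh2013']    # hypothetical key; returns that product's metadata dict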
def saveDictOfDf(outfilename, dictionaryObject):
    # write a dictionary of dataframes to a binary file using pickle
with open(outfilename, 'wb') as f:
pickle.dump(dictionaryObject, f)
f.close()
def readDictOfDf(infilename):
    # read a dictionary of dataframes from a pickle file
with open(infilename, 'rb') as f:
dictionaryObject = pickle.load(f)
f.close()
return(dictionaryObject)
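# A quick round-trip sketch for the two helpers above (the file name is arbitrary):
# dict_of_df = {'siteA': pd.DataFrame({'PRECIP': [1.2, 0.0]}),
#               'siteB': pd.DataFrame({'PRECIP': [0.4, 2.1]})}
# saveDictOfDf('daily_precip.file', dict_of_df)
# restored = readDictOfDf('daily_precip.file')
# assert restored['siteA'].equals(dict_of_df['siteA'])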
def reprojShapefile(sourcepath, newprojdictionary={'proj':'longlat', 'ellps':'WGS84', 'datum':'WGS84'}, outpath=None):
"""
sourcepath: (dir) the path to the .shp file
newprojdictionary: (dict) the new projection definition in the form of a dictionary (default provided)
outpath: (dir) the output path for the new shapefile
"""
# if outpath is none, treat the reprojection as a file replacement
if isinstance(outpath, type(None)):
outpath = sourcepath
shpfile = gpd.GeoDataFrame.from_file(sourcepath)
shpfile = shpfile.to_crs(newprojdictionary)
shpfile.to_file(outpath)
def getFullShape(shapefile):
"""
Generate a MultiPolygon to represent each shape/polygon within the shapefile
shapefile: (dir) a path to the ESRI .shp shapefile
"""
shp = fiona.open(shapefile)
mp = [shape(pol['geometry']) for pol in shp]
mp = MultiPolygon(mp)
shp.close()
return(mp)
def getShapeBbox(polygon):
"""
Generate a geometric box to represent the bounding box for the polygon, shapefile connection, or MultiPolygon
polygon: (geometry) a geometric polygon, MultiPolygon, or shapefile connection
"""
# identify the cardinal bounds
minx, miny, maxx, maxy = polygon.bounds
bbox = box(minx, miny, maxx, maxy, ccw=True)
return(bbox)
def readShapefileTable(shapefile):
"""
read in the datatable captured within the shapefile properties
shapefile: (dir) a path to the ESRI .shp shapefile
"""
#cent_df = gpd.read_file(shapefile)
shp = fiona.open(shapefile)
centroid = [eachpol['properties'] for eachpol in shp]
cent_df = pd.DataFrame.from_dict(centroid, orient='columns')
shp.close()
return(cent_df)
def filterPointsinShape(shape, points_lat, points_lon, points_elev=None, buffer_distance=0.06, buffer_resolution=16,
labels=['LAT', 'LONG_', 'ELEV']):
"""
    filter the candidate points to those that fall within the (buffered) shape
shape: (geometry) a geometric polygon or MultiPolygon
points_lat: (series) a series of latitude points in WGS84 projection
points_lon: (series) a series of longitude points in WGS84 projection
points_elev: (series) a series of elevation points in meters; optional - default is None
    buffer_distance: (float64) the buffer distance (in decimal degrees) used to expand the geodetic boundary area
    buffer_resolution: (int) the number of segments used to approximate a quarter circle of the buffer boundary
labels: (list) a list of preferred labels for latitude, longitude, and elevation
"""
# add buffer region
region = shape.buffer(buffer_distance, resolution=buffer_resolution)
# construct points_elev if null
if isinstance(points_elev, type(None)):
points_elev=np.repeat(np.nan, len(points_lon))
    # Intersect each coordinate with the region
limited_list = []
for lon, lat, elev in zip(points_lon, points_lat, points_elev):
gpoint = point.Point(lon, lat)
if gpoint.intersects(region):
limited_list.append([lat, lon, elev])
maptable = pd.DataFrame.from_records(limited_list, columns=labels)
## dask approach ##
#intersection=[]
#for lon, lat, elev in zip(points_lon, points_lat, points_elev):
# gpoint = point.Point(lon, lat)
# intersection.append(dask.delayed(gpoint.intersects(region)))
# limited_list.append([intersection, lat, lon, elev])
# convert to dataframe
#maptable = pd.DataFrame({labels[0]:points_lat, labels[1]:points_lon, labels[2]:points_elev}
# .loc[dask.compute(intersection)[0],:]
# .reset_index(drop=True)
return(maptable)
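# A small sketch of filterPointsinShape on a toy polygon (values are illustrative; the real
# workflow passes a MultiPolygon derived from a shapefile):
# toy_shape = box(-122.0, 47.0, -121.0, 48.0)
# toy_pts = pd.DataFrame({'LAT': [47.5, 49.2], 'LONG_': [-121.5, -121.5]})
# inside = filterPointsinShape(toy_shape, points_lat=toy_pts.LAT, points_lon=toy_pts.LONG_)
# # only the first point falls within the polygon (plus its 0.06-degree buffer)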
def scrapeurl(url, startswith=None, hasKeyword=None):
"""
scrape the gridded datafiles from a url of interest
url: (str) the web folder path to be scraped for hyperlink references
startswith: (str) the starting keywords for a webpage element; default is None
hasKeyword: (str) keywords represented in a webpage element; default is None
"""
# grab the html of the url, and prettify the html structure
    page = urllib.request.urlopen(url).read()
page_soup = bs(page, 'lxml')
page_soup.prettify()
# loop through and filter the hyperlinked lines
if pd.isnull(startswith):
temp = [anchor['href'] for anchor in page_soup.findAll('a', href=True) if hasKeyword in anchor['href']]
else:
temp = [anchor['href'] for anchor in page_soup.findAll('a', href=True) if anchor['href'].startswith(startswith)]
    # convert the list of hyperlinks to a dataframe
temp = pd.DataFrame(temp, columns = ['filenames'])
return(temp)
def treatgeoself(shapefile, NAmer, folder_path=os.getcwd(), outfilename='mappingfile.csv', buffer_distance=0.06):
"""
TreatGeoSelf to some [data] lovin'!
shapefile: (dir) the path to an ESRI shapefile for the region of interest
    NAmer: (dir) the path to an ESRI shapefile, which has each 1/16th-degree coordinate and elevation information from a DEM
folder_path: (dir) the destination folder path for the mappingfile output; default is the current working directory
outfilename: (str) the name of the output file; default name is 'mappingfile.csv'
    buffer_distance: (float64) the buffer distance (in decimal degrees) used to expand the geodetic boundary area; default is 0.06
"""
# conform projections to longlat values in WGS84
reprojShapefile(shapefile, newprojdictionary={'proj':'longlat', 'ellps':'WGS84', 'datum':'WGS84'}, outpath=None)
# read shapefile into a multipolygon shape-object
shape_mp = getFullShape(shapefile)
# read in the North American continental DEM points for the station elevations
NAmer_datapoints = readShapefileTable(NAmer).rename(columns={'Lat':'LAT','Long':'LONG_','Elev':'ELEV'})
# generate maptable
maptable = filterPointsinShape(shape_mp,
points_lat=NAmer_datapoints.LAT,
points_lon=NAmer_datapoints.LONG_,
points_elev=NAmer_datapoints.ELEV,
buffer_distance=buffer_distance, buffer_resolution=16, labels=['LAT', 'LONG_', 'ELEV'])
maptable.reset_index(inplace=True)
maptable = maptable.rename(columns={"index":"FID"})
print(maptable.shape)
print(maptable.tail())
# print the mappingfile
mappingfile=os.path.join(folder_path, outfilename)
maptable.to_csv(mappingfile, sep=',', header=True, index=False)
return(mappingfile)
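# A hedged usage sketch for treatgeoself (both shapefile paths are placeholders for files the
# user supplies; the returned path points at the generated mappingfile csv):
# mappingfile = treatgeoself(shapefile='data/watershed_boundary.shp',
#                            NAmer='data/NAmer_dem_points.shp',
#                            folder_path=os.getcwd(),
#                            outfilename='mappingfile.csv',
#                            buffer_distance=0.06)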
def mapContentFolder(resid):
"""
map the content folder within HydroShare
resid: (str) a string hash that represents the hydroshare resource that has been migrated
"""
path = os.path.join('/home/jovyan/work/notebooks/data', str(resid), str(resid), 'data/contents')
return(path)
# ### CIG (DHSVM)-oriented functions
def compile_bc_Livneh2013_locations(maptable):
"""
compile a list of file URLs for bias corrected Livneh et al. 2013 (CIG)
maptable: (dataframe) a dataframe that contains the FID, LAT, LONG_, and ELEV for each interpolated data file
"""
locations=[]
for ind, row in maptable.iterrows():
basename='_'.join(['data', str(row['LAT']), str(row['LONG_'])])
url=['http://cses.washington.edu/rocinante/Livneh/bcLivneh_WWA_2013/forcings_ascii/',basename]
locations.append(''.join(url))
return(locations)
def compile_Livneh2013_locations(maptable):
"""
compile a list of file URLs for Livneh et al. 2013 (CIG)
maptable: (dataframe) a dataframe that contains the FID, LAT, LONG_, and ELEV for each interpolated data file
"""
locations=[]
for ind, row in maptable.iterrows():
basename='_'.join(['data', str(row['LAT']), str(row['LONG_'])])
url=['http://www.cses.washington.edu/rocinante/Livneh/Livneh_WWA_2013/forcs_dhsvm/',basename]
locations.append(''.join(url))
return(locations)
### VIC-oriented functions
def compile_VICASCII_Livneh2015_locations(maptable):
"""
compile the list of file URLs for Livneh et al., 2015 VIC.ASCII outputs
maptable: (dataframe) a dataframe that contains the FID, LAT, LONG_, and ELEV for each interpolated data file
"""
locations=[]
for ind, row in maptable.iterrows():
loci='_'.join(['Fluxes_Livneh_NAmerExt_15Oct2014', str(row['LAT']), str(row['LONG_'])])
url=["ftp://192.168.127.12/pub/dcp/archive/OBS/livneh2014.1_16deg/VIC.ASCII/latitude.",str(row['LAT']),'/',loci,'.bz2']
locations.append(''.join(url))
return(locations)
def compile_VICASCII_Livneh2013_locations(maptable):
"""
compile the list of file URLs for Livneh et al., 2013 VIC.ASCII outputs for the USA
maptable: (dataframe) a dataframe that contains the FID, LAT, LONG_, and ELEV for each interpolated data file
"""
# identify the subfolder blocks
blocks = scrape_domain(domain='livnehpublicstorage.colorado.edu',
subdomain='/public/Livneh.2013.CONUS.Dataset/Fluxes.asc.v.1.2.1915.2011.bz2/',
startswith='fluxes')
# map each coordinate to the subfolder
maptable = mapToBlock(maptable, blocks)
locations=[]
for ind, row in maptable.iterrows():
loci='_'.join(['VIC_fluxes_Livneh_CONUSExt_v.1.2_2013', str(row['LAT']), str(row['LONG_'])])
url='/'.join(["ftp://livnehpublicstorage.colorado.edu/public/Livneh.2013.CONUS.Dataset/Fluxes.asc.v.1.2.1915.2011.bz2", str(row['blocks']), loci+".bz2"])
locations.append(url)
return(locations)
### Climate (Meteorological observations)-oriented functions
def canadabox_bc():
"""
Establish the Canadian (north of the US bounding boxes) Columbia river basin bounding box
"""
# left, bottom, right top
return(box(-138.0, 49.0, -114.0, 53.0))
def scrape_domain(domain, subdomain, startswith=None):
"""
scrape the gridded datafiles from a url of interest
domain: (str) the web folder path
subdomain: (str) the subfolder path to be scraped for hyperlink references
startswith: (str) the starting keywords for a webpage element; default is None
"""
# connect to domain
ftp = ftplib.FTP(domain)
ftp.login()
ftp.cwd(subdomain)
# scrape for data directories
tmp = [dirname for dirname in ftp.nlst() if dirname.startswith(startswith)]
geodf = pd.DataFrame(tmp, columns=['dirname'])
# conform to bounding box format
tmp = geodf['dirname'].apply(lambda x: x.split('.')[1:])
tmp = tmp.apply(lambda x: list(map(float,x)) if len(x)>2 else x)
# assemble the boxes
geodf['bbox']=tmp.apply(lambda x: box(x[0]*-1, x[2]-1, x[1]*-1, x[3]) if len(x)>2 else canadabox_bc())
return(geodf)
def mapToBlock(df_points, df_regions):
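    """
    assign each point in df_points to the subfolder block in df_regions whose bounding box contains it
    df_points: (dataframe) a dataframe with LAT and LONG_ columns for each point
    df_regions: (dataframe) a dataframe with 'dirname' and 'bbox' columns, as produced by scrape_domain()
    """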
for index, eachblock in df_regions.iterrows():
for ind, row in df_points.iterrows():
if point.Point(row['LONG_'], row['LAT']).intersects(eachblock['bbox']):
df_points.loc[ind, 'blocks'] = str(eachblock['dirname'])
return(df_points)
def compile_dailyMET_Livneh2013_locations(maptable):
"""
compile the list of file URLs for Livneh et al., 2013 Daily Meteorology data
maptable: (dataframe) a dataframe that contains the FID, LAT, LONG_, and ELEV for each interpolated data file
"""
# identify the subfolder blocks
blocks = scrape_domain(domain='livnehpublicstorage.colorado.edu',
subdomain='/public/Livneh.2013.CONUS.Dataset/Meteorology.asc.v.1.2.1915.2011.bz2/',
startswith='data')
# map each coordinate to the subfolder
maptable = mapToBlock(maptable, blocks)
locations=[]
for ind, row in maptable.iterrows():
loci='_'.join(['Meteorology_Livneh_CONUSExt_v.1.2_2013', str(row['LAT']), str(row['LONG_'])])
url='/'.join(["ftp://livnehpublicstorage.colorado.edu/public/Livneh.2013.CONUS.Dataset/Meteorology.asc.v.1.2.1915.2011.bz2", str(row['blocks']), loci+".bz2"])
locations.append(url)
return(locations)
def compile_dailyMET_Livneh2015_locations(maptable):
"""
compile the list of file URLs for Livneh et al., 2015 Daily Meteorology data
maptable: (dataframe) a dataframe that contains the FID, LAT, LONG_, and ELEV for each interpolated data file
"""
locations=[]
for ind, row in maptable.iterrows():
loci='_'.join(['Meteorology_Livneh_NAmerExt_15Oct2014', str(row['LAT']), str(row['LONG_'])])
url=["ftp://192.168.127.12/pub/dcp/archive/OBS/livneh2014.1_16deg/ascii/daily/latitude.", str(row['LAT']),"/",loci,".bz2"]
locations.append(''.join(url))
return(locations)
# ### WRF-oriented functions
def compile_wrfnnrp_raw_Salathe2014_locations(maptable):
"""
compile a list of file URLs for Salathe et al., 2014 raw WRF NNRP data
maptable: (dataframe) a dataframe that contains the FID, LAT, LONG_, and ELEV for each interpolated data file
"""
locations=[]
for ind, row in maptable.iterrows():
basename='_'.join(['data', str(row['LAT']), str(row['LONG_'])])
url=['http://cses.washington.edu/rocinante/WRF/NNRP/vic_16d/WWA_1950_2010/raw/forcings_ascii/',basename]
locations.append(''.join(url))
return(locations)
def compile_wrfnnrp_bc_Salathe2014_locations(maptable):
"""
compile a list of file URLs for the Salathe et al., 2014 bias corrected WRF NNRP data
maptable: (dataframe) a dataframe that contains the FID, LAT, LONG_, and ELEV for each interpolated data file
"""
locations=[]
for ind, row in maptable.iterrows():
basename='_'.join(['data', str(row['LAT']), str(row['LONG_'])])
url=['http://cses.washington.edu/rocinante/WRF/NNRP/vic_16d/WWA_1950_2010/bc/forcings_ascii/',basename]
locations.append(''.join(url))
return(locations)
# ## Data file migration functions
def ensure_dir(f):
"""
check if the destination folder directory exists; if not, create it and set it as the working directory
f: (dir) the directory to create and/or set as working directory
"""
if not os.path.exists(f):
os.makedirs(f)
os.chdir(f)
def wget_download(listofinterest):
"""
Download files from an http domain
listofinterest: (list) a list of urls to request
"""
# check and download each location point, if it doesn't already exist in the download directory
for fileurl in listofinterest:
basename = os.path.basename(fileurl)
try:
ping = urllib.request.urlopen(fileurl)
if ping.getcode()!=404:
wget.download(fileurl)
print('downloaded: ' + basename)
except:
print('File does not exist at this URL: ' + basename)
# Download the files to the subdirectory
def wget_download_one(fileurl):
"""
Download a file from an http domain
fileurl: (url) a url to request
"""
# check and download each location point, if it doesn't already exist in the download directory
basename=os.path.basename(fileurl)
# if it exists, remove for new download (overwrite mode)
if os.path.isfile(basename):
os.remove(basename)
try:
ping = urllib.request.urlopen(fileurl)
if ping.getcode()!=404:
wget.download(fileurl)
print('downloaded: ' + basename)
except:
print('File does not exist at this URL: ' + basename)
def wget_download_p(listofinterest, nworkers=20):
"""
Download files from an http domain in parallel
listofinterest: (list) a list of urls to request
    nworkers: (int) the number of processors to distribute tasks; default is 20
"""
pool = Pool(int(nworkers))
pool.map(wget_download_one, listofinterest)
pool.close()
pool.terminate()
def ftp_download(listofinterest):
"""
Download and decompress files from an ftp domain
listofinterest: (list) a list of urls to request
"""
for loci in listofinterest:
# establish path info
fileurl=loci.replace('ftp://','') # loci is already the url with the domain already appended
ipaddress=fileurl.split('/',1)[0] # ip address
path=os.path.dirname(fileurl.split('/',1)[1]) # folder path
filename=os.path.basename(fileurl) # filename
# download the file from the ftp server
ftp=ftplib.FTP(ipaddress)
ftp.login()
ftp.cwd(path)
try:
ftp.retrbinary("RETR " + filename ,open(filename, 'wb').write)
ftp.close()
# decompress the file
decompbz2(filename)
except:
os.remove(filename)
print('File does not exist at this URL: '+fileurl)
def ftp_download_one(loci):
"""
Download and decompress a file from an ftp domain
loci: (url) a url to request
"""
# establish path info
fileurl=loci.replace('ftp://','') # loci is already the url with the domain already appended
ipaddress=fileurl.split('/',1)[0] # ip address
path=os.path.dirname(fileurl.split('/',1)[1]) # folder path
filename=os.path.basename(fileurl) # filename
# download the file from the ftp server
ftp=ftplib.FTP(ipaddress)
ftp.login()
ftp.cwd(path)
try:
ftp.retrbinary("RETR " + filename ,open(filename, 'wb').write)
ftp.close()
# decompress the file
decompbz2(filename)
except:
os.remove(filename)
print('File does not exist at this URL: '+fileurl)
def ftp_download_p(listofinterest, nworkers=5):
"""
Download and decompress files from an ftp domain in parallel
listofinterest: (list) a list of urls to request
nworkers: (int) the number of processors to distribute tasks; default is 5
"""
pool = Pool(int(nworkers))
pool.map(ftp_download_one, listofinterest)
pool.close()
pool.terminate()
def decompbz2(filename):
"""
Extract a file from a bz2 file of the same name, then remove the bz2 file
filename: (dir) the file path for a bz2 compressed file
"""
with open(filename.split(".bz2",1)[0], 'wb') as new_file, open(filename, 'rb') as zipfile:
decompressor = bz2.BZ2Decompressor()
for data in iter(lambda : zipfile.read(100 * 1024), b''):
new_file.write(decompressor.decompress(data))
os.remove(filename)
zipfile.close()
new_file.close()
print(os.path.splitext(filename)[0] + ' unzipped')
def catalogfiles(folderpath):
"""
make a catalog of the gridded files within a folderpath
folderpath: (dir) the folder of files to be catalogged, which have LAT and LONG_ as the last two filename features
"""
# read in downloaded files
temp = [eachfile for eachfile in os.listdir(folderpath) if not os.path.isdir(eachfile)]
if len(temp)==0:
# no files were available; setting default catalog output structure
catalog = pd.DataFrame([], columns=['filenames','LAT','LONG_'])
else:
# create the catalog dataframe and extract the filename components
catalog = pd.DataFrame(temp, columns=['filenames'])
catalog[['LAT','LONG_']] = catalog['filenames'].apply(lambda x: pd.Series(str(x).rsplit('_',2))[1:3]).astype(float)
# convert the filenames column to a filepath
catalog['filenames'] = catalog['filenames'].apply(lambda x: os.path.join(folderpath, x))
return(catalog)
def addCatalogToMap(outfilepath, maptable, folderpath, catalog_label):
"""
Update the mappingfile with a new column, a vector of filepaths for the downloaded files
outfilepath: (dir) the path for the output file
maptable: (dataframe) a dataframe containing the FID, LAT, LONG_, and ELEV information
folderpath: (dir) the folder of files to be catalogged, which have LAT and LONG_ as the last two filename features
catalog_label: (str) the preferred name for the series of catalogged filepaths
"""
# assert catalog_label as a string-object
catalog_label = str(catalog_label)
# catalog the folder directory
catalog = catalogfiles(folderpath).rename(columns={'filenames':catalog_label})
# drop existing column
if catalog_label in maptable.columns:
maptable = maptable.drop(labels=catalog_label, axis=1)
# update with a vector for the catalog of files
maptable = maptable.merge(catalog, on=['LAT','LONG_'], how='left')
# remove blocks, if they were needed
if 'blocks' in maptable.columns:
maptable = maptable.drop(labels=['blocks'], axis=1)
# write the updated mappingfile
maptable.to_csv(outfilepath, header=True, index=False)
# Wrapper scripts
def getDailyMET_livneh2013(homedir, mappingfile, subdir='livneh2013/Daily_MET_1915_2011/raw', catalog_label='dailymet_livneh2013'):
"""
Get the Livneh el al., 2013 Daily Meteorology files of interest using the reference mapping file
homedir: (dir) the home directory to be used for establishing subdirectories
mappingfile: (dir) the file path to the mappingfile, which contains the LAT, LONG_, and ELEV coordinates of interest
subdir: (dir) the subdirectory to be established under homedir
catalog_label: (str) the preferred name for the series of catalogged filepaths
"""
# check and generate DailyMET livneh 2013 data directory
filedir=os.path.join(homedir, subdir)
ensure_dir(filedir)
# generate table of lats and long coordinates
maptable = pd.read_csv(mappingfile)
# compile the longitude and latitude points
locations = compile_dailyMET_Livneh2013_locations(maptable)
# Download the files
ftp_download_p(locations)
# update the mappingfile with the file catalog
addCatalogToMap(outfilepath=mappingfile, maptable=maptable, folderpath=filedir, catalog_label=catalog_label)
# return to the home directory
os.chdir(homedir)
return(filedir)
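# A hedged usage sketch for the wrapper above (homedir and mappingfile are placeholders; the
# mappingfile is typically the csv produced by treatgeoself()):
# filedir = getDailyMET_livneh2013(homedir=os.getcwd(),
#                                  mappingfile=os.path.join(os.getcwd(), 'mappingfile.csv'))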
def getDailyMET_livneh2015(homedir, mappingfile, subdir='livneh2015/Daily_MET_1950_2013/raw', catalog_label='dailymet_livneh2015'):
"""
Get the Livneh el al., 2015 Daily Meteorology files of interest using the reference mapping file
homedir: (dir) the home directory to be used for establishing subdirectories
mappingfile: (dir) the file path to the mappingfile, which contains the LAT, LONG_, and ELEV coordinates of interest
subdir: (dir) the subdirectory to be established under homedir
catalog_label: (str) the preferred name for the series of catalogged filepaths
"""
# check and generate Daily MET livneh 2015 data directory
filedir=os.path.join(homedir, subdir)
ensure_dir(filedir)
# generate table of lats and long coordinates
maptable = pd.read_csv(mappingfile)
# compile the longitude and latitude points
locations = compile_dailyMET_Livneh2015_locations(maptable)
# Download the files
ftp_download_p(locations)
# update the mappingfile with the file catalog
addCatalogToMap(outfilepath=mappingfile, maptable=maptable, folderpath=filedir, catalog_label=catalog_label)
# return to the home directory
os.chdir(homedir)
return(filedir)
def getDailyMET_bcLivneh2013(homedir, mappingfile, subdir='livneh2013/Daily_MET_1915_2011/bc', catalog_label='dailymet_bclivneh2013'):
"""
Get the Livneh el al., 2013 bias corrected Daily Meteorology files of interest using the reference mapping file
homedir: (dir) the home directory to be used for establishing subdirectories
mappingfile: (dir) the file path to the mappingfile, which contains the LAT, LONG_, and ELEV coordinates of interest
subdir: (dir) the subdirectory to be established under homedir
catalog_label: (str) the preferred name for the series of catalogged filepaths
"""
# check and generate baseline_corrected livneh 2013 data directory
filedir=os.path.join(homedir, subdir)
ensure_dir(filedir)
# generate table of lats and long coordinates
maptable =
|
pd.read_csv(mappingfile)
|
pandas.read_csv
|
# Preprocessing
import os, matplotlib
if 'DISPLAY' not in os.environ:
matplotlib.use('Pdf')
import matplotlib.pyplot as plt
import pandas as pd
pd.set_option('display.max_rows', 50)
import numpy as np
import xgboost as xgb
import xgbfir
import pdb
import time
np.random.seed(1337)
def client_anaylsis():
"""
The idea here is to unify the client ID of several different customers to more broad categories.
"""
# clean duplicate spaces in client names
client_df = pd.read_csv("../data/cliente_tabla.csv.zip", compression="zip")
client_df["NombreCliente"] = client_df["NombreCliente"].str.lower()
client_df["NombreCliente"] = client_df["NombreCliente"].apply(lambda x: " ".join(x.split()))
client_df = client_df.drop_duplicates(subset="Cliente_ID")
special_list = ["^(yepas)\s.*", "^(oxxo)\s.*", "^(bodega\scomercial)\s.*", "^(bodega\saurrera)\s.*", "^(bodega)\s.*",
"^(woolwort|woolworth)\s.*", "^(zona\sexpress)\s.*",
"^(zacatecana)\s.*", "^(yza)\s.*",
"^(yanet)\s.*", "^(yak)\s.*",
"^(wings)\s.*", "^(wendy)\s.*", "^(walmart\ssuper)\s?.*", "^(waldos)\s.*",
"^(wal\smart)\s.*", "^(vulcanizadora)\s.*", "^(viveres\sy\sservicios)\s.*",
"^(vips)\s.*", "^(vinos\sy\slicores)\s.*", "^(tienda\ssuper\sprecio)\s.*",
"^(vinos\sy\sabarrotes)\s.*", "^(vinateria)\s.*", "^(video\sjuegos)\s.*", "^(universidad)\s.*",
"^(tiendas\stres\sb)\s.*", "^(toks)\s.*","^(tkt\ssix)\s.*",
"^(torteria)\s.*", "^(tortas)\s.*", "^(super\sbara)\s.*",
"^(tiendas\sde\ssuper\sprecio)\s.*", "^(ultramarinos)\s.*", "^(tortilleria)\s.*",
"^(tienda\sde\sservicio)\s.*", "^(super\sx)\s.*", "^(super\swillys)\s.*",
"^(super\ssanchez)\s.*", "^(super\sneto)\s.*", "^(super\skompras)\s.*",
"^(super\skiosco)\s.*", "^(super\sfarmacia)\s.*", "^(super\scarnes)\s.*",
"^(super\scarniceria)\s.*", "^(soriana)\s.*", "^(super\scenter)\s.*",
"^(solo\sun\sprecio)\s.*", "^(super\scity)\s.*", "^(super\sg)\s.*", "^(super\smercado)\s.*",
"^(sdn)\s.*", "^(sams\sclub)\s.*", "^(papeleria)\s.*", "^(multicinemas)\s.*",
"^(mz)\s.*", "^(motel)\s.*", "^(minisuper)\s.*", "^(mini\stienda)\s.*",
"^(mini\ssuper)\s.*", "^(mini\smarket)\s.*", "^(mini\sabarrotes)\s.*", "^(mi\sbodega)\s.*",
"^(merza|merzapack)\s.*", "^(mercado\ssoriana)\s.*", "^(mega\scomercial)\s.*",
"^(mc\sdonalds)\s.*", "^(mb)\s[^ex].*", "^(maquina\sfma)\s.*", "^(ley\sexpress)\s.*",
"^(lavamatica)\s.*", "^(kiosko)\s.*", "^(kesos\sy\skosas)\s.*", "^(issste)\s.*",
"^(hot\sdogs\sy\shamburguesas|)\s.*", "^(hamburguesas\sy\shot\sdogs)\s.*", "(hot\sdog)",
"^(hospital)\s.*", "^(hiper\ssoriana)\s.*", "^(super\sahorros)\s.*", "^(super\sabarrotes)\s.*",
"^(hambuerguesas|hamburguesas|hamburgesas)\s.*", "^(gran\sbodega)\s.*",
"^(gran\sd)\s.*", "^(go\smart)\s.*", "^(gasolinera)\s.*", "^(fundacion)\s.*",
"^(fruteria)\s.*", "^(frutas\sy\sverduras)\s.*", "^(frutas\sy\slegumbres)\s.*",
"^(frutas\sy\sabarrotes)\s.*", "^(fma)\s.*", "^(fiesta\sinn)\s.*", "^(ferreteria)\s.*",
"^(farmacon)\s.*", "^(farmacias)\s.*", "^(farmacia\syza)\s.*",
"^(farmacia\smoderna)\s.*", "^(farmacia\slopez)\s.*",
"^(farmacia\sissste)\s.*", "^(farmacia\sisseg)\s.*", "^(farmacia\sguadalajara)\s.*",
"^(farmacia\sesquivar)\s.*", "^(farmacia\scalderon)\s.*", "^(farmacia\sbenavides)\s.*",
"^(farmacia\sabc)\s.*", "^(farmacia)\s.*", "^(farm\sguadalajara)\s.*",
"^(facultad\sde)\s.*", "^(f\sgdl)\s.*", "^(expendio)\s.*", "^(expendio\sde\span)\s.*",
"^(expendio\sde\shuevo)\s.*", "^(expendio\sbimbo)\s.*", "^(expendedoras\sautomaticas)\s.*",
"^(estic)\s.*", "^(estancia\sinfantil)\s.*", "^(estacionamiento)\s.*", "^(estanquillo)\s.*",
"^(estacion\sde\sservicio)\s.*", "^(establecimientos?)\s.*",
"^(escuela\suniversidad|esc\suniversidad)\s.*", "^(escuela\stelesecundaria|esc\stelesecundaria)\s.*",
"^(escuela\stecnica|esc\stecnica)\s.*",
"^(escuela\ssuperior|esc\ssuperior)\s.*", "^(escuela\ssecundaria\stecnica|esc\ssecundaria\stecnica)\s.*",
"^(escuela\ssecundaria\sgeneral|esc\ssecundaria\sgeneral)\s.*",
"^(escuela\ssecundaria\sfederal|esc\ssecundaria\sfederal)\s.*",
"^(escuela\ssecundaria|esc\ssecundaria)\s.*", "^(escuela\sprimaria|esc\sprimaria)\s.*",
"^(escuela\spreparatoria|esc\spreparatoria)\s.*", "^(escuela\snormal|esc\snormal)\s.*",
"^(escuela\sinstituto|esc\sinstituto)\s.*", "^(esc\sprepa|esc\sprep)\s.*",
"^(escuela\scolegio|esc\scolegio)\s.*", "^(escuela|esc)\s.*", "^(dunosusa)\s.*",
"^(ferreteria)\s.*", "^(dulces)\s.*", "^(dulceria)\s.*", "^(dulce)\s.*", "^(distribuidora)\s.*",
"^(diconsa)\s.*", "^(deposito)\s.*", "^(del\srio)\s.*", "^(cyber)\s.*", "^(cremeria)\s.*",
"^(cosina\seconomica)\s.*", "^(copy).*", "^(consumo|consumos)\s.*","^(conalep)\s.*",
"^(comercializadora)\s.*", "^(comercial\ssuper\salianza)\s.*",
"^(comercial\smexicana)\s.*", "^(comedor)\s.*", "^(colegio\sde\sbachilleres)\s.*",
"^(colegio)\s.*", "^(coffe).*", "^(cocteleria|cockteleria)\s.*", "^(cocina\seconomica)\s.*",
"^(cocina)\s.*", "^(cobaev)\s.*", "^(cobaes)\s.*", "^(cobaeh)\s.*", "^(cobach)\s.*",
"^(club\sde\sgolf)\s.*", "^(club\scampestre)\s.*", "^(city\sclub)\s.*", "^(circulo\sk)\s.*",
"^(cinepolis)\s.*", "^(cinemex)\s.*", "^(cinemas)\s.*", "^(cinemark)\s.*", "^(ciber)\s.*",
"^(church|churchs)\s.*", "^(chilis)\s.*", "^(chiles\sy\ssemillas)\s.*", "^(chiles\ssecos)\s.*",
"^(chedraui)\s.*", "^(cetis)\s.*", "^(cervefrio)\s.*", "^(cervefiesta)\s.*",
"^(cerveceria)\s.*", "^(cervecentro)\s.*", "^(centro\sescolar)\s.*", "^(centro\seducativo)\s.*",
"^(centro\sde\sestudios)\s.*", "^(centro\scomercial)\s.*", "^(central\sde\sautobuses)\s.*",
"^(cecytem)\s.*", "^(cecytec)\s.*", "^(cecyte)\s.*", "^(cbtis)\s.*", "^(cbta)\s.*", "^(cbt)\s.*",
"^(caseta\stelefonica)\s.*", "^(caseta)\s.*", "^(casa\sley)\s.*", "^(casa\shernandez)\s.*",
"^(cartonero\scentral)\s.*", "^(carniceria)\s.*", "^(carne\smart)\s.*", "^(calimax)\s.*",
"^(cajero)\s.*", "^(cafeteria)\s.*", "^(cafe)\s.*", "^(burritos)\s.*",
"^(burguer\sking|burger\sking)\s.*", "^(bip)\s.*", "^(bimbo\sexpendio)\s.*",
"^(burguer|burger)\s.*", "^(ba.os)\s.*", "^(bae)\s.*", "^(bachilleres)\s.*", "^(bachillerato)\s.*",
"^(autosercivio|auto\sservicio)\s.*", "^(autolavado|auto\slavado)\s.*",
"^(autobuses\sla\spiedad|autobuses\sde\sla\piedad)\s.*", "^(arrachera)\s.*",
"^(alsuper\sstore)\s.*", "^(alsuper)\s.*", "^(academia)\s.*", "^(abts)\s.*",
"^(abarrotera\slagunitas)\s.*", "^(abarrotera)\s.*", "^(abarrotes\sy\svinos)\s.*",
"^(abarrotes\sy\sverduras)\s.*", "^(abarrotes\sy\ssemillas)\s.*",
"^(abarrotes\sy\spapeleria)\s.*", "^(abarrotes\sy\snovedades)\s.*", "^(abarrotes\sy\sfruteria)\s.*",
"^(abarrotes\sy\sdeposito)\s.*", "^(abarrotes\sy\scremeria)\s.*", "^(abarrotes\sy\scarniceria)\s.*",
"^(abarrotes\svinos\sy\slicores)\s.*", "^(abarrote|abarrotes|abarotes|abarr|aba|ab)\s.*",
"^(7\seleven)\s.*", "^(7\s24)\s.*"]
client_df["NombreCliente2"] = client_df["NombreCliente"]
for var in special_list:
client_df[var] = client_df["NombreCliente"].str.extract(var, expand=False).str.upper()
replace = client_df.loc[~client_df[var].isnull(), var]
client_df.loc[~client_df[var].isnull(),"NombreCliente2"] = replace
client_df.drop(var, axis=1, inplace=True)
client_df.drop("NombreCliente", axis=1, inplace=True)
client_df.to_csv("../data/cliente_tabla2.csv.gz", compression="gzip", index=False)
def client_anaylsis2():
"""
The idea here is to unify the client ID of several different customers to more broad categories in another
different way
"""
client_df = pd.read_csv("../data/cliente_tabla.csv.zip", compression="zip")
# clean duplicate spaces in client names
client_df["NombreCliente"] = client_df["NombreCliente"].str.upper()
client_df["NombreCliente"] = client_df["NombreCliente"].apply(lambda x: " ".join(x.split()))
client_df = client_df.drop_duplicates(subset="Cliente_ID")
# --- Begin Filtering for specific terms
# Note that the order of filtering is significant.
# For example:
# The regex of .*ERIA.* will assign "FRUITERIA" to 'Eatery' rather than 'Fresh Market'.
# In other words, the first filters to occur have a bigger priority.
def filter_specific(vf2):
# Known Large Company / Special Group Types
vf2['NombreCliente'] = vf2['NombreCliente'].str.replace('.*REMISION.*', 'Consignment')
vf2['NombreCliente'] = vf2['NombreCliente'].replace(['.*WAL MART.*', '.*SAMS CLUB.*'], 'Walmart', regex=True)
vf2['NombreCliente'] = vf2['NombreCliente'].str.replace('.*OXXO.*', 'Oxxo Store')
vf2['NombreCliente'] = vf2['NombreCliente'].str.replace('.*CONASUPO.*', 'Govt Store')
vf2['NombreCliente'] = vf2['NombreCliente'].str.replace('.*BIMBO.*', 'Bimbo Store')
# General term search for a random assortment of words I picked from looking at
# their frequency of appearance in the data and common spanish words for these categories
vf2['NombreCliente'] = vf2['NombreCliente'].replace(['.*COLEG.*', '.*UNIV.*', '.*ESCU.*', '.*INSTI.*', \
'.*PREPAR.*'], 'School', regex=True)
vf2['NombreCliente'] = vf2['NombreCliente'].str.replace('.*PUESTO.*', 'Post')
vf2['NombreCliente'] = vf2['NombreCliente'].replace(['.*FARMA.*', '.*HOSPITAL.*', '.*CLINI.*', '.*BOTICA.*'],
'Hospital/Pharmacy', regex=True)
vf2['NombreCliente'] = vf2['NombreCliente'].replace(['.*CAFE.*', '.*CREMERIA.*', '.*DULCERIA.*', \
'.*REST.*', '.*BURGER.*', '.*TACO.*', '.*TORTA.*', \
                                                             '.*TAQUER.*', '.*HOT DOG.*', '.*PIZZA.*', \
'.*COMEDOR.*', '.*ERIA.*', '.*BURGU.*'], 'Eatery',
regex=True)
vf2['NombreCliente'] = vf2['NombreCliente'].str.replace('.*SUPER.*', 'Supermarket')
vf2['NombreCliente'] = vf2['NombreCliente'].replace(['.*COMERCIAL.*', '.*BODEGA.*', '.*DEPOSITO.*', \
'.*ABARROTES.*', '.*MERCADO.*', '.*CAMBIO.*', \
'.*MARKET.*', '.*MART .*', '.*MINI .*', \
'.*PLAZA.*', '.*MISC.*', '.*ELEVEN.*', '.*EXP.*', \
'.*SNACK.*', '.*PAPELERIA.*', '.*CARNICERIA.*', \
'.*LOCAL.*', '.*COMODIN.*', '.*PROVIDENCIA.*'
], 'General Market/Mart' \
, regex=True)
vf2['NombreCliente'] = vf2['NombreCliente'].replace(['.*VERDU.*', '.*FRUT.*'], 'Fresh Market', regex=True)
vf2['NombreCliente'] = vf2['NombreCliente'].replace(['.*HOTEL.*', '.*MOTEL.*', ".*CASA.*"], 'Hotel', regex=True)
filter_specific(client_df)
# --- Begin filtering for more general terms
# The idea here is to look for names with particles of speech that would
# not appear in a person's name.
# i.e. "Individuals" should not contain any participles or numbers in their names.
def filter_participle(vf2):
vf2['NombreCliente'] = vf2['NombreCliente'].replace([
'.*LA .*', '.*EL .*', '.*DE .*', '.*LOS .*', '.*DEL .*', '.*Y .*', '.*SAN .*', '.*SANTA .*', \
'.*AG .*', '.*LAS .*', '.*MI .*', '.*MA .*', '.*II.*', '.*[0-9]+.*' \
], 'Small Franchise', regex=True)
filter_participle(client_df)
# Any remaining entries should be "Individual" Named Clients, there are some outliers.
# More specific filters could be used in order to reduce the percentage of outliers in this final set.
def filter_remaining(vf2):
def function_word(data):
# Avoid the single-words created so far by checking for upper-case
if (data.isupper()) and (data != "NO IDENTIFICADO"):
return 'Individual'
else:
return data
vf2['NombreCliente'] = vf2['NombreCliente'].map(function_word)
filter_remaining(client_df)
client_df.rename(columns={"NombreCliente": "client_name3"}, inplace=True)
client_df.to_csv("../data/cliente_tabla3.csv.gz", compression="gzip", index=False)
def preprocess(save=False):
start = time.time()
dtype_dict = {"Semana": np.uint8, 'Agencia_ID': np.uint16, 'Canal_ID': np.uint8,
'Ruta_SAK': np.uint16, 'Cliente_ID': np.uint32, 'Producto_ID': np.uint16,
'Demanda_uni_equil': np.uint32, "Venta_hoy": np.float32, "Venta_uni_hoy": np.uint32,
"Dev_uni_proxima": np.uint32, "Dev_proxima": np.float32}
train = pd.read_csv("../data/train.csv.zip", compression="zip", dtype=dtype_dict)
test = pd.read_csv("../data/test.csv.zip", compression="zip", dtype=dtype_dict)
# train = train.sample(100000)
# test = test.sample(100000)
# We calculate out-of-sample mean features from most of the training data and only train from the samples in week 9.
# Out-of-sample mean features for training are calculated from all weeks before week 9 and for the test set from
# all weeks including week 9
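    # (keeping the week-9 rows out of their own mean features avoids leaking the week-9 target
    # into the training features; the test weeks carry no target, so their means can safely use
    # every available training week)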
mean_dataframes = {}
mean_dataframes["train"] = train[train["Semana"]<9].copy()
mean_dataframes["test"] = train.copy()
print("complete train obs: {}".format(len(train)))
print("train week 9 obs: {}".format(len(train[train["Semana"] == 9])))
train = train[train["Semana"] == 9]
# not used in later stages. Was used to find the right hyperparameters for XGBoost. After finding them and to
# obtain the best solution the evaluation data was incorporated into the training data and the hyperparameters
# were used "blindly"
# eval = train.iloc[int(len(train) * 0.75):, :].copy()
# print("eval obs: {}".format(len(eval)))
# mean_dataframes["eval"] = mean_dataframes["test"].iloc[:eval.index.min(), :].copy()
# train = train.iloc[:int(len(train) * 0.75), :]
# print("train obs: {}".format(len(train)))
# read data files and create new client ids
town = pd.read_csv("../data/town_state.csv.zip", compression="zip")
product = pd.read_csv("../data/producto_tabla.csv.zip", compression="zip")
client =
|
pd.read_csv("../data/cliente_tabla.csv.zip", compression="zip")
|
pandas.read_csv
|
import geopandas as gpd
import pandas as pd
import numpy as np
shapefile = 'ne_110m_admin_0_countries.shp'
"""
Finds all the data for a given year, along with country shapes
@:param year: the year for which to get the awards
"""
def get_data(year):
global shapefile
gdf = gpd.read_file(shapefile)[['ADMIN', 'ADM0_A3', 'geometry']]
# print("In plot_data")
# Rename columns
gdf.columns = ['country', 'country_code', 'geometry']
# ACTOR AWARDS
datafile1berlin = './data/berlin_best_actor.csv'
# Read csv file using pandas
df1berlin = pd.read_csv(datafile1berlin, sep=',', names=['Year', 'Actor', 'Countries'], skiprows=1)
df1berlin = df1berlin.loc[df1berlin['Year'] == year]
datafile1cannes = './data/cannes_best_actor.csv'
# Read csv file using pandas
df1cannes = pd.read_csv(datafile1cannes, sep=',', names=['Year', 'Actor', 'Countries'], skiprows=1)
df1cannes = df1cannes.loc[df1cannes['Year'] == year]
datafile1venice = './data/venice_best_actor.csv'
# Read csv file using pandas
df1venice = pd.read_csv(datafile1venice, sep=',', names=['Year', 'Actor', 'Countries'], skiprows=1)
df1venice = df1venice.loc[df1venice['Year'] == year]
df1 = df1berlin.append(df1cannes, sort=False).append(df1venice, sort=False)
# ACTRESS AWARDS
datafile2berlin = './data/berlin_best_actress.csv'
# Read csv file using pandas
df2berlin = pd.read_csv(datafile2berlin, sep=',', names=['Year', 'Actress', 'Countries'], skiprows=1)
df2berlin = df2berlin.loc[df2berlin['Year'] == year]
datafile2cannes = './data/cannes_best_actress.csv'
# Read csv file using pandas
df2cannes = pd.read_csv(datafile2cannes, sep=',', names=['Year', 'Actress', 'Countries'], skiprows=1)
df2cannes = df2cannes.loc[df2cannes['Year'] == year]
datafile2venice = './data/venice_best_actress.csv'
# Read csv file using pandas
df2venice = pd.read_csv(datafile2venice, sep=',', names=['Year', 'Actress', 'Countries'], skiprows=1)
df2venice = df2venice.loc[df2venice['Year'] == year]
df2 = df2berlin.append(df2cannes, sort=False).append(df2venice, sort=False)
# DIRECTOR AWARDS
datafile3berlin = './data/berlin_best_director.csv'
# Read csv file using pandas
df3berlin = pd.read_csv(datafile3berlin, sep=',', names=['Year', 'Director', 'Countries'], skiprows=1)
df3berlin = df3berlin.loc[df3berlin['Year'] == year]
datafile3cannes = './data/cannes_best_director.csv'
# Read csv file using pandas
df3cannes = pd.read_csv(datafile3cannes, sep=',', names=['Year', 'Director', 'Countries'], skiprows=1)
df3cannes = df3cannes.loc[df3cannes['Year'] == year]
datafile3venice = './data/venice_best_director.csv'
# Read csv file using pandas
df3venice = pd.read_csv(datafile3venice, sep=',', names=['Year', 'Director', 'Countries'], skiprows=1)
df3venice = df3venice.loc[df3venice['Year'] == year]
df3 = df3berlin.append(df3cannes, sort=False).append(df3venice, sort=False)
# FILM AWARDS
datafile4berlin = './data/berlin_best_film.csv'
# Read csv file using pandas
df4berlin =
|
pd.read_csv(datafile4berlin, sep=',', names=['Year', 'Movie', 'Countries'], skiprows=1)
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 13 15:21:55 2019
@author: raryapratama
"""
#%%
#Step (1): Import Python libraries, set land conversion scenarios general parameters
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import quad
import seaborn as sns
import pandas as pd
#PF_PO Scenario
##Set parameters
#Parameters for primary forest
initAGB = 233 #source: van Beijma et al. (2018)
initAGB_min = 233-72
initAGB_max = 233 + 72
#parameters for oil palm plantation. Source: Khasanah et al. (2015)
tf_palmoil = 26
a_nucleus = 2.8167
b_nucleus = 6.8648
a_plasma = 2.5449
b_plasma = 5.0007
c_cont_po_nucleus = 0.5448 #carbon content in biomass
c_cont_po_plasma = 0.5454
tf = 201
a = 0.082
b = 2.53
#%%
#Step (2_1): C loss from the harvesting/clear cut
df1nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1nu')
df1pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1pl')
df3nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Enu')
df3pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Epl')
t = range(0,tf,1)
c_firewood_energy_S1nu = df1nu['Firewood_other_energy_use'].values
c_firewood_energy_S1pl = df1pl['Firewood_other_energy_use'].values
c_firewood_energy_Enu = df3nu['Firewood_other_energy_use'].values
c_firewood_energy_Epl = df3pl['Firewood_other_energy_use'].values
#%%
#Step (2_2): C loss from the harvesting/clear cut as wood pellets
dfEnu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Enu')
dfEpl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Epl')
c_pellets_Enu = dfEnu['Wood_pellets'].values
c_pellets_Epl = dfEpl['Wood_pellets'].values
#%%
#Step (3): Aboveground biomass (AGB) decomposition
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1nu')
tf = 201
t = np.arange(tf)
def decomp(t,remainAGB):
return (1-(1-np.exp(-a*t))**b)*remainAGB
#set zero matrix
output_decomp = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part in enumerate(df['C_remainAGB'].values):
#print(i,remain_part)
output_decomp[i:,i] = decomp(t[:len(t)-i],remain_part)
print(output_decomp[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix = np.zeros((len(t)-1,len(df['C_remainAGB'].values-1)))
i = 0
while i < tf:
subs_matrix[:,i] = np.diff(output_decomp[:,i])
i = i + 1
print(subs_matrix[:,:4])
print(len(subs_matrix))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix = subs_matrix.clip(max=0)
print(subs_matrix[:,:4])
#make the results as absolute values
subs_matrix = abs(subs_matrix)
print(subs_matrix[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix)
subs_matrix = np.vstack((zero_matrix, subs_matrix))
print(subs_matrix[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot = (tf,1)
decomp_emissions = np.zeros(matrix_tot)
i = 0
while i < tf:
decomp_emissions[:,0] = decomp_emissions[:,0] + subs_matrix[:,i]
i = i + 1
print(decomp_emissions[:,0])
#%%
#Step (4): Dynamic stock model of in-use wood materials
from dynamic_stock_model import DynamicStockModel
df1nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1nu')
df1pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1pl')
df3nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Enu')
df3pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Epl')
#product lifetime
#building materials
B = 35
TestDSM1nu = DynamicStockModel(t = df1nu['Year'].values, i = df1nu['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
TestDSM1pl = DynamicStockModel(t = df1pl['Year'].values, i = df1pl['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
TestDSM3nu = DynamicStockModel(t = df3nu['Year'].values, i = df3nu['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
TestDSM3pl = DynamicStockModel(t = df3pl['Year'].values, i = df3pl['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
CheckStr1nu, ExitFlag1nu = TestDSM1nu.dimension_check()
CheckStr1pl, ExitFlag1pl = TestDSM1pl.dimension_check()
CheckStr3nu, ExitFlag3nu = TestDSM3nu.dimension_check()
CheckStr3pl, ExitFlag3pl = TestDSM3pl.dimension_check()
Stock_by_cohort1nu, ExitFlag1nu = TestDSM1nu.compute_s_c_inflow_driven()
Stock_by_cohort1pl, ExitFlag1pl = TestDSM1pl.compute_s_c_inflow_driven()
Stock_by_cohort3nu, ExitFlag3nu = TestDSM3nu.compute_s_c_inflow_driven()
Stock_by_cohort3pl, ExitFlag3pl = TestDSM3pl.compute_s_c_inflow_driven()
S1nu, ExitFlag1nu = TestDSM1nu.compute_stock_total()
S1pl, ExitFlag1pl = TestDSM1pl.compute_stock_total()
S3nu, ExitFlag3nu = TestDSM3nu.compute_stock_total()
S3pl, ExitFlag3pl = TestDSM3pl.compute_stock_total()
O_C1nu, ExitFlag1nu = TestDSM1nu.compute_o_c_from_s_c()
O_C1pl, ExitFlag1pl = TestDSM1pl.compute_o_c_from_s_c()
O_C3nu, ExitFlag3nu = TestDSM3nu.compute_o_c_from_s_c()
O_C3pl, ExitFlag3pl = TestDSM3pl.compute_o_c_from_s_c()
O1nu, ExitFlag1nu = TestDSM1nu.compute_outflow_total()
O1pl, ExitFlag1pl = TestDSM1pl.compute_outflow_total()
O3nu, ExitFlag3nu = TestDSM3nu.compute_outflow_total()
O3pl, ExitFlag3pl = TestDSM3pl.compute_outflow_total()
DS1nu, ExitFlag1nu = TestDSM1nu.compute_stock_change()
DS1pl, ExitFlag1pl = TestDSM1pl.compute_stock_change()
DS3nu, ExitFlag3nu = TestDSM3nu.compute_stock_change()
DS3pl, ExitFlag3pl = TestDSM3pl.compute_stock_change()
Bal1nu, ExitFlag1nu = TestDSM1nu.check_stock_balance()
Bal1pl, ExitFlag1pl = TestDSM1pl.check_stock_balance()
Bal3nu, ExitFlag3nu = TestDSM3nu.check_stock_balance()
Bal3pl, ExitFlag3pl = TestDSM3pl.check_stock_balance()
#print output flow
print(TestDSM1nu.o)
print(TestDSM1pl.o)
print(TestDSM3nu.o)
print(TestDSM3pl.o)
#plt.plot(TestDSM1.s)
#plt.xlim([0, 100])
#plt.ylim([0,50])
#plt.show()
#%%
#Step (5): Biomass growth
A = range(0,tf_palmoil,1)
#calculate the biomass and carbon content of palm oil trees over time
def Y_nucleus(A):
return (44/12*1000*c_cont_po_nucleus*(a_nucleus*A + b_nucleus))
output_Y_nucleus = np.array([Y_nucleus(Ai) for Ai in A])
print(output_Y_nucleus)
def Y_plasma(A):
return (44/12*1000*c_cont_po_plasma*(a_plasma*A + b_plasma))
output_Y_plasma = np.array([Y_plasma(Ai) for Ai in A])
print(output_Y_plasma)
##8 times 25-year cycle of new AGB of oil palm, one year gap between the cycle
#nucleus
counter = range(0,8,1)
y_nucleus = []
for i in counter:
y_nucleus.append(output_Y_nucleus)
flat_list_nucleus = []
for sublist in y_nucleus:
for item in sublist:
flat_list_nucleus.append(item)
#the length of the list is now 208, so we remove the last 7 elements of the list to make the len=tf
flat_list_nucleus = flat_list_nucleus[:len(flat_list_nucleus)-7]
#plasma
y_plasma = []
for i in counter:
y_plasma.append(output_Y_plasma)
flat_list_plasma = []
for sublist in y_plasma:
for item in sublist:
flat_list_plasma.append(item)
#the length of the list is now 208, so we remove the last 7 elements of the list to make the len=tf
flat_list_plasma = flat_list_plasma[:len(flat_list_plasma)-7]
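#(equivalently, the repeated cycles could be built with numpy as a cross-check of the loops above,
# e.g. np.tile(output_Y_nucleus, 8)[:tf] reproduces flat_list_nucleus before the differencing step)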
#plotting
t = range (0,tf,1)
plt.xlim([0, 200])
plt.plot(t, flat_list_nucleus)
plt.plot(t, flat_list_plasma, color='seagreen')
plt.fill_between(t, flat_list_nucleus, flat_list_plasma, color='darkseagreen', alpha=0.4)
plt.xlabel('Time (year)')
plt.ylabel('AGB (tCO2-eq/ha)')
plt.show()
###Yearly Sequestration
###Nucleus
#find the yearly sequestration by calculating the differences between elements in list 'flat_list_nucleus(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
flat_list_nucleus = [p - q for q, p in zip(flat_list_nucleus, flat_list_nucleus[1:])]
#since there is no sequestration between the replanting year (e.g., year 25 to 26), we have to replace negative numbers in 'flat_list_nuclues' with 0 values
flat_list_nucleus = [0 if i < 0 else i for i in flat_list_nucleus]
#insert 0 value to the list as the first element, because there is no sequestration in year 0
var = 0
flat_list_nucleus.insert(0,var)
#make 'flat_list_nucleus' elements negative numbers to denote sequestration
flat_list_nucleus = [ -x for x in flat_list_nucleus]
print(flat_list_nucleus)
#Plasma
#find the yearly sequestration by calculating the differences between elements in list 'flat_list_plasma(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
flat_list_plasma = [t - u for u, t in zip(flat_list_plasma, flat_list_plasma[1:])]
#since there is no sequestration between the replanting year (e.g., year 25 to 26), we have to replace negative numbers in 'flat_list_plasma' with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
flat_list_plasma = [0 if i < 0 else i for i in flat_list_plasma]
#insert 0 value to the list as the first element, because there is no sequestration in year 0
var = 0
flat_list_plasma.insert(0,var)
#make 'flat_list_plasma' elements negative numbers to denote sequestration
flat_list_plasma = [ -x for x in flat_list_plasma]
print(flat_list_plasma)
#%%
#Step(6): post-harvest processing of wood/palm oil
#post-harvest wood processing
df1nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1nu')
df1pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1pl')
dfEnu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Enu')
dfEpl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Epl')
t = range(0,tf,1)
PH_Emissions_HWP_S1nu = df1nu['PH_Emissions_HWP'].values
PH_Emissions_HWP_S1pl = df1pl['PH_Emissions_HWP'].values
PH_Emissions_HWP_Enu = dfEnu['PH_Emissions_HWP'].values
PH_Emissions_HWP_Epl = dfEpl['PH_Emissions_HWP'].values
#post-harvest palm oil processing
df1nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1nu')
df1pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1pl')
dfEnu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Enu')
dfEpl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Epl')
t = range(0,tf,1)
PH_Emissions_PO_S1nu = df1nu['PH_Emissions_PO'].values
PH_Emissions_PO_S1pl = df1pl['PH_Emissions_PO'].values
PH_Emissions_PO_Enu = dfEnu['PH_Emissions_PO'].values
PH_Emissions_PO_Epl = dfEpl['PH_Emissions_PO'].values
#%%
#Step (7_1): landfill gas decomposition (CH4)
#CH4 decomposition
hl = 20 #half-life
k = (np.log(2))/hl
#S1nu
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1nu')
tf = 201
t = np.arange(tf)
def decomp_CH4_S1nu(t,remainAGB_CH4_S1nu):
return (1-(1-np.exp(-k*t)))*remainAGB_CH4_S1nu
#set zero matrix
output_decomp_CH4_S1nu = np.zeros((len(t),len(df['Landfill_decomp_CH4'].values)))
for i,remain_part_CH4_S1nu in enumerate(df['Landfill_decomp_CH4'].values):
#print(i,remain_part)
output_decomp_CH4_S1nu[i:,i] = decomp_CH4_S1nu(t[:len(t)-i],remain_part_CH4_S1nu)
print(output_decomp_CH4_S1nu[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CH4_S1nu = np.zeros((len(t)-1,len(df['Landfill_decomp_CH4'].values-1)))
i = 0
while i < tf:
subs_matrix_CH4_S1nu[:,i] = np.diff(output_decomp_CH4_S1nu[:,i])
i = i + 1
print(subs_matrix_CH4_S1nu[:,:4])
print(len(subs_matrix_CH4_S1nu))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CH4_S1nu = subs_matrix_CH4_S1nu.clip(max=0)
print(subs_matrix_CH4_S1nu[:,:4])
#make the results as absolute values
subs_matrix_CH4_S1nu = abs(subs_matrix_CH4_S1nu)
print(subs_matrix_CH4_S1nu[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CH4_S1nu = np.zeros((len(t)-200,len(df['Landfill_decomp_CH4'].values)))
print(zero_matrix_CH4_S1nu)
subs_matrix_CH4_S1nu = np.vstack((zero_matrix_CH4_S1nu, subs_matrix_CH4_S1nu))
print(subs_matrix_CH4_S1nu[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CH4_S1nu = (tf,1)
decomp_tot_CH4_S1nu = np.zeros(matrix_tot_CH4_S1nu)
i = 0
while i < tf:
decomp_tot_CH4_S1nu[:,0] = decomp_tot_CH4_S1nu[:,0] + subs_matrix_CH4_S1nu[:,i]
i = i + 1
print(decomp_tot_CH4_S1nu[:,0])
#S1pl
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1pl')
tf = 201
t = np.arange(tf)
def decomp_CH4_S1pl(t,remainAGB_CH4_S1pl):
return (1-(1-np.exp(-k*t)))*remainAGB_CH4_S1pl
#set zero matrix
output_decomp_CH4_S1pl = np.zeros((len(t),len(df['Landfill_decomp_CH4'].values)))
for i,remain_part_CH4_S1pl in enumerate(df['Landfill_decomp_CH4'].values):
#print(i,remain_part)
output_decomp_CH4_S1pl[i:,i] = decomp_CH4_S1pl(t[:len(t)-i],remain_part_CH4_S1pl)
print(output_decomp_CH4_S1pl[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CH4_S1pl = np.zeros((len(t)-1,len(df['Landfill_decomp_CH4'].values-1)))
i = 0
while i < tf:
subs_matrix_CH4_S1pl[:,i] = np.diff(output_decomp_CH4_S1pl[:,i])
i = i + 1
print(subs_matrix_CH4_S1pl[:,:4])
print(len(subs_matrix_CH4_S1pl))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CH4_S1pl = subs_matrix_CH4_S1pl.clip(max=0)
print(subs_matrix_CH4_S1pl[:,:4])
#make the results as absolute values
subs_matrix_CH4_S1pl= abs(subs_matrix_CH4_S1pl)
print(subs_matrix_CH4_S1pl[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CH4_S1pl = np.zeros((len(t)-200,len(df['Landfill_decomp_CH4'].values)))
print(zero_matrix_CH4_S1pl)
subs_matrix_CH4_S1pl = np.vstack((zero_matrix_CH4_S1pl, subs_matrix_CH4_S1pl))
print(subs_matrix_CH4_S1pl[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CH4_S1pl = (tf,1)
decomp_tot_CH4_S1pl = np.zeros(matrix_tot_CH4_S1pl)
i = 0
while i < tf:
decomp_tot_CH4_S1pl[:,0] = decomp_tot_CH4_S1pl[:,0] + subs_matrix_CH4_S1pl[:,i]
i = i + 1
print(decomp_tot_CH4_S1pl[:,0])
#Enu
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Enu')
tf = 201
t = np.arange(tf)
def decomp_CH4_Enu(t,remainAGB_CH4_Enu):
return (1-(1-np.exp(-k*t)))*remainAGB_CH4_Enu
#set zero matrix
output_decomp_CH4_Enu = np.zeros((len(t),len(df['Landfill_decomp_CH4'].values)))
for i,remain_part_CH4_Enu in enumerate(df['Landfill_decomp_CH4'].values):
#print(i,remain_part)
output_decomp_CH4_Enu[i:,i] = decomp_CH4_Enu(t[:len(t)-i],remain_part_CH4_Enu)
print(output_decomp_CH4_Enu[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CH4_Enu = np.zeros((len(t)-1,len(df['Landfill_decomp_CH4'].values-1)))
i = 0
while i < tf:
subs_matrix_CH4_Enu[:,i] = np.diff(output_decomp_CH4_Enu[:,i])
i = i + 1
print(subs_matrix_CH4_Enu[:,:4])
print(len(subs_matrix_CH4_Enu))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CH4_Enu = subs_matrix_CH4_Enu.clip(max=0)
print(subs_matrix_CH4_Enu[:,:4])
#take the absolute values of the differences
subs_matrix_CH4_Enu = abs(subs_matrix_CH4_Enu)
print(subs_matrix_CH4_Enu[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CH4_Enu = np.zeros((len(t)-200,len(df['Landfill_decomp_CH4'].values)))
print(zero_matrix_CH4_Enu)
subs_matrix_CH4_Enu = np.vstack((zero_matrix_CH4_Enu, subs_matrix_CH4_Enu))
print(subs_matrix_CH4_Enu[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CH4_Enu = (tf,1)
decomp_tot_CH4_Enu= np.zeros(matrix_tot_CH4_Enu)
i = 0
while i < tf:
decomp_tot_CH4_Enu[:,0] = decomp_tot_CH4_Enu[:,0] + subs_matrix_CH4_Enu[:,i]
i = i + 1
print(decomp_tot_CH4_Enu[:,0])
#Epl
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Epl')
tf = 201
t = np.arange(tf)
def decomp_CH4_Epl(t,remainAGB_CH4_Epl):
return (1-(1-np.exp(-k*t)))*remainAGB_CH4_Epl
#set zero matrix
output_decomp_CH4_Epl = np.zeros((len(t),len(df['Landfill_decomp_CH4'].values)))
for i,remain_part_CH4_Epl in enumerate(df['Landfill_decomp_CH4'].values):
#print(i,remain_part)
output_decomp_CH4_Epl[i:,i] = decomp_CH4_Epl(t[:len(t)-i],remain_part_CH4_Epl)
print(output_decomp_CH4_Epl[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CH4_Epl = np.zeros((len(t)-1,len(df['Landfill_decomp_CH4'].values-1)))
i = 0
while i < tf:
subs_matrix_CH4_Epl[:,i] = np.diff(output_decomp_CH4_Epl[:,i])
i = i + 1
print(subs_matrix_CH4_Epl[:,:4])
print(len(subs_matrix_CH4_Epl))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CH4_Epl = subs_matrix_CH4_Epl.clip(max=0)
print(subs_matrix_CH4_Epl[:,:4])
#take the absolute values of the differences
subs_matrix_CH4_Epl = abs(subs_matrix_CH4_Epl)
print(subs_matrix_CH4_Epl[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CH4_Epl = np.zeros((len(t)-200,len(df['Landfill_decomp_CH4'].values)))
print(zero_matrix_CH4_Epl)
subs_matrix_CH4_Epl = np.vstack((zero_matrix_CH4_Epl, subs_matrix_CH4_Epl))
print(subs_matrix_CH4_Epl[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CH4_Epl = (tf,1)
decomp_tot_CH4_Epl = np.zeros(matrix_tot_CH4_Epl)
i = 0
while i < tf:
decomp_tot_CH4_Epl[:,0] = decomp_tot_CH4_Epl[:,0] + subs_matrix_CH4_Epl[:,i]
i = i + 1
print(decomp_tot_CH4_Epl[:,0])
#plotting
t = np.arange(0,tf)
plt.plot(t,decomp_tot_CH4_S1nu,label='CH4_S1nu')
plt.plot(t,decomp_tot_CH4_S1pl,label='CH4_S1pl')
plt.plot(t,decomp_tot_CH4_Enu,label='CH4_Enu')
plt.plot(t,decomp_tot_CH4_Epl,label='CH4_Epl')
plt.xlim(0,200)
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
plt.show()
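#NOTE: the scenario blocks above all repeat the same decay -> diff -> clip -> abs -> pad -> sum
#pipeline. Below is a minimal sketch of a reusable helper for that pipeline; the function name and
#signature are illustrative assumptions, and it relies on numpy (np) imported earlier in this script.
def landfill_decomp_profile(remain_values, k, tf=201):
    t = np.arange(tf)
    #first-order decay of every yearly input, staggered so column i starts in year i
    output = np.zeros((tf, len(remain_values)))
    for i, remain in enumerate(remain_values):
        output[i:, i] = np.exp(-k * t[:tf - i]) * remain
    #yearly releases are the negative year-to-year differences, made positive
    subs = np.abs(np.diff(output, axis=0).clip(max=0))
    #pad one zero row so year 0 has no release, then sum across all yearly inputs
    subs = np.vstack((np.zeros((1, len(remain_values))), subs))
    return subs.sum(axis=1)
#e.g. landfill_decomp_profile(df['Landfill_decomp_CH4'].values, k) should reproduce
#decomp_tot_CH4_Epl[:,0] up to floating-point rounding (1-(1-np.exp(-k*t)) simplifies to
#np.exp(-k*t)), since df still holds the 'PF_PO_Epl' sheet at this point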
#%%
#Step (7_2): landfill gas decomposition (CO2)
#CO2 decomposition
hl = 20 #half-life
k = (np.log(2))/hl
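#with hl = 20, np.exp(-k*20) = 0.5, i.e. half of the carbon remains after one half-life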
#S1nu
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1nu')
tf = 201
t = np.arange(tf)
def decomp_CO2_S1nu(t,remainAGB_CO2_S1nu):
return (1-(1-np.exp(-k*t)))*remainAGB_CO2_S1nu
#set zero matrix
output_decomp_CO2_S1nu = np.zeros((len(t),len(df['Landfill_decomp_CO2'].values)))
for i,remain_part_CO2_S1nu in enumerate(df['Landfill_decomp_CO2'].values):
#print(i,remain_part)
output_decomp_CO2_S1nu[i:,i] = decomp_CO2_S1nu(t[:len(t)-i],remain_part_CO2_S1nu)
print(output_decomp_CO2_S1nu[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CO2_S1nu = np.zeros((len(t)-1,len(df['Landfill_decomp_CO2'].values-1)))
i = 0
while i < tf:
subs_matrix_CO2_S1nu[:,i] = np.diff(output_decomp_CO2_S1nu[:,i])
i = i + 1
print(subs_matrix_CO2_S1nu[:,:4])
print(len(subs_matrix_CO2_S1nu))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CO2_S1nu = subs_matrix_CO2_S1nu.clip(max=0)
print(subs_matrix_CO2_S1nu[:,:4])
#take the absolute values of the differences
subs_matrix_CO2_S1nu = abs(subs_matrix_CO2_S1nu)
print(subs_matrix_CO2_S1nu[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CO2_S1nu = np.zeros((len(t)-200,len(df['Landfill_decomp_CO2'].values)))
print(zero_matrix_CO2_S1nu)
subs_matrix_CO2_S1nu = np.vstack((zero_matrix_CO2_S1nu, subs_matrix_CO2_S1nu))
print(subs_matrix_CO2_S1nu[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CO2_S1nu = (tf,1)
decomp_tot_CO2_S1nu = np.zeros(matrix_tot_CO2_S1nu)
i = 0
while i < tf:
decomp_tot_CO2_S1nu[:,0] = decomp_tot_CO2_S1nu[:,0] + subs_matrix_CO2_S1nu[:,i]
i = i + 1
print(decomp_tot_CO2_S1nu[:,0])
#S1pl
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1pl')
tf = 201
t = np.arange(tf)
def decomp_CO2_S1pl(t,remainAGB_CO2_S1pl):
return (1-(1-np.exp(-k*t)))*remainAGB_CO2_S1pl
#set zero matrix
output_decomp_CO2_S1pl = np.zeros((len(t),len(df['Landfill_decomp_CO2'].values)))
for i,remain_part_CO2_S1pl in enumerate(df['Landfill_decomp_CO2'].values):
#print(i,remain_part)
output_decomp_CO2_S1pl[i:,i] = decomp_CO2_S1pl(t[:len(t)-i],remain_part_CO2_S1pl)
print(output_decomp_CO2_S1pl[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CO2_S1pl = np.zeros((len(t)-1,len(df['Landfill_decomp_CO2'].values-1)))
i = 0
while i < tf:
subs_matrix_CO2_S1pl[:,i] = np.diff(output_decomp_CO2_S1pl[:,i])
i = i + 1
print(subs_matrix_CO2_S1pl[:,:4])
print(len(subs_matrix_CO2_S1pl))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CO2_S1pl = subs_matrix_CO2_S1pl.clip(max=0)
print(subs_matrix_CO2_S1pl[:,:4])
#take the absolute values of the differences
subs_matrix_CO2_S1pl= abs(subs_matrix_CO2_S1pl)
print(subs_matrix_CO2_S1pl[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CO2_S1pl = np.zeros((len(t)-200,len(df['Landfill_decomp_CO2'].values)))
print(zero_matrix_CO2_S1pl)
subs_matrix_CO2_S1pl = np.vstack((zero_matrix_CO2_S1pl, subs_matrix_CO2_S1pl))
print(subs_matrix_CO2_S1pl[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CO2_S1pl = (tf,1)
decomp_tot_CO2_S1pl = np.zeros(matrix_tot_CO2_S1pl)
i = 0
while i < tf:
decomp_tot_CO2_S1pl[:,0] = decomp_tot_CO2_S1pl[:,0] + subs_matrix_CO2_S1pl[:,i]
i = i + 1
print(decomp_tot_CO2_S1pl[:,0])
#Enu
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Enu')
tf = 201
t = np.arange(tf)
def decomp_CO2_Enu(t,remainAGB_CO2_Enu):
return (1-(1-np.exp(-k*t)))*remainAGB_CO2_Enu
#set zero matrix
output_decomp_CO2_Enu = np.zeros((len(t),len(df['Landfill_decomp_CO2'].values)))
for i,remain_part_CO2_Enu in enumerate(df['Landfill_decomp_CO2'].values):
#print(i,remain_part)
output_decomp_CO2_Enu[i:,i] = decomp_CO2_Enu(t[:len(t)-i],remain_part_CO2_Enu)
print(output_decomp_CO2_Enu[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CO2_Enu = np.zeros((len(t)-1,len(df['Landfill_decomp_CO2'].values-1)))
i = 0
while i < tf:
subs_matrix_CO2_Enu[:,i] = np.diff(output_decomp_CO2_Enu[:,i])
i = i + 1
print(subs_matrix_CO2_Enu[:,:4])
print(len(subs_matrix_CO2_Enu))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CO2_Enu = subs_matrix_CO2_Enu.clip(max=0)
print(subs_matrix_CO2_Enu[:,:4])
#take the absolute values of the differences
subs_matrix_CO2_Enu = abs(subs_matrix_CO2_Enu)
print(subs_matrix_CO2_Enu[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CO2_Enu = np.zeros((len(t)-200,len(df['Landfill_decomp_CO2'].values)))
print(zero_matrix_CO2_Enu)
subs_matrix_CO2_Enu = np.vstack((zero_matrix_CO2_Enu, subs_matrix_CO2_Enu))
print(subs_matrix_CO2_Enu[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CO2_Enu = (tf,1)
decomp_tot_CO2_Enu= np.zeros(matrix_tot_CO2_Enu)
i = 0
while i < tf:
decomp_tot_CO2_Enu[:,0] = decomp_tot_CO2_Enu[:,0] + subs_matrix_CO2_Enu[:,i]
i = i + 1
print(decomp_tot_CO2_Enu[:,0])
#Epl
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Epl')
tf = 201
t = np.arange(tf)
def decomp_CO2_Epl(t,remainAGB_CO2_Epl):
return (1-(1-np.exp(-k*t)))*remainAGB_CO2_Epl
#set zero matrix
output_decomp_CO2_Epl = np.zeros((len(t),len(df['Landfill_decomp_CO2'].values)))
for i,remain_part_CO2_Epl in enumerate(df['Landfill_decomp_CO2'].values):
#print(i,remain_part)
output_decomp_CO2_Epl[i:,i] = decomp_CO2_Epl(t[:len(t)-i],remain_part_CO2_Epl)
print(output_decomp_CO2_Epl[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CO2_Epl = np.zeros((len(t)-1,len(df['Landfill_decomp_CO2'].values-1)))
i = 0
while i < tf:
subs_matrix_CO2_Epl[:,i] = np.diff(output_decomp_CO2_Epl[:,i])
i = i + 1
print(subs_matrix_CO2_Epl[:,:4])
print(len(subs_matrix_CO2_Epl))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CO2_Epl = subs_matrix_CO2_Epl.clip(max=0)
print(subs_matrix_CO2_Epl[:,:4])
#take the absolute values of the differences
subs_matrix_CO2_Epl = abs(subs_matrix_CO2_Epl)
print(subs_matrix_CO2_Epl[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CO2_Epl = np.zeros((len(t)-200,len(df['Landfill_decomp_CO2'].values)))
print(zero_matrix_CO2_Epl)
subs_matrix_CO2_Epl = np.vstack((zero_matrix_CO2_Epl, subs_matrix_CO2_Epl))
print(subs_matrix_CO2_Epl[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CO2_Epl = (tf,1)
decomp_tot_CO2_Epl = np.zeros(matrix_tot_CO2_Epl)
i = 0
while i < tf:
decomp_tot_CO2_Epl[:,0] = decomp_tot_CO2_Epl[:,0] + subs_matrix_CO2_Epl[:,i]
i = i + 1
print(decomp_tot_CO2_Epl[:,0])
#plotting
t = np.arange(0,tf)
plt.plot(t,decomp_tot_CO2_S1nu,label='CO2_S1nu')
plt.plot(t,decomp_tot_CO2_S1pl,label='CO2_S1pl')
plt.plot(t,decomp_tot_CO2_Enu,label='CO2_Enu')
plt.plot(t,decomp_tot_CO2_Epl,label='CO2_Epl')
plt.xlim(0,200)
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
plt.show()
#%%
#Step (8): Sum the emissions and sequestration (net carbon balance), CO2 and CH4 are separated
#https://stackoverflow.com/questions/52703442/python-sum-values-from-multiple-lists-more-than-two
#C_loss + C_remainAGB + C_remainHWP + PH_Emissions_PO
Emissions_PF_PO_S1nu = [c_firewood_energy_S1nu, decomp_emissions[:,0], TestDSM1nu.o, PH_Emissions_PO_S1nu, PH_Emissions_HWP_S1nu, decomp_tot_CO2_S1nu[:,0]]
Emissions_PF_PO_S1pl = [c_firewood_energy_S1pl, decomp_emissions[:,0], TestDSM1pl.o, PH_Emissions_PO_S1pl, PH_Emissions_HWP_S1pl, decomp_tot_CO2_S1pl[:,0]]
Emissions_PF_PO_Enu = [c_firewood_energy_Enu, c_pellets_Enu, decomp_emissions[:,0], TestDSM3nu.o, PH_Emissions_PO_Enu, PH_Emissions_HWP_Enu, decomp_tot_CO2_Enu[:,0]]
Emissions_PF_PO_Epl = [c_firewood_energy_Epl, c_pellets_Epl, decomp_emissions[:,0], TestDSM3pl.o, PH_Emissions_PO_Epl, PH_Emissions_HWP_Epl, decomp_tot_CO2_Epl[:,0]]
Emissions_PF_PO_S1nu = [sum(x) for x in zip(*Emissions_PF_PO_S1nu)]
Emissions_PF_PO_S1pl = [sum(x) for x in zip(*Emissions_PF_PO_S1pl)]
Emissions_PF_PO_Enu = [sum(x) for x in zip(*Emissions_PF_PO_Enu)]
Emissions_PF_PO_Epl = [sum(x) for x in zip(*Emissions_PF_PO_Epl)]
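#e.g. [sum(x) for x in zip(*[[1, 2, 3], [10, 20, 30]])] -> [11, 22, 33]: element-wise sum across the listed streams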
#CH4_S1nu
Emissions_CH4_PF_PO_S1nu = decomp_tot_CH4_S1nu[:,0]
#CH4_S1pl
Emissions_CH4_PF_PO_S1pl = decomp_tot_CH4_S1pl[:,0]
#CH4_Enu
Emissions_CH4_PF_PO_Enu = decomp_tot_CH4_Enu[:,0]
#CH4_Epl
Emissions_CH4_PF_PO_Epl = decomp_tot_CH4_Epl[:,0]
#%%
#Step (9): Generate the excel file (emissions_seq_scenarios.xlsx) from Step (8) calculation
#print year column
year = []
for x in range (0, tf):
year.append(x)
print (year)
#print CH4 emission column
import itertools
lst = [0]
Emissions_CH4 = list(itertools.chain.from_iterable(itertools.repeat(x, tf) for x in lst))
print(Emissions_CH4)
#print emission ref
lst1 = [0]
Emission_ref = list(itertools.chain.from_iterable(itertools.repeat(x, tf) for x in lst1))
print(Emission_ref)
#replace the first element with 1 to denote the emission reference as year 0 (for dynGWP calculation)
Emission_ref[0] = 1
print(Emission_ref)
Col1 = year
Col2_S1nu = Emissions_PF_PO_S1nu
Col2_S1pl = Emissions_PF_PO_S1pl
Col2_Enu = Emissions_PF_PO_Enu
Col2_Epl = Emissions_PF_PO_Epl
Col3_S1nu = Emissions_CH4_PF_PO_S1nu
Col3_S1pl = Emissions_CH4_PF_PO_S1pl
Col3_Enu = Emissions_CH4_PF_PO_Enu
Col3_Epl = Emissions_CH4_PF_PO_Epl
Col4 = flat_list_nucleus
Col5 = Emission_ref
Col6 = flat_list_plasma
#S1
df1_nu = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_S1nu,'kg_CH4':Col3_S1nu,'kg_CO2_seq':Col4,'emission_ref':Col5})
df1_pl = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_S1pl,'kg_CH4':Col3_S1pl,'kg_CO2_seq':Col6,'emission_ref':Col5})
# Choose a Top Performer of ETF from previous week
## https://www.etf.com/etfanalytics/etf-finder
etf_list = ['QQQ', 'QLD', 'TQQQ', 'GDXD', 'SPY']
# Get best ticker performance of past 1 week
# def best_etf(etf_list):
# best_ticker_performance = 0
# best_ticker = ''
# for ticker in etf_list:
# ticker_yahoo = yf.Ticker(ticker)
# data = ticker_yahoo.history()
# last_quote = (data.tail(1)['Close'].iloc[0])
# last_7th_quote = (data.tail(7)['Close'].iloc[0]) # 7 means last 7 days
# last_week_performance = (last_quote - last_7th_quote) / last_7th_quote * 100
# if last_week_performance > best_ticker_performance:
# best_ticker_performance = last_week_performance
# best_ticker = ticker
# return(best_ticker, round(best_ticker_performance, 2))
import json
import requests
import pandas as pd
endpoint = "https://data.alpaca.markets/v1"
headers = json.loads(open("key.txt", 'r').read())
tickers = "ROXIF,EDU,TSP,PANW,PDD,BEKE,TAL,QFIN,TME,MPNGF,DADA,JD,DIDI,MPNGY,VNET,YY,ZH,YMM,BZ,CURV,FUTU,MLCO,TIGR,VIPS,FNMA"
def hist_data(symbols, timeframe="15Min", limit=200, start="", end="", after="", until=""):
"""
Returns historical bar data for a comma-separated string of symbols,
e.g. symbols = "MSFT,AMZN,GOOG".
"""
df_data = {}
bar_url = endpoint + "/bars/{}".format(timeframe)
params = {"symbols" : symbols,
"limit" : limit,
"start" : start,
"end" : end,
"after" : after,
"until" : until}
r = requests.get(bar_url, headers=headers, params=params)
json_dump = r.json()
for symbol in json_dump:
temp = pd.DataFrame(json_dump[symbol])
temp.rename({"t": "time",
"o": "open",
"h": "high",
"l": "low",
"c": "close",
"v": "volume"}, axis=1, inplace=True)
temp["time"] =
|
pd.to_datetime(temp["time"], unit="s")
|
pandas.to_datetime
|
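# Hypothetical usage sketch (assumes hist_data goes on to store each symbol's frame in
# df_data and return it, which is not shown above):
# bars = hist_data("MSFT,AMZN,GOOG", timeframe="15Min", limit=100)
# for symbol, bar_df in bars.items():
#     print(symbol, bar_df.tail(1))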
#!/usr/bin/env python
"""This module contains the capabilities to simulate the model."""
import pandas as pd
import numpy as np
from trempy.shared.shared_auxiliary import get_optimal_compensations
from trempy.shared.shared_auxiliary import dist_class_attributes
from trempy.shared.shared_auxiliary import criterion_function
from trempy.config_trempy import PREFERENCE_PARAMETERS
from trempy.config_trempy import NEVER_SWITCHERS
from trempy.custom_exceptions import TrempyError
from trempy.clsModel import ModelCls
def simulate(fname):
"""Simulate the model based on the initialization file."""
model_obj = ModelCls(fname)
version = model_obj.attr['version']
# Get fixed args that do not change during simulation.
args = [model_obj, 'sim_agents', 'questions', 'sim_seed', 'sim_file', 'paras_obj', 'cutoffs']
if version in ['scaled_archimedean']:
args += ['upper', 'marginals']
sim_agents, questions, sim_seed, sim_file, paras_obj, cutoffs, upper, marginals = \
dist_class_attributes(*args)
version_specific = {'upper': upper, 'marginals': marginals}
elif version in ['nonstationary']:
sim_agents, questions, sim_seed, sim_file, paras_obj, cutoffs = \
dist_class_attributes(*args)
version_specific = dict()
else:
raise TrempyError('version not implemented')
np.random.seed(sim_seed)
m_optimal = get_optimal_compensations(version, paras_obj, questions, **version_specific)
# First, get number of preference parameters. Paras with higher index belong to questions!
nparas_econ = paras_obj.attr['nparas_econ']
# Now, get standard deviation for the error in each question.
sds = paras_obj.get_values('econ', 'all')[nparas_econ:]
heterogeneity = paras_obj.attr['heterogeneity']
if heterogeneity:
sds_time = sds[1]
sds_risk = sds[2]
# TODO: This is what I am proposing instead of the loop below
# Simulate data
# data = []
# agent_identifier = np.arange(sim_agents)
# for k, q in enumerate(questions):
# lower_cutoff, upper_cutoff = cutoffs[q]
# If we estimate agent by agent, we use only two sds for time and risk questions.
# if heterogeneity:
# if q <= 30:
# sds_current_q = sds_time * (upper_cutoff - lower_cutoff) / 200
# else:
# sds_current_q = sds_risk * (upper_cutoff - lower_cutoff) / 20
# else:
# sds_current_q = sds[k]
# m_latent = np.random.normal(loc=m_optimal[q], scale=sds_current_q, size=sim_agents)
# m_observed = np.clip(m_latent, a_min=lower_cutoff, a_max=+np.inf)
# m_observed[m_observed > upper_cutoff] = NEVER_SWITCHERS
# question_identifier = np.repeat(q, repeats=sim_agents)
# data += list(zip(agent_identifier, question_identifier, m_observed))
data = []
for i in range(sim_agents):
for k, q in enumerate(questions):
lower_cutoff, upper_cutoff = cutoffs[q]
# If we estimate agent by agent, we use only two sds for time and risk questions.
if heterogeneity:
if q <= 30:
sds_current_q = sds_time * (upper_cutoff - lower_cutoff) / 200
else:
sds_current_q = sds_risk * (upper_cutoff - lower_cutoff) / 20
else:
sds_current_q = sds[k]
m_latent = np.random.normal(loc=m_optimal[q], scale=sds_current_q, size=1)
m_observed = np.clip(m_latent, a_min=lower_cutoff, a_max=+np.inf)
m_observed[m_observed > upper_cutoff] = NEVER_SWITCHERS
data += [[i, q, m_observed]]
# Post-processing step
df = pd.DataFrame(data)
df.rename({0: 'Individual', 1: 'Question', 2: 'Compensation'}, inplace=True, axis='columns')
dtype = {'Individual': int, 'Question': int, 'Compensation': float}
df = df.astype(dtype)
df.set_index(['Individual', 'Question'], inplace=True, drop=False)
df.sort_index(inplace=True)
df.to_pickle(sim_file + '.trempy.pkl', protocol=2)
x_econ_all_current = paras_obj.get_values('econ', 'all')
fval, _ = criterion_function(
df, questions, cutoffs, paras_obj, version, sds, **version_specific
)
write_info(
version, x_econ_all_current, df, questions, fval, m_optimal, sim_file + '.trempy.info'
)
return df, fval
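# Hypothetical usage sketch (the initialization file name is an assumption):
# df_sim, fval = simulate('model.trempy.ini')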
def write_info(version, x_econ_all_current, df, questions, likl, m_optimal, fname):
"""Write out some basic information about the simulated dataset."""
df_sim = df['Compensation'].mask(df['Compensation'] == NEVER_SWITCHERS)
paras_label = PREFERENCE_PARAMETERS[version] + questions
fmt_ = '{:>15}' + '{:>15}' + '{:>15} '
with open(fname, 'w') as outfile:
outfile.write('\n {:<25}\n'.format('Observed Data'))
string = '{:>15}' * 11 + '\n'
label = []
label += ['Question', 'Observed', 'Interior', 'Optimal', 'Mean', 'Std.', 'Min.']
label += ['25%', '50%', '75%', 'Max.']
outfile.write('\n')
outfile.write(string.format(*label))
outfile.write('\n')
for i, q in enumerate(questions):
num_observed = df.loc[(slice(None), slice(q, q)), :].shape[0]
stats = df_sim.loc[slice(None), slice(q, q)].describe().tolist()
info = [q, num_observed, stats[0], m_optimal[q]] + stats[1:]
for i in [0, 1, 2]:
info[i] = '{:d}'.format(int(info[i]))
for i in [3, 4, 5, 6, 7, 8, 9, 10]:
if pd.isnull(info[i]):
import collections
import os
import joblib
import numpy as np
import pandas as pd
import pysam
from ..common import UNIQUE_READS, MULTIMAP_READS, READS, CHROM, \
JUNCTION_START, JUNCTION_STOP, STRAND
from .core import add_exons_and_junction_ids
def _report_read_positions(read, counter):
chrom = read.reference_name
strand = '-' if read.is_reverse else '+'
last_read_pos = None
for read_loc, genome_loc in read.get_aligned_pairs():
if read_loc is None and last_read_pos is not None:
# Add one to be compatible with STAR output and show the
# start of the intron (not the end of the exon)
start = genome_loc + 1
elif read_loc and last_read_pos is None:
stop = genome_loc # we are right exclusive, so this is correct
counter[(chrom, start, stop, strand)] += 1
del start
del stop
last_read_pos = read_loc
def _choose_strand_and_sum(reads):
"""Use the strand with more counts and sum all reads with same junction
STAR seems to take a simple majority to decide on strand when there are
reads mapping to both, so we'll do the same
Parameters
----------
reads : pandas.Series
A (chrom, start, stop, strand)-indexed series of read counts
Returns
-------
reads_strand_chosen : pandas.Series
A (chrom, start, stop, strand)-indexed series of read counts, with
the majority strand chosen as the "winner" and the counts from both
strands summed for each junction
"""
if reads.empty:
return pd.Series(name=reads.name)
locations = reads.groupby(level=(0, 1, 2)).idxmax()
counts = reads.groupby(level=(0, 1, 2)).sum()
index = pd.MultiIndex.from_tuples(locations.values)
return pd.Series(counts.values, index=index, name=reads.name)
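# e.g. counts of 3 on ('chr1', 100, 200, '+') and 1 on ('chr1', 100, 200, '-') collapse
# to a single ('chr1', 100, 200, '+') entry with a value of 4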
def _combine_uniquely_multi(uniquely, multi, ignore_multimapping=False):
"""Combine uniquely and multi-mapped read counts into a single table
Parameters
----------
uniquely, multi : dict
A dictionary of {(chrom, start, end, strand) : n_reads} uniquely mapped
and multi-mapped (reads that could map to multiple parts of the genome)
ignore_multimapping : bool
When summing all reads, whether or not to ignore the multimapping
reads. Default is False.
Returns
-------
reads : pandas.DataFrame
A combined table of all uniquely and multi-mapped reads, with an
additional column of "reads" which will ultimately be the reads used
for creating an outrigger index and calculating percent spliced-in.
"""
uniquely = pd.Series(uniquely, name=UNIQUE_READS)
multi = pd.Series(multi, name=MULTIMAP_READS)
uniquely = _choose_strand_and_sum(uniquely)
multi = _choose_strand_and_sum(multi)
# Join the data on the chromosome locations
if multi.empty:
reads = uniquely.to_frame()
reads[MULTIMAP_READS] = np.nan
elif uniquely.empty:
reads = multi.to_frame()
reads[UNIQUE_READS] = np.nan
else:
reads = uniquely.to_frame().join(multi)
reads = reads.fillna(0)
reads = reads.astype(int)
if ignore_multimapping:
reads[READS] = reads[UNIQUE_READS]
else:
reads[READS] = reads.sum(axis=1)
reads = reads.reset_index()
reads = reads.rename(columns={'level_0': CHROM, 'level_1': JUNCTION_START,
'level_2': JUNCTION_STOP, 'level_3': STRAND})
reads.index = np.arange(reads.shape[0])
return reads
def _get_junction_reads(filename):
"""Read a sam file and extract unique and multi mapped junction reads"""
samfile = pysam.AlignmentFile(filename, "rb")
# Uniquely mapped reads
uniquely = collections.Counter()
# Multimapped reads
multi = collections.Counter()
for read in samfile.fetch():
if "N" in read.cigarstring:
if read.mapping_quality < 255:
counter = multi
else:
counter = uniquely
_report_read_positions(read, counter)
samfile.close()
return uniquely, multi
def bam_to_junction_reads_table(bam_filename, ignore_multimapping=False):
"""Create a table of reads for this bam file"""
uniquely, multi = _get_junction_reads(bam_filename)
reads = _combine_uniquely_multi(uniquely, multi, ignore_multimapping)
# Remove "junctions" with same start and stop
reads = reads.loc[reads[JUNCTION_START] != reads[JUNCTION_STOP]]
reads.index = np.arange(reads.shape[0])
reads['sample_id'] = os.path.basename(bam_filename)
reads = add_exons_and_junction_ids(reads)
return reads
def read_multiple_bams(bam_filenames, ignore_multimapping=False, n_jobs=-1):
dfs = joblib.Parallel(n_jobs=n_jobs)(
joblib.delayed(
bam_to_junction_reads_table)(filename, ignore_multimapping)
for filename in bam_filenames)
reads = pd.concat(dfs, ignore_index=True)
import csv
import logging
import os
import tempfile
from datetime import datetime, date
from io import StringIO
from typing import Dict, List, Any, TypeVar
import click
import pandas as pd
import s3fs
import sqlalchemy.engine
from requests import HTTPError
from snowflake.connector.pandas_tools import write_pandas
from sqlalchemy import Column, TEXT, TIMESTAMP, DATE, INT, FLOAT, BOOLEAN
from dbd.config.dbd_project import DbdProjectConfigException
from dbd.db.db_table import DbTable
from dbd.log.dbd_exception import DbdException
from dbd.tasks.db_table_task import DbTableTask
from dbd.utils.io_utils import download_file, url_to_filename, is_zip, extract_zip_file, zip_to_url_and_locator, \
is_kaggle, extract_kaggle_dataset_id_and_zip_name, download_kaggle
from dbd.utils.io_utils import is_url
from dbd.utils.sql_parser import SqlParser
log = logging.getLogger(__name__)
class DbdUnsupportedDataFile(DbdException):
pass
class DbdInvalidDataFileFormatException(DbdException):
pass
class DbdInvalidDataFileReferenceException(DbdException):
pass
class DbdDataLoadError(DbdException):
pass
DataTaskType = TypeVar('DataTaskType', bound='DataTask')
def psql_writer(table, conn, keys, data_iter):
"""
Execute SQL statement inserting data
Parameters
----------
table : pandas.io.sql.SQLTable
conn : sqlalchemy.engine.Engine or sqlalchemy.engine.Connection
keys : list of str
Column names
data_iter : Iterable that iterates the values to be inserted
"""
# gets a DBAPI connection that can provide a cursor
dbapi_conn = conn.connection
with dbapi_conn.cursor() as cur:
s_buf = StringIO()
writer = csv.writer(s_buf)
writer.writerows(data_iter)
s_buf.seek(0)
columns = ', '.join('"{}"'.format(k) for k in keys)
if table.schema:
table_name = '{}.{}'.format(table.schema, table.name)
else:
table_name = table.name
sql = 'COPY {} ({}) FROM STDIN WITH CSV'.format(
table_name, columns)
cur.copy_expert(sql=sql, file=s_buf)
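# psql_writer is intended to be handed to pandas as a custom insertion method, as done later
# in this module, e.g. df.to_sql(table_name, engine, method=psql_writer, if_exists='append', index=False)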
class DataTask(DbTableTask):
"""
Data loading task. Loads data from a local data file (e.g. CSV) to database.
"""
def __init__(self, task_def: Dict[str, Any]):
"""
Data task constructor
:param Dict[str, Any] task_def: Target table definition
"""
super().__init__(task_def)
def data_files(self) -> List[str]:
"""
Task data files
:return: task data files
"""
return self.task_data()
def set_data_files(self, data_files: List[str]):
"""
Sets task data files
:param List[str] data_files: task data file
"""
self.set_task_data(data_files)
@classmethod
def from_code(cls, task_def: Dict[str, Any]) -> DataTaskType:
"""
Creates a new task from table definition (dict)
:param Dict[str, Any] task_def: table definition (dict)
:return: new EltTask instance
:rtype: EltTask
"""
return DataTask(task_def)
# noinspection PyMethodMayBeStatic
def __data_file_columns(self, data_frame: pd.DataFrame) -> List[sqlalchemy.Column]:
"""
Introspects data file columns
:param pd.DataFrame data_frame: Pandas dataframe with populated data
:return: list of data file columns (SQLAlchemy Column[])
:rtype: List[sqlalchemy.Column]
"""
columns = []
for column_name, column_type in data_frame.dtypes.items():
if column_type.name == 'datetime64[ns]':
columns.append(Column(column_name, TIMESTAMP))
elif column_type.name == 'datetime64[D]':
columns.append(Column(column_name, DATE))
elif column_type.name == 'object':
columns.append(Column(column_name, TEXT))
elif column_type.name == 'int64':
columns.append(Column(column_name, INT))
elif column_type.name == 'float64':
columns.append(Column(column_name, FLOAT))
elif column_type.name == 'bool':
columns.append(Column(column_name, BOOLEAN))
else:
columns.append(Column(column_name, TEXT))
return columns
# noinspection DuplicatedCode
def __override_data_file_column_definitions(self, data_frame: pd.DataFrame) -> Dict[str, Any]:
"""
Merges the data file column definitions with the column definitions from the task_def.
The column definitions override the introspected data file types
:param pd.DataFrame data_frame: Pandas dataframe with populated data
:return: data file columns overridden with the task's explicit column definitions
:rtype: Dict[str, Any]
"""
table_def = self.table_def()
column_overrides = table_def.get('columns', {})
data_file_columns = self.__data_file_columns(data_frame)
ordered_columns = {}
for c in data_file_columns:
overridden_column = column_overrides.get(c.name)
if overridden_column:
if 'type' not in overridden_column:
overridden_column['type'] = c.type
ordered_columns[c.name] = overridden_column
else:
ordered_columns[c.name] = {"type": c.type}
table_def['columns'] = ordered_columns
return table_def
def create(self, target_alchemy_metadata: sqlalchemy.MetaData, alchemy_engine: sqlalchemy.engine.Engine,
**kwargs) -> None:
"""
Executes the task. Creates the target table and loads data
:param sqlalchemy.MetaData target_alchemy_metadata: MetaData SQLAlchemy MetaData
:param Dict[str, str] copy_stage_storage: copy stage storage parameters e.g. AWS S3 dict(url, access_key, secret_key)
:param sqlalchemy.engine.Engine alchemy_engine:
"""
try:
copy_stage_storage = kwargs.get('copy_stage_storage')
global_tmpdir = kwargs.get('global_tmpdir')
for data_file in self.data_files():
if len(data_file) > 0:
with tempfile.TemporaryDirectory() as locaL_tmpdir:
current_tmpdir = locaL_tmpdir
zip_locator = None
if is_zip(data_file):
data_file, zip_locator = zip_to_url_and_locator(data_file)
if is_url(data_file):
absolute_file_name = os.path.join(current_tmpdir, url_to_filename(data_file))
click.echo(f"\tDownloading file from URL: '{data_file}'.")
download_file(data_file, absolute_file_name)
data_file = absolute_file_name
if is_kaggle(data_file):
current_tmpdir = global_tmpdir
kaggle_dataset_id, kaggle_zip_name = extract_kaggle_dataset_id_and_zip_name(data_file)
absolute_file_name = os.path.join(current_tmpdir, f"{kaggle_zip_name}.zip")
click.echo(f"\tDownloading Kaggle dataset: '{data_file}'.")
download_kaggle(kaggle_dataset_id, current_tmpdir)
data_file = absolute_file_name
if zip_locator is not None and len(zip_locator) > 0:
absolute_file_name = os.path.join(current_tmpdir, os.path.basename(zip_locator))
click.echo(f"\tExtracting file from archive: '{data_file}'.")
extract_zip_file(data_file, zip_locator, current_tmpdir)
data_file = absolute_file_name
click.echo(f"\tProcessing local file: '{data_file}'.")
absolute_file_name = data_file
df = self.__read_file_to_dataframe(absolute_file_name)
mysql_bulk_load_config = alchemy_engine.url.query.get('local_infile') == '1'
if self.db_table() is None:
table_def = self.__override_data_file_column_definitions(df)
db_table = DbTable.from_code(self.target(), table_def, target_alchemy_metadata,
self.target_schema())
self.set_db_table(db_table)
db_table.create()
dtype = self.__adjust_dataframe_datatypes(df, alchemy_engine.dialect.name)
click.echo(f"\tLoading data to database.")
if alchemy_engine.dialect.name == 'snowflake':
self.__bulk_load_snowflake(df, alchemy_engine)
elif alchemy_engine.dialect.name == 'postgresql':
df.to_sql(self.target(), alchemy_engine, chunksize=1024, method=psql_writer,
schema=self.target_schema(), if_exists='append', index=False, dtype=dtype)
elif alchemy_engine.dialect.name == 'mysql' and mysql_bulk_load_config:
self.__bulk_load_mysql(df, alchemy_engine)
elif alchemy_engine.dialect.name == 'bigquery':
self.__bulk_load_bigquery(df, dtype, alchemy_engine)
elif alchemy_engine.dialect.name == 'redshift' and copy_stage_storage is not None:
self.__bulk_load_redshift(df, alchemy_engine, copy_stage_storage)
else:
if alchemy_engine.dialect.name == 'redshift':
log.warning(
"Using default SQLAlchemy writer for Redshift. Specify 'copy_stage' parameter "
"in your profile configuration file to make loading faster.")
if alchemy_engine.dialect.name == 'mysql':
log.warning(
"Using default SQLAlchemy writer for MySQL. Specify 'local_infile=1' parameter "
"in a query parameter of your MySQL connection string to make loading faster.")
df.to_sql(self.target(), alchemy_engine, chunksize=1024, method='multi',
schema=self.target_schema(), if_exists='append', index=False, dtype=dtype)
except sqlalchemy.exc.IntegrityError as e:
raise DbdDataLoadError(f" Referential integrity error: {e}")
except ValueError as e:
raise DbdInvalidDataFileFormatException(f"Error parsing file '{absolute_file_name}': {e}")
except (FileNotFoundError, HTTPError) as e:
raise DbdInvalidDataFileReferenceException(f"Referenced file '{absolute_file_name}' doesn't exist: {e}")
def __bulk_load_bigquery(self, df: pd.DataFrame, dtype: Dict[str, str], alchemy_engine: sqlalchemy.engine.Engine):
"""
Bulk load data to BigQuery
:param pd.DataFrame df: pandas dataframe
:param Dict[str, str] dtype: Data types for each column
:param sqlalchemy.engine.Engine alchemy_engine: SqlAlchemy engine
"""
table_schema = [dict(name=k, type=SqlParser.datatype_to_gbq_datatype(str(v))) for (k, v) in dtype.items()]
target_schema = self.target_schema()
dataset = target_schema if target_schema is not None and len(target_schema) > 0 \
else alchemy_engine.engine.url.database
df.to_gbq(f"{dataset}.{self.target()}", if_exists='append', table_schema=table_schema)
def __bulk_load_redshift(self, df: pd.DataFrame, alchemy_engine: sqlalchemy.engine.Engine,
copy_stage_storage: Dict[str, str]):
"""
Bulk load data to Redshift
:param pd.DataFrame df: pandas dataframe
:param sqlalchemy.engine.Engine alchemy_engine: SqlAlchemy engine
:param Dict[str, str] copy_stage_storage: copy stage storage parameters e.g. AWS S3 dict(url, access_key, secret_key)
"""
if copy_stage_storage is not None:
if 'url' in copy_stage_storage:
aws_stage_path = copy_stage_storage['url']
else:
raise DbdProjectConfigException(
"Missing 'url' key in the 'copy_stage' storage definition parameter in your profile file.")
if 'access_key' in copy_stage_storage:
aws_access_key = copy_stage_storage['access_key']
else:
raise DbdProjectConfigException(
"Missing 'access_key' key in the 'copy_stage' storage definition parameter in your profile file.")
if 'secret_key' in copy_stage_storage:
aws_secret_key = copy_stage_storage['secret_key']
else:
raise DbdProjectConfigException(
"Missing 'secret_key' key in the 'copy_stage' storage definition parameter in your profile file.")
temp_file_name = f"{aws_stage_path.rstrip('/')}/{self.target_schema()}/{self.target()}" \
f"_{datetime.now().strftime('%y%m%d_%H%M%S')}"
df.to_csv(f"{temp_file_name}.csv.gz", index=False, quoting=csv.QUOTE_NONNUMERIC, compression='gzip',
storage_options={"key": aws_access_key,
"secret": aws_secret_key})
with alchemy_engine.connect() as conn:
target_schema = self.target_schema()
target_schema_with_dot = f"{target_schema}." if target_schema else ''
conn.execute(f"copy {target_schema_with_dot}{self.target()} from '{temp_file_name}.csv.gz' "
f"CREDENTIALS 'aws_access_key_id={aws_access_key};aws_secret_access_key={aws_secret_key}' "
f"FORMAT CSV DELIMITER AS ',' DATEFORMAT 'YYYY-MM-DD' EMPTYASNULL IGNOREHEADER 1 GZIP")
conn.connection.commit()
file = s3fs.S3FileSystem(anon=False, key=aws_access_key, secret=aws_secret_key)
file.rm(f"{temp_file_name}.csv.gz")
else:
raise DbdProjectConfigException("Redshift requires 'copy_stage' parameter in your project file.")
def __bulk_load_snowflake(self, df: pd.DataFrame, alchemy_engine: sqlalchemy.engine.Engine):
"""
Bulk load data to snowflake
:param pandas.DataFrame df: DataFrame
:param sqlalchemy.engine.Engine alchemy_engine: SQLAlchemy engine
"""
#df.columns = map(str.upper, df.columns)
#table_name = self.target().upper()
table_name = self.target()
schema_name = self.target_schema()
#schema_name = schema_name.upper() if schema_name else None
with alchemy_engine.connect() as conn:
write_pandas(
conn.connection, df,
table_name=table_name,
schema=schema_name,
quote_identifiers=True)
conn.connection.commit()
def __bulk_load_mysql(self, df: pd.DataFrame, alchemy_engine: sqlalchemy.engine.Engine):
"""
Bulk load data to MySQL
:param pandas.DataFrame df: DataFrame
:param sqlalchemy.engine.Engine alchemy_engine: SQLAlchemy engine
"""
with tempfile.TemporaryDirectory() as tmp_dir_name:
temporary_file_name = f"{tmp_dir_name}/bulk.csv"
df.to_csv(temporary_file_name, index=False, na_rep='\\N')
target_schema = self.target_schema()
target_schema_with_dot = f"{target_schema}." if target_schema else ''
with alchemy_engine.connect() as conn:
query = f"LOAD DATA LOCAL INFILE '{temporary_file_name}' " \
f"INTO TABLE {target_schema_with_dot}{self.target()} " \
f"FIELDS TERMINATED BY ',' " \
f"OPTIONALLY ENCLOSED BY '\"' ESCAPED BY '\\\\' IGNORE 1 LINES"
conn.execute(query)
conn.connection.commit()
def __adjust_dataframe_datatypes(self, df, dialect_name: str):
"""
Adjusts the dataframe datatypes to match the target table
:param pd.DataFrame df: Pandas dataframe with populated data
:param str dialect_name: SQLAlchemy dialect name
:return: dtype for to_sql
"""
dtype = {}
for c in self.db_table().columns():
column_name = c.name()
column_type = c.type()
# Snowflake fix
if str(column_type).upper().startswith('TIMESTAMP_'):
python_type = datetime
else:
python_type = SqlParser.parse_alchemy_data_type(column_type).python_type
if isinstance(python_type, type) and issubclass(python_type, datetime):
if dialect_name in ['bigquery']:
df[column_name] = pd.to_datetime(df[column_name]).dt.strftime('%Y-%m-%d %H:%M:%S')
df[column_name] = df[column_name].astype('datetime64[ns]')
elif dialect_name in ['snowflake']:
df[column_name] = pd.to_datetime(df[column_name]).dt.strftime('%Y-%m-%d %H:%M:%S')
else:
df[column_name] = pd.to_datetime(df[column_name])
elif isinstance(python_type, type) and issubclass(python_type, date):
if dialect_name in ['bigquery']:
df[column_name] = pd.to_datetime(df[column_name]).dt.strftime('%Y-%m-%d')
df[column_name] = df[column_name].astype('datetime64[ns]')
elif dialect_name in ['snowflake']:
df[column_name] = pd.to_datetime(df[column_name]).dt.strftime('%Y-%m-%d')
else:
df[column_name] = pd.to_datetime(df[column_name])
elif isinstance(python_type, type) and issubclass(python_type, bool):
if dialect_name in ['mysql']:
df[column_name] = df[column_name].map(lambda x: SqlParser.parse_bool_int(x))
df[column_name] = df[column_name].astype('float').astype('Int64')
else:
df[column_name] = df[column_name].map(lambda x: SqlParser.parse_bool(x))
df[column_name] = df[column_name].astype('boolean')
elif isinstance(python_type, type) and issubclass(python_type, int):
df[column_name] = df[column_name].astype('float').astype('Int64')
elif isinstance(python_type, type) and issubclass(python_type, float):
df[column_name] = df[column_name].astype(python_type)
else:
# consistently interpret "" as NULL
df[column_name] = df[column_name].map(lambda x: SqlParser.parse_string(x))
df[column_name] = df[column_name].astype('object')
dtype[column_name] = column_type
return dtype
# noinspection PyMethodMayBeStatic
def __read_file_to_dataframe(self, absolute_file_name: str) -> pd.DataFrame:
"""
Read the file content to Pandas dataframe
:param str absolute_file_name: filename to read the dataframe from
:return: Pandas DataFrame
:rtype: pd.DataFrame
"""
try:
# if is_url(absolute_file_name):
# absolute_file_name = download_file(absolute_file_name, absolute_file_name)
file_name, file_extension = os.path.splitext(absolute_file_name)
if file_extension.lower() == '.csv':
return pd.read_csv(absolute_file_name, dtype=str)
elif file_extension.lower() == '.json':
# noinspection PyTypeChecker
return pd.read_json(absolute_file_name)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 5 17:33:23 2019
@author: <NAME>
"""
import numpy as np, scipy as sp, scipy.stats as stats
from scipy.optimize import minimize
from scipy.stats import multivariate_normal, norm
from numpy import exp, log, sqrt
from scipy.special import logsumexp
import pandas as pd
from os.path import expanduser, exists, join
import hashlib
import os
import gzip
import pickle
class DataPrep(object):
@staticmethod
def get_data(url, md5, preparation_func, pickle_name, *contained_file_paths):
"""
preparation_func - takes a dictionary with filenames (contained in a ZIP) as keys, file-like objects as values
"""
if not exists(pickle_name):
rval = preparation_func(DataPrep.download_check_unpack(url, md5, *contained_file_paths))
pickle.dump(rval, gzip.GzipFile(pickle_name, 'w'))
else:
rval = pickle.load(gzip.GzipFile(pickle_name, 'r'))
return rval
@staticmethod
def download_check_unpack(url, md5, *contained_file_paths):
from tempfile import TemporaryDirectory, TemporaryFile
from urllib.request import urlretrieve
from zipfile import ZipFile, is_zipfile
file_objs = {}
with TemporaryDirectory() as tmpdirname:
if isinstance(url, str):
(web_file, _) = urlretrieve(url)
if md5 is not None:
hash_md5 = hashlib.md5()
with open(web_file, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
assert(hash_md5.hexdigest() == md5)
if is_zipfile(web_file):
zip = ZipFile(web_file)
for name in contained_file_paths:
extr_name = zip.extract(name, tmpdirname)
file_objs[name] = open(extr_name, 'r')
else:
file_objs[web_file] = open(web_file, 'r')
else:
for (i, u) in enumerate(url):
(web_file, _) = urlretrieve(u)
file_objs[contained_file_paths[i]] = web_file
return file_objs
@staticmethod
def prepare_markov_1st(covariates, measurements):
"""
Prepare data for a Markovian (1st-order) model.
covariates - the covariates used for prediction
measurements - the measurements
returns: (indepvar_t, depvar_t)
indepvar_t : covariates at timepoint t concatenated with measurements at timepoint t - 1
depvar_t : measurements at timepoint t
"""
assert(len(covariates.shape) == 2 and len(measurements.shape) == 2)
indepvar = np.concatenate((covariates[1:, :], measurements[:-1, :]), 1)
depvar = measurements[1:, :]
return (indepvar, depvar)
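# e.g. with covariates of shape (T, p) and measurements of shape (T, m),
# indepvar has shape (T-1, p+m) and depvar has shape (T-1, m)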
class Power(object):
def __init__(self, root = '~/Documents/datasets/'):
self.root = expanduser(root)
if not exists(self.root):
os.makedirs(self.root)
pickle_name = join(self.root, 'power_noisy.gzip')
self.data = DataPrep.get_data("http://archive.ics.uci.edu/ml/machine-learning-databases/00235/household_power_consumption.zip",
"41f51806846b6b567b8ae701a300a3de",
lambda file_objs: Power.prepare(Power.load_raw(file_objs["household_power_consumption.txt"])),
pickle_name,
"household_power_consumption.txt")
self.measurement_interval_in_sec = 60
def get_ihgp_window(self, no):
beg = ((pd.to_datetime('2006-12-16 17:24:00') - pd.Timestamp("1970-01-01")) // pd.Timedelta('1s')
from dataclasses import replace
import datetime as dt
from functools import partial
import inspect
from pathlib import Path
import re
import types
import uuid
import pandas as pd
from pandas.testing import assert_frame_equal
import pytest
from solarforecastarbiter import datamodel
from solarforecastarbiter.io import api, nwp, utils
from solarforecastarbiter.reference_forecasts import main, models
from solarforecastarbiter.conftest import default_forecast, default_observation
BASE_PATH = Path(nwp.__file__).resolve().parents[0] / 'tests/data'
@pytest.mark.parametrize('model', [
models.gfs_quarter_deg_hourly_to_hourly_mean,
models.gfs_quarter_deg_to_hourly_mean,
models.hrrr_subhourly_to_hourly_mean,
models.hrrr_subhourly_to_subhourly_instantaneous,
models.nam_12km_cloud_cover_to_hourly_mean,
models.nam_12km_hourly_to_hourly_instantaneous,
models.rap_cloud_cover_to_hourly_mean,
models.gefs_half_deg_to_hourly_mean
])
def test_run_nwp(model, site_powerplant_site_type, mocker):
""" to later patch the return value of load forecast, do something like
def load(*args, **kwargs):
return load_forecast_return_value
mocker.patch.object(inspect.unwrap(model), '__defaults__',
(partial(load),))
"""
mocker.patch.object(inspect.unwrap(model), '__defaults__',
(partial(nwp.load_forecast, base_path=BASE_PATH),))
mocker.patch(
'solarforecastarbiter.reference_forecasts.utils.get_init_time',
return_value=pd.Timestamp('20190515T0000Z'))
site, site_type = site_powerplant_site_type
fx = datamodel.Forecast('Test', dt.time(5), pd.Timedelta('1h'),
pd.Timedelta('1h'), pd.Timedelta('6h'),
'beginning', 'interval_mean', 'ghi', site)
run_time = pd.Timestamp('20190515T1100Z')
issue_time = pd.Timestamp('20190515T1100Z')
out = main.run_nwp(fx, model, run_time, issue_time)
for var in ('ghi', 'dni', 'dhi', 'air_temperature', 'wind_speed',
'ac_power'):
if site_type == 'site' and var == 'ac_power':
assert out.ac_power is None
else:
ser = getattr(out, var)
assert len(ser) >= 6
assert isinstance(ser, (pd.Series, pd.DataFrame))
assert ser.index[0] == pd.Timestamp('20190515T1200Z')
assert ser.index[-1] < pd.Timestamp('20190515T1800Z')
@pytest.fixture
def obs_5min_begin(site_metadata):
observation = default_observation(
site_metadata,
interval_length=pd.Timedelta('5min'), interval_label='beginning')
return observation
@pytest.fixture
def observation_values_text():
"""JSON text representation of test data"""
tz = 'UTC'
data_index = pd.date_range(
start='20190101', end='20190112', freq='5min', tz=tz, closed='left')
# each element of data is equal to the hour value of its label
data = pd.DataFrame({'value': data_index.hour, 'quality_flag': 0},
index=data_index)
text = utils.observation_df_to_json_payload(data)
return text.encode()
@pytest.fixture
def session(requests_mock, observation_values_text):
session = api.APISession('')
matcher = re.compile(f'{session.base_url}/observations/.*/values')
requests_mock.register_uri('GET', matcher, content=observation_values_text)
return session
@pytest.mark.parametrize('interval_label', ['beginning', 'ending'])
def test_run_persistence_scalar(session, site_metadata, obs_5min_begin,
interval_label, mocker):
run_time = pd.Timestamp('20190101T1945Z')
# intraday, index=False
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1h'),
interval_label=interval_label)
issue_time = pd.Timestamp('20190101T2300Z')
mocker.spy(main.persistence, 'persistence_scalar')
out = main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert isinstance(out, pd.Series)
assert len(out) == 1
assert main.persistence.persistence_scalar.call_count == 1
@pytest.mark.parametrize('interval_label', ['beginning', 'ending'])
def test_run_persistence_scalar_index(session, site_metadata, obs_5min_begin,
interval_label, mocker):
run_time = pd.Timestamp('20190101T1945Z')
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1h'),
interval_label=interval_label)
issue_time = pd.Timestamp('20190101T2300Z')
# intraday, index=True
mocker.spy(main.persistence, 'persistence_scalar_index')
out = main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time, index=True)
assert isinstance(out, pd.Series)
assert len(out) == 1
assert main.persistence.persistence_scalar_index.call_count == 1
def test_run_persistence_interval(session, site_metadata, obs_5min_begin,
mocker):
run_time = pd.Timestamp('20190102T1945Z')
# day ahead, index = False
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h')
from collections import deque
from datetime import datetime
import operator
import re
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import DataFrame, MultiIndex, Series
import pandas._testing as tm
import pandas.core.common as com
from pandas.core.computation.expressions import _MIN_ELEMENTS, _NUMEXPR_INSTALLED
from pandas.tests.frame.common import _check_mixed_float, _check_mixed_int
# -------------------------------------------------------------------
# Comparisons
class TestFrameComparisons:
# Specifically _not_ flex-comparisons
def test_frame_in_list(self):
# GH#12689 this should raise at the DataFrame level, not blocks
df = pd.DataFrame(np.random.randn(6, 4), columns=list("ABCD"))
msg = "The truth value of a DataFrame is ambiguous"
with pytest.raises(ValueError, match=msg):
df in [None]
def test_comparison_invalid(self):
def check(df, df2):
for (x, y) in [(df, df2), (df2, df)]:
# we expect the result to match Series comparisons for
# == and !=, inequalities should raise
result = x == y
expected = pd.DataFrame(
{col: x[col] == y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
result = x != y
expected = pd.DataFrame(
{col: x[col] != y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
msgs = [
r"Invalid comparison between dtype=datetime64\[ns\] and ndarray",
"invalid type promotion",
(
# npdev 1.20.0
r"The DTypes <class 'numpy.dtype\[.*\]'> and "
r"<class 'numpy.dtype\[.*\]'> do not have a common DType."
),
]
msg = "|".join(msgs)
with pytest.raises(TypeError, match=msg):
x >= y
with pytest.raises(TypeError, match=msg):
x > y
with pytest.raises(TypeError, match=msg):
x < y
with pytest.raises(TypeError, match=msg):
x <= y
# GH4968
# invalid date/int comparisons
df = pd.DataFrame(np.random.randint(10, size=(10, 1)), columns=["a"])
df["dates"] = pd.date_range("20010101", periods=len(df))
df2 = df.copy()
df2["dates"] = df["a"]
check(df, df2)
df = pd.DataFrame(np.random.randint(10, size=(10, 2)), columns=["a", "b"])
df2 = pd.DataFrame(
{
"a": pd.date_range("20010101", periods=len(df)),
"b": pd.date_range("20100101", periods=len(df)),
}
)
check(df, df2)
def test_timestamp_compare(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH#4982
df = pd.DataFrame(
{
"dates1": pd.date_range("20010101", periods=10),
"dates2": pd.date_range("20010102", periods=10),
"intcol": np.random.randint(1000000000, size=10),
"floatcol": np.random.randn(10),
"stringcol": list(tm.rands(10)),
}
)
df.loc[np.random.rand(len(df)) > 0.5, "dates2"] = pd.NaT
ops = {"gt": "lt", "lt": "gt", "ge": "le", "le": "ge", "eq": "eq", "ne": "ne"}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
if left in ["eq", "ne"]:
expected = left_f(df, pd.Timestamp("20010109"))
result = right_f(pd.Timestamp("20010109"), df)
tm.assert_frame_equal(result, expected)
else:
msg = (
"'(<|>)=?' not supported between "
"instances of 'numpy.ndarray' and 'Timestamp'"
)
with pytest.raises(TypeError, match=msg):
left_f(df, pd.Timestamp("20010109"))
with pytest.raises(TypeError, match=msg):
right_f(pd.Timestamp("20010109"), df)
# nats
expected = left_f(df, pd.Timestamp("nat"))
result = right_f(pd.Timestamp("nat"), df)
tm.assert_frame_equal(result, expected)
def test_mixed_comparison(self):
# GH#13128, GH#22163 != datetime64 vs non-dt64 should be False,
# not raise TypeError
# (this appears to be fixed before GH#22163, not sure when)
df = pd.DataFrame([["1989-08-01", 1], ["1989-08-01", 2]])
other = pd.DataFrame([["a", "b"], ["c", "d"]])
result = df == other
assert not result.any().any()
result = df != other
assert result.all().all()
def test_df_boolean_comparison_error(self):
# GH#4576, GH#22880
# comparing DataFrame against list/tuple with len(obj) matching
# len(df.columns) is supported as of GH#22800
df = pd.DataFrame(np.arange(6).reshape((3, 2)))
expected = pd.DataFrame([[False, False], [True, False], [False, False]])
result = df == (2, 2)
tm.assert_frame_equal(result, expected)
result = df == [2, 2]
tm.assert_frame_equal(result, expected)
def test_df_float_none_comparison(self):
df = pd.DataFrame(
np.random.randn(8, 3), index=range(8), columns=["A", "B", "C"]
)
result = df.__eq__(None)
assert not result.any().any()
def test_df_string_comparison(self):
df = pd.DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
tm.assert_frame_equal(df[mask_a], df.loc[1:1, :])
tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :])
mask_b = df.b == "foo"
tm.assert_frame_equal(df[mask_b], df.loc[0:0, :])
tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :])
class TestFrameFlexComparisons:
# TODO: test_bool_flex_frame needs a better name
def test_bool_flex_frame(self):
data = np.random.randn(5, 3)
other_data = np.random.randn(5, 3)
df = pd.DataFrame(data)
other = pd.DataFrame(other_data)
ndim_5 = np.ones(df.shape + (1, 3))
# Unaligned
def _check_unaligned_frame(meth, op, df, other):
part_o = other.loc[3:, 1:].copy()
rs = meth(part_o)
xp = op(df, part_o.reindex(index=df.index, columns=df.columns))
tm.assert_frame_equal(rs, xp)
# DataFrame
assert df.eq(df).values.all()
assert not df.ne(df).values.any()
for op in ["eq", "ne", "gt", "lt", "ge", "le"]:
f = getattr(df, op)
o = getattr(operator, op)
# No NAs
tm.assert_frame_equal(f(other), o(df, other))
_check_unaligned_frame(f, o, df, other)
# ndarray
tm.assert_frame_equal(f(other.values), o(df, other.values))
# scalar
tm.assert_frame_equal(f(0), o(df, 0))
# NAs
msg = "Unable to coerce to Series/DataFrame"
tm.assert_frame_equal(f(np.nan), o(df, np.nan))
with pytest.raises(ValueError, match=msg):
f(ndim_5)
# Series
def _test_seq(df, idx_ser, col_ser):
idx_eq = df.eq(idx_ser, axis=0)
col_eq = df.eq(col_ser)
idx_ne = df.ne(idx_ser, axis=0)
col_ne = df.ne(col_ser)
tm.assert_frame_equal(col_eq, df == pd.Series(col_ser))
tm.assert_frame_equal(col_eq, -col_ne)
tm.assert_frame_equal(idx_eq, -idx_ne)
tm.assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)
tm.assert_frame_equal(col_eq, df.eq(list(col_ser)))
tm.assert_frame_equal(idx_eq, df.eq(pd.Series(idx_ser), axis=0))
tm.assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))
idx_gt = df.gt(idx_ser, axis=0)
col_gt = df.gt(col_ser)
idx_le = df.le(idx_ser, axis=0)
col_le = df.le(col_ser)
tm.assert_frame_equal(col_gt, df > pd.Series(col_ser))
tm.assert_frame_equal(col_gt, -col_le)
tm.assert_frame_equal(idx_gt, -idx_le)
tm.assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)
idx_ge = df.ge(idx_ser, axis=0)
col_ge = df.ge(col_ser)
idx_lt = df.lt(idx_ser, axis=0)
col_lt = df.lt(col_ser)
tm.assert_frame_equal(col_ge, df >= pd.Series(col_ser))
tm.assert_frame_equal(col_ge, -col_lt)
tm.assert_frame_equal(idx_ge, -idx_lt)
tm.assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)
idx_ser = pd.Series(np.random.randn(5))
col_ser = pd.Series(np.random.randn(3))
_test_seq(df, idx_ser, col_ser)
# list/tuple
_test_seq(df, idx_ser.values, col_ser.values)
# NA
df.loc[0, 0] = np.nan
rs = df.eq(df)
assert not rs.loc[0, 0]
rs = df.ne(df)
assert rs.loc[0, 0]
rs = df.gt(df)
assert not rs.loc[0, 0]
rs = df.lt(df)
assert not rs.loc[0, 0]
rs = df.ge(df)
assert not rs.loc[0, 0]
rs = df.le(df)
assert not rs.loc[0, 0]
def test_bool_flex_frame_complex_dtype(self):
# complex
arr = np.array([np.nan, 1, 6, np.nan])
arr2 = np.array([2j, np.nan, 7, None])
df = pd.DataFrame({"a": arr})
df2 = pd.DataFrame({"a": arr2})
msg = "|".join(
[
"'>' not supported between instances of '.*' and 'complex'",
r"unorderable types: .*complex\(\)", # PY35
]
)
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df.gt(df2)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df["a"].gt(df2["a"])
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df.values > df2.values
rs = df.ne(df2)
assert rs.values.all()
arr3 = np.array([2j, np.nan, None])
df3 = pd.DataFrame({"a": arr3})
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df3.gt(2j)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df3["a"].gt(2j)
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df3.values > 2j
def test_bool_flex_frame_object_dtype(self):
# corner, dtype=object
df1 = pd.DataFrame({"col": ["foo", np.nan, "bar"]})
df2 = pd.DataFrame({"col": ["foo", datetime.now(), "bar"]})
result = df1.ne(df2)
exp = pd.DataFrame({"col": [False, True, False]})
tm.assert_frame_equal(result, exp)
def test_flex_comparison_nat(self):
# GH 15697, GH 22163 df.eq(pd.NaT) should behave like df == pd.NaT,
# and _definitely_ not be NaN
df = pd.DataFrame([pd.NaT])
result = df == pd.NaT
# result.iloc[0, 0] is a np.bool_ object
assert result.iloc[0, 0].item() is False
result = df.eq(pd.NaT)
assert result.iloc[0, 0].item() is False
result = df != pd.NaT
assert result.iloc[0, 0].item() is True
result = df.ne(pd.NaT)
assert result.iloc[0, 0].item() is True
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types(self, opname):
# GH 15077, non-empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
result = getattr(df, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types_empty(self, opname):
# GH 15077 empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
empty = df.iloc[:0]
result = getattr(empty, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
def test_df_flex_cmp_ea_dtype_with_ndarray_series(self):
ii = pd.IntervalIndex.from_breaks([1, 2, 3])
df = pd.DataFrame({"A": ii, "B": ii})
ser = pd.Series([0, 0])
res = df.eq(ser, axis=0)
expected = pd.DataFrame({"A": [False, False], "B": [False, False]})
tm.assert_frame_equal(res, expected)
ser2 = pd.Series([1, 2], index=["A", "B"])
res2 = df.eq(ser2, axis=1)
tm.assert_frame_equal(res2, expected)
# -------------------------------------------------------------------
# Arithmetic
class TestFrameFlexArithmetic:
def test_floordiv_axis0(self):
# make sure we df.floordiv(ser, axis=0) matches column-wise result
arr = np.arange(3)
ser = pd.Series(arr)
df = pd.DataFrame({"A": ser, "B": ser})
result = df.floordiv(ser, axis=0)
expected = pd.DataFrame({col: df[col] // ser for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = df.floordiv(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
@pytest.mark.skipif(not _NUMEXPR_INSTALLED, reason="numexpr not installed")
@pytest.mark.parametrize("opname", ["floordiv", "pow"])
def test_floordiv_axis0_numexpr_path(self, opname):
# case that goes through numexpr and has to fall back to masked_arith_op
op = getattr(operator, opname)
arr = np.arange(_MIN_ELEMENTS + 100).reshape(_MIN_ELEMENTS // 100 + 1, -1) * 100
df = pd.DataFrame(arr)
df["C"] = 1.0
ser = df[0]
result = getattr(df, opname)(ser, axis=0)
expected = pd.DataFrame({col: op(df[col], ser) for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = getattr(df, opname)(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
def test_df_add_td64_columnwise(self):
# GH 22534 Check that column-wise addition broadcasts correctly
dti = pd.date_range("2016-01-01", periods=10)
tdi = pd.timedelta_range("1", periods=10)
tser = pd.Series(tdi)
df = pd.DataFrame({0: dti, 1: tdi})
result = df.add(tser, axis=0)
expected = pd.DataFrame({0: dti + tdi, 1: tdi + tdi})
tm.assert_frame_equal(result, expected)
def test_df_add_flex_filled_mixed_dtypes(self):
# GH 19611
dti = pd.date_range("2016-01-01", periods=3)
ser = pd.Series(["1 Day", "NaT", "2 Days"], dtype="timedelta64[ns]")
df = pd.DataFrame({"A": dti, "B": ser})
other = pd.DataFrame({"A": ser, "B": ser})
fill = pd.Timedelta(days=1).to_timedelta64()
result = df.add(other, fill_value=fill)
expected = pd.DataFrame(
{
"A": pd.Series(
["2016-01-02", "2016-01-03", "2016-01-05"], dtype="datetime64[ns]"
),
"B": ser * 2,
}
)
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame(
self, all_arithmetic_operators, float_frame, mixed_float_frame
):
# one instance of parametrized fixture
op = all_arithmetic_operators
def f(x, y):
# r-versions not in operator-stdlib; get op without "r" and invert
if op.startswith("__r"):
return getattr(operator, op.replace("__r", "__"))(y, x)
return getattr(operator, op)(x, y)
result = getattr(float_frame, op)(2 * float_frame)
expected = f(float_frame, 2 * float_frame)
tm.assert_frame_equal(result, expected)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype=dict(C=None))
@pytest.mark.parametrize("op", ["__add__", "__sub__", "__mul__"])
def test_arith_flex_frame_mixed(
self, op, int_frame, mixed_int_frame, mixed_float_frame
):
f = getattr(operator, op)
# vs mix int
result = getattr(mixed_int_frame, op)(2 + mixed_int_frame)
expected = f(mixed_int_frame, 2 + mixed_int_frame)
# no overflow in the uint
dtype = None
if op in ["__sub__"]:
dtype = dict(B="uint64", C=None)
elif op in ["__add__", "__mul__"]:
dtype = dict(C=None)
tm.assert_frame_equal(result, expected)
_check_mixed_int(result, dtype=dtype)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype=dict(C=None))
# vs plain int
result = getattr(int_frame, op)(2 * int_frame)
expected = f(int_frame, 2 * int_frame)
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame_raise(self, all_arithmetic_operators, float_frame):
# one instance of parametrized fixture
op = all_arithmetic_operators
# Check that arrays with dim >= 3 raise
for dim in range(3, 6):
arr = np.ones((1,) * dim)
msg = "Unable to coerce to Series/DataFrame"
with pytest.raises(ValueError, match=msg):
getattr(float_frame, op)(arr)
def test_arith_flex_frame_corner(self, float_frame):
const_add = float_frame.add(1)
tm.assert_frame_equal(const_add, float_frame + 1)
# corner cases
result = float_frame.add(float_frame[:0])
tm.assert_frame_equal(result, float_frame * np.nan)
result = float_frame[:0].add(float_frame)
tm.assert_frame_equal(result, float_frame * np.nan)
with pytest.raises(NotImplementedError, match="fill_value"):
float_frame.add(float_frame.iloc[0], fill_value=3)
with pytest.raises(NotImplementedError, match="fill_value"):
float_frame.add(float_frame.iloc[0], axis="index", fill_value=3)
def test_arith_flex_series(self, simple_frame):
df = simple_frame
row = df.xs("a")
col = df["two"]
# after arithmetic refactor, add truediv here
ops = ["add", "sub", "mul", "mod"]
for op in ops:
f = getattr(df, op)
op = getattr(operator, op)
tm.assert_frame_equal(f(row), op(df, row))
tm.assert_frame_equal(f(col, axis=0), op(df.T, col).T)
# special case for some reason
tm.assert_frame_equal(df.add(row, axis=None), df + row)
# cases which will be refactored after big arithmetic refactor
tm.assert_frame_equal(df.div(row), df / row)
tm.assert_frame_equal(df.div(col, axis=0), (df.T / col).T)
# broadcasting issue in GH 7325
df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="int64")
expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis="index")
tm.assert_frame_equal(result, expected)
df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="float64")
expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis="index")
tm.assert_frame_equal(result, expected)
def test_arith_flex_zero_len_raises(self):
# GH 19522 passing fill_value to frame flex arith methods should
# raise even in the zero-length special cases
ser_len0 = pd.Series([], dtype=object)
df_len0 = pd.DataFrame(columns=["A", "B"])
df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
with pytest.raises(NotImplementedError, match="fill_value"):
df.add(ser_len0, fill_value="E")
with pytest.raises(NotImplementedError, match="fill_value"):
df_len0.sub(df["A"], axis=None, fill_value=3)
def test_flex_add_scalar_fill_value(self):
# GH#12723
dat = np.array([0, 1, np.nan, 3, 4, 5], dtype="float")
df = pd.DataFrame({"foo": dat}, index=range(6))
exp = df.fillna(0).add(2)
res = df.add(2, fill_value=0)
tm.assert_frame_equal(res, exp)
class TestFrameArithmetic:
def test_td64_op_nat_casting(self):
# Make sure we don't accidentally treat timedelta64(NaT) as datetime64
# when calling dispatch_to_series in DataFrame arithmetic
ser = pd.Series(["NaT", "NaT"], dtype="timedelta64[ns]")
df = pd.DataFrame([[1, 2], [3, 4]])
result = df * ser
expected = pd.DataFrame({0: ser, 1: ser})
tm.assert_frame_equal(result, expected)
def test_df_add_2d_array_rowlike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
rowlike = arr[[1], :] # shape --> (1, ncols)
assert rowlike.shape == (1, df.shape[1])
expected = pd.DataFrame(
[[2, 4], [4, 6], [6, 8]],
columns=df.columns,
index=df.index,
# specify dtype explicitly to avoid failing
# on 32bit builds
dtype=arr.dtype,
)
result = df + rowlike
tm.assert_frame_equal(result, expected)
result = rowlike + df
tm.assert_frame_equal(result, expected)
def test_df_add_2d_array_collike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
collike = arr[:, [1]] # shape --> (nrows, 1)
assert collike.shape == (df.shape[0], 1)
expected = pd.DataFrame(
[[1, 2], [5, 6], [9, 10]],
columns=df.columns,
index=df.index,
# specify dtype explicitly to avoid failing
# on 32bit builds
dtype=arr.dtype,
)
result = df + collike
tm.assert_frame_equal(result, expected)
result = collike + df
tm.assert_frame_equal(result, expected)
def test_df_arith_2d_array_rowlike_broadcasts(self, all_arithmetic_operators):
# GH#23000
opname = all_arithmetic_operators
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
rowlike = arr[[1], :] # shape --> (1, ncols)
assert rowlike.shape == (1, df.shape[1])
exvals = [
getattr(df.loc["A"], opname)(rowlike.squeeze()),
getattr(df.loc["B"], opname)(rowlike.squeeze()),
getattr(df.loc["C"], opname)(rowlike.squeeze()),
]
expected = pd.DataFrame(exvals, columns=df.columns, index=df.index)
result = getattr(df, opname)(rowlike)
tm.assert_frame_equal(result, expected)
def test_df_arith_2d_array_collike_broadcasts(self, all_arithmetic_operators):
# GH#23000
opname = all_arithmetic_operators
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
collike = arr[:, [1]] # shape --> (nrows, 1)
assert collike.shape == (df.shape[0], 1)
exvals = {
True: getattr(df[True], opname)(collike.squeeze()),
False: getattr(df[False], opname)(collike.squeeze()),
}
dtype = None
if opname in ["__rmod__", "__rfloordiv__"]:
# Series ops may return mixed int/float dtypes in cases where
# DataFrame op will return all-float. So we upcast `expected`
dtype = np.common_type(*[x.values for x in exvals.values()])
expected = pd.DataFrame(exvals, columns=df.columns, index=df.index, dtype=dtype)
result = getattr(df, opname)(collike)
tm.assert_frame_equal(result, expected)
def test_df_bool_mul_int(self):
# GH 22047, GH 22163 multiplication by 1 should result in int dtype,
# not object dtype
df = pd.DataFrame([[False, True], [False, False]])
result = df * 1
# On appveyor this comes back as np.int32 instead of np.int64,
# so we check dtype.kind instead of just dtype
kinds = result.dtypes.apply(lambda x: x.kind)
assert (kinds == "i").all()
result = 1 * df
kinds = result.dtypes.apply(lambda x: x.kind)
assert (kinds == "i").all()
def test_arith_mixed(self):
left = pd.DataFrame({"A": ["a", "b", "c"], "B": [1, 2, 3]})
result = left + left
expected = pd.DataFrame({"A": ["aa", "bb", "cc"], "B": [2, 4, 6]})
tm.assert_frame_equal(result, expected)
def test_arith_getitem_commute(self):
df = pd.DataFrame({"A": [1.1, 3.3], "B": [2.5, -3.9]})
def _test_op(df, op):
result = op(df, 1)
if not df.columns.is_unique:
raise ValueError("Only unique columns supported by this test")
for col in result.columns:
tm.assert_series_equal(result[col], op(df[col], 1))
_test_op(df, operator.add)
_test_op(df, operator.sub)
_test_op(df, operator.mul)
_test_op(df, operator.truediv)
_test_op(df, operator.floordiv)
_test_op(df, operator.pow)
_test_op(df, lambda x, y: y + x)
_test_op(df, lambda x, y: y - x)
_test_op(df, lambda x, y: y * x)
_test_op(df, lambda x, y: y / x)
_test_op(df, lambda x, y: y ** x)
_test_op(df, lambda x, y: x + y)
_test_op(df, lambda x, y: x - y)
_test_op(df, lambda x, y: x * y)
_test_op(df, lambda x, y: x / y)
_test_op(df, lambda x, y: x ** y)
@pytest.mark.parametrize(
"values", [[1, 2], (1, 2), np.array([1, 2]), range(1, 3), deque([1, 2])]
)
def test_arith_alignment_non_pandas_object(self, values):
# GH#17901
df = pd.DataFrame({"A": [1, 1], "B": [1, 1]})
expected = pd.DataFrame({"A": [2, 2], "B": [3, 3]})
result = df + values
tm.assert_frame_equal(result, expected)
def test_arith_non_pandas_object(self):
df = pd.DataFrame(
np.arange(1, 10, dtype="f8").reshape(3, 3),
columns=["one", "two", "three"],
index=["a", "b", "c"],
)
val1 = df.xs("a").values
added = pd.DataFrame(df.values + val1, index=df.index, columns=df.columns)
tm.assert_frame_equal(df + val1, added)
added = pd.DataFrame((df.values.T + val1).T, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val1, axis=0), added)
val2 = list(df["two"])
added = pd.DataFrame(df.values + val2, index=df.index, columns=df.columns)
tm.assert_frame_equal(df + val2, added)
added = pd.DataFrame((df.values.T + val2).T, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val2, axis="index"), added)
val3 = np.random.rand(*df.shape)
added = pd.DataFrame(df.values + val3, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val3), added)
def test_operations_with_interval_categories_index(self, all_arithmetic_operators):
# GH#27415
op = all_arithmetic_operators
ind = pd.CategoricalIndex(pd.interval_range(start=0.0, end=2.0))
data = [1, 2]
df = pd.DataFrame([data], columns=ind)
num = 10
result = getattr(df, op)(num)
expected = pd.DataFrame([[getattr(n, op)(num) for n in data]], columns=ind)
tm.assert_frame_equal(result, expected)
def test_frame_with_frame_reindex(self):
# GH#31623
df = pd.DataFrame(
{
"foo": [pd.Timestamp("2019"), pd.Timestamp("2020")],
"bar": [pd.Timestamp("2018"), pd.Timestamp("2021")],
},
columns=["foo", "bar"],
)
df2 = df[["foo"]]
result = df - df2
expected = pd.DataFrame(
{"foo": [pd.Timedelta(0), pd.Timedelta(0)], "bar": [np.nan, np.nan]},
columns=["bar", "foo"],
)
tm.assert_frame_equal(result, expected)
def test_frame_with_zero_len_series_corner_cases():
# GH#28600
# easy all-float case
df = pd.DataFrame(np.random.randn(6).reshape(3, 2), columns=["A", "B"])
ser = pd.Series(dtype=np.float64)
result = df + ser
expected = pd.DataFrame(df.values * np.nan, columns=df.columns)
tm.assert_frame_equal(result, expected)
result = df == ser
expected = pd.DataFrame(False, index=df.index, columns=df.columns)
tm.assert_frame_equal(result, expected)
# non-float case should not raise on comparison
df2 = pd.DataFrame(df.values.view("M8[ns]"), columns=df.columns)
result = df2 == ser
expected = pd.DataFrame(False, index=df.index, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_zero_len_frame_with_series_corner_cases():
# GH#28600
df = pd.DataFrame(columns=["A", "B"], dtype=np.float64)
ser = pd.Series([1, 2], index=["A", "B"])
result = df + ser
expected = df
tm.assert_frame_equal(result, expected)
def test_frame_single_columns_object_sum_axis_1():
# GH 13758
data = {
"One": pd.Series(["A", 1.2, np.nan]),
}
df = pd.DataFrame(data)
result = df.sum(axis=1)
expected = pd.Series(["A", 1.2, 0])
tm.assert_series_equal(result, expected)
# -------------------------------------------------------------------
# Unsorted
# These arithmetic tests were previously in other files, eventually
# should be parametrized and put into tests.arithmetic
class TestFrameArithmeticUnsorted:
def test_frame_add_tz_mismatch_converts_to_utc(self):
rng = pd.date_range("1/1/2011", periods=10, freq="H", tz="US/Eastern")
df = pd.DataFrame(np.random.randn(len(rng)), index=rng, columns=["a"])
df_moscow = df.tz_convert("Europe/Moscow")
result = df + df_moscow
assert result.index.tz is pytz.utc
result = df_moscow + df
assert result.index.tz is pytz.utc
def test_align_frame(self):
rng = pd.period_range("1/1/2000", "1/1/2010", freq="A")
ts = pd.DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected.values[1::2] = np.nan
tm.assert_frame_equal(result, expected)
half = ts[::2]
result = ts + half.take(np.random.permutation(len(half)))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"op", [operator.add, operator.sub, operator.mul, operator.truediv]
)
def test_operators_none_as_na(self, op):
df = DataFrame(
{"col1": [2, 5.0, 123, None], "col2": [1, 2, 3, 4]}, dtype=object
)
# since filling converts dtypes from object, changed expected to be
# object
filled = df.fillna(np.nan)
result = op(df, 3)
expected = op(filled, 3).astype(object)
expected[com.isna(expected)] = None
tm.assert_frame_equal(result, expected)
result = op(df, df)
expected = op(filled, filled).astype(object)
expected[com.isna(expected)] = None
tm.assert_frame_equal(result, expected)
result = op(df, df.fillna(7))
tm.assert_frame_equal(result, expected)
result = op(df.fillna(7), df)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.parametrize("op,res", [("__eq__", False), ("__ne__", True)])
# TODO: not sure what's correct here.
@pytest.mark.filterwarnings("ignore:elementwise:FutureWarning")
def test_logical_typeerror_with_non_valid(self, op, res, float_frame):
# we are comparing floats vs a string
result = getattr(float_frame, op)("foo")
assert bool(result.all().all()) is res
def test_binary_ops_align(self):
# test aligning binary ops
# GH 6681
index = MultiIndex.from_product(
[list("abc"), ["one", "two", "three"], [1, 2, 3]],
names=["first", "second", "third"],
)
df = DataFrame(
np.arange(27 * 3).reshape(27, 3),
index=index,
columns=["value1", "value2", "value3"],
).sort_index()
idx = pd.IndexSlice
for op in ["add", "sub", "mul", "div", "truediv"]:
opa = getattr(operator, op, None)
if opa is None:
continue
x = Series([1.0, 10.0, 100.0], [1, 2, 3])
result = getattr(df, op)(x, level="third", axis=0)
expected = pd.concat(
[opa(df.loc[idx[:, :, i], :], v) for i, v in x.items()]
).sort_index()
tm.assert_frame_equal(result, expected)
x = Series([1.0, 10.0], ["two", "three"])
result = getattr(df, op)(x, level="second", axis=0)
expected = (
pd.concat([opa(df.loc[idx[:, i], :], v) for i, v in x.items()])
.reindex_like(df)
.sort_index()
)
tm.assert_frame_equal(result, expected)
# GH9463 (alignment level of dataframe with series)
midx = MultiIndex.from_product([["A", "B"], ["a", "b"]])
df = DataFrame(np.ones((2, 4), dtype="int64"), columns=midx)
s = pd.Series({"a": 1, "b": 2})
df2 = df.copy()
df2.columns.names = ["lvl0", "lvl1"]
s2 = s.copy()
s2.index.name = "lvl1"
# different cases of integer/string level names:
res1 = df.mul(s, axis=1, level=1)
res2 = df.mul(s2, axis=1, level=1)
res3 = df2.mul(s, axis=1, level=1)
res4 = df2.mul(s2, axis=1, level=1)
res5 = df2.mul(s, axis=1, level="lvl1")
res6 = df2.mul(s2, axis=1, level="lvl1")
exp = DataFrame(
np.array([[1, 2, 1, 2], [1, 2, 1, 2]], dtype="int64"), columns=midx
)
for res in [res1, res2]:
tm.assert_frame_equal(res, exp)
exp.columns.names = ["lvl0", "lvl1"]
for res in [res3, res4, res5, res6]:
tm.assert_frame_equal(res, exp)
def test_add_with_dti_mismatched_tzs(self):
base = pd.DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"], tz="UTC")
idx1 = base.tz_convert("Asia/Tokyo")[:2]
idx2 = base.tz_convert("US/Eastern")[1:]
df1 = DataFrame({"A": [1, 2]}, index=idx1)
df2 =
|
DataFrame({"A": [1, 1]}, index=idx2)
|
pandas.DataFrame
|
from __future__ import annotations
from typing import Dict, List, Optional, Type, cast
import dcp.storage.base as storage
import pandas as pd
from commonmodel import (
DEFAULT_FIELD_TYPE,
Boolean,
Date,
DateTime,
Field,
FieldType,
Float,
Integer,
Schema,
Time,
)
from commonmodel.field_types import Binary, Decimal, Json, LongBinary, LongText, Text
from dateutil import parser
from dcp.data_format.base import DataFormat, DataFormatBase
from dcp.data_format.formats.memory.records import (
cast_python_object_to_field_type,
select_field_type,
)
from dcp.data_format.handler import FormatHandler
from loguru import logger
from pandas import DataFrame
class DataFrameFormat(DataFormatBase[DataFrame]):
natural_storage_class = storage.MemoryStorageClass
natural_storage_engine = storage.LocalPythonStorageEngine
nickname = "dataframe"
class PythonDataframeHandler(FormatHandler):
for_data_formats = [DataFrameFormat]
for_storage_engines = [storage.LocalPythonStorageEngine]
def infer_data_format(self, name, storage) -> Optional[DataFormat]:
obj = storage.get_api().get(name)
if isinstance(obj, pd.DataFrame):
return DataFrameFormat
return None
def infer_field_names(self, name, storage) -> List[str]:
return storage.get_api().get(name).columns
def infer_field_type(
self, name: str, storage: storage.Storage, field: str
) -> FieldType:
df = storage.get_api().get(name)
        df = cast(DataFrame, df)  # cast is a no-op at runtime; bind the result so type checkers see a DataFrame
series = df[field]
ft = pandas_series_to_field_type(series)
return ft
def cast_to_field_type(
self, name: str, storage: storage.Storage, field: str, field_type: FieldType
):
df = storage.get_api().get(name)
        df = cast(DataFrame, df)  # cast is a no-op at runtime; bind the result so type checkers see a DataFrame
if field in df.columns:
df[field] = cast_series_to_field_type(df[field], field_type)
storage.get_api().put(name, df) # Unnecessary?
def create_empty(self, name, storage, schema: Schema):
df =
|
DataFrame()
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
class DataParser:
@staticmethod
def _parse_companies(cmp_list):
"""
Создает DataFrame компаний по списку словарей из запроса
:param cmp_list: list of dicts
:return: pandas.DataFrame
"""
ret_df = pd.DataFrame(columns=['ID', 'TITLE', 'CMP_TYPE_CUSTOMER', 'CMP_TYPE_PARTNER'])
if cmp_list:
cmp_df = pd.DataFrame(cmp_list)
cmp_df['CMP_TYPE_CUSTOMER'] = cmp_df['COMPANY_TYPE'].apply(lambda x: 1 if (x == 'CUSTOMER') else 0)
cmp_df['CMP_TYPE_PARTNER'] = cmp_df['COMPANY_TYPE'].apply(lambda x: 1 if (x == 'PARTNER') else 0)
cmp_df = cmp_df.drop(columns=['COMPANY_TYPE'], axis=1)
ret_df = pd.concat([ret_df, cmp_df])
return ret_df
@staticmethod
def _parse_deals(deal_list):
"""
Создает DataFrame сделок по списку словарей из запроса
:param deal_list: list of dicts
:return: pandas.DataFrame
"""
ret_df = pd.DataFrame(columns=[
'OPPORTUNITY_DEAL_Q01', 'PROBABILITY_DEAL_Q01', 'TIME_DIFF_BEGIN_CLOSE_DEAL_Q01',
'OPPORTUNITY_DEAL_Q09', 'PROBABILITY_DEAL_Q09', 'TIME_DIFF_BEGIN_CLOSE_DEAL_Q09',
'OPPORTUNITY_DEAL_MEAN', 'PROBABILITY', 'TIME_DIFF_BEGIN_CLOSE_DEAL_MEAN', 'CLOSED',
'OPPORTUNITY_DEAL_MEDIAN', 'TIME_DIFF_BEGIN_CLOSE_DEAL_MEDIAN', 'DEAL_BY_YEAR'])
ret_df.index.name = 'COMPANY_ID'
if deal_list:
deal_df = pd.DataFrame(deal_list)
deal_df['CLOSED'] = deal_df['CLOSED'].apply(lambda x: 1 if (x == 'Y') else 0)
deal_df['OPPORTUNITY'] = pd.to_numeric(deal_df['OPPORTUNITY'])
deal_df['PROBABILITY'] = pd.to_numeric(deal_df['PROBABILITY'])
deal_df['BEGINDATE'] = pd.to_datetime(deal_df['BEGINDATE'])
deal_df['CLOSEDATE'] = pd.to_datetime(deal_df['CLOSEDATE'])
deal_df['TIME_DIFF_BEGIN_CLOSE'] = (deal_df['CLOSEDATE'] - deal_df['BEGINDATE']).astype(
'timedelta64[h]') / 24
deal_group = deal_df.groupby(by='COMPANY_ID')
deal_count = pd.DataFrame(deal_group['CLOSED'].count())
deal_date_max = deal_group['CLOSEDATE'].max()
deal_date_min = deal_group['BEGINDATE'].min()
d = {'YEAR': (deal_date_max - deal_date_min).astype('timedelta64[h]') / (24 * 365)}
deal_date_max_min_diff = pd.DataFrame(data=d)
deal_by_year = pd.DataFrame()
deal_by_year['DEAL_BY_YEAR'] = (deal_count['CLOSED'] / deal_date_max_min_diff['YEAR']).astype(np.float32)
            deal_quantile01 = deal_group[['OPPORTUNITY', 'PROBABILITY', 'TIME_DIFF_BEGIN_CLOSE']].quantile(0.1)
            deal_quantile09 = deal_group[['OPPORTUNITY', 'PROBABILITY', 'TIME_DIFF_BEGIN_CLOSE']].quantile(0.9)
            deal_mean = deal_group[['OPPORTUNITY', 'PROBABILITY', 'TIME_DIFF_BEGIN_CLOSE', 'CLOSED']].mean()
            deal_median = deal_group[['OPPORTUNITY', 'TIME_DIFF_BEGIN_CLOSE']].median()
deal_result = pd.merge(deal_quantile01, deal_quantile09, on='COMPANY_ID',
suffixes=['_DEAL_Q01', '_DEAL_Q09'])
deal_result1 = pd.merge(deal_mean, deal_median, on='COMPANY_ID', suffixes=['_DEAL_MEAN', '_DEAL_MEDIAN'])
deal_result = pd.merge(deal_result, deal_result1, on='COMPANY_ID')
deal_result = pd.merge(deal_result, deal_by_year, on='COMPANY_ID')
deal_result = deal_result.mask(np.isinf(deal_result))
ret_df = pd.concat([ret_df, deal_result])
return ret_df
@staticmethod
def _parse_invoices(inv_list):
"""
Создает DataFrame счетов по списку словарей из запроса
:param inv_list: list of dicts
:return: pandas.DataFrame
"""
ret_df = pd.DataFrame(columns=[
'PRICE_INV_Q01', 'TIME_DIFF_PAYED_BILL_INV_Q01', 'TIME_DIFF_PAYBEF_PAYED_INV_Q01',
'PRICE_INV_Q09', 'TIME_DIFF_PAYED_BILL_INV_Q09', 'TIME_DIFF_PAYBEF_PAYED_INV_Q09', 'PRICE_INV_MEAN',
'TIME_DIFF_PAYED_BILL_INV_MEAN', 'TIME_DIFF_PAYBEF_PAYED_INV_MEAN', 'PAYED', 'STATUS_ID_P',
'STATUS_ID_D', 'STATUS_ID_N', 'STATUS_ID_T', 'PRICE_INV_MEDIAN', 'TIME_DIFF_PAYED_BILL_INV_MEDIAN',
'TIME_DIFF_PAYBEF_PAYED_INV_MEDIAN', 'MONTH_TOGETHER_INV', 'DEAL_BY_YEAR'])
ret_df.index.name = 'UF_COMPANY_ID'
if inv_list:
inv_df = pd.DataFrame(inv_list)
inv_df['PRICE'] = pd.to_numeric(inv_df['PRICE'])
inv_df['DATE_BILL'] = pd.to_datetime(inv_df['DATE_BILL'])
inv_df['DATE_PAYED'] = pd.to_datetime(inv_df['DATE_PAYED'])
inv_df['DATE_PAY_BEFORE'] = pd.to_datetime(inv_df['DATE_PAY_BEFORE'])
inv_df['TIME_DIFF_PAYED_BILL'] = (inv_df['DATE_PAYED'] - inv_df['DATE_BILL']).astype('timedelta64[h]') / 24
inv_df['TIME_DIFF_PAYBEF_PAYED'] = (inv_df['DATE_PAY_BEFORE'] - inv_df['DATE_PAYED']).astype('timedelta64[h]') / 24
inv_df['PAYED'] = inv_df['PAYED'].apply(lambda x: 1 if (x == 'Y') else 0)
inv_df['STATUS_ID_P'] = inv_df['STATUS_ID'].apply(lambda x: 1 if (x == 'P') else 0)
inv_df['STATUS_ID_D'] = inv_df['STATUS_ID'].apply(lambda x: 1 if (x == 'D') else 0)
inv_df['STATUS_ID_N'] = inv_df['STATUS_ID'].apply(lambda x: 1 if (x == 'N') else 0)
inv_df['STATUS_ID_T'] = inv_df['STATUS_ID'].apply(lambda x: 1 if (x == 'T') else 0)
inv_group = inv_df.groupby(by='UF_COMPANY_ID')
inv_date_max = inv_group['DATE_PAYED'].max()
inv_date_min = inv_group['DATE_PAYED'].min()
inv_month_together =
|
pd.DataFrame()
|
pandas.DataFrame
|
import math
import os
import time
from datetime import datetime
from math import inf
from heapq import heappop, heappush
import collections
import functools
from collections import defaultdict
import heapq
import random
import networkx as nx
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import gurobipy as gp
from gurobipy import *
from shapely.geometry import Point,LineString
import geopandas as gpd
import osmnx as ox
class World:
"""
一个类
"""
Observation = collections.namedtuple('Observation', 'traveltime origin destination') # 起点位置的集合
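    # e.g. World.Observation(traveltime=3.2, origin=7, destination=19)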
def __init__(self, type=0, num=100, sigma=0, reg=0, time_limit=0.6):
"""
nodeUrl: 图对象的点的标识信息和位置信息
edgeUrl: 图对象的弧的标识信息、位置信息以及连接信息
type: 选择图对象的类型,0为small,1为normal
超参数num,sigma,reg
"""
self.type = type
self.num = num
self.sigma = sigma
self.reg = reg
self.time_limit = time_limit
def True_Graph(self):
"""
如果type=0时,加载small_model的真实图。如果type=1时,加载normal_model的真实图。如果其他情况,加载manhattan的真实图。
:return: 返回一个加载好的的图G对象
"""
if self.type == 0:
            # <load input files>
            df_nodelist = pd.read_csv("../train_dataset/smallnodelist.csv")
            df_edgelist = pd.read_csv("../train_dataset/smalledgelist.csv")
            # build a multi-directed graph: add_edge(1, 2) and add_edge(2, 1) are separate arcs
            T = nx.MultiDiGraph()  # initialize the graph and load nodes and edges
            T.add_nodes_from(df_nodelist['node'])  # add nodes
            T.add_edges_from(zip(df_edgelist['node1'], df_edgelist['node2']))  # add edges
            # <set arcTime and distance on the synthetic network>
            for u, v, d in T.edges(data=True):
                T.edges[u, v, 0]['distance'] = 1
            for u, v, d in T.edges(data=True):  # travel time of the outside arcs
T.edges[u, v, 0]['arcTime'] = 1
T.edges[7, 8, 0]['arcTime'] = 4
T.edges[8, 7, 0]['arcTime'] = 4
T.edges[8, 9, 0]['arcTime'] = 4
T.edges[9, 8, 0]['arcTime'] = 4
T.edges[12, 13, 0]['arcTime'] = 4
T.edges[13, 12, 0]['arcTime'] = 4
T.edges[13, 14, 0]['arcTime'] = 4
T.edges[14, 13, 0]['arcTime'] = 4
T.edges[17, 18, 0]['arcTime'] = 4
T.edges[18, 17, 0]['arcTime'] = 4
T.edges[18, 19, 0]['arcTime'] = 4
T.edges[19, 18, 0]['arcTime'] = 4
T.edges[7, 12, 0]['arcTime'] = 4
T.edges[12, 7, 0]['arcTime'] = 4
T.edges[12, 17, 0]['arcTime'] = 4
T.edges[17, 12, 0]['arcTime'] = 4
T.edges[8, 13, 0]['arcTime'] = 4
T.edges[13, 8, 0]['arcTime'] = 4
T.edges[13, 18, 0]['arcTime'] = 4
T.edges[18, 13, 0]['arcTime'] = 4
T.edges[9, 14, 0]['arcTime'] = 4
T.edges[14, 9, 0]['arcTime'] = 4
T.edges[14, 19, 0]['arcTime'] = 4
T.edges[19, 14, 0]['arcTime'] = 4
return T
elif self.type == 1:
            # <load input files>
            df_nodelist = pd.read_csv('../train_dataset/normalnodelist.csv')
            df_edgelist = pd.read_csv('../train_dataset/normaledgelist.csv')
            # build a multi-directed graph: add_edge(1, 2) and add_edge(2, 1) are separate arcs
            T = nx.MultiDiGraph()  # initialize the graph and load nodes and edges
            T.add_nodes_from(df_nodelist['node'])  # add nodes
            T.add_edges_from(zip(df_edgelist['node1'], df_edgelist['node2']))  # add edges
            # <set arcTime and distance on the synthetic network>
            for u, v, d in T.edges(data=True):
                T.edges[u, v, 0]['distance'] = 1
            for u, v, d in T.edges(data=True):  # travel time of the outside arcs
                T.edges[u, v, 0]['arcTime'] = 1
            T.edges[31, 32, 0]['arcTime'] = 4  # travel time of the upper-left block
            T.edges[32, 31, 0]['arcTime'] = 4
            T.edges[31, 51, 0]['arcTime'] = 4  # weights of row 2
T.edges[51, 31, 0]['arcTime'] = 4
for i in range(32, 39):
T.edges[i, i - 1, 0]['arcTime'] = 4
T.edges[i - 1, i, 0]['arcTime'] = 4
T.edges[i, i + 1, 0]['arcTime'] = 4
T.edges[i + 1, i, 0]['arcTime'] = 4
T.edges[i, i + 20, 0]['arcTime'] = 4
T.edges[i + 20, i, 0]['arcTime'] = 4
T.edges[39, 38, 0]['arcTime'] = 4
T.edges[38, 39, 0]['arcTime'] = 4
T.edges[39, 59, 0]['arcTime'] = 4
T.edges[59, 39, 0]['arcTime'] = 4
            for j in range(51, 191, 20):  # weights of rows 3 through 9
T.edges[j, j + 1, 0]['arcTime'] = 4
T.edges[j + 1, j, 0]['arcTime'] = 4
T.edges[j, j - 20, 0]['arcTime'] = 4
T.edges[j - 20, j, 0]['arcTime'] = 4
T.edges[j, j + 20, 0]['arcTime'] = 4
T.edges[j + 20, j, 0]['arcTime'] = 4
for i in range(j + 1, j + 8):
T.edges[i, i - 1, 0]['arcTime'] = 4
T.edges[i - 1, i, 0]['arcTime'] = 4
T.edges[i, i + 1, 0]['arcTime'] = 4
T.edges[i + 1, i, 0]['arcTime'] = 4
T.edges[i, i - 20, 0]['arcTime'] = 4
                    T.edges[i - 20, i, 0]['arcTime'] = 4
T.edges[i, i + 20, 0]['arcTime'] = 4
T.edges[i + 20, i, 0]['arcTime'] = 4
T.edges[j + 8, j + 8 - 1, 0]['arcTime'] = 4
T.edges[j + 8 - 1, j + 8, 0]['arcTime'] = 4
T.edges[j + 8, j + 8 - 20, 0]['arcTime'] = 4
T.edges[j + 8 - 20, j + 8, 0]['arcTime'] = 4
T.edges[j + 8, j + 8 + 20, 0]['arcTime'] = 4
T.edges[j + 8 + 20, j + 8, 0]['arcTime'] = 4
            T.edges[191, 192, 0]['arcTime'] = 4  # weights of row 10
T.edges[192, 191, 0]['arcTime'] = 4
T.edges[191, 171, 0]['arcTime'] = 4
T.edges[171, 191, 0]['arcTime'] = 4
for i in range(192, 199):
T.edges[i, i - 1, 0]['arcTime'] = 4
T.edges[i - 1, i, 0]['arcTime'] = 4
T.edges[i, i + 1, 0]['arcTime'] = 4
T.edges[i + 1, i, 0]['arcTime'] = 4
T.edges[i, i - 20, 0]['arcTime'] = 4
T.edges[i - 20, i, 0]['arcTime'] = 4
T.edges[199, 198, 0]['arcTime'] = 4
T.edges[198, 199, 0]['arcTime'] = 4
T.edges[199, 179, 0]['arcTime'] = 4
T.edges[179, 199, 0]['arcTime'] = 4
            T.edges[202, 203, 0]['arcTime'] = 2  # travel time of the lower-right block
            T.edges[203, 202, 0]['arcTime'] = 2
            T.edges[202, 222, 0]['arcTime'] = 2  # weights of row 11
T.edges[222, 202, 0]['arcTime'] = 2
for i in range(203, 210):
T.edges[i, i - 1, 0]['arcTime'] = 2
T.edges[i - 1, i, 0]['arcTime'] = 2
T.edges[i, i + 1, 0]['arcTime'] = 2
T.edges[i + 1, i, 0]['arcTime'] = 2
T.edges[i, i + 20, 0]['arcTime'] = 2
T.edges[i + 20, i, 0]['arcTime'] = 2
T.edges[210, 209, 0]['arcTime'] = 2
T.edges[209, 210, 0]['arcTime'] = 2
T.edges[210, 230, 0]['arcTime'] = 2
T.edges[230, 210, 0]['arcTime'] = 2
            for j in range(222, 362, 20):  # weights of rows 12 through 18
T.edges[j, j + 1, 0]['arcTime'] = 2
T.edges[j + 1, j, 0]['arcTime'] = 2
T.edges[j, j - 20, 0]['arcTime'] = 2
T.edges[j - 20, j, 0]['arcTime'] = 2
T.edges[j, j + 20, 0]['arcTime'] = 2
T.edges[j + 20, j, 0]['arcTime'] = 2
for i in range(j + 1, j + 8):
T.edges[i, i - 1, 0]['arcTime'] = 2
T.edges[i - 1, i, 0]['arcTime'] = 2
T.edges[i, i + 1, 0]['arcTime'] = 2
T.edges[i + 1, i, 0]['arcTime'] = 2
T.edges[i, i - 20, 0]['arcTime'] = 2
T.edges[i - 20, i, 0]['arcTime'] = 2
T.edges[i, i + 20, 0]['arcTime'] = 2
                    T.edges[i + 20, i, 0]['arcTime'] = 2
                T.edges[j + 8, j + 8 - 1, 0]['arcTime'] = 2
                T.edges[j + 8 - 1, j + 8, 0]['arcTime'] = 2
T.edges[j + 8, j + 8 - 20, 0]['arcTime'] = 2
T.edges[j + 8 - 20, j + 8, 0]['arcTime'] = 2
            T.edges[362, 363, 0]['arcTime'] = 2  # weights of row 19
T.edges[363, 362, 0]['arcTime'] = 2
T.edges[362, 342, 0]['arcTime'] = 2
T.edges[342, 362, 0]['arcTime'] = 2
for i in range(363, 370):
T.edges[i, i - 1, 0]['arcTime'] = 2
T.edges[i - 1, i, 0]['arcTime'] = 2
T.edges[i, i + 1, 0]['arcTime'] = 2
T.edges[i + 1, i, 0]['arcTime'] = 2
T.edges[i, i - 20, 0]['arcTime'] = 2
T.edges[i - 20, i, 0]['arcTime'] = 2
T.edges[370, 369, 0]['arcTime'] = 2
T.edges[369, 370, 0]['arcTime'] = 2
T.edges[370, 350, 0]['arcTime'] = 2
T.edges[350, 370, 0]['arcTime'] = 2
return T
else:
            # the true arc travel times of the Manhattan graph are unknown
pass
def generate_distribution(self):
"""
对origin和destination进行均匀分布采样
:para num: 产生的观察样本的数量
:return: 返回origin和destination的均匀列表
"""
if self.type == 0:
            # <random sampling block>
            origin_observations = []  # uniformly sampled origins
            for i in range(self.num):
                origin_observations.append(round(random.uniform(1, 25)))
            destination_observations = []  # uniformly sampled destinations
            for i in range(self.num):
                destination_observations.append(round(random.uniform(1, 25)))
            origin_destination_observations = []  # uniformly sampled origin/destination pairs
for i in range(self.num):
if origin_observations[i] != destination_observations[i]:
origin_destination_observations.append([origin_observations[i], destination_observations[i]])
return origin_destination_observations
elif self.type == 1:
            # <random sampling block>
            origin_observations = []  # uniformly sampled origins
            for i in range(self.num):
                origin_observations.append(round(random.uniform(1, 400)))
            destination_observations = []  # uniformly sampled destinations
            for i in range(self.num):
                destination_observations.append(round(random.uniform(1, 400)))
            origin_destination_observations = []  # uniformly sampled origin/destination pairs
for i in range(self.num):
if origin_observations[i] != destination_observations[i]:
origin_destination_observations.append([origin_observations[i], destination_observations[i]])
return origin_destination_observations
else:
            # real data does not need synthetic observations
pass
def lognormal_distribution(self, origin, destination):
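        # Sample a noisy observation of the true shortest-path travel time: log-normal noise
        # with median equal to the true travel time and dispersion controlled by self.sigma.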
T = self.True_Graph()
travelTime, path = self.modified_dijkstras(T, origin, destination)
mu = math.log(travelTime)
return random.lognormvariate(mu, self.sigma)
    def get_observations(self):  # get_observations is a generator
"""Return a generator that yields observation objects"""
origin_destination_observations = self.generate_distribution()
for i in range(len(origin_destination_observations)):
traveltime = self.lognormal_distribution(origin_destination_observations[i][0],
origin_destination_observations[i][1])
yield World.Observation(traveltime, origin_destination_observations[i][0],
origin_destination_observations[i][1])
def project(self, G, lng, lat):
"""
将某个点的坐标按照欧式距离映射到网络中最近的拓扑点上
:Param G: 拓扑图
:Param lng: 经度
:Param lat: 纬度
:Return: 返回最近的点的OSMid
"""
nearest_node = None
shortest_distance = inf
for n, d in G.nodes(data=True):
            # d['x'] is longitude, d['y'] is latitude
new_shortest_distance = ox.distance.euclidean_dist_vec(lng, lat, d['x'], d['y'])
if new_shortest_distance < shortest_distance:
nearest_node = n
shortest_distance = new_shortest_distance
return nearest_node, shortest_distance
def get_df_observations(self):
"""
将观察的样本数据存到同级文件夹data中的observed_data.csv文件中,并读取成dataframe格式
:return: 返回观察的样本数据的dataframe格式
"""
if self.type == 0:
            os.makedirs(os.path.join('..', 'train_dataset'), exist_ok=True)  # create the synthetic dataset directory and store the data as a CSV (comma-separated values) file
data_file = os.path.join('..', 'train_dataset', 'small_synthetic_observed_data.csv')
with open(data_file, 'w') as f:
f.write('traveltime,origin,destination\n')
for item in self.get_observations():
if item[1] != item[2]:
f.write('{0},{1},{2}\n'.format(item[0], item[1], item[2]))
df_observed_data = pd.read_csv("../train_dataset/small_synthetic_observed_data.csv")
return df_observed_data
elif self.type == 1:
            os.makedirs(os.path.join('..', 'train_dataset'), exist_ok=True)  # create the synthetic dataset directory and store the data as a CSV (comma-separated values) file
data_file = os.path.join('..', 'train_dataset', 'normal_synthetic_observed_data.csv')
with open(data_file, 'w') as f:
f.write('traveltime,origin,destination\n')
for item in self.get_observations():
if item[1] != item[2]:
f.write('{0},{1},{2}\n'.format(item[0], item[1], item[2]))
df_observed_data = pd.read_csv("../train_dataset/normal_synthetic_observed_data.csv")
return df_observed_data
else:
            # fetch the Manhattan networkx graph
            G = ox.graph_from_place('Manhattan, New York City, New York, USA', network_type='drive')
            # convert the networkx object into GeoDataFrames
            gdf_nodes, gdf_edges = ox.graph_to_gdfs(G)
            # snap the observations to the nearest network nodes and build an observation dataframe
df_dataset = pd.read_csv("../train_dataset/dataset.csv")
df_dataset['dist'] = df_dataset.apply(
lambda row: self.project(G, row['pickup_longitude'], row['pickup_latitude'])[1] +
self.project(G, row['dropoff_longitude'], row['dropoff_latitude'])[1], axis=1)
df_dataset = df_dataset[df_dataset['dist'] <= 0.002]
df_dataset.to_csv("../train_dataset/processed_dataset.csv")
            # snap the observations to the nearest network nodes and build an observation dataframe
            df_dataset = pd.read_csv("../train_dataset/processed_dataset.csv")
            # note the use of axis=1
df_dataset['pickup_osmid'] = df_dataset.apply(
lambda row: self.project(G, row['pickup_longitude'], row['pickup_latitude'])[0], axis=1)
df_dataset['dropoff_osmid'] = df_dataset.apply(
lambda row: self.project(G, row['dropoff_longitude'], row['dropoff_latitude'])[0], axis=1)
            # d['x'] is longitude, d['y'] is latitude
df_dataset['projected_pickup_longitude'] = df_dataset.apply(lambda row: G.nodes[row['pickup_osmid']]['x'],
axis=1)
df_dataset['projected_pickup_latitude'] = df_dataset.apply(lambda row: G.nodes[row['pickup_osmid']]['y'],
axis=1)
df_dataset['geometry'] = df_dataset.apply(
lambda row: Point(float(row['projected_pickup_longitude']), float(row['projected_pickup_latitude'])),
axis=1)
            # convert the dataframe into a GeoDataFrame
            df_dataset_geo = gpd.GeoDataFrame(df_dataset, crs=gdf_edges.crs, geometry=df_dataset.geometry)
            os.makedirs(os.path.join('..', 'train_dataset'), exist_ok=True)  # create the dataset directory and store the data as a CSV (comma-separated values) file
data_file = os.path.join('..', 'train_dataset', 'real_observed_data.csv')
with open(data_file, 'w') as f:
f.write('traveltime,origin_osmid,destination_osmid\n')
for i in range(len(df_dataset_geo)):
if df_dataset_geo.iloc[i, 11] != df_dataset_geo.iloc[i, 12] and df_dataset_geo.iloc[
i, 11] / 60 >= 1 and df_dataset_geo.iloc[i, 11] / 60 <= 60:
f.write('{0},{1},{2}\n'.format(df_dataset_geo.iloc[i, 11] / 60, df_dataset_geo.iloc[i, 13],
df_dataset_geo.iloc[i, 14]))
df_observed_data = pd.read_csv("../train_dataset/real_observed_data.csv")
return df_observed_data
def get_train_dataset(self):
"""
将观察的样本数据存到同级文件夹data中的observed_data.csv文件中,并读取成dataframe格式
:return: 返回观察的样本数据的dataframe格式
"""
if self.type == 0:
            os.makedirs(os.path.join('..', 'train_dataset'), exist_ok=True)  # create the synthetic dataset directory and store the data as a CSV (comma-separated values) file
data_file = os.path.join('..', 'train_dataset', 'small_train_data.csv')
with open(data_file, 'w') as f:
f.write('traveltime,origin,destination\n')
for item in self.get_observations():
if item[1] != item[2]:
f.write('{0},{1},{2}\n'.format(item[0], item[1], item[2]))
df_train_data = pd.read_csv("../train_dataset/small_train_data.csv")
return df_train_data
elif self.type == 1:
            os.makedirs(os.path.join('..', 'train_dataset'), exist_ok=True)  # create the synthetic dataset directory and store the data as a CSV (comma-separated values) file
data_file = os.path.join('..', 'train_dataset', 'normal_train_data.csv')
with open(data_file, 'w') as f:
f.write('traveltime,origin,destination\n')
for item in self.get_observations():
if item[1] != item[2]:
f.write('{0},{1},{2}\n'.format(item[0], item[1], item[2]))
df_train_data = pd.read_csv("../train_dataset/normal_train_data.csv")
return df_train_data
else:
            # fetch the Manhattan networkx graph
            G = ox.graph_from_place('Manhattan, New York City, New York, USA', network_type='drive')
            # convert the networkx object into GeoDataFrames
            gdf_nodes, gdf_edges = ox.graph_to_gdfs(G)
            # snap the observations to the nearest network nodes and build an observation dataframe
df_dataset = pd.read_csv("../train_dataset/train_dataset.csv")
df_dataset['dist'] = df_dataset.apply(
lambda row: self.project(G, row['pickup_longitude'], row['pickup_latitude'])[1] +
self.project(G, row['dropoff_longitude'], row['dropoff_latitude'])[1], axis=1)
df_dataset = df_dataset[df_dataset['dist'] <= 0.002]
df_dataset.to_csv("../train_dataset/processed_dataset.csv")
            # snap the observations to the nearest network nodes and build an observation dataframe
            df_dataset = pd.read_csv("../train_dataset/processed_dataset.csv")
            # note the use of axis=1
df_dataset['pickup_osmid'] = df_dataset.apply(
lambda row: self.project(G, row['pickup_longitude'], row['pickup_latitude'])[0], axis=1)
df_dataset['dropoff_osmid'] = df_dataset.apply(
lambda row: self.project(G, row['dropoff_longitude'], row['dropoff_latitude'])[0], axis=1)
            # d['x'] is longitude, d['y'] is latitude
df_dataset['projected_pickup_longitude'] = df_dataset.apply(lambda row: G.nodes[row['pickup_osmid']]['x'],
axis=1)
df_dataset['projected_pickup_latitude'] = df_dataset.apply(lambda row: G.nodes[row['pickup_osmid']]['y'],
axis=1)
df_dataset['geometry'] = df_dataset.apply(
lambda row: Point(float(row['projected_pickup_longitude']), float(row['projected_pickup_latitude'])),
axis=1)
            # convert the dataframe into a GeoDataFrame
            df_dataset_geo = gpd.GeoDataFrame(df_dataset, crs=gdf_edges.crs, geometry=df_dataset.geometry)
            os.makedirs(os.path.join('..', 'train_dataset'), exist_ok=True)  # create the dataset directory and store the data as a CSV (comma-separated values) file
data_file = os.path.join('..', 'train_dataset', 'real_train_data.csv')
with open(data_file, 'w') as f:
f.write('traveltime,origin_osmid,destination_osmid\n')
for i in range(len(df_dataset_geo)):
if df_dataset_geo.iloc[i, 11] != df_dataset_geo.iloc[i, 12] and df_dataset_geo.iloc[
i, 11] / 60 >= 1 and df_dataset_geo.iloc[i, 11] / 60 <= 60:
f.write('{0},{1},{2}\n'.format(df_dataset_geo.iloc[i, 11] / 60, df_dataset_geo.iloc[i, 13],
df_dataset_geo.iloc[i, 14]))
df_train_data = pd.read_csv("../train_dataset/real_train_data.csv")
return df_train_data
def modified_dijkstras(self, G, origin, destination):
"""
最短路算法
:return: 返回一个traveltime和path
"""
count = 0
paths_and_distances = {}
for node in G.nodes():
paths_and_distances[node] = [inf, [origin]]
paths_and_distances[origin][0] = 0
vertices_to_explore = [(0, origin)]
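        # min-heap of (tentative distance, node) pairs used as the search frontier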
while vertices_to_explore:
current_distance, current_vertex = heappop(vertices_to_explore)
for neighbor in G.neighbors(current_vertex):
edge_weight = G.get_edge_data(current_vertex, neighbor, 0)['arcTime']
new_distance = current_distance + edge_weight
new_path = paths_and_distances[current_vertex][1] + [neighbor]
if new_distance < paths_and_distances[neighbor][0]:
paths_and_distances[neighbor][0] = new_distance
paths_and_distances[neighbor][1] = new_path
heappush(vertices_to_explore, (new_distance, neighbor))
count += 1
return paths_and_distances[destination]
def Graph(self):
"""
加载初始化人工网络
:return: 返回一个加载好的的图G对象
"""
if self.type == 0:
            # <load input files>
            df_nodelist = pd.read_csv('../train_dataset/smallnodelist.csv')
            df_edgelist = pd.read_csv('../train_dataset/smalledgelist.csv')
            G = nx.MultiDiGraph()  # initialize the graph and load nodes and edges
            G.add_nodes_from(df_nodelist['node'])  # add nodes
            G.add_edges_from(zip(df_edgelist['node1'], df_edgelist['node2']))  # add edges
            # <set the weights of the synthetic network>
            # nodes are keyed by one id while edges are keyed by two ids
            # set the x and y coordinates of each node so geometry can be generated automatically
            for u, d in G.nodes(data=True):
                u_lng = df_nodelist[df_nodelist.node == u].values.squeeze()[1]
                u_lat = df_nodelist[df_nodelist.node == u].values.squeeze()[2]
                d['y'] = u_lat
                d['x'] = u_lng
                # d['y'] = 0
                # d['x'] = 0
            # two-way streets, hence a multigraph
            for u, v, d in G.edges(data=True):  # travel time of the outside arcs
                G.edges[u, v, 0]['arcTime'] = 1
            for u, v, d in G.edges(data=True):
                G.edges[u, v, 0]['distance'] = 1
            # set the CRS of the graph object
G.graph['crs'] = "epsg:4326"
return G
elif self.type == 1:
            # <load input files>
            df_nodelist = pd.read_csv('../train_dataset/normalnodelist.csv')
            df_edgelist = pd.read_csv('../train_dataset/normaledgelist.csv')
            G = nx.MultiDiGraph()  # initialize the graph and load nodes and edges
            G.add_nodes_from(df_nodelist['node'])  # add nodes
            G.add_edges_from(zip(df_edgelist['node1'], df_edgelist['node2']))  # add edges
            # <set the weights of the synthetic network>
            # nodes are keyed by one id while edges are keyed by two ids
            # set the x and y coordinates of each node so geometry can be generated automatically
            for u, d in G.nodes(data=True):
                u_lng = df_nodelist[df_nodelist.node == u].values.squeeze()[1]
                u_lat = df_nodelist[df_nodelist.node == u].values.squeeze()[2]
                d['y'] = u_lat
                d['x'] = u_lng
                # d['y'] = 0
                # d['x'] = 0
            # two-way streets, hence a multigraph
            for u, v, d in G.edges(data=True):  # travel time of the outside arcs
                G.edges[u, v, 0]['arcTime'] = 1
            for u, v, d in G.edges(data=True):
                G.edges[u, v, 0]['distance'] = 1
            # set the CRS of the graph object
G.graph['crs'] = "epsg:4326"
return G
else:
            # <load input files>
            # fetch the Manhattan networkx graph
            G = ox.graph_from_place('Manhattan, New York City, New York, USA', network_type='drive')
            # <set the weights of the network>
            # multigraphs assign weights differently from simple graphs: d is the attribute dict, and
            # G.edges[u, v] is a dict for a simple graph but not for a multigraph
            for u, v, d in G.edges(data=True):  # travel time of the outside arcs
G.edges[u, v, 0]['arcTime'] = 1
for u, v, d in G.edges(data=True):
G.edges[u, v, 0]['distance'] = 1
return G
def optimization_method(self, G, K):
"""
SOCP优化算法
:para G: 初始化得到的或上一次迭代计算得到的网络图
:para K: path set
:return: 更新过弧行程时间的网络图
"""
if self.type == 0:
            # <read the data>
            df_observed_data = pd.read_csv('../train_dataset/small_synthetic_observed_data.csv')
            W = df_observed_data  # observation set W: (origin, destination) pairs with observed travel times
            E = G.edges  # arc set E: all small arcs
            # <helper function>
            def geometric_mean(data):  # geometric mean of the observed travel times T_od
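                # e.g. geometric_mean([2, 8]) == 4.0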
total = 1
for i in data:
                    total *= i  # same as total = total * i
return pow(total, 1 / len(data))
            # <define the model>
            m = Model("SOCP model")
            # <define the parameters>
            time_limit = self.time_limit
            reg = self.reg  # must be tuned to the problem size
            # <define the decision variables>
            names = locals()
            # variable 1: t_ij
            for node1, node2, temp in E:  # travel-time estimate t_ij of each small arc
names['arc_' + 'node1_' + str(node1) + '_node2_' + str(node2)] = m.addVar(vtype=GRB.CONTINUOUS,
name='arc_' + 'node1_' + str(
node1) + '_node2_' + str(
node2))
            # variable 2: T_hat
            for i in range(W.shape[0]):  # trip travel-time estimate T_hat
node1 = int(W.iloc[i][1])
node2 = int(W.iloc[i][2])
names['trip_' + 'node1_' + str(node1) + '_node2_' + str(node2)] = m.addVar(vtype=GRB.CONTINUOUS,
name='trip_' + 'node1_' + str(
node1) + '_node2_' + str(
node2))
            # variable 3: x_od
            for i in range(W.shape[0]):  # travel-time estimation error x_od
node1 = int(W.iloc[i][1])
node2 = int(W.iloc[i][2])
names['error_' + 'node1_' + str(node1) + '_node2_' + str(node2)] = m.addVar(vtype=GRB.CONTINUOUS,
name='error_' + 'node1_' + str(
node1) + '_node2_' + str(
node2))
            for node1, node2, temp in E:  # auxiliary variables for linearizing the absolute value
names['abs_' + 'node1_' + str(node1) + '_node2_' + str(node2)] = m.addVar(vtype=GRB.CONTINUOUS,
name='abs_' + 'node1_' + str(
node1) + '_node2_' + str(
node2))
names['abs_' + 'node1_' + str(node2) + '_node2_' + str(node1)] = m.addVar(vtype=GRB.CONTINUOUS,
name='abs_' + 'node1_' + str(
node2) + '_node2_' + str(
node1))
            # <define the data structures>
            # data structure 1: P
            P = defaultdict(list)  # shortest paths for this iteration, computed with the arc times from the previous iteration
for i in range(W.shape[0]):
origin = int(W.iloc[i][1])
destination = int(W.iloc[i][2])
P['node1_' + str(origin) + '_node2_' + str(destination)] = \
self.modified_dijkstras(G, origin, destination)[1]
            # data structure 2: K
            for key, val in P.items():  # path set of the observed pairs in W
string = key.split('_')
origin = int(string[1])
destination = int(string[3])
K['node1_' + str(origin) + '_node2_' + str(destination)].append(val)
            # data structure 3: all observed samples
            O = defaultdict(list)  # list of observed travel times per (origin, destination)
for i in range(df_observed_data.shape[0]):
origin = int(df_observed_data.iloc[i][1])
destination = int(df_observed_data.iloc[i][2])
O['observe_' + 'node1_' + str(origin) + '_node2_' + str(destination)].append(
df_observed_data.iloc[i][0])
            # data structure 4: geometric mean of the observed travel times
            M = defaultdict(int)  # geometric mean of the travel times per (origin, destination)
for i in range(df_observed_data.shape[0]):
origin = int(df_observed_data.iloc[i][1])
destination = int(df_observed_data.iloc[i][2])
M['observe_' + 'node1_' + str(origin) + '_node2_' + str(destination)] = geometric_mean(
O['observe_' + 'node1_' + str(origin) + '_node2_' + str(destination)])
            # <define the constraints>
            # constraint 11b
            for i in range(df_observed_data.shape[0]):  # shortest-path constraints
origin = int(df_observed_data.iloc[i][1])
destination = int(df_observed_data.iloc[i][2])
traveltime, path = self.modified_dijkstras(G, origin, destination)
arcSum = 0
for i in range(len(path) - 1):
node1 = int(path[i])
node2 = int(path[i + 1])
arcSum += names['arc_' + 'node1_' + str(node1) + '_node2_' + str(node2)]
m.addConstr(names['trip_' + 'node1_' + str(origin) + '_node2_' + str(
                    destination)] == arcSum)  # the trip estimate equals the sum of arc times along the shortest path
            # constraint 11c
if K:
for key, val in K.items():
string = key.split('_')
origin = int(string[1])
destination = int(string[3])
for path in val:
othertime = 0
for i in range(len(path) - 1):
node1 = path[i]
node2 = path[i + 1]
othertime += names['arc_' + 'node1_' + str(node1) + '_node2_' + str(node2)]
m.addConstr(
                            othertime >= names['trip_' + 'node1_' + str(origin) + '_node2_' + str(destination)])  # the sign is reversed
            # constraint 11d
            for i in range(W.shape[0]):  # linear part of the error-minimization constraints
node1 = int(W.iloc[i][1])
node2 = int(W.iloc[i][2])
m.addConstr(names['error_' + 'node1_' + str(node1) + '_node2_' + str(node2)] >= names[
'trip_' + 'node1_' + str(node1) + '_node2_' + str(node2)] / M[
'observe_' + 'node1_' + str(node1) + '_node2_' + str(node2)])
            # constraint 11e
            for i in range(W.shape[0]):  # second-order cone part of the error-minimization constraints
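                # The cone constraint below, (T - x)^2 + (2*sqrt(M))^2 <= (x + T)^2, is equivalent to
                # x * T >= M, i.e. x >= M / T; together with constraint 11d (x >= T / M), the error
                # variable x bounds the multiplicative deviation of the trip estimate T from the
                # geometric mean M of the observations.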
node1 = int(W.iloc[i][1])
node2 = int(W.iloc[i][2])
qexpr1 = names['trip_' + 'node1_' + str(node1) + '_node2_' + str(node2)] - names[
'error_' + 'node1_' + str(node1) + '_node2_' + str(node2)]
qexpr2 = 2 * np.sqrt(M['observe_' + 'node1_' + str(node1) + '_node2_' + str(node2)])
qexpr3 = names['error_' + 'node1_' + str(node1) + '_node2_' + str(node2)] + names[
'trip_' + 'node1_' + str(node1) + '_node2_' + str(node2)]
m.addQConstr(qexpr1 * qexpr1 + qexpr2 * qexpr2 <= qexpr3 * qexpr3)
            # constraint 11f
            for node1, node2, temp in E:  # speed-limit constraint: lower bound on each arc travel time
m.addConstr(names['arc_' + 'node1_' + str(node1) + '_node2_' + str(node2)] >= time_limit)
            # <define the objective function>
            obj = 0
            # loss term
for i in range(W.shape[0]):
node1 = int(W.iloc[i][1])
node2 = int(W.iloc[i][2])
n_od = len(O['observe_' + 'node1_' + str(node1) + '_node2_' + str(node2)])
obj += names['error_' + 'node1_' + str(node1) + '_node2_' + str(node2)] * n_od
            # regularization term
            for node1, node2, temp in E:
                for node3, node4, temp in E:
                    # intersect the node lists to detect adjacent arcs
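                    # the penalty term below discourages large differences in pace (time per unit distance)
                    # between adjacent arcs, weighted by reg and the arc lengths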
arc1 = [node1, node2]
arc2 = [node3, node4]
intersection = list(set(arc1) & set(arc2))
if intersection:
arc1 = names['arc_' + 'node1_' + str(node1) + '_node2_' + str(node2)]
arc2 = names['arc_' + 'node1_' + str(node3) + '_node2_' + str(node4)]
dis1 = G.edges[node1, node2, 0]['distance']
dis2 = G.edges[node3, node4, 0]['distance']
m.addConstr(
names['abs_' + 'node1_' + str(node1) + '_node2_' + str(node2)] >= arc1 / dis1 - arc2 / dis2)
m.addConstr(names['abs_' + 'node1_' + str(node1) + '_node2_' + str(node2)] >= -(
arc1 / dis1 - arc2 / dis2))
obj += reg * names['abs_' + 'node1_' + str(node1) + '_node2_' + str(node2)] * 2 / (dis1 + dis2)
            # set the objective
            m.setObjective(obj)
            # <solve the model>
            m.optimize()
            # print('objective value:', m.objVal)
            # for v in m.getVars():
            #     print("variable", v.varName, '=', v.x)
            # <write back the results>
for v in m.getVars():
string = v.varName.split('_')
node1 = int(string[2])
node2 = int(string[4])
                if 'arc' in v.varName:  # update the weight of arc_node1_<n>_node2_<m>
G.edges[node1, node2, 0]['arcTime'] = v.x
return G, K, P
elif self.type == 1:
            # <read the data>
            df_observed_data = pd.read_csv('../train_dataset/normal_synthetic_observed_data.csv')
            W = df_observed_data  # observation set W: (origin, destination) pairs with observed travel times
            E = G.edges  # arc set E: all small arcs
            # <helper function>
            def geometric_mean(data):  # geometric mean of the observed travel times T_od
total = 1
for i in data:
                    total *= i  # same as total = total * i
return pow(total, 1 / len(data))
            # <define the model>
            m = Model("SOCP model")
            # <define the parameters>
            time_limit = self.time_limit
            reg = self.reg  # must be tuned to the problem size
            # <define the decision variables>
            names = locals()
            # variable 1: t_ij
            for node1, node2, temp in E:  # travel-time estimate t_ij of each small arc
names['arc_' + 'node1_' + str(node1) + '_node2_' + str(node2)] = m.addVar(vtype=GRB.CONTINUOUS,
name='arc_' + 'node1_' + str(
node1) + '_node2_' + str(
node2))
            # variable 2: T_hat
            for i in range(W.shape[0]):  # trip travel-time estimate T_hat
node1 = int(W.iloc[i][1])
node2 = int(W.iloc[i][2])
names['trip_' + 'node1_' + str(node1) + '_node2_' + str(node2)] = m.addVar(vtype=GRB.CONTINUOUS,
name='trip_' + 'node1_' + str(
node1) + '_node2_' + str(
node2))
            # variable 3: x_od
            for i in range(W.shape[0]):  # travel-time estimation error x_od
node1 = int(W.iloc[i][1])
node2 = int(W.iloc[i][2])
names['error_' + 'node1_' + str(node1) + '_node2_' + str(node2)] = m.addVar(vtype=GRB.CONTINUOUS,
name='error_' + 'node1_' + str(
node1) + '_node2_' + str(
node2))
            for node1, node2, temp in E:  # auxiliary variables for linearizing the absolute value
names['abs_' + 'node1_' + str(node1) + '_node2_' + str(node2)] = m.addVar(vtype=GRB.CONTINUOUS,
name='abs_' + 'node1_' + str(
node1) + '_node2_' + str(
node2))
            # <define the data structures>
            # data structure 1: P
            P = defaultdict(list)  # shortest paths for this iteration, computed with the arc times from the previous iteration
for i in range(W.shape[0]):
origin = int(W.iloc[i][1])
destination = int(W.iloc[i][2])
P['node1_' + str(origin) + '_node2_' + str(destination)] = \
self.modified_dijkstras(G, origin, destination)[1]
            # data structure 2: K
            for i in range(W.shape[0]):  # path set of the observed pairs in W
origin = int(W.iloc[i][1])
destination = int(W.iloc[i][2])
K['node1_' + str(origin) + '_node2_' + str(destination)].append(
self.modified_dijkstras(G, origin, destination)[1])
            # data structure 3: all observed samples
            O = defaultdict(list)  # list of observed travel times per (origin, destination)
for i in range(df_observed_data.shape[0]):
origin = int(df_observed_data.iloc[i][1])
destination = int(df_observed_data.iloc[i][2])
O['observe_' + 'node1_' + str(origin) + '_node2_' + str(destination)].append(
df_observed_data.iloc[i][0])
            # data structure 4: geometric mean of the observed travel times
            M = defaultdict(int)  # geometric mean of the travel times per (origin, destination)
for i in range(df_observed_data.shape[0]):
origin = int(df_observed_data.iloc[i][1])
destination = int(df_observed_data.iloc[i][2])
M['observe_' + 'node1_' + str(origin) + '_node2_' + str(destination)] = geometric_mean(
O['observe_' + 'node1_' + str(origin) + '_node2_' + str(destination)])
            # <define the constraints>
            # constraint 11b
            for i in range(df_observed_data.shape[0]):  # shortest-path constraints
origin = int(df_observed_data.iloc[i][1])
destination = int(df_observed_data.iloc[i][2])
traveltime, path = self.modified_dijkstras(G, origin, destination)
arcSum = 0
for i in range(len(path) - 1):
node1 = int(path[i])
node2 = int(path[i + 1])
arcSum += names['arc_' + 'node1_' + str(node1) + '_node2_' + str(node2)]
m.addConstr(names['trip_' + 'node1_' + str(origin) + '_node2_' + str(
                    destination)] == arcSum)  # the trip estimate equals the sum of arc times along the shortest path
            # constraint 11c
if K:
for key, val in K.items():
string = key.split('_')
origin = int(string[1])
destination = int(string[3])
for path in val:
othertime = 0
for i in range(len(path) - 1):
node1 = path[i]
node2 = path[i + 1]
othertime += names['arc_' + 'node1_' + str(node1) + '_node2_' + str(node2)]
m.addConstr(
                            othertime >= names['trip_' + 'node1_' + str(origin) + '_node2_' + str(destination)])  # the sign is reversed
            # constraint 11d
            for i in range(W.shape[0]):  # linear part of the error-minimization constraints
node1 = int(W.iloc[i][1])
node2 = int(W.iloc[i][2])
m.addConstr(names['error_' + 'node1_' + str(node1) + '_node2_' + str(node2)] >= names[
'trip_' + 'node1_' + str(node1) + '_node2_' + str(node2)] / M[
'observe_' + 'node1_' + str(node1) + '_node2_' + str(node2)])
            # constraint 11e
            for i in range(W.shape[0]):  # second-order cone part of the error-minimization constraints
node1 = int(W.iloc[i][1])
node2 = int(W.iloc[i][2])
qexpr1 = names['trip_' + 'node1_' + str(node1) + '_node2_' + str(node2)] - names[
'error_' + 'node1_' + str(node1) + '_node2_' + str(node2)]
qexpr2 = 2 * np.sqrt(M['observe_' + 'node1_' + str(node1) + '_node2_' + str(node2)])
qexpr3 = names['error_' + 'node1_' + str(node1) + '_node2_' + str(node2)] + names[
'trip_' + 'node1_' + str(node1) + '_node2_' + str(node2)]
m.addQConstr(qexpr1 * qexpr1 + qexpr2 * qexpr2 <= qexpr3 * qexpr3)
            # constraint 11f
            for node1, node2, temp in E:  # speed-limit constraint: lower bound on each arc travel time
m.addConstr(names['arc_' + 'node1_' + str(node1) + '_node2_' + str(node2)] >= time_limit)
            # <define the objective function>
            obj = 0
            # loss term
for i in range(W.shape[0]):
node1 = int(W.iloc[i][1])
node2 = int(W.iloc[i][2])
n_od = len(O['observe_' + 'node1_' + str(node1) + '_node2_' + str(node2)])
obj += names['error_' + 'node1_' + str(node1) + '_node2_' + str(node2)] * n_od
            # regularization term
            for node1, node2, temp in E:
                for node3, node4, temp in E:
                    # intersect the node lists to detect adjacent arcs
arc1 = [node1, node2]
arc2 = [node3, node4]
intersection = list(set(arc1) & set(arc2))
if intersection:
arc1 = names['arc_' + 'node1_' + str(node1) + '_node2_' + str(node2)]
arc2 = names['arc_' + 'node1_' + str(node3) + '_node2_' + str(node4)]
dis1 = G.edges[node1, node2, 0]['distance']
dis2 = G.edges[node3, node4, 0]['distance']
obj += reg * names['abs_' + 'node1_' + str(node1) + '_node2_' + str(node2)] * 2 / (dis1 + dis2)
m.addConstr(
names['abs_' + 'node1_' + str(node1) + '_node2_' + str(node2)] >= arc1 / dis1 - arc2 / dis2)
m.addConstr(names['abs_' + 'node1_' + str(node1) + '_node2_' + str(node2)] >= -(
arc1 / dis1 - arc2 / dis2))
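# Note on the penalty term: arc1/dis1 and arc2/dis2 are travel times per unit distance (pace). For every
# ordered pair of arcs that share a node, the objective adds reg * 2/(dis1 + dis2) times the abs_ variable
# of the first arc, and the two linear constraints force that variable to be at least |pace1 - pace2|.
# Because the abs_ variable is indexed by the first arc only, it is shared across all of that arc's
# neighbours, and the double loop over E visits each unordered pair twice and also pairs every arc with
# itself (the self-pair only forces abs_ >= 0).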
# set the objective
m.setObjective(obj, gurobipy.GRB.MINIMIZE)
# <Solve the model>
m.optimize()
# print('optimal objective:', m.objVal)
# for v in m.getVars():
# print("parameter", v.varName, '=', v.x)
# <Update the results>
for v in m.getVars():
string = v.varName.split('_')
node1 = int(string[2])
node2 = int(string[4])
if 'arc' in v.varName: # update the arcTime weight of each arc_node1_<n>_node2_<n> variable
G.edges[node1, node2, 0]['arcTime'] = v.x
return G, K, P
else:
# <Read data>
df_observed_data = pd.read_csv('../train_dataset/real_observed_data.csv')
W = df_observed_data # observation set W: (origin, destination) pairs with observed travel times
E = G.edges(keys=True) # arc set E: all small arcs; keys=True yields (u, v, key) triples for the multigraph
# <helper functions>
def geometric_mean(data): # geometric mean of the observed travel times T_od
total = 1
for i in data:
total *= i # same as total = total * i
return pow(total, 1 / len(data))
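# The plain product above can overflow or lose precision when an OD pair has many observations; a
# numerically safer sketch (assuming math is available and all travel times are positive) would average
# the logarithms instead:
#   def geometric_mean(data):
#       return math.exp(sum(math.log(x) for x in data) / len(data))
# The two forms agree up to floating-point rounding.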
# <Define the model>
m = Model("SOCP model")
# <Define parameters>
time_limit = self.time_limit
reg = self.reg # regularization weight; choose according to the problem size
# <Define decision variables>
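# names = locals() below is used purely as a plain dict in which to register dynamically named Gurobi
# variables; the entries are always read back through this dict, never as actual local variables.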
names = locals()
# Variable 1: t_ij
for node1, node2, temp in E: # arc travel-time estimate t_ij for each small arc
if temp == 0:
names['arc_' + 'node1_' + str(node1) + '_node2_' + str(node2)] = m.addVar(vtype=GRB.CONTINUOUS,
name='arc_' + 'node1_' + str(
node1) + '_node2_' + str(
node2))
# Variable 2: T_hat
for i in range(W.shape[0]): # trip travel-time estimate T_hat for each observed OD pair
node1 = int(W.iloc[i][1])
node2 = int(W.iloc[i][2])
names['trip_' + 'node1_' + str(node1) + '_node2_' + str(node2)] = m.addVar(vtype=GRB.CONTINUOUS,
name='trip_' + 'node1_' + str(
node1) + '_node2_' + str(
node2))
# Variable 3: x_od
for i in range(W.shape[0]): # travel-time estimation error x_od for each observed OD pair
node1 = int(W.iloc[i][1])
node2 = int(W.iloc[i][2])
names['error_' + 'node1_' + str(node1) + '_node2_' + str(node2)] = m.addVar(vtype=GRB.CONTINUOUS,
name='error_' + 'node1_' + str(
node1) + '_node2_' + str(
node2))
for node1, node2, temp in E: # auxiliary variables for the absolute-value linearization
names['abs_' + 'node1_' + str(node1) + '_node2_' + str(node2)] = m.addVar(vtype=GRB.CONTINUOUS,
name='abs_' + 'node1_' + str(
node1) + '_node2_' + str(
node2))
# <Define data structures>
# Data structure 1: P
P = defaultdict(list) # shortest-path vectors for this iteration, computed from the arc travel times of the previous iteration
for i in range(W.shape[0]):
origin = int(W.iloc[i][1])
destination = int(W.iloc[i][2])
P['node1_' + str(origin) + '_node2_' + str(destination)] = \
self.modified_dijkstras(G, origin, destination)[1]
# Data structure 2: K
for i in range(W.shape[0]): # paths collected for the observed OD pairs in W
origin = int(W.iloc[i][1])
destination = int(W.iloc[i][2])
K['node1_' + str(origin) + '_node2_' + str(destination)].append(
self.modified_dijkstras(G, origin, destination)[1])
# Data structure 3: all observed samples
O = defaultdict(list) # observed travel times per (origin, destination) pair
for i in range(W.shape[0]):
origin = int(W.iloc[i][1])
destination = int(W.iloc[i][2])
O['observe_' + 'node1_' + str(origin) + '_node2_' + str(destination)].append(int(W.iloc[i][0]))
# Data structure 4: geometric mean of the observed travel times
M = defaultdict(int) # geometric mean of the travel times per (origin, destination) pair
for i in range(W.shape[0]):
origin = int(W.iloc[i][1])
destination = int(W.iloc[i][2])
M['observe_' + 'node1_' + str(origin) + '_node2_' + str(destination)] = geometric_mean(
O['observe_' + 'node1_' + str(origin) + '_node2_' + str(destination)])
# <Define constraints>
# Constraint 11b
for i in range(W.shape[0]): # shortest-path constraints
origin = int(W.iloc[i][1])
destination = int(W.iloc[i][2])
traveltime, path = self.modified_dijkstras(G, origin, destination)
arcSum = 0
for j in range(len(path) - 1):
node1 = int(path[j])
node2 = int(path[j + 1])
arcSum += names['arc_' + 'node1_' + str(node1) + '_node2_' + str(node2)]
m.addConstr(names['trip_' + 'node1_' + str(origin) + '_node2_' + str(
destination)] == arcSum) # the trip travel-time estimate equals the sum of arc estimates along the shortest path
# Constraint 11c
if K:
for key, val in K.items():
string = key.split('_')
origin = int(string[1])
destination = int(string[3])
for path in val:
othertime = 0
for i in range(len(path) - 1):
node1 = path[i]
node2 = path[i + 1]
othertime += names['arc_' + 'node1_' + str(node1) + '_node2_' + str(node2)]
m.addConstr(
othertime >= names['trip_' + 'node1_' + str(origin) + '_node2_' + str(destination)]) # note: the sign is reversed here
# Constraint 11d
for i in range(W.shape[0]): # linear part of the error constraints
node1 = int(W.iloc[i][1])
node2 = int(W.iloc[i][2])
m.addConstr(names['error_' + 'node1_' + str(node1) + '_node2_' + str(node2)] >= names[
'trip_' + 'node1_' + str(node1) + '_node2_' + str(node2)] / M[
'observe_' + 'node1_' + str(node1) + '_node2_' + str(node2)])
# Constraint 11e
for i in range(W.shape[0]): # second-order-cone (norm) part of the error constraints
node1 = int(W.iloc[i][1])
node2 = int(W.iloc[i][2])
qexpr1 = names['trip_' + 'node1_' + str(node1) + '_node2_' + str(node2)] - names[
'error_' + 'node1_' + str(node1) + '_node2_' + str(node2)]
qexpr2 = 2 * np.sqrt(M['observe_' + 'node1_' + str(node1) + '_node2_' + str(node2)])
qexpr3 = names['error_' + 'node1_' + str(node1) + '_node2_' + str(node2)] + names[
'trip_' + 'node1_' + str(node1) + '_node2_' + str(node2)]
m.addQConstr(qexpr1 * qexpr1 + qexpr2 * qexpr2 <= qexpr3 * qexpr3)
# # Constraint 11f
# for node1,node2,temp in E: # linear lower bound on arc travel times (speed limit); infeasibility may be caused by time_limit
# m.addConstr(names['arc_'+ 'node1_'+str(node1) +'_node2_'+ str(node2)] >= time_limit)
# <Define the objective>
obj = 0
# loss terms
for i in range(W.shape[0]):
node1 = int(W.iloc[i][1])
node2 = int(W.iloc[i][2])
n_od = len(O['observe_' + 'node1_' + str(node1) + '_node2_' + str(node2)])
obj += names['error_' + 'node1_' + str(node1) + '_node2_' + str(node2)] * n_od
# penalty (regularization) terms
for node1, node2, temp in E:
for node3, node4, temp in E:
# intersect the node lists to detect adjacent (consecutive) arcs
arc1 = [node1, node2]
arc2 = [node3, node4]
intersection = list(set(arc1) & set(arc2))
if intersection:
arc1 = names['arc_' + 'node1_' + str(node1) + '_node2_' + str(node2)]
arc2 = names['arc_' + 'node1_' + str(node3) + '_node2_' + str(node4)]
dis1 = G.edges[node1, node2, 0]['distance']
dis2 = G.edges[node3, node4, 0]['distance']
obj += reg * names['abs_' + 'node1_' + str(node1) + '_node2_' + str(node2)] * 2 / (dis1 + dis2)
m.addConstr(
names['abs_' + 'node1_' + str(node1) + '_node2_' + str(node2)] >= arc1 / dis1 - arc2 / dis2)
m.addConstr(names['abs_' + 'node1_' + str(node1) + '_node2_' + str(node2)] >= -(
arc1 / dis1 - arc2 / dis2))
# set the objective
m.setObjective(obj, gurobipy.GRB.MINIMIZE)
# <Solve the model>
m.optimize()
# print('optimal objective:', m.objVal)
# for v in m.getVars():
# print("parameter", v.varName, '=', v.x)
# <Update the results>
for v in m.getVars():
string = v.varName.split('_')
node1 = int(string[2])
node2 = int(string[4])
if 'arc' in v.varName: # update the arcTime weight of each arc_node1_<n>_node2_<n> variable
G.edges[node1, node2, 0]['arcTime'] = v.x
return G, K, P
def diff(self, lastP, P):
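# Convergence measure: each shortest path is decomposed into its arcs, and the count of arcs that appear
# in one iteration's paths but not the other's is divided by the number of OD pairs; train() keeps
# iterating while this value is >= 0.5.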
count = 0
G = self.Graph()
arc_lastP = defaultdict(list)
for key, val in lastP.items(): # lastP {'node1_num_node2_num':[node1,node2]}
for i in range(len(val) - 1):
origin = val[i]
destination = val[i + 1]
arc_lastP[key].append(str(origin) + '_' + str(destination)) # {"node1_num_node2_num": [arc1,arc2]}; the separator keeps keys such as (1, 23) and (12, 3) distinct
arc_P = defaultdict(list)
for key, val in P.items():
for i in range(len(val) - 1):
origin = val[i]
destination = val[i + 1]
arc_P[key].append(str(origin) + '_' + str(destination))
for key, val in arc_lastP.items(): # {'origin,destination':[arc1,arc2]}
for arc in val:
if arc not in arc_P[key]:
count += 1
for key, val in arc_P.items():
for arc in val:
if arc not in arc_lastP[key]:
count += 1
return count / len(lastP)
def RMLSB(self, G):
"""
定义一个评价函数,对比小弧之间的误差,仿真数据有真实弧数据,而真实数据中通过与其他算法对比获取gap
G: 训练好的图对象
test_dataset: 输入测试集,测试集的数据是没有经过训练过的
"""
RMLSB = 0
if self.type == 0:
train_dataset = "../train_dataset/small_synthetic_observed_data.csv"
elif self.type == 1:
train_dataset = "../train_dataset/normal_synthetic_observed_data.csv"
else:
train_dataset = "../train_dataset/real_observed_data"
# <helper functions>
def geometric_mean(data): # geometric mean of the observed travel times T_od
total = 1
for i in data:
total *= i # same as total = total * i
return pow(total, 1 / len(data))
df_observed_data = pd.read_csv(train_dataset)
# Data structure 3: all observed samples
O = defaultdict(list) # observed travel times per (origin, destination) pair
for i in range(df_observed_data.shape[0]):
origin = int(df_observed_data.iloc[i][1])
destination = int(df_observed_data.iloc[i][2])
O['observe_' + 'node1_' + str(origin) + '_node2_' + str(destination)].append(df_observed_data.iloc[i][0])
# Data structure 4: geometric mean of the observed travel times
M = defaultdict(int) # geometric mean of the travel times per (origin, destination) pair
for i in range(df_observed_data.shape[0]):
origin = int(df_observed_data.iloc[i][1])
destination = int(df_observed_data.iloc[i][2])
M['observe_' + 'node1_' + str(origin) + '_node2_' + str(destination)] = geometric_mean(
O['observe_' + 'node1_' + str(origin) + '_node2_' + str(destination)])
for origin in G.nodes():
for destination in G.nodes():
if origin != destination and int(
M['observe_' + 'node1_' + str(origin) + '_node2_' + str(destination)]) != 0:
observe = M['observe_' + 'node1_' + str(origin) + '_node2_' + str(destination)]
trip = self.modified_dijkstras(G, origin, destination)[0]
print(observe, trip)
RMLSB += math.pow((math.log(trip) - math.log(observe)), 2)
return np.sqrt(RMLSB)
def geo(self, G):
if self.type == 0:
# load input files
df_nodelist = pd.read_csv('../train_dataset/smallnodelist.csv')
edgelist = []
for u, v, d in G.edges(data=True):
u_lng = df_nodelist[df_nodelist.node == u].values.squeeze()[1]
u_lat = df_nodelist[df_nodelist.node == u].values.squeeze()[2]
v_lng = df_nodelist[df_nodelist.node == v].values.squeeze()[1]
v_lat = df_nodelist[df_nodelist.node == v].values.squeeze()[2]
G.edges[u, v, 0]['geometry'] = LineString([(u_lng, u_lat), (v_lng, v_lat)])
edge_data = dict()
edge_data['node1'] = u
edge_data['node2'] = v
edge_data.update(d)
edgelist.append(edge_data)
df_edgelist = pd.DataFrame(edgelist)
edgelist_crs = {'init': 'epsg:4326'}
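# Note: depending on the geopandas/pyproj version, the {'init': 'epsg:4326'} form is deprecated in favour
# of passing crs='EPSG:4326'; both describe WGS84 longitude/latitude here.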
df_edgelist_geo = gpd.GeoDataFrame(df_edgelist, crs=edgelist_crs, geometry=df_edgelist.geometry)
return df_edgelist_geo
elif self.type == 1:
# load input files
df_nodelist = pd.read_csv('../train_dataset/normalnodelist.csv')
edgelist = []
for u, v, d in G.edges(data=True):
u_lng = df_nodelist[df_nodelist.node == u].values.squeeze()[1]
u_lat = df_nodelist[df_nodelist.node == u].values.squeeze()[2]
v_lng = df_nodelist[df_nodelist.node == v].values.squeeze()[1]
v_lat = df_nodelist[df_nodelist.node == v].values.squeeze()[2]
G.edges[u, v, 0]['geometry'] = LineString([(u_lng, u_lat), (v_lng, v_lat)])
edge_data = dict()
edge_data['node1'] = u
edge_data['node2'] = v
edge_data.update(d)
edgelist.append(edge_data)
df_edgelist = pd.DataFrame(edgelist)
edgelist_crs = {'init': 'epsg:4326'}
df_edgelist_geo = gpd.GeoDataFrame(df_edgelist, crs=edgelist_crs, geometry=df_edgelist.geometry)
return df_edgelist_geo
else:
# plotting module
gdf_nodes, gdf_edges = ox.graph_to_gdfs(G)
return gdf_edges
def train(self):
if self.type == 0:
start_time = time.time()
# start of the procedure
# a tractable algorithm
K = defaultdict(list)
self.get_df_observations()
difference = inf
G = self.Graph()
T = self.True_Graph()
count = 0
while difference >= 0.5:
self.geo(G).plot(column='arcTime', cmap='RdYlGn')
G, K, P = self.optimization_method(G, K)
if count % 2 == 0:
lastP1 = P
else:
lastP2 = P
if count != 0:
difference = self.diff(lastP1, lastP2)
count += 1
gdf_nodes, gdf_edges = ox.graph_to_gdfs(G)
gdf_nodes.to_file("../smalldata/gdf_nodes" + str(count) + ".geojson", driver="GeoJSON")
gdf_edges.to_file("../smalldata/gdf_edges" + str(count) + ".geojson", driver="GeoJSON")
print(f'Running iteration {count}, difference = {difference}.')
RMLSB = self.RMLSB(G)
print(f'Current RMLSB of the optimization model: {RMLSB}')
# end of the procedure
elapsed_time = time.time() - start_time
hour = elapsed_time // 3600
minute = (elapsed_time - hour * 3600) // 60
second = elapsed_time % 60
print(f'inference time cost: {hour} hours, {minute} minutes, {second} seconds')
elif self.type == 1:
start_time = time.time()
# start of the procedure
# a tractable algorithm
K = defaultdict(list)
self.get_df_observations()
difference = inf
G = self.Graph()
T = self.True_Graph()
count = 0
while difference >= 0.5:
self.geo(G).plot(column='arcTime', cmap='RdYlGn')
G, K, P = self.optimization_method(G, K)
if count % 2 == 0:
lastP1 = P
else:
lastP2 = P
if count != 0:
difference = self.diff(lastP1, lastP2)
count += 1
gdf_nodes, gdf_edges = ox.graph_to_gdfs(G)
gdf_nodes.to_file("../normaldata/gdf_nodes" + str(count) + ".geojson", driver="GeoJSON")
gdf_edges.to_file("../normaldata/gdf_edges" + str(count) + ".geojson", driver="GeoJSON")
print(f'Running iteration {count}, difference = {difference}.')
RMLSB = self.RMLSB(G)
print(f'Current RMLSB of the optimization model: {RMLSB}')
# end of the procedure
elapsed_time = time.time() - start_time
hour = elapsed_time // 3600
minute = (elapsed_time - hour * 3600) // 60
second = elapsed_time % 60
print(f'inference time cost: {hour} hours, {minute} minutes, {second} seconds')
else:
start_time = time.time()
# start of the procedure
# a tractable algorithm
K = defaultdict(list)
self.get_df_observations()
difference = inf
G = self.Graph()
count = 0
while difference >= 0.5:
# iteration k
fig, ax = plt.subplots(figsize=(30, 30))
self.geo(G).plot(ax=ax, column='arcTime', cmap='Paired', categorical=True)
ax.set_axis_off()
plt.show()
G, K, P = self.optimization_method(G, K)
if count % 2 == 0:
lastP1 = P
else:
lastP2 = P
if count != 0:
difference = self.diff(lastP1, lastP2)
count += 1
gdf_nodes, gdf_edges = ox.graph_to_gdfs(G)
# use apply to clean columns that contain mixed data types
gdf_edges['osmid'] = gdf_edges.apply(lambda row: 0 if type(row['osmid']) == list else row['osmid'],
axis=1)
gdf_edges = gdf_edges[gdf_edges['osmid'] > 0]
gdf_nodes.to_file("../realdata/gdf_nodes" + str(count) + ".geojson", driver="GeoJSON")
gdf_edges.to_file("../realdata/gdf_edges" + str(count) + ".geojson", driver="GeoJSON")
print(f'Running iteration {count}, difference = {difference}.')
# end of the procedure
elapsed_time = time.time() - start_time
hour = elapsed_time // 3600
minute = (elapsed_time - hour * 3600) // 60
second = elapsed_time % 60
print(f'inference time cost: {hour} hours, {minute} minutes, {second} seconds')
def test(self, G):
"""
G: 输入训练好的图模型
test_dataset: 输入测试集,与训练集不同
"""
if self.type == 0:
test_dataset = "../test_dataset/small_train_data.csv"
elif self.type == 1:
test_dataset = "../test_dataset/normal_train_data.csv"
else:
test_dataset = "../test_dataset/real_train_data.csv"
RMLSB = 0
# <helper functions>
def geometric_mean(data): # geometric mean of the observed travel times T_od
total = 1
for i in data:
total *= i # same as total = total * i
return pow(total, 1 / len(data))
df_observed_data = pd.read_csv(test_dataset)
# Data structure 3: all observed samples
O = defaultdict(list) # observed travel times per (origin, destination) pair
for i in range(df_observed_data.shape[0]):
origin = int(df_observed_data.iloc[i][1])
destination = int(df_observed_data.iloc[i][2])
O['observe_' + 'node1_' + str(origin) + '_node2_' + str(destination)].append(df_observed_data.iloc[i][0])
# Data structure 4: geometric mean of the observed travel times
M = defaultdict(int) # geometric mean of the travel times per (origin, destination) pair
for i in range(df_observed_data.shape[0]):
origin = int(df_observed_data.iloc[i][1])
destination = int(df_observed_data.iloc[i][2])
M['observe_' + 'node1_' + str(origin) + '_node2_' + str(destination)] = geometric_mean(
O['observe_' + 'node1_' + str(origin) + '_node2_' + str(destination)])
for origin in G.nodes():
for destination in G.nodes():
if origin != destination and int(
M['observe_' + 'node1_' + str(origin) + '_node2_' + str(destination)]) != 0:
observe = M['observe_' + 'node1_' + str(origin) + '_node2_' + str(destination)]
trip = self.modified_dijkstras(G, origin, destination)[0]
RMLSB += math.pow((math.log(trip) - math.log(observe)), 2)
return np.sqrt(RMLSB)
class Visualization:
def __init__(self, G, type=0, manual=True):
self.G = G
self.type = type
self.manual = manual
def Graph(self):
"""
加载初始化人工网络
:return: 返回一个加载好的的图G对象
"""
# <set the weights of the synthetic network>
# adding weights differs between MultiGraph and Graph: d is the attribute dict; in a Graph, G.edges[u, v] is that dict, while in a MultiGraph the edge key is needed (e.g. G.edges[u, v, 0])
for u, v, d in self.G.edges(data=True): # set the arc travel time
self.G.edges[u, v, 0]['arcTime'] = 1
for u, v, d in self.G.edges(data=True):
self.G.edges[u, v, 0]['distance'] = 1
return self.G
def project(self, G, lng, lat):
"""
将某个点的坐标按照欧式距离映射到网络中最近的拓扑点上
:Param G: 拓扑图
:Param lng: 经度
:Param lat: 纬度
:Return: 返回最近的点的OSMid
"""
nearest_node = None
shortest_distance = inf
for n, d in G.nodes(data=True):
# d['x'] is the longitude, d['y'] is the latitude
new_shortest_distance = ox.distance.euclidean_dist_vec(lng, lat, d['x'], d['y'])
if new_shortest_distance < shortest_distance:
nearest_node = n
shortest_distance = new_shortest_distance
return nearest_node, shortest_distance
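# The brute-force scan above is fine for small graphs; for larger networks, recent osmnx versions expose
# ox.distance.nearest_nodes(G, lng, lat) as a vectorised alternative (for unprojected graphs it may also
# require scikit-learn), assuming the installed osmnx provides it.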
def modified_dijkstras(self, origin, destination):
"""
最短路算法
:return: 返回一个traveltime和path
"""
count = 0
paths_and_distances = {}
for node in self.G.nodes():
paths_and_distances[node] = [inf, [origin]]
paths_and_distances[origin][0] = 0
vertices_to_explore = [(0, origin)]
while vertices_to_explore:
current_distance, current_vertex = heappop(vertices_to_explore)
for neighbor in self.G.neighbors(current_vertex):
# get_edge_data returns a nested dict keyed by the edge key
edge_weight = self.G.get_edge_data(current_vertex, neighbor)[0]['arcTime']
new_distance = current_distance + edge_weight
new_path = paths_and_distances[current_vertex][1] + [neighbor]
if new_distance < paths_and_distances[neighbor][0]:
paths_and_distances[neighbor][0] = new_distance
paths_and_distances[neighbor][1] = new_path
heappush(vertices_to_explore, (new_distance, neighbor))
count += 1
return paths_and_distances[destination]
def plot_path_evolution(self, G):
plt.show()
def plot_taxi_position(self, map=True, kind=0):
if map == False:
# get the networkx graph of Manhattan
G = ox.graph_from_place('Manhattan, New York City, New York, USA', network_type='drive')
gdf_nodes, gdf_edges = ox.graph_to_gdfs(G)
df_dataset = pd.read_csv("../train_dataset/dataset.csv")
df_dataset['geometry'] = df_dataset.apply(
lambda row: Point(float(row['pickup_longitude']), float(row['pickup_latitude'])), axis=1)
df_dataset_geo = gpd.GeoDataFrame(df_dataset, crs=gdf_edges.crs, geometry=df_dataset.geometry)
fig, ax = plt.subplots(figsize=(30, 30))
df_dataset_geo.plot(ax=ax, color='green', markersize=1)
gdf_edges.plot(ax=ax, cmap='Reds')
ax.set_axis_off()
plt.show()
else:
# get the networkx graph of Manhattan
G = ox.graph_from_place('Manhattan, New York City, New York, USA', network_type='drive')
# convert the networkx object to GeoDataFrames
gdf_nodes, gdf_edges = ox.graph_to_gdfs(G)
df_dataset = pd.read_csv("../train_dataset/dataset.csv")
if kind == 0:
df_dataset['dist'] = df_dataset.apply(
lambda row: self.project(G, row['pickup_longitude'], row['pickup_latitude'])[1], axis=1)
df_dataset = df_dataset[df_dataset['dist'] <= 0.001]
df_dataset.to_csv("../train_dataset/processdataset.csv")
# plot the multi-layer figure before projection
df_dataset = pd.read_csv("../train_dataset/processdataset.csv")
df_dataset['geometry'] = df_dataset.apply(
lambda row: Point(float(row['pickup_longitude']), float(row['pickup_latitude'])), axis=1)
df_dataset_geo = gpd.GeoDataFrame(df_dataset, crs=gdf_edges.crs, geometry=df_dataset.geometry)
fig, ax = plt.subplots(figsize=(30, 30))
df_dataset_geo.plot(ax=ax, color='green', markersize=1)
gdf_edges.plot(ax=ax, cmap='Reds')
ax.set_axis_off()
plt.show()
elif kind == 1:
df_dataset['dist'] = df_dataset.apply(
lambda row: self.project(G, row['dropoff_longitude'], row['dropoff_latitude'])[1], axis=1)
df_dataset = df_dataset[df_dataset['dist'] <= 0.001]
df_dataset.to_csv("../train_dataset/processdataset.csv")
# plot the multi-layer figure before projection
df_dataset = pd.read_csv("../train_dataset/processdataset.csv")
df_dataset['geometry'] = df_dataset.apply(
lambda row: Point(float(row['dropoff_longitude']), float(row['dropoff_latitude'])), axis=1)
df_dataset_geo = gpd.GeoDataFrame(df_dataset, crs=gdf_edges.crs, geometry=df_dataset.geometry)
fig, ax = plt.subplots(figsize=(30, 30))
df_dataset_geo.plot(ax=ax, color='green', markersize=1)
gdf_edges.plot(ax=ax, cmap='Reds')
ax.set_axis_off()
plt.show()
else:
df_dataset['dist1'] = df_dataset.apply(
lambda row: self.project(G, row['pickup_longitude'], row['pickup_latitude'])[1], axis=1)
df_dataset['dist2'] = df_dataset.apply(
lambda row: self.project(G, row['dropoff_longitude'], row['dropoff_latitude'])[1], axis=1)
df_dataset = df_dataset[(df_dataset['dist1'] <= 0.001) & (df_dataset['dist2'] <= 0.001)] # element-wise & is required; Python's 'and' raises on a Series
df_dataset.to_csv("../train_dataset/processdataset.csv")
# plot the multi-layer figure before projection
df_dataset = pd.read_csv("../train_dataset/processdataset.csv")
df_dataset['geometry'] = df_dataset.apply(lambda row: LineString(
[(float(row['pickup_longitude']), float(row['pickup_latitude'])),
(float(row['dropoff_longitude']), float(row['dropoff_latitude']))]), axis=1)
df_dataset_geo = gpd.GeoDataFrame(df_dataset, crs=gdf_edges.crs, geometry=df_dataset.geometry)
fig, ax = plt.subplots(figsize=(30, 30))
df_dataset_geo.plot(ax=ax, color='green', markersize=1)
gdf_edges.plot(ax=ax, cmap='Reds')
ax.set_axis_off()
plt.show()
def plot_normal_path(self, origin, destination):
# load input files
df_nodelist =
|
pd.read_csv('../train_dataset/normalnodelist.csv')
|
pandas.read_csv
|
from collections import OrderedDict
import numpy as np
from numpy import nan, array
import pandas as pd
import pytest
from .conftest import (
assert_series_equal, assert_frame_equal, fail_on_pvlib_version)
from numpy.testing import assert_allclose
import unittest.mock as mock
from pvlib import inverter, pvsystem
from pvlib import atmosphere
from pvlib import iam as _iam
from pvlib import irradiance
from pvlib.location import Location
from pvlib import temperature
from pvlib._deprecation import pvlibDeprecationWarning
@pytest.mark.parametrize('iam_model,model_params', [
('ashrae', {'b': 0.05}),
('physical', {'K': 4, 'L': 0.002, 'n': 1.526}),
('martin_ruiz', {'a_r': 0.16}),
])
def test_PVSystem_get_iam(mocker, iam_model, model_params):
m = mocker.spy(_iam, iam_model)
system = pvsystem.PVSystem(module_parameters=model_params)
thetas = 1
iam = system.get_iam(thetas, iam_model=iam_model)
m.assert_called_with(thetas, **model_params)
assert iam < 1.
def test_PVSystem_multi_array_get_iam():
model_params = {'b': 0.05}
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(module_parameters=model_params),
pvsystem.Array(module_parameters=model_params)]
)
iam = system.get_iam((1, 5), iam_model='ashrae')
assert len(iam) == 2
assert iam[0] != iam[1]
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.get_iam((1,), iam_model='ashrae')
def test_PVSystem_get_iam_sapm(sapm_module_params, mocker):
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
mocker.spy(_iam, 'sapm')
aoi = 0
out = system.get_iam(aoi, 'sapm')
_iam.sapm.assert_called_once_with(aoi, sapm_module_params)
assert_allclose(out, 1.0, atol=0.01)
def test_PVSystem_get_iam_interp(sapm_module_params, mocker):
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
with pytest.raises(ValueError):
system.get_iam(45, iam_model='interp')
def test__normalize_sam_product_names():
BAD_NAMES = [' -.()[]:+/",', 'Module[1]']
NORM_NAMES = ['____________', 'Module_1_']
norm_names = pvsystem._normalize_sam_product_names(BAD_NAMES)
assert list(norm_names) == NORM_NAMES
BAD_NAMES = ['Module[1]', 'Module(1)']
NORM_NAMES = ['Module_1_', 'Module_1_']
with pytest.warns(UserWarning):
norm_names = pvsystem._normalize_sam_product_names(BAD_NAMES)
assert list(norm_names) == NORM_NAMES
BAD_NAMES = ['Module[1]', 'Module[1]']
NORM_NAMES = ['Module_1_', 'Module_1_']
with pytest.warns(UserWarning):
norm_names = pvsystem._normalize_sam_product_names(BAD_NAMES)
assert list(norm_names) == NORM_NAMES
def test_PVSystem_get_iam_invalid(sapm_module_params, mocker):
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
with pytest.raises(ValueError):
system.get_iam(45, iam_model='not_a_model')
def test_retrieve_sam_raise_no_parameters():
"""
Raise an exception if no parameters are provided to `retrieve_sam()`.
"""
with pytest.raises(ValueError) as error:
pvsystem.retrieve_sam()
assert 'A name or path must be provided!' == str(error.value)
def test_retrieve_sam_cecmod():
"""
Test the expected data is retrieved from the CEC module database. In
particular, check for a known module in the database and check for the
expected keys for that module.
"""
data = pvsystem.retrieve_sam('cecmod')
keys = [
'BIPV',
'Date',
'T_NOCT',
'A_c',
'N_s',
'I_sc_ref',
'V_oc_ref',
'I_mp_ref',
'V_mp_ref',
'alpha_sc',
'beta_oc',
'a_ref',
'I_L_ref',
'I_o_ref',
'R_s',
'R_sh_ref',
'Adjust',
'gamma_r',
'Version',
'STC',
'PTC',
'Technology',
'Bifacial',
'Length',
'Width',
]
module = 'Itek_Energy_LLC_iT_300_HE'
assert module in data
assert set(data[module].keys()) == set(keys)
def test_retrieve_sam_cecinverter():
"""
Test the expected data is retrieved from the CEC inverter database. In
particular, check for a known inverter in the database and check for the
expected keys for that inverter.
"""
data = pvsystem.retrieve_sam('cecinverter')
keys = [
'Vac',
'Paco',
'Pdco',
'Vdco',
'Pso',
'C0',
'C1',
'C2',
'C3',
'Pnt',
'Vdcmax',
'Idcmax',
'Mppt_low',
'Mppt_high',
'CEC_Date',
'CEC_Type',
]
inverter = 'Yaskawa_Solectria_Solar__PVI_5300_208__208V_'
assert inverter in data
assert set(data[inverter].keys()) == set(keys)
def test_sapm(sapm_module_params):
times = pd.date_range(start='2015-01-01', periods=5, freq='12H')
effective_irradiance = pd.Series([-1000, 500, 1100, np.nan, 1000],
index=times)
temp_cell = pd.Series([10, 25, 50, 25, np.nan], index=times)
out = pvsystem.sapm(effective_irradiance, temp_cell, sapm_module_params)
expected = pd.DataFrame(np.array(
[[ -5.0608322 , -4.65037767, nan, nan,
nan, -4.91119927, -4.15367716],
[ 2.545575 , 2.28773882, 56.86182059, 47.21121608,
108.00693168, 2.48357383, 1.71782772],
[ 5.65584763, 5.01709903, 54.1943277 , 42.51861718,
213.32011294, 5.52987899, 3.48660728],
[ nan, nan, nan, nan,
nan, nan, nan],
[ nan, nan, nan, nan,
nan, nan, nan]]),
columns=['i_sc', 'i_mp', 'v_oc', 'v_mp', 'p_mp', 'i_x', 'i_xx'],
index=times)
assert_frame_equal(out, expected, check_less_precise=4)
out = pvsystem.sapm(1000, 25, sapm_module_params)
expected = OrderedDict()
expected['i_sc'] = 5.09115
expected['i_mp'] = 4.5462909092579995
expected['v_oc'] = 59.260800000000003
expected['v_mp'] = 48.315600000000003
expected['p_mp'] = 219.65677305534581
expected['i_x'] = 4.9759899999999995
expected['i_xx'] = 3.1880204359100004
for k, v in expected.items():
assert_allclose(out[k], v, atol=1e-4)
# just make sure it works with Series input
pvsystem.sapm(effective_irradiance, temp_cell,
pd.Series(sapm_module_params))
def test_PVSystem_sapm(sapm_module_params, mocker):
mocker.spy(pvsystem, 'sapm')
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
effective_irradiance = 500
temp_cell = 25
out = system.sapm(effective_irradiance, temp_cell)
pvsystem.sapm.assert_called_once_with(effective_irradiance, temp_cell,
sapm_module_params)
assert_allclose(out['p_mp'], 100, atol=100)
def test_PVSystem_multi_array_sapm(sapm_module_params):
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(module_parameters=sapm_module_params),
pvsystem.Array(module_parameters=sapm_module_params)]
)
effective_irradiance = (100, 500)
temp_cell = (15, 25)
sapm_one, sapm_two = system.sapm(effective_irradiance, temp_cell)
assert sapm_one['p_mp'] != sapm_two['p_mp']
sapm_one_flip, sapm_two_flip = system.sapm(
(effective_irradiance[1], effective_irradiance[0]),
(temp_cell[1], temp_cell[0])
)
assert sapm_one_flip['p_mp'] == sapm_two['p_mp']
assert sapm_two_flip['p_mp'] == sapm_one['p_mp']
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.sapm(effective_irradiance, 10)
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.sapm(500, temp_cell)
@pytest.mark.parametrize('airmass,expected', [
(1.5, 1.00028714375),
(np.array([[10, np.nan]]), np.array([[0.999535, 0]])),
(pd.Series([5]), pd.Series([1.0387675]))
])
def test_sapm_spectral_loss(sapm_module_params, airmass, expected):
out = pvsystem.sapm_spectral_loss(airmass, sapm_module_params)
if isinstance(airmass, pd.Series):
assert_series_equal(out, expected, check_less_precise=4)
else:
assert_allclose(out, expected, atol=1e-4)
def test_PVSystem_sapm_spectral_loss(sapm_module_params, mocker):
mocker.spy(pvsystem, 'sapm_spectral_loss')
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
airmass = 2
out = system.sapm_spectral_loss(airmass)
pvsystem.sapm_spectral_loss.assert_called_once_with(airmass,
sapm_module_params)
assert_allclose(out, 1, atol=0.5)
def test_PVSystem_multi_array_sapm_spectral_loss(sapm_module_params):
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(module_parameters=sapm_module_params),
pvsystem.Array(module_parameters=sapm_module_params)]
)
loss_one, loss_two = system.sapm_spectral_loss(2)
assert loss_one == loss_two
# this test could be improved to cover all cell types.
# could remove the need for specifying spectral coefficients if we don't
# care about the return value at all
@pytest.mark.parametrize('module_parameters,module_type,coefficients', [
({'Technology': 'mc-Si'}, 'multisi', None),
({'Material': 'Multi-c-Si'}, 'multisi', None),
({'first_solar_spectral_coefficients': (
0.84, -0.03, -0.008, 0.14, 0.04, -0.002)},
None,
(0.84, -0.03, -0.008, 0.14, 0.04, -0.002))
])
def test_PVSystem_first_solar_spectral_loss(module_parameters, module_type,
coefficients, mocker):
mocker.spy(atmosphere, 'first_solar_spectral_correction')
system = pvsystem.PVSystem(module_parameters=module_parameters)
pw = 3
airmass_absolute = 3
out = system.first_solar_spectral_loss(pw, airmass_absolute)
atmosphere.first_solar_spectral_correction.assert_called_once_with(
pw, airmass_absolute, module_type, coefficients)
assert_allclose(out, 1, atol=0.5)
def test_PVSystem_multi_array_first_solar_spectral_loss():
system = pvsystem.PVSystem(
arrays=[
pvsystem.Array(
module_parameters={'Technology': 'mc-Si'},
module_type='multisi'
),
pvsystem.Array(
module_parameters={'Technology': 'mc-Si'},
module_type='multisi'
)
]
)
loss_one, loss_two = system.first_solar_spectral_loss(1, 3)
assert loss_one == loss_two
@pytest.mark.parametrize('test_input,expected', [
([1000, 100, 5, 45], 1140.0510967821877),
([np.array([np.nan, 1000, 1000]),
np.array([100, np.nan, 100]),
np.array([1.1, 1.1, 1.1]),
np.array([10, 10, 10])],
np.array([np.nan, np.nan, 1081.1574])),
([pd.Series([1000]), pd.Series([100]), pd.Series([1.1]),
pd.Series([10])],
pd.Series([1081.1574]))
])
def test_sapm_effective_irradiance(sapm_module_params, test_input, expected):
test_input.append(sapm_module_params)
out = pvsystem.sapm_effective_irradiance(*test_input)
if isinstance(test_input, pd.Series):
assert_series_equal(out, expected, check_less_precise=4)
else:
assert_allclose(out, expected, atol=1e-1)
def test_PVSystem_sapm_effective_irradiance(sapm_module_params, mocker):
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
mocker.spy(pvsystem, 'sapm_effective_irradiance')
poa_direct = 900
poa_diffuse = 100
airmass_absolute = 1.5
aoi = 0
p = (sapm_module_params['A4'], sapm_module_params['A3'],
sapm_module_params['A2'], sapm_module_params['A1'],
sapm_module_params['A0'])
f1 = np.polyval(p, airmass_absolute)
expected = f1 * (poa_direct + sapm_module_params['FD'] * poa_diffuse)
out = system.sapm_effective_irradiance(
poa_direct, poa_diffuse, airmass_absolute, aoi)
pvsystem.sapm_effective_irradiance.assert_called_once_with(
poa_direct, poa_diffuse, airmass_absolute, aoi, sapm_module_params)
assert_allclose(out, expected, atol=0.1)
def test_PVSystem_multi_array_sapm_effective_irradiance(sapm_module_params):
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(module_parameters=sapm_module_params),
pvsystem.Array(module_parameters=sapm_module_params)]
)
poa_direct = (500, 900)
poa_diffuse = (50, 100)
aoi = (0, 10)
airmass_absolute = 1.5
irrad_one, irrad_two = system.sapm_effective_irradiance(
poa_direct, poa_diffuse, airmass_absolute, aoi
)
assert irrad_one != irrad_two
@pytest.fixture
def two_array_system(pvsyst_module_params, cec_module_params):
"""Two-array PVSystem.
Both arrays are identical.
"""
temperature_model = temperature.TEMPERATURE_MODEL_PARAMETERS['sapm'][
'open_rack_glass_glass'
]
# Need u_v to be non-zero so wind-speed changes cell temperature
# under the pvsyst model.
temperature_model['u_v'] = 1.0
# parameter for fuentes temperature model
temperature_model['noct_installed'] = 45
# parameters for noct_sam temperature model
temperature_model['noct'] = 45.
temperature_model['module_efficiency'] = 0.2
module_params = {**pvsyst_module_params, **cec_module_params}
return pvsystem.PVSystem(
arrays=[
pvsystem.Array(
temperature_model_parameters=temperature_model,
module_parameters=module_params
),
pvsystem.Array(
temperature_model_parameters=temperature_model,
module_parameters=module_params
)
]
)
@pytest.mark.parametrize("poa_direct, poa_diffuse, aoi",
[(20, (10, 10), (20, 20)),
((20, 20), (10,), (20, 20)),
((20, 20), (10, 10), 20)])
def test_PVSystem_sapm_effective_irradiance_value_error(
poa_direct, poa_diffuse, aoi, two_array_system):
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
two_array_system.sapm_effective_irradiance(
poa_direct, poa_diffuse, 10, aoi
)
def test_PVSystem_sapm_celltemp(mocker):
a, b, deltaT = (-3.47, -0.0594, 3) # open_rack_glass_glass
temp_model_params = {'a': a, 'b': b, 'deltaT': deltaT}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
mocker.spy(temperature, 'sapm_cell')
temps = 25
irrads = 1000
winds = 1
out = system.sapm_celltemp(irrads, temps, winds)
temperature.sapm_cell.assert_called_once_with(irrads, temps, winds, a, b,
deltaT)
assert_allclose(out, 57, atol=1)
def test_PVSystem_sapm_celltemp_kwargs(mocker):
temp_model_params = temperature.TEMPERATURE_MODEL_PARAMETERS['sapm'][
'open_rack_glass_glass']
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
mocker.spy(temperature, 'sapm_cell')
temps = 25
irrads = 1000
winds = 1
out = system.sapm_celltemp(irrads, temps, winds)
temperature.sapm_cell.assert_called_once_with(irrads, temps, winds,
temp_model_params['a'],
temp_model_params['b'],
temp_model_params['deltaT'])
assert_allclose(out, 57, atol=1)
def test_PVSystem_multi_array_sapm_celltemp_different_arrays():
temp_model_one = temperature.TEMPERATURE_MODEL_PARAMETERS['sapm'][
'open_rack_glass_glass']
temp_model_two = temperature.TEMPERATURE_MODEL_PARAMETERS['sapm'][
'close_mount_glass_glass']
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(temperature_model_parameters=temp_model_one),
pvsystem.Array(temperature_model_parameters=temp_model_two)]
)
temp_one, temp_two = system.sapm_celltemp(
(1000, 1000), 25, 1
)
assert temp_one != temp_two
def test_PVSystem_pvsyst_celltemp(mocker):
parameter_set = 'insulated'
temp_model_params = temperature.TEMPERATURE_MODEL_PARAMETERS['pvsyst'][
parameter_set]
alpha_absorption = 0.85
module_efficiency = 0.17
module_parameters = {'alpha_absorption': alpha_absorption,
'module_efficiency': module_efficiency}
system = pvsystem.PVSystem(module_parameters=module_parameters,
temperature_model_parameters=temp_model_params)
mocker.spy(temperature, 'pvsyst_cell')
irrad = 800
temp = 45
wind = 0.5
out = system.pvsyst_celltemp(irrad, temp, wind_speed=wind)
temperature.pvsyst_cell.assert_called_once_with(
irrad, temp, wind_speed=wind, u_c=temp_model_params['u_c'],
u_v=temp_model_params['u_v'], module_efficiency=module_efficiency,
alpha_absorption=alpha_absorption)
assert (out < 90) and (out > 70)
def test_PVSystem_faiman_celltemp(mocker):
u0, u1 = 25.0, 6.84 # default values
temp_model_params = {'u0': u0, 'u1': u1}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
mocker.spy(temperature, 'faiman')
temps = 25
irrads = 1000
winds = 1
out = system.faiman_celltemp(irrads, temps, winds)
temperature.faiman.assert_called_once_with(irrads, temps, winds, u0, u1)
assert_allclose(out, 56.4, atol=1)
def test_PVSystem_noct_celltemp(mocker):
poa_global, temp_air, wind_speed, noct, module_efficiency = (
1000., 25., 1., 45., 0.2)
expected = 55.230790492
temp_model_params = {'noct': noct, 'module_efficiency': module_efficiency}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
mocker.spy(temperature, 'noct_sam')
out = system.noct_sam_celltemp(poa_global, temp_air, wind_speed)
temperature.noct_sam.assert_called_once_with(
poa_global, temp_air, wind_speed, effective_irradiance=None, noct=noct,
module_efficiency=module_efficiency)
assert_allclose(out, expected)
# different types
out = system.noct_sam_celltemp(np.array(poa_global), np.array(temp_air),
np.array(wind_speed))
assert_allclose(out, expected)
dr = pd.date_range(start='2020-01-01 12:00:00', end='2020-01-01 13:00:00',
freq='1H')
out = system.noct_sam_celltemp(pd.Series(index=dr, data=poa_global),
pd.Series(index=dr, data=temp_air),
pd.Series(index=dr, data=wind_speed))
assert_series_equal(out, pd.Series(index=dr, data=expected))
# now use optional arguments
temp_model_params.update({'transmittance_absorptance': 0.8,
'array_height': 2,
'mount_standoff': 2.0})
expected = 60.477703576
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
out = system.noct_sam_celltemp(poa_global, temp_air, wind_speed,
effective_irradiance=1100.)
assert_allclose(out, expected)
def test_PVSystem_noct_celltemp_error():
poa_global, temp_air, wind_speed, module_efficiency = (1000., 25., 1., 0.2)
temp_model_params = {'module_efficiency': module_efficiency}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
with pytest.raises(KeyError):
system.noct_sam_celltemp(poa_global, temp_air, wind_speed)
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_functions(celltemp, two_array_system):
times = pd.date_range(start='2020-08-25 11:00', freq='H', periods=3)
irrad_one = pd.Series(1000, index=times)
irrad_two = pd.Series(500, index=times)
temp_air = pd.Series(25, index=times)
wind_speed = pd.Series(1, index=times)
temp_one, temp_two = celltemp(
two_array_system, (irrad_one, irrad_two), temp_air, wind_speed)
assert (temp_one != temp_two).all()
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_multi_temp(celltemp, two_array_system):
times = pd.date_range(start='2020-08-25 11:00', freq='H', periods=3)
irrad = pd.Series(1000, index=times)
temp_air_one = pd.Series(25, index=times)
temp_air_two = pd.Series(5, index=times)
wind_speed = pd.Series(1, index=times)
temp_one, temp_two = celltemp(
two_array_system,
(irrad, irrad),
(temp_air_one, temp_air_two),
wind_speed
)
assert (temp_one != temp_two).all()
temp_one_switch, temp_two_switch = celltemp(
two_array_system,
(irrad, irrad),
(temp_air_two, temp_air_one),
wind_speed
)
assert_series_equal(temp_one, temp_two_switch)
assert_series_equal(temp_two, temp_one_switch)
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_multi_wind(celltemp, two_array_system):
times = pd.date_range(start='2020-08-25 11:00', freq='H', periods=3)
irrad = pd.Series(1000, index=times)
temp_air = pd.Series(25, index=times)
wind_speed_one = pd.Series(1, index=times)
wind_speed_two = pd.Series(5, index=times)
temp_one, temp_two = celltemp(
two_array_system,
(irrad, irrad),
temp_air,
(wind_speed_one, wind_speed_two)
)
assert (temp_one != temp_two).all()
temp_one_switch, temp_two_switch = celltemp(
two_array_system,
(irrad, irrad),
temp_air,
(wind_speed_two, wind_speed_one)
)
assert_series_equal(temp_one, temp_two_switch)
assert_series_equal(temp_two, temp_one_switch)
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_temp_too_short(
celltemp, two_array_system):
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
celltemp(two_array_system, (1000, 1000), (1,), 1)
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_temp_too_long(
celltemp, two_array_system):
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
celltemp(two_array_system, (1000, 1000), (1, 1, 1), 1)
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_wind_too_short(
celltemp, two_array_system):
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
celltemp(two_array_system, (1000, 1000), 25, (1,))
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_wind_too_long(
celltemp, two_array_system):
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
celltemp(two_array_system, (1000, 1000), 25, (1, 1, 1))
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_poa_length_mismatch(
celltemp, two_array_system):
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
celltemp(two_array_system, 1000, 25, 1)
def test_PVSystem_fuentes_celltemp(mocker):
noct_installed = 45
temp_model_params = {'noct_installed': noct_installed}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
spy = mocker.spy(temperature, 'fuentes')
index = pd.date_range('2019-01-01 11:00', freq='h', periods=3)
temps = pd.Series(25, index)
irrads = pd.Series(1000, index)
winds = pd.Series(1, index)
out = system.fuentes_celltemp(irrads, temps, winds)
assert_series_equal(spy.call_args[0][0], irrads)
assert_series_equal(spy.call_args[0][1], temps)
assert_series_equal(spy.call_args[0][2], winds)
assert spy.call_args[1]['noct_installed'] == noct_installed
assert_series_equal(out, pd.Series([52.85, 55.85, 55.85], index,
name='tmod'))
def test_PVSystem_fuentes_celltemp_override(mocker):
# test that the surface_tilt value in the cell temp calculation can be
# overridden but defaults to the surface_tilt attribute of the PVSystem
spy = mocker.spy(temperature, 'fuentes')
noct_installed = 45
index = pd.date_range('2019-01-01 11:00', freq='h', periods=3)
temps = pd.Series(25, index)
irrads = pd.Series(1000, index)
winds = pd.Series(1, index)
# uses default value
temp_model_params = {'noct_installed': noct_installed}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params,
surface_tilt=20)
system.fuentes_celltemp(irrads, temps, winds)
assert spy.call_args[1]['surface_tilt'] == 20
# can be overridden
temp_model_params = {'noct_installed': noct_installed, 'surface_tilt': 30}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params,
surface_tilt=20)
system.fuentes_celltemp(irrads, temps, winds)
assert spy.call_args[1]['surface_tilt'] == 30
def test_Array__infer_temperature_model_params():
array = pvsystem.Array(module_parameters={},
racking_model='open_rack',
module_type='glass_polymer')
expected = temperature.TEMPERATURE_MODEL_PARAMETERS[
'sapm']['open_rack_glass_polymer']
assert expected == array._infer_temperature_model_params()
array = pvsystem.Array(module_parameters={},
racking_model='freestanding',
module_type='glass_polymer')
expected = temperature.TEMPERATURE_MODEL_PARAMETERS[
'pvsyst']['freestanding']
assert expected == array._infer_temperature_model_params()
array = pvsystem.Array(module_parameters={},
racking_model='insulated',
module_type=None)
expected = temperature.TEMPERATURE_MODEL_PARAMETERS[
'pvsyst']['insulated']
assert expected == array._infer_temperature_model_params()
def test_Array__infer_cell_type():
array = pvsystem.Array(module_parameters={})
assert array._infer_cell_type() is None
def test_calcparams_desoto(cec_module_params):
times = pd.date_range(start='2015-01-01', periods=3, freq='12H')
effective_irradiance = pd.Series([0.0, 800.0, 800.0], index=times)
temp_cell = pd.Series([25, 25, 50], index=times)
IL, I0, Rs, Rsh, nNsVth = pvsystem.calcparams_desoto(
effective_irradiance,
temp_cell,
alpha_sc=cec_module_params['alpha_sc'],
a_ref=cec_module_params['a_ref'],
I_L_ref=cec_module_params['I_L_ref'],
I_o_ref=cec_module_params['I_o_ref'],
R_sh_ref=cec_module_params['R_sh_ref'],
R_s=cec_module_params['R_s'],
EgRef=1.121,
dEgdT=-0.0002677)
assert_series_equal(IL, pd.Series([0.0, 6.036, 6.096], index=times),
check_less_precise=3)
assert_series_equal(I0, pd.Series([0.0, 1.94e-9, 7.419e-8], index=times),
check_less_precise=3)
assert_allclose(Rs, 0.094)
assert_series_equal(Rsh, pd.Series([np.inf, 19.65, 19.65], index=times),
check_less_precise=3)
assert_series_equal(nNsVth, pd.Series([0.473, 0.473, 0.5127], index=times),
check_less_precise=3)
def test_calcparams_cec(cec_module_params):
times = pd.date_range(start='2015-01-01', periods=3, freq='12H')
effective_irradiance = pd.Series([0.0, 800.0, 800.0], index=times)
temp_cell = pd.Series([25, 25, 50], index=times)
IL, I0, Rs, Rsh, nNsVth = pvsystem.calcparams_cec(
effective_irradiance,
temp_cell,
alpha_sc=cec_module_params['alpha_sc'],
a_ref=cec_module_params['a_ref'],
I_L_ref=cec_module_params['I_L_ref'],
I_o_ref=cec_module_params['I_o_ref'],
R_sh_ref=cec_module_params['R_sh_ref'],
R_s=cec_module_params['R_s'],
Adjust=cec_module_params['Adjust'],
EgRef=1.121,
dEgdT=-0.0002677)
assert_series_equal(IL, pd.Series([0.0, 6.036, 6.0896], index=times),
check_less_precise=3)
assert_series_equal(I0, pd.Series([0.0, 1.94e-9, 7.419e-8], index=times),
check_less_precise=3)
assert_allclose(Rs, 0.094)
assert_series_equal(Rsh, pd.Series([np.inf, 19.65, 19.65], index=times),
check_less_precise=3)
assert_series_equal(nNsVth, pd.Series([0.473, 0.473, 0.5127], index=times),
check_less_precise=3)
def test_calcparams_pvsyst(pvsyst_module_params):
times = pd.date_range(start='2015-01-01', periods=2, freq='12H')
effective_irradiance = pd.Series([0.0, 800.0], index=times)
temp_cell = pd.Series([25, 50], index=times)
IL, I0, Rs, Rsh, nNsVth = pvsystem.calcparams_pvsyst(
effective_irradiance,
temp_cell,
alpha_sc=pvsyst_module_params['alpha_sc'],
gamma_ref=pvsyst_module_params['gamma_ref'],
mu_gamma=pvsyst_module_params['mu_gamma'],
I_L_ref=pvsyst_module_params['I_L_ref'],
I_o_ref=pvsyst_module_params['I_o_ref'],
R_sh_ref=pvsyst_module_params['R_sh_ref'],
R_sh_0=pvsyst_module_params['R_sh_0'],
R_s=pvsyst_module_params['R_s'],
cells_in_series=pvsyst_module_params['cells_in_series'],
EgRef=pvsyst_module_params['EgRef'])
assert_series_equal(
IL.round(decimals=3), pd.Series([0.0, 4.8200], index=times))
assert_series_equal(
I0.round(decimals=3), pd.Series([0.0, 1.47e-7], index=times))
assert_allclose(Rs, 0.500)
assert_series_equal(
Rsh.round(decimals=3), pd.Series([1000.0, 305.757], index=times))
assert_series_equal(
nNsVth.round(decimals=4), pd.Series([1.6186, 1.7961], index=times))
def test_PVSystem_calcparams_desoto(cec_module_params, mocker):
mocker.spy(pvsystem, 'calcparams_desoto')
module_parameters = cec_module_params.copy()
module_parameters['EgRef'] = 1.121
module_parameters['dEgdT'] = -0.0002677
system = pvsystem.PVSystem(module_parameters=module_parameters)
effective_irradiance = np.array([0, 800])
temp_cell = 25
IL, I0, Rs, Rsh, nNsVth = system.calcparams_desoto(effective_irradiance,
temp_cell)
pvsystem.calcparams_desoto.assert_called_once_with(
effective_irradiance,
temp_cell,
alpha_sc=cec_module_params['alpha_sc'],
a_ref=cec_module_params['a_ref'],
I_L_ref=cec_module_params['I_L_ref'],
I_o_ref=cec_module_params['I_o_ref'],
R_sh_ref=cec_module_params['R_sh_ref'],
R_s=cec_module_params['R_s'],
EgRef=module_parameters['EgRef'],
dEgdT=module_parameters['dEgdT'])
assert_allclose(IL, np.array([0.0, 6.036]), atol=1)
assert_allclose(I0, 2.0e-9, atol=1.0e-9)
assert_allclose(Rs, 0.1, atol=0.1)
assert_allclose(Rsh, np.array([np.inf, 20]), atol=1)
assert_allclose(nNsVth, 0.5, atol=0.1)
def test_PVSystem_calcparams_pvsyst(pvsyst_module_params, mocker):
mocker.spy(pvsystem, 'calcparams_pvsyst')
module_parameters = pvsyst_module_params.copy()
system = pvsystem.PVSystem(module_parameters=module_parameters)
effective_irradiance = np.array([0, 800])
temp_cell = np.array([25, 50])
IL, I0, Rs, Rsh, nNsVth = system.calcparams_pvsyst(effective_irradiance,
temp_cell)
pvsystem.calcparams_pvsyst.assert_called_once_with(
effective_irradiance,
temp_cell,
alpha_sc=pvsyst_module_params['alpha_sc'],
gamma_ref=pvsyst_module_params['gamma_ref'],
mu_gamma=pvsyst_module_params['mu_gamma'],
I_L_ref=pvsyst_module_params['I_L_ref'],
I_o_ref=pvsyst_module_params['I_o_ref'],
R_sh_ref=pvsyst_module_params['R_sh_ref'],
R_sh_0=pvsyst_module_params['R_sh_0'],
R_s=pvsyst_module_params['R_s'],
cells_in_series=pvsyst_module_params['cells_in_series'],
EgRef=pvsyst_module_params['EgRef'],
R_sh_exp=pvsyst_module_params['R_sh_exp'])
assert_allclose(IL, np.array([0.0, 4.8200]), atol=1)
assert_allclose(I0, np.array([0.0, 1.47e-7]), atol=1.0e-5)
assert_allclose(Rs, 0.5, atol=0.1)
assert_allclose(Rsh, np.array([1000, 305.757]), atol=50)
assert_allclose(nNsVth, np.array([1.6186, 1.7961]), atol=0.1)
@pytest.mark.parametrize('calcparams', [pvsystem.PVSystem.calcparams_pvsyst,
pvsystem.PVSystem.calcparams_desoto,
pvsystem.PVSystem.calcparams_cec])
def test_PVSystem_multi_array_calcparams(calcparams, two_array_system):
params_one, params_two = calcparams(
two_array_system, (1000, 500), (30, 20)
)
assert params_one != params_two
@pytest.mark.parametrize('calcparams, irrad, celltemp',
[ (f, irrad, celltemp)
for f in (pvsystem.PVSystem.calcparams_desoto,
pvsystem.PVSystem.calcparams_cec,
pvsystem.PVSystem.calcparams_pvsyst)
for irrad, celltemp in [(1, (1, 1)), ((1, 1), 1)]])
def test_PVSystem_multi_array_calcparams_value_error(
calcparams, irrad, celltemp, two_array_system):
with pytest.raises(ValueError,
match='Length mismatch for per-array parameter'):
calcparams(two_array_system, irrad, celltemp)
@pytest.fixture(params=[
{ # Can handle all python scalar inputs
'Rsh': 20.,
'Rs': 0.1,
'nNsVth': 0.5,
'I': 3.,
'I0': 6.e-7,
'IL': 7.,
'V_expected': 7.5049875193450521
},
{ # Can handle all rank-0 array inputs
'Rsh': np.array(20.),
'Rs': np.array(0.1),
'nNsVth': np.array(0.5),
'I': np.array(3.),
'I0': np.array(6.e-7),
'IL': np.array(7.),
'V_expected': np.array(7.5049875193450521)
},
{ # Can handle all rank-1 singleton array inputs
'Rsh': np.array([20.]),
'Rs': np.array([0.1]),
'nNsVth': np.array([0.5]),
'I': np.array([3.]),
'I0': np.array([6.e-7]),
'IL': np.array([7.]),
'V_expected': np.array([7.5049875193450521])
},
{ # Can handle all rank-1 non-singleton array inputs with infinite shunt
# resistance, Rsh=inf gives V=Voc=nNsVth*(np.log(IL + I0) - np.log(I0)
# at I=0
'Rsh': np.array([np.inf, 20.]),
'Rs': np.array([0.1, 0.1]),
'nNsVth': np.array([0.5, 0.5]),
'I': np.array([0., 3.]),
'I0': np.array([6.e-7, 6.e-7]),
'IL': np.array([7., 7.]),
'V_expected': np.array([0.5*(np.log(7. + 6.e-7) - np.log(6.e-7)),
7.5049875193450521])
},
{ # Can handle mixed inputs with a rank-2 array with infinite shunt
# resistance, Rsh=inf gives V=Voc=nNsVth*(np.log(IL + I0) - np.log(I0)
# at I=0
'Rsh': np.array([[np.inf, np.inf], [np.inf, np.inf]]),
'Rs': np.array([0.1]),
'nNsVth': np.array(0.5),
'I': 0.,
'I0': np.array([6.e-7]),
'IL': np.array([7.]),
'V_expected': 0.5*(np.log(7. + 6.e-7) - np.log(6.e-7))*np.ones((2, 2))
},
{ # Can handle ideal series and shunt, Rsh=inf and Rs=0 give
# V = nNsVth*(np.log(IL - I + I0) - np.log(I0))
'Rsh': np.inf,
'Rs': 0.,
'nNsVth': 0.5,
'I': np.array([7., 7./2., 0.]),
'I0': 6.e-7,
'IL': 7.,
'V_expected': np.array([0., 0.5*(np.log(7. - 7./2. + 6.e-7) -
np.log(6.e-7)), 0.5*(np.log(7. + 6.e-7) -
np.log(6.e-7))])
},
{ # Can handle only ideal series resistance, no closed form solution
'Rsh': 20.,
'Rs': 0.,
'nNsVth': 0.5,
'I': 3.,
'I0': 6.e-7,
'IL': 7.,
'V_expected': 7.804987519345062
},
{ # Can handle all python scalar inputs with big LambertW arg
'Rsh': 500.,
'Rs': 10.,
'nNsVth': 4.06,
'I': 0.,
'I0': 6.e-10,
'IL': 1.2,
'V_expected': 86.320000493521079
},
{ # Can handle all python scalar inputs with bigger LambertW arg
# 1000 W/m^2 on a Canadian Solar 220M with 20 C ambient temp
# github issue 225 (this appears to be from PR 226 not issue 225)
'Rsh': 190.,
'Rs': 1.065,
'nNsVth': 2.89,
'I': 0.,
'I0': 7.05196029e-08,
'IL': 10.491262,
'V_expected': 54.303958833791455
},
{ # Can handle all python scalar inputs with bigger LambertW arg
# 1000 W/m^2 on a Canadian Solar 220M with 20 C ambient temp
# github issue 225
'Rsh': 381.68,
'Rs': 1.065,
'nNsVth': 2.681527737715915,
'I': 0.,
'I0': 1.8739027472625636e-09,
'IL': 5.1366949999999996,
'V_expected': 58.19323124611128
},
{ # Verify mixed solution type indexing logic
'Rsh': np.array([np.inf, 190., 381.68]),
'Rs': 1.065,
'nNsVth': np.array([2.89, 2.89, 2.681527737715915]),
'I': 0.,
'I0': np.array([7.05196029e-08, 7.05196029e-08, 1.8739027472625636e-09]),
'IL': np.array([10.491262, 10.491262, 5.1366949999999996]),
'V_expected': np.array([2.89*np.log1p(10.491262/7.05196029e-08),
54.303958833791455, 58.19323124611128])
}])
def fixture_v_from_i(request):
return request.param
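# For context, v_from_i and i_from_v invert the single-diode model exercised by these fixtures,
#   I = IL - I0 * (exp((V + I*Rs) / nNsVth) - 1) - (V + I*Rs) / Rsh,
# which is why the ideal-limit cases above (Rsh=inf and/or Rs=0) reduce to the closed-form expressions
# given in the fixture comments.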
@pytest.mark.parametrize(
'method, atol', [('lambertw', 1e-11), ('brentq', 1e-11), ('newton', 1e-8)]
)
def test_v_from_i(fixture_v_from_i, method, atol):
# Solution set loaded from fixture
Rsh = fixture_v_from_i['Rsh']
Rs = fixture_v_from_i['Rs']
nNsVth = fixture_v_from_i['nNsVth']
I = fixture_v_from_i['I']
I0 = fixture_v_from_i['I0']
IL = fixture_v_from_i['IL']
V_expected = fixture_v_from_i['V_expected']
V = pvsystem.v_from_i(Rsh, Rs, nNsVth, I, I0, IL, method=method)
assert(isinstance(V, type(V_expected)))
if isinstance(V, np.ndarray):
assert(isinstance(V.dtype, type(V_expected.dtype)))
assert(V.shape == V_expected.shape)
assert_allclose(V, V_expected, atol=atol)
def test_i_from_v_from_i(fixture_v_from_i):
# Solution set loaded from fixture
Rsh = fixture_v_from_i['Rsh']
Rs = fixture_v_from_i['Rs']
nNsVth = fixture_v_from_i['nNsVth']
I = fixture_v_from_i['I']
I0 = fixture_v_from_i['I0']
IL = fixture_v_from_i['IL']
V = fixture_v_from_i['V_expected']
# Convergence criteria
atol = 1.e-11
I_expected = pvsystem.i_from_v(Rsh, Rs, nNsVth, V, I0, IL,
method='lambertw')
assert_allclose(I, I_expected, atol=atol)
I = pvsystem.i_from_v(Rsh, Rs, nNsVth, V, I0, IL)
assert(isinstance(I, type(I_expected)))
if isinstance(I, np.ndarray):
assert(isinstance(I.dtype, type(I_expected.dtype)))
assert(I.shape == I_expected.shape)
assert_allclose(I, I_expected, atol=atol)
@pytest.fixture(params=[
{ # Can handle all python scalar inputs
'Rsh': 20.,
'Rs': 0.1,
'nNsVth': 0.5,
'V': 7.5049875193450521,
'I0': 6.e-7,
'IL': 7.,
'I_expected': 3.
},
{ # Can handle all rank-0 array inputs
'Rsh': np.array(20.),
'Rs': np.array(0.1),
'nNsVth': np.array(0.5),
'V': np.array(7.5049875193450521),
'I0': np.array(6.e-7),
'IL': np.array(7.),
'I_expected': np.array(3.)
},
{ # Can handle all rank-1 singleton array inputs
'Rsh': np.array([20.]),
'Rs': np.array([0.1]),
'nNsVth': np.array([0.5]),
'V': np.array([7.5049875193450521]),
'I0': np.array([6.e-7]),
'IL': np.array([7.]),
'I_expected': np.array([3.])
},
{ # Can handle all rank-1 non-singleton array inputs with a zero
# series resistance, Rs=0 gives I=IL=Isc at V=0
'Rsh': np.array([20., 20.]),
'Rs': np.array([0., 0.1]),
'nNsVth': np.array([0.5, 0.5]),
'V': np.array([0., 7.5049875193450521]),
'I0': np.array([6.e-7, 6.e-7]),
'IL': np.array([7., 7.]),
'I_expected': np.array([7., 3.])
},
{ # Can handle mixed inputs with a rank-2 array with zero series
# resistance, Rs=0 gives I=IL=Isc at V=0
'Rsh': np.array([20.]),
'Rs': np.array([[0., 0.], [0., 0.]]),
'nNsVth': np.array(0.5),
'V': 0.,
'I0': np.array([6.e-7]),
'IL': np.array([7.]),
'I_expected': np.array([[7., 7.], [7., 7.]])
},
{ # Can handle ideal series and shunt, Rsh=inf and Rs=0 give
# V_oc = nNsVth*(np.log(IL + I0) - np.log(I0))
'Rsh': np.inf,
'Rs': 0.,
'nNsVth': 0.5,
'V': np.array([0., 0.5*(np.log(7. + 6.e-7) - np.log(6.e-7))/2.,
0.5*(np.log(7. + 6.e-7) - np.log(6.e-7))]),
'I0': 6.e-7,
'IL': 7.,
'I_expected': np.array([7., 7. - 6.e-7*np.expm1((np.log(7. + 6.e-7) -
np.log(6.e-7))/2.), 0.])
},
{ # Can handle only ideal shunt resistance, no closed form solution
'Rsh': np.inf,
'Rs': 0.1,
'nNsVth': 0.5,
'V': 7.5049875193450521,
'I0': 6.e-7,
'IL': 7.,
'I_expected': 3.2244873645510923
}])
def fixture_i_from_v(request):
return request.param
@pytest.mark.parametrize(
'method, atol', [('lambertw', 1e-11), ('brentq', 1e-11), ('newton', 1e-11)]
)
def test_i_from_v(fixture_i_from_v, method, atol):
# Solution set loaded from fixture
Rsh = fixture_i_from_v['Rsh']
Rs = fixture_i_from_v['Rs']
nNsVth = fixture_i_from_v['nNsVth']
V = fixture_i_from_v['V']
I0 = fixture_i_from_v['I0']
IL = fixture_i_from_v['IL']
I_expected = fixture_i_from_v['I_expected']
I = pvsystem.i_from_v(Rsh, Rs, nNsVth, V, I0, IL, method=method)
assert(isinstance(I, type(I_expected)))
    if isinstance(I, np.ndarray):
assert(isinstance(I.dtype, type(I_expected.dtype)))
assert(I.shape == I_expected.shape)
assert_allclose(I, I_expected, atol=atol)
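# Independent sanity check (illustrative sketch, not part of the original
# suite): for the all-scalar fixture case above, the same current can be
# recovered by bracketing the single-diode equation with scipy's brentq.
def _i_from_v_brentq_sketch(Rsh=20., Rs=0.1, nNsVth=0.5,
                            V=7.5049875193450521, I0=6.e-7, IL=7.):
    from scipy.optimize import brentq
    def residual(I):
        # single-diode KCL residual at the fixed terminal voltage V
        return IL - I0 * np.expm1((V + I * Rs) / nNsVth) - (V + I * Rs) / Rsh - I
    return brentq(residual, 0., IL)  # ~3.0, matching I_expected in the fixture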
def test_PVSystem_i_from_v(mocker):
system = pvsystem.PVSystem()
m = mocker.patch('pvlib.pvsystem.i_from_v', autospec=True)
args = (20, 0.1, 0.5, 7.5049875193450521, 6e-7, 7)
system.i_from_v(*args)
m.assert_called_once_with(*args)
def test_i_from_v_size():
with pytest.raises(ValueError):
pvsystem.i_from_v(20, [0.1] * 2, 0.5, [7.5] * 3, 6.0e-7, 7.0)
with pytest.raises(ValueError):
pvsystem.i_from_v(20, [0.1] * 2, 0.5, [7.5] * 3, 6.0e-7, 7.0,
method='brentq')
with pytest.raises(ValueError):
pvsystem.i_from_v(20, 0.1, 0.5, [7.5] * 3, 6.0e-7, np.array([7., 7.]),
method='newton')
def test_v_from_i_size():
with pytest.raises(ValueError):
pvsystem.v_from_i(20, [0.1] * 2, 0.5, [3.0] * 3, 6.0e-7, 7.0)
with pytest.raises(ValueError):
pvsystem.v_from_i(20, [0.1] * 2, 0.5, [3.0] * 3, 6.0e-7, 7.0,
method='brentq')
with pytest.raises(ValueError):
pvsystem.v_from_i(20, [0.1], 0.5, [3.0] * 3, 6.0e-7, np.array([7., 7.]),
method='newton')
def test_mpp_floats():
"""test max_power_point"""
IL, I0, Rs, Rsh, nNsVth = (7, 6e-7, .1, 20, .5)
out = pvsystem.max_power_point(IL, I0, Rs, Rsh, nNsVth, method='brentq')
expected = {'i_mp': 6.1362673597376753, # 6.1390251797935704, lambertw
'v_mp': 6.2243393757884284, # 6.221535886625464, lambertw
'p_mp': 38.194210547580511} # 38.194165464983037} lambertw
assert isinstance(out, dict)
for k, v in out.items():
assert np.isclose(v, expected[k])
out = pvsystem.max_power_point(IL, I0, Rs, Rsh, nNsVth, method='newton')
for k, v in out.items():
assert np.isclose(v, expected[k])
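# Rough numerical cross-check of max_power_point (illustrative sketch, not part
# of the original suite): sample the IV curve with v_from_i and take the largest
# I*V product; this lands close to the p_mp value asserted above.
def _mpp_brute_force_sketch(IL=7., I0=6e-7, Rs=.1, Rsh=20., nNsVth=.5):
    currents = np.linspace(0., IL, 100001)
    voltages = pvsystem.v_from_i(Rsh, Rs, nNsVth, currents, I0, IL)
    return np.max(currents * voltages)  # ~38.19 W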
def test_mpp_array():
"""test max_power_point"""
IL, I0, Rs, Rsh, nNsVth = (np.array([7, 7]), 6e-7, .1, 20, .5)
out = pvsystem.max_power_point(IL, I0, Rs, Rsh, nNsVth, method='brentq')
expected = {'i_mp': [6.1362673597376753] * 2,
'v_mp': [6.2243393757884284] * 2,
'p_mp': [38.194210547580511] * 2}
assert isinstance(out, dict)
for k, v in out.items():
assert np.allclose(v, expected[k])
out = pvsystem.max_power_point(IL, I0, Rs, Rsh, nNsVth, method='newton')
for k, v in out.items():
assert np.allclose(v, expected[k])
def test_mpp_series():
"""test max_power_point"""
idx = ['2008-02-17T11:30:00-0800', '2008-02-17T12:30:00-0800']
IL, I0, Rs, Rsh, nNsVth = (np.array([7, 7]), 6e-7, .1, 20, .5)
IL = pd.Series(IL, index=idx)
out = pvsystem.max_power_point(IL, I0, Rs, Rsh, nNsVth, method='brentq')
expected = pd.DataFrame({'i_mp': [6.1362673597376753] * 2,
'v_mp': [6.2243393757884284] * 2,
'p_mp': [38.194210547580511] * 2},
index=idx)
assert isinstance(out, pd.DataFrame)
for k, v in out.items():
assert np.allclose(v, expected[k])
out = pvsystem.max_power_point(IL, I0, Rs, Rsh, nNsVth, method='newton')
for k, v in out.items():
assert np.allclose(v, expected[k])
def test_singlediode_series(cec_module_params):
times = pd.date_range(start='2015-01-01', periods=2, freq='12H')
effective_irradiance = pd.Series([0.0, 800.0], index=times)
IL, I0, Rs, Rsh, nNsVth = pvsystem.calcparams_desoto(
effective_irradiance,
temp_cell=25,
alpha_sc=cec_module_params['alpha_sc'],
a_ref=cec_module_params['a_ref'],
I_L_ref=cec_module_params['I_L_ref'],
I_o_ref=cec_module_params['I_o_ref'],
R_sh_ref=cec_module_params['R_sh_ref'],
R_s=cec_module_params['R_s'],
EgRef=1.121,
dEgdT=-0.0002677
)
out = pvsystem.singlediode(IL, I0, Rs, Rsh, nNsVth)
assert isinstance(out, pd.DataFrame)
def test_singlediode_array():
# github issue 221
photocurrent = np.linspace(0, 10, 11)
resistance_shunt = 16
resistance_series = 0.094
nNsVth = 0.473
saturation_current = 1.943e-09
sd = pvsystem.singlediode(photocurrent, saturation_current,
resistance_series, resistance_shunt, nNsVth,
method='lambertw')
expected = np.array([
0. , 0.54538398, 1.43273966, 2.36328163, 3.29255606,
4.23101358, 5.16177031, 6.09368251, 7.02197553, 7.96846051,
8.88220557])
assert_allclose(sd['i_mp'], expected, atol=0.01)
sd = pvsystem.singlediode(photocurrent, saturation_current,
resistance_series, resistance_shunt, nNsVth)
expected = pvsystem.i_from_v(resistance_shunt, resistance_series, nNsVth,
sd['v_mp'], saturation_current, photocurrent,
method='lambertw')
assert_allclose(sd['i_mp'], expected, atol=0.01)
def test_singlediode_floats():
out = pvsystem.singlediode(7, 6e-7, .1, 20, .5, method='lambertw')
expected = {'i_xx': 4.2498,
'i_mp': 6.1275,
'v_oc': 8.1063,
'p_mp': 38.1937,
'i_x': 6.7558,
'i_sc': 6.9651,
'v_mp': 6.2331,
'i': None,
'v': None}
assert isinstance(out, dict)
for k, v in out.items():
if k in ['i', 'v']:
assert v is None
else:
assert_allclose(v, expected[k], atol=1e-3)
def test_singlediode_floats_ivcurve():
out = pvsystem.singlediode(7, 6e-7, .1, 20, .5, ivcurve_pnts=3, method='lambertw')
expected = {'i_xx': 4.2498,
'i_mp': 6.1275,
'v_oc': 8.1063,
'p_mp': 38.1937,
'i_x': 6.7558,
'i_sc': 6.9651,
'v_mp': 6.2331,
'i': np.array([6.965172e+00, 6.755882e+00, 2.575717e-14]),
'v': np.array([0., 4.05315, 8.1063])}
assert isinstance(out, dict)
for k, v in out.items():
assert_allclose(v, expected[k], atol=1e-3)
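# Note, from the expected values above: with ivcurve_pnts=3 the reported curve
# voltages are evenly spaced between 0 and v_oc, i.e. approximately
# np.linspace(0., 8.1063, 3) -> array([0., 4.05315, 8.1063]).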
def test_singlediode_series_ivcurve(cec_module_params):
times = pd.date_range(start='2015-06-01', periods=3, freq='6H')
effective_irradiance = pd.Series([0.0, 400.0, 800.0], index=times)
IL, I0, Rs, Rsh, nNsVth = pvsystem.calcparams_desoto(
effective_irradiance,
temp_cell=25,
alpha_sc=cec_module_params['alpha_sc'],
a_ref=cec_module_params['a_ref'],
I_L_ref=cec_module_params['I_L_ref'],
I_o_ref=cec_module_params['I_o_ref'],
R_sh_ref=cec_module_params['R_sh_ref'],
R_s=cec_module_params['R_s'],
EgRef=1.121,
dEgdT=-0.0002677)
out = pvsystem.singlediode(IL, I0, Rs, Rsh, nNsVth, ivcurve_pnts=3,
method='lambertw')
expected = OrderedDict([('i_sc', array([0., 3.01054475, 6.00675648])),
('v_oc', array([0., 9.96886962, 10.29530483])),
('i_mp', array([0., 2.65191983, 5.28594672])),
('v_mp', array([0., 8.33392491, 8.4159707])),
('p_mp', array([0., 22.10090078, 44.48637274])),
('i_x', array([0., 2.88414114, 5.74622046])),
('i_xx', array([0., 2.04340914, 3.90007956])),
('v', array([[0., 0., 0.],
[0., 4.98443481, 9.96886962],
[0., 5.14765242, 10.29530483]])),
('i', array([[0., 0., 0.],
[3.01079860e+00, 2.88414114e+00,
3.10862447e-14],
[6.00726296e+00, 5.74622046e+00,
0.00000000e+00]]))])
for k, v in out.items():
assert_allclose(v, expected[k], atol=1e-2)
out = pvsystem.singlediode(IL, I0, Rs, Rsh, nNsVth, ivcurve_pnts=3)
expected['i_mp'] = pvsystem.i_from_v(Rsh, Rs, nNsVth, out['v_mp'], I0, IL,
method='lambertw')
expected['v_mp'] = pvsystem.v_from_i(Rsh, Rs, nNsVth, out['i_mp'], I0, IL,
method='lambertw')
expected['i'] = pvsystem.i_from_v(Rsh, Rs, nNsVth, out['v'].T, I0, IL,
method='lambertw').T
expected['v'] = pvsystem.v_from_i(Rsh, Rs, nNsVth, out['i'].T, I0, IL,
method='lambertw').T
for k, v in out.items():
assert_allclose(v, expected[k], atol=1e-2)
def test_scale_voltage_current_power():
data = pd.DataFrame(
np.array([[2, 1.5, 10, 8, 12, 0.5, 1.5]]),
columns=['i_sc', 'i_mp', 'v_oc', 'v_mp', 'p_mp', 'i_x', 'i_xx'],
index=[0])
expected = pd.DataFrame(
np.array([[6, 4.5, 20, 16, 72, 1.5, 4.5]]),
columns=['i_sc', 'i_mp', 'v_oc', 'v_mp', 'p_mp', 'i_x', 'i_xx'],
index=[0])
out = pvsystem.scale_voltage_current_power(data, voltage=2, current=3)
assert_frame_equal(out, expected, check_less_precise=5)
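# What scale_voltage_current_power amounts to for this frame (an illustrative
# sketch of the arithmetic, not the library implementation): voltage-like
# columns scale by `voltage`, current-like columns by `current`, power by both.
def _scale_sketch(df, voltage, current):
    out = df.copy()
    out[['v_oc', 'v_mp']] *= voltage
    out[['i_sc', 'i_mp', 'i_x', 'i_xx']] *= current
    out['p_mp'] *= voltage * current
    return out  # with voltage=2, current=3 this reproduces `expected` above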
def test_PVSystem_scale_voltage_current_power(mocker):
data = None
system = pvsystem.PVSystem(modules_per_string=2, strings_per_inverter=3)
m = mocker.patch(
'pvlib.pvsystem.scale_voltage_current_power', autospec=True)
system.scale_voltage_current_power(data)
m.assert_called_once_with(data, voltage=2, current=3)
def test_PVSystem_multi_scale_voltage_current_power(mocker):
data = (1, 2)
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(modules_per_string=2, strings=3),
pvsystem.Array(modules_per_string=3, strings=5)]
)
m = mocker.patch(
'pvlib.pvsystem.scale_voltage_current_power', autospec=True
)
system.scale_voltage_current_power(data)
m.assert_has_calls(
[mock.call(1, voltage=2, current=3),
mock.call(2, voltage=3, current=5)],
any_order=True
)
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.scale_voltage_current_power(None)
def test_PVSystem_get_ac_sandia(cec_inverter_parameters, mocker):
inv_fun = mocker.spy(inverter, 'sandia')
system = pvsystem.PVSystem(
inverter=cec_inverter_parameters['Name'],
inverter_parameters=cec_inverter_parameters,
)
vdcs = pd.Series(np.linspace(0, 50, 3))
idcs = pd.Series(np.linspace(0, 11, 3))
pdcs = idcs * vdcs
pacs = system.get_ac('sandia', pdcs, v_dc=vdcs)
inv_fun.assert_called_once()
assert_series_equal(pacs, pd.Series([-0.020000, 132.004308, 250.000000]))
@fail_on_pvlib_version('0.10')
def test_PVSystem_snlinverter(cec_inverter_parameters):
system = pvsystem.PVSystem(
inverter=cec_inverter_parameters['Name'],
inverter_parameters=cec_inverter_parameters,
)
vdcs = pd.Series(np.linspace(0,50,3))
idcs = pd.Series(np.linspace(0,11,3))
pdcs = idcs * vdcs
with pytest.warns(pvlibDeprecationWarning):
pacs = system.snlinverter(vdcs, pdcs)
assert_series_equal(pacs, pd.Series([-0.020000, 132.004308, 250.000000]))
def test_PVSystem_get_ac_sandia_multi(cec_inverter_parameters, mocker):
inv_fun = mocker.spy(inverter, 'sandia_multi')
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(), pvsystem.Array()],
inverter=cec_inverter_parameters['Name'],
inverter_parameters=cec_inverter_parameters,
)
vdcs = pd.Series(np.linspace(0, 50, 3))
idcs = pd.Series(np.linspace(0, 11, 3)) / 2
pdcs = idcs * vdcs
pacs = system.get_ac('sandia', (pdcs, pdcs), v_dc=(vdcs, vdcs))
inv_fun.assert_called_once()
assert_series_equal(pacs, pd.Series([-0.020000, 132.004308, 250.000000]))
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.get_ac('sandia', vdcs, (pdcs, pdcs))
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.get_ac('sandia', vdcs, (pdcs,))
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.get_ac('sandia', (vdcs, vdcs), (pdcs, pdcs, pdcs))
def test_PVSystem_get_ac_pvwatts(pvwatts_system_defaults, mocker):
mocker.spy(inverter, 'pvwatts')
pdc = 50
out = pvwatts_system_defaults.get_ac('pvwatts', pdc)
inverter.pvwatts.assert_called_once_with(
pdc, **pvwatts_system_defaults.inverter_parameters)
assert out < pdc
def test_PVSystem_get_ac_pvwatts_kwargs(pvwatts_system_kwargs, mocker):
mocker.spy(inverter, 'pvwatts')
pdc = 50
out = pvwatts_system_kwargs.get_ac('pvwatts', pdc)
inverter.pvwatts.assert_called_once_with(
pdc, **pvwatts_system_kwargs.inverter_parameters)
assert out < pdc
def test_PVSystem_get_ac_pvwatts_multi(
pvwatts_system_defaults, pvwatts_system_kwargs, mocker):
mocker.spy(inverter, 'pvwatts_multi')
expected = [pd.Series([0.0, 48.123524, 86.400000]),
pd.Series([0.0, 45.893550, 85.500000])]
systems = [pvwatts_system_defaults, pvwatts_system_kwargs]
for base_sys, exp in zip(systems, expected):
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(), pvsystem.Array()],
inverter_parameters=base_sys.inverter_parameters,
)
pdcs = pd.Series([0., 25., 50.])
pacs = system.get_ac('pvwatts', (pdcs, pdcs))
assert_series_equal(pacs, exp)
assert inverter.pvwatts_multi.call_count == 2
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.get_ac('pvwatts', (pdcs,))
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.get_ac('pvwatts', pdcs)
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.get_ac('pvwatts', (pdcs, pdcs, pdcs))
@pytest.mark.parametrize('model', ['sandia', 'adr', 'pvwatts'])
def test_PVSystem_get_ac_single_array_tuple_input(
model,
pvwatts_system_defaults,
cec_inverter_parameters,
adr_inverter_parameters):
vdcs = {
'sandia': pd.Series(np.linspace(0, 50, 3)),
'pvwatts': None,
'adr': pd.Series([135, 154, 390, 420, 551])
}
pdcs = {'adr': pd.Series([135, 1232, 1170, 420, 551]),
'sandia': pd.Series(np.linspace(0, 11, 3)) * vdcs['sandia'],
'pvwatts': 50}
inverter_parameters = {
'sandia': cec_inverter_parameters,
'adr': adr_inverter_parameters,
'pvwatts': pvwatts_system_defaults.inverter_parameters
}
expected = {
'adr': pd.Series([np.nan, 1161.5745, 1116.4459, 382.6679, np.nan]),
'sandia': pd.Series([-0.020000, 132.004308, 250.000000])
}
system = pvsystem.PVSystem(
arrays=[pvsystem.Array()],
inverter_parameters=inverter_parameters[model]
)
ac = system.get_ac(p_dc=(pdcs[model],), v_dc=(vdcs[model],), model=model)
if model == 'pvwatts':
assert ac < pdcs['pvwatts']
else:
assert_series_equal(ac, expected[model])
def test_PVSystem_get_ac_adr(adr_inverter_parameters, mocker):
mocker.spy(inverter, 'adr')
system = pvsystem.PVSystem(
inverter_parameters=adr_inverter_parameters,
)
    vdcs = pd.Series([135, 154, 390, 420, 551])
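    # NOTE: the remainder of this test was truncated in the source.  A plausible
    # completion, reconstructed from the 'adr' inputs and expectations used in
    # test_PVSystem_get_ac_single_array_tuple_input above, would be:
    pdcs = pd.Series([135, 1232, 1170, 420, 551])
    pacs = system.get_ac('adr', pdcs, v_dc=vdcs)
    inverter.adr.assert_called_once()
    assert_series_equal(pacs, pd.Series([np.nan, 1161.5745, 1116.4459,
                                         382.6679, np.nan]))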
#coding=utf-8
import pandas as pd
import numpy as np
import sys
import os
from sklearn import preprocessing
import datetime
import scipy as sc
from sklearn.preprocessing import MinMaxScaler,StandardScaler
try:
    from sklearn.externals import joblib  # removed in newer scikit-learn releases
except ImportError:
    import joblib
import FEsingle  # feature-engineering helpers used by the classes below (assumed local module)
class FEbase(object):
"""description of class"""
def __init__(self, **kwargs):
pass
def create(self,*DataSetName):
#print (self.__class__.__name__)
(filepath, tempfilename) = os.path.split(DataSetName[0])
(filename, extension) = os.path.splitext(tempfilename)
#bufferstring='savetest2017.csv'
bufferstringoutput=filepath+'/'+filename+'_'+self.__class__.__name__+extension
if(os.path.exists(bufferstringoutput)==False):
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
df_all=self.core(DataSetName)
df_all.to_csv(bufferstringoutput)
return bufferstringoutput
def core(self,df_all,Data_adj_name=''):
return df_all
    def real_FE(self):
return 0
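# Illustrative usage of the caching wrapper above (a sketch; the CSV paths are
# hypothetical placeholders, not files shipped with this project):
#
#   fe = FE_a23()
#   out_path = fe.create('data/daily.csv',      # DataSetName[0]: daily bars
#                        'data/adj.csv',        # DataSetName[1]: adjustment factors
#                        'data/limit.csv',      # DataSetName[2]: up/down price limits
#                        'data/moneyflow.csv',  # DataSetName[3]: money-flow data
#                        'data/long.csv')       # DataSetName[4]: long-horizon data
#   df_features = pd.read_csv(out_path, index_col=0, header=0)
#
# create() only recomputes when '<input>_<ClassName>.csv' does not already exist.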
class FEg30eom0110network(FEbase):
    # This version switches to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
intflag=True
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
        # Flag ST or otherwise abnormal stocks
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
        ## Exclude STAR Market (688*) tickers
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
        # Adjusted price (close * adj_factor)
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
if(intflag):
df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
if(intflag):
df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
if(intflag):
df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min',True)
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min',True)
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max',True)
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max',True)
df_all,_=FEsingle.HighLowRange(df_all,8,True)
df_all,_=FEsingle.HighLowRange(df_all,25,True)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
        # Flag stocks at the daily price limit
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        ### Real price range (distinguishes actually high vs. low share prices)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
        # 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
if(intflag):
df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
if(intflag):
df_all['pct_chg_abs_rank']=df_all['pct_chg_abs_rank']*10//2
df_all=FEsingle.PctChgAbsSumRank(df_all,6,True)
df_all=FEsingle.PctChgSumRank(df_all,3,True)
df_all=FEsingle.PctChgSumRank(df_all,6,True)
df_all=FEsingle.PctChgSumRank(df_all,12,True)
df_all=FEsingle.AmountChgRank(df_all,12,True)
#df_all=FEsingle.AmountChgRank(df_all,30)
        # Compute ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
if(intflag):
df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
        # Drop stocks whose price is too low
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
        # Columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
class FEg30eom0110onlinew6d(FEbase):
    # This version switches to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all['sm_amount_pos']=df_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['lg_amount_pos']=df_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['net_mf_amount_pos']=df_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['sm_amount_pos']=df_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_all['lg_amount_pos']=df_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_all['net_mf_amount_pos']=df_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_all['sm_amount']=df_all.groupby('ts_code')['sm_amount'].shift(1)
df_all['lg_amount']=df_all.groupby('ts_code')['lg_amount'].shift(1)
df_all['net_mf_amount']=df_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
        # Flag ST or otherwise abnormal stocks
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
        ## Exclude STAR Market (688*) tickers
#print(df_all)
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
        # Adjusted price (close * adj_factor)
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
        # Flag stocks at the daily price limit
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        ### Real price range (distinguishes actually high vs. low share prices)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
        # 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
#df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
        # Compute ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
        # Drop stocks whose price is too low
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
        # Columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
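# `rollingRankSciPyB`, used in the 20-day rolling .apply calls above, is not
# defined in this excerpt.  A minimal sketch of such a helper, assuming it
# returns the percentile position of the window's most recent value (name and
# exact semantics are assumptions):
def _rolling_rank_sketch(window):
    from scipy.stats import rankdata
    ranks = rankdata(window)        # 1..len(window), ties averaged
    return ranks[-1] / len(window)  # where the latest value sits in the window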
class FE_a23(FEbase):
    # This version switches to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
        # Flag ST or otherwise abnormal stocks
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
        ## Exclude STAR Market (688*) tickers
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
        # Adjusted price (close * adj_factor)
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
        # Flag stocks at the daily price limit
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        ### Real price range (distinguishes actually high vs. low share prices)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
        # 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
        # Compute ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
        # Drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
        # Columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
        # Planned version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
        # Question mark here: needs review
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
        df_all['ts_code'] = df_all['ts_code'].astype('str') # convert the original int dtype to string
        df_all['ts_code'] = df_all['ts_code'].str.zfill(6) # the .str accessor is required for padding
print(df_all)
        ## Exclude STAR Market (688*) tickers
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
        # Adjusted price (close * adj_factor)
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
        # Flag stocks at the daily price limit
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        # 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
        # Compute ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
        # Drop stocks with too low a price or turnover
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
        # Columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
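# The `rank(pct=True) * 19.9 // 1` pattern used for total_mv_rank above buckets a
# cross-sectional percentile rank into 20 integer bins (0-19).  Tiny illustration
# (a sketch, not part of the original pipeline):
def _bucket_demo():
    s = pd.Series([1.0, 5.0, 2.0, 9.0])
    return s.rank(pct=True) * 19.9 // 1   # -> 4.0, 14.0, 9.0, 19.0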
class FE_a29(FEbase):
    # This version switches to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
        # Flag ST or otherwise abnormal stocks
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
        ## Exclude STAR Market (688*) tickers
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
        # Adjusted price (close * adj_factor)
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
        # Flag stocks at the daily price limit
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        ### Real price range (distinguishes actually high vs. low share prices)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
        # 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
        # Compute ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
        # Drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>15]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
        # Columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
#df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
        # Planned version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
        # Question mark here: needs review
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') # convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) # the .str accessor is required here
print(df_all)
## Exclude STAR Market (688) stocks
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
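# class1 encodes the listing board by ts_code prefix: 30* = ChiNext, 60* = Shanghai
# main board, 00* = Shenzhen main board, 0 otherwise.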
#===================================================================================================================================#
# Adjusted price
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
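# Reconstruct the latest adjusted price from the prior day's adjusted close and
# today's pct_chg, presumably because the live feed's adj_factor may be missing
# or lag for the current day.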
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
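# *19.9//1 buckets the cross-sectional percentile rank into 20 integer bins (0-19).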
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
# Limit-up flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
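# pct_chg > 9.4 approximates a limit-up on 10%-limit boards; the 4.8-5.2 band
# approximates a limit-up on 5%-limit (ST) stocks.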
# 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
# Compute ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
# Drop low-priced and thinly traded stocks
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
# Columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FE_a29_Volatility(FEbase):
# This version switches to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
# Flag ST or otherwise restricted stocks
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
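# down_limit/up_limit is roughly 0.9/1.1 ~ 0.82 for ordinary 10%-limit stocks and
# 0.8/1.2 ~ 0.67 for 20%-limit boards, but about 0.95/1.05 ~ 0.90 for 5%-limit ST
# stocks, so the 0.58-0.85 window keeps non-ST names (st_or_otherwrong == 1).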
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
## Exclude STAR Market (688) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
# Adjusted price
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
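# Differences between the long- and short-window ranks describe how the price's
# position is shifting across the 25/12/5-day horizons.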
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
# Limit-up flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
### Actual price range (distinguish high- vs. low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
# 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
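# Same idea for momentum: compare the cumulative pct_chg ranks and sums across the
# 24/12/6/3-day horizons.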
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
# Compute ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
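# PredictDaysTrend(df_all, 5) appears to build the label from the next 5 days'
# trend, even though the class comment above mentions a 3-day prediction.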
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
# Drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>15]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
# Columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
#df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
# Planned version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
# Question mark here (revisit this later)
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') # convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) # the .str accessor is required here
print(df_all)
## Exclude STAR Market (688) stocks
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
# Adjusted price
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
# Limit-up flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
# 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
# Compute ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
# Drop low-priced and thinly traded stocks
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
# Columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FE_a31(FEbase):
# This version switches to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
# Flag ST or otherwise restricted stocks
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
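# Encode the weekday of the trading date (0 = Monday ... 4 = Friday) as a feature.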
## Exclude STAR Market (688) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
# Adjusted price
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
# Limit-up flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
### Actual price range (distinguish high- vs. low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
# 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
# Compute ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
# Drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
df_all=df_all[df_all['total_mv_rank']<6]
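# Unlike FE_a29_Volatility, this variant restricts the universe to the smallest
# market-cap buckets (total_mv_rank bins 0-5 out of 0-19).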
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
# Columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
#df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
# Planned version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
# Question mark here (revisit this later)
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') # convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) # the .str accessor is required here
print(df_all)
## Exclude STAR Market (688) stocks
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
# Adjusted price
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
# Limit-up flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
# 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
# Compute ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
# Drop low-priced and thinly traded stocks
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
# Columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FE_a31_full(FEbase):
# This version switches to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
# Flag ST or otherwise restricted stocks
#df_all['st_or_otherwrong']=0
#df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
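# The "_full" variant keeps ST and limit-hit rows: the st_or_otherwrong and
# high_stop flags and filters are commented out in this class, so the full
# universe (minus 688 codes) is retained.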
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
## Exclude STAR Market (688) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
# Adjusted price
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
# Limit-up flag
#df_all['high_stop']=0
#df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
### Actual price range (distinguish high- vs. low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
# 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
# Compute ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
# Drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
df_all=df_all[df_all['total_mv_rank']<6]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
# Columns not used for now
#df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['amount','close','real_price'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
# Planned version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
# Question mark here (revisit this later)
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') # convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) # the .str accessor is required here
print(df_all)
## Exclude STAR Market (688) stocks
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
# Adjusted price
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
# Limit-up flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
# 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
# Compute ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
# Drop low-priced and thinly traded stocks
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
# Columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
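#Illustration only: the *_pos features in these classes are produced by rollingRankSciPyB, which is
#defined elsewhere in this file. The sketch below shows the assumed behavior (percentile rank of the
#newest value inside its 20-day window); the name _rolling_last_rank_sketch is hypothetical and the
#function is never called.
def _rolling_last_rank_sketch(window):
    #fraction of values in the window that are <= the newest value, i.e. a rank in (0, 1]
    import numpy as np
    arr = np.asarray(window, dtype=float)
    return float((arr <= arr[-1]).sum()) / len(arr)
#Usage sketch:
#  df.groupby('ts_code')['real_price'].rolling(20).apply(_rolling_last_rank_sketch).reset_index(0,drop=True)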
class FE_a29_full(FEbase):
#This version switches to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
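#The three blocks above aggregate the daily money-flow nets (sm_amount, lg_amount, net_mf_amount)
#over 5-, 12- and 25-day windows; InputChgSum is assumed here to be a trailing per-stock sum that
#appends columns such as sm_amount_5 / sm_amount_12 / sm_amount_25 (the commented-out *_diff lines
#below reference those names).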
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#Flag ST or other special-treatment stocks
#df_all['st_or_otherwrong']=0
#df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
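#dt.dayofweek encodes the weekday as 0=Monday .. 4=Friday, used here as a simple calendar feature.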
##Exclude STAR Market (688xxx) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
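#Board classification by ts_code prefix: 688* (STAR Market) is dropped above; class1 marks 30*
#ChiNext as 1, 60* Shanghai main board as 2 and 00* Shenzhen main board as 3; anything else stays 0.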
#===================================================================================================================================#
#Adjusted price (close * adj_factor)
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
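#Multiplying the percentile rank by 19.9 and flooring buckets total_mv_rank into 20 integer bins
#(0..19), e.g. a rank of 0.73 maps to 0.73*19.9//1 = 14.0.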
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#Flag price-limit (limit-up) days
#df_all['high_stop']=0
#df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###Actual price range (distinguishes high- vs. low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
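#Assuming PctChgSum is a trailing sum of pct_chg and PctChgSumRank its cross-sectional rank, each
#*_diff column isolates the older portion of the longer window (e.g. pct_chg_6 - pct_chg_3 is the
#return accumulated 4 to 6 days back), i.e. whether recent momentum is accelerating or fading.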
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
#Compute cross-sectional ranks for the three ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
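#PredictDaysTrend(df_all,5) attaches the forward-looking label over a 5-day horizon; judging from
#FE_qliba2.core below, it produces the tomorrow_chg / tomorrow_chg_rank columns the model trains on.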
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#Drop low-priced stocks
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']<6]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#Columns not used for now
#df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['amount','close','real_price'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#Planned production version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
#TODO: revisit this step
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #left-pad to 6 digits; the .str accessor must be used
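#Reading the csv turns codes like 000001 into the integer 1, so the cast back to str plus zfill(6)
#restores the 6-digit form, e.g. 1 -> '000001'.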
print(df_all)
##Exclude STAR Market (688xxx) stocks
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#Adjusted price (close * adj_factor)
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
#Flag price-limit (limit-up) days
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#Compute cross-sectional ranks for the three ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#Drop low-priced and thinly traded stocks
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#Columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
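#Reference sketch only: PredictDaysTrend (from FEsingle) appears to attach a forward 5-day label
#named tomorrow_chg plus a bucketed tomorrow_chg_rank (see FE_qliba2.core below). The helper below
#is a hypothetical stand-in, is never called, may differ from the real implementation, and assumes
#rows are sorted by trade_date within each ts_code.
def _forward_trend_label_sketch(df, days=5):
    out = df.copy()
    #sum of the next `days` daily pct_chg values per stock (forward-looking target)
    out['tomorrow_chg'] = out.groupby('ts_code')['pct_chg'].transform(
        lambda s: s.rolling(days).sum().shift(-days))
    #cross-sectional rank of that forward move on each trade_date, bucketed into 0..9
    out['tomorrow_chg_rank'] = out.groupby('trade_date')['tomorrow_chg'].rank(pct=True)*9.99//1
    return out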
class FE_qliba2(FEbase):
#This version switches to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
#===================================================================================================================================#
#Adjusted price (close * adj_factor)
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
df_all=FEsingle.PredictDaysTrend(df_all,5)
print(df_all)
df_all=df_all.loc[:,['ts_code','trade_date','tomorrow_chg','tomorrow_chg_rank']]
print(df_all.dtypes)
print(df_all)
#===================================================================================================================================#
#Load the qlib feature set
###df_qlib_1=pd.read_csv('zzztest.csv',header=0)
###df_qlib_2=pd.read_csv('zzztest2.csv',header=0)
##df_qlib_1=pd.read_csv('2013.csv',header=0)
###df_qlib_1=df_qlib_1.iloc[:,0:70]
##df_qlib_all_l=df_qlib_1.iloc[:,0:2]
##df_qlib_all_r=df_qlib_1.iloc[:,70:]
##df_qlib_1 = pd.concat([df_qlib_all_l,df_qlib_all_r],axis=1)
##print(df_qlib_1.head(10))
##df_qlib_2=pd.read_csv('2015.csv',header=0)
##df_qlib_all_l=df_qlib_2.iloc[:,0:2]
##df_qlib_all_r=df_qlib_2.iloc[:,70:]
##df_qlib_2 = pd.concat([df_qlib_all_l,df_qlib_all_r],axis=1)
##df_qlib_3=pd.read_csv('2017.csv',header=0)
##df_qlib_all_l=df_qlib_3.iloc[:,0:2]
##df_qlib_all_r=df_qlib_3.iloc[:,70:]
##df_qlib_3 = pd.concat([df_qlib_all_l,df_qlib_all_r],axis=1)
##df_qlib_4=pd.read_csv('2019.csv',header=0)
##df_qlib_all_l=df_qlib_4.iloc[:,0:2]
##df_qlib_all_r=df_qlib_4.iloc[:,70:]
##df_qlib_4 = pd.concat([df_qlib_all_l,df_qlib_all_r],axis=1)
##df_qlib_all=pd.concat([df_qlib_2,df_qlib_1])
##df_qlib_all=pd.concat([df_qlib_3,df_qlib_all])
##df_qlib_all=pd.concat([df_qlib_4,df_qlib_all])
##df_qlib_all.drop_duplicates()
##print(df_qlib_all.head(10))
##df_qlib_all.drop(['LABEL0'],axis=1,inplace=True)
##df_qlib_all.to_csv("13to21_first70plus.csv")
df_qlib_all=pd.read_csv('13to21_first70plus.csv',header=0)
#df_qlib_all.drop(['LABEL0'],axis=1,inplace=True)
print(df_qlib_all)
df_qlib_all.rename(columns={'datetime':'trade_date','instrument':'ts_code','score':'mix'}, inplace = True)
print(df_qlib_all.dtypes)
print(df_qlib_all)
df_qlib_all['trade_date'] = pd.to_datetime(df_qlib_all['trade_date'], format='%Y-%m-%d')
df_qlib_all['trade_date']=df_qlib_all['trade_date'].apply(lambda x: x.strftime('%Y%m%d'))
df_qlib_all['trade_date'] = df_qlib_all['trade_date'].astype(int)
df_qlib_all['ts_codeL'] = df_qlib_all['ts_code'].str[:2]
df_qlib_all['ts_codeR'] = df_qlib_all['ts_code'].str[2:]
df_qlib_all['ts_codeR'] = df_qlib_all['ts_codeR'].apply(lambda s: s+'.')
df_qlib_all['ts_code']=df_qlib_all['ts_codeR'].str.cat(df_qlib_all['ts_codeL'])
df_qlib_all.drop(['ts_codeL','ts_codeR'],axis=1,inplace=True)
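#The block above converts qlib identifiers to the tushare convention: the datetime column becomes an
#integer trade_date (e.g. 2021-03-05 -> 20210305) and an instrument such as 'SH600000' is rewritten
#as '600000.SH' by moving the 2-letter exchange prefix to a suffix.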
print(df_qlib_all.dtypes)
print(df_qlib_all)
df_qlib_all=df_qlib_all.fillna(value=0)
df_all=pd.merge(df_all, df_qlib_all, how='left', on=['ts_code','trade_date'])
print(df_all)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#Planned production version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
#TODO: revisit this step
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #left-pad to 6 digits; the .str accessor must be used
print(df_all)
##Exclude STAR Market (688xxx) stocks
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#Adjusted price (close * adj_factor)
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
#Flag price-limit (limit-up) days
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#Compute cross-sectional ranks for the three ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#Drop low-priced and thinly traded stocks
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#Columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FEonlinew_a31(FEbase):
#This version switches to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
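#Shifting each money-flow column by one day within every ts_code group ensures the features only use
#information already known before the day being predicted (no same-day lookahead).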
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#Flag ST or other special-treatment stocks
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
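#down_limit/up_limit is roughly 0.82 for +/-10% boards (and ~0.67 for +/-20% boards) versus ~0.90 for
#ST shares limited to +/-5%, so rows with the ratio in (0.58, 0.85) get st_or_otherwrong=1 (regular
#limits); the filter near the end of core() keeps only those rows, dropping ST / abnormal-limit stocks.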
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
##Exclude STAR Market (688xxx) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#Adjusted price (close * adj_factor)
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#Flag price-limit (limit-up) days
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###Actual price range (distinguishes high- vs. low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
#Compute cross-sectional ranks for the three ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#Drop low-priced stocks
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#Columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#Planned production version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all['sm_amount']=df_all.groupby('ts_code')['sm_amount'].shift(1)
df_all['lg_amount']=df_all.groupby('ts_code')['lg_amount'].shift(1)
df_all['net_mf_amount']=df_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=FEsingle.InputChgSum(df_all,5,'sm_amount')
df_all=FEsingle.InputChgSum(df_all,5,'lg_amount')
df_all=FEsingle.InputChgSum(df_all,5,'net_mf_amount')
df_all=FEsingle.InputChgSum(df_all,12,'sm_amount')
df_all=FEsingle.InputChgSum(df_all,12,'lg_amount')
df_all=FEsingle.InputChgSum(df_all,12,'net_mf_amount')
df_all=FEsingle.InputChgSum(df_all,25,'sm_amount')
df_all=FEsingle.InputChgSum(df_all,25,'lg_amount')
df_all=FEsingle.InputChgSum(df_all,25,'net_mf_amount')
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
#TODO: revisit this step
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #left-pad to 6 digits; the .str accessor must be used
print(df_all)
##Exclude STAR Market (688xxx) stocks
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#Adjusted price (close * adj_factor)
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
#Flag price-limit (limit-up) days
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#Compute cross-sectional ranks for the three ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#Drop low-priced and thinly traded stocks
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
df_all=df_all[df_all['total_mv_rank']<6]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#Columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FEfast_a23(FEbase):
#This version switches to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
#print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#Flag ST or other special-treatment stocks
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
##Exclude STAR Market (688xxx) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#df_all['ts_code_try']=df_all['ts_code'].map(lambda x : x[:-3])
#===================================================================================================================================#
#Adjusted price (close * adj_factor)
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
#df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
#df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
#df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#Flag price-limit (limit-up) days
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###Actual price range (distinguishes high- vs. low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
#df_all=FEsingle.PctChgAbsSumRank(df_all,6)
#df_all=FEsingle.PctChgSumRank(df_all,3)
#df_all=FEsingle.PctChgSumRank(df_all,6)
#df_all=FEsingle.PctChgSumRank(df_all,12)
#df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
#df_all=FEsingle.PctChgSum(df_all,6)
#df_all=FEsingle.PctChgSum(df_all,12)
#df_all=FEsingle.PctChgSum(df_all,24)
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,24)
#Compute cross-sectional ranks for the three ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*19.9//2
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#Drop low-priced stocks
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#Columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#Planned production version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
#TODO: revisit this step
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #left-pad to 6 digits; the .str accessor must be used
print(df_all)
##Exclude STAR Market (688xxx) stocks
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#Adjusted price (close * adj_factor)
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
#limit-up flag (whether the stock hit its daily price limit)
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#compute ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#drop stocks with too-low price or turnover
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
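#--- illustrative sketch (assumption, not part of the original pipeline) ---
#rollingRankSciPyB is referenced above but defined elsewhere in this file; judging from
#its use with .rolling(20).apply(...), it presumably returns the rank of the newest value
#inside the 20-day window. A plausible stand-in, named *_sketch to mark it as a guess:
import numpy as np
from scipy import stats
def rollingRankSciPyB_sketch(window_values):
    #rank of the last element within the window, scaled to (0, 1]
    ranks=stats.rankdata(window_values)
    return ranks[-1]/len(window_values)
#e.g. rollingRankSciPyB_sketch(np.array([1,3,2,5,4])) -> 0.8 (the 4th smallest of 5 values)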
class FEfast_a23_pos(FEbase):
#this version switches to a 3-day prediction horizon
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
#print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#flag ST or otherwise abnormal stocks
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
##exclude STAR Market (688) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#df_all['ts_code_try']=df_all['ts_code'].map(lambda x : x[:-3])
#===================================================================================================================================#
#adjusted price
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['tomorrow_chg_rank']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPy(x)).reset_index(0,drop=True)
df_all['tomorrow_chg_rank']=df_all.groupby('ts_code')['tomorrow_chg_rank'].shift(-20)
df_all['tomorrow_chg']=df_all['tomorrow_chg_rank']
#df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
#df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
#df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#limit-up flag (whether the stock hit its daily price limit)
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###actual price range (distinguish high- vs. low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
#df_all=FEsingle.PctChgAbsSumRank(df_all,6)
#df_all=FEsingle.PctChgSumRank(df_all,3)
#df_all=FEsingle.PctChgSumRank(df_all,6)
#df_all=FEsingle.PctChgSumRank(df_all,12)
#df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
#df_all=FEsingle.PctChgSum(df_all,6)
#df_all=FEsingle.PctChgSum(df_all,12)
#df_all=FEsingle.PctChgSum(df_all,24)
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,24)
#compute ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*19.9//2
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
#df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#drop stocks with too-low price
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#planned version of the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
#question mark here: this step needs a second look
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #the .str accessor must be used here
print(df_all)
##exclude STAR Market (688) stocks
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#adjusted price
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
#limit-up flag (whether the stock hit its daily price limit)
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#compute ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#drop stocks with too-low price or turnover
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
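#--- illustrative sketch (not part of the original pipeline) ---
#real_FE above rebuilds today's adjusted price from yesterday's close*adj_factor and
#today's pct_chg. A minimal reproduction of that step on toy data, reusing the same
#column names; the ticker and prices below are made up:
import pandas as pd
_toy=pd.DataFrame({'ts_code':['000001.SZ']*3,
                   'close':[10.00,10.50,10.29],
                   'adj_factor':[1.0,1.0,1.0],
                   'pct_chg':[0.0,5.0,-2.0]})
_toy['real_price']=_toy['close']*_toy['adj_factor']
_toy['real_price']=_toy.groupby('ts_code')['real_price'].shift(1)
_toy['real_price']=_toy['real_price']*(1+_toy['pct_chg']/100)
#-> real_price == [NaN, 10.00*1.05=10.50, 10.50*0.98=10.29]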
class FEfast_a41(FEbase):
#this version switches to a 3-day prediction horizon
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
#df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
#df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
#df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
#df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
#df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
#df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
#df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
#df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
#df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
#df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
#df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
#df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
#df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
#df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
#df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
#df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#print(df_money_all)
#df_data,_=FEsingle.DayFeatureToAll(df_data,name='pct_chg',method='std')
#df_data,_=FEsingle.DayFeatureToAll(df_data,name='pct_chg',method='mean')
##df_data.to_csv('testsee1120.csv')
#print(df_data)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
#df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#flag ST or otherwise abnormal stocks
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
##exclude STAR Market (688) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#adjusted price
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
#df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
#df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
#df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
#df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
#df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
#df_all,_=FEsingle.HighLowRange(df_all,5)
#df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
#df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
#df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
#df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
#df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
#df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
#df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#limit-up flag (whether the stock hit its daily price limit)
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###actual price range (distinguish high- vs. low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
#df_all=FEsingle.PctChgSumRank(df_all,6)
#df_all=FEsingle.PctChgSumRank(df_all,12)
#df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
#df_all=FEsingle.PctChgSum(df_all,6)
#df_all=FEsingle.PctChgSum(df_all,12)
#df_all=FEsingle.PctChgSum(df_all,24)
#df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
#df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
#df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
#df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
#df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
#df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
#compute ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
##df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
##df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#drop stocks with too-low price
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
df_all=df_all[df_all['total_mv_rank']<6]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
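#--- worked example of the limit_percent filter in core() above (sketch) ---
#The down_limit/up_limit ratio separates boards by the width of their daily price limit:
#a +/-10% main-board stock gives 0.9/1.1 ~= 0.818 and a +/-20% board gives 0.8/1.2 ~= 0.667,
#both inside (0.58, 0.85), so st_or_otherwrong becomes 1 and the rows are kept; a +/-5% ST
#stock gives 0.95/1.05 ~= 0.905, stays 0 and is filtered out. The numbers below are made up:
import pandas as pd
_limits=pd.DataFrame({'up_limit':[11.0,12.0,10.5],'down_limit':[9.0,8.0,9.5]})
_limits['limit_percent']=_limits['down_limit']/_limits['up_limit']
_limits['st_or_otherwrong']=0
_limits.loc[(_limits['limit_percent']<0.85) & (0.58<_limits['limit_percent']),'st_or_otherwrong']=1
#-> st_or_otherwrong == [1, 1, 0]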
class FEfast_a41e(FEbase):
#this version switches to a 3-day prediction horizon
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
#df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
#df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
#df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
#df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
#df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
#df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
#df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
#df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#print(df_money_all)
#df_data,_=FEsingle.DayFeatureToAll(df_data,name='pct_chg',method='std')
#df_data,_=FEsingle.DayFeatureToAll(df_data,name='pct_chg',method='mean')
#df_data['pct_chg_DayFeatureToAll_std_1']=df_data.groupby('ts_code')['pct_chg_DayFeatureToAll_std'].shift(1)
#df_data['pct_chg_DayFeatureToAll_mean_1']=df_data.groupby('ts_code')['pct_chg_DayFeatureToAll_mean'].shift(1)
#df_data['pct_chg_DayFeatureToAll_std_2']=df_data.groupby('ts_code')['pct_chg_DayFeatureToAll_std'].shift(2)
#df_data['pct_chg_DayFeatureToAll_mean_2']=df_data.groupby('ts_code')['pct_chg_DayFeatureToAll_mean'].shift(2)
#fill in the missing values
df_long_all['pe'].fillna(999,inplace=True)
df_long_all['pb'].fillna(99,inplace=True)
df_long_all['ps_ttm'].fillna(99,inplace=True)
df_long_all['dv_ttm'].fillna(0,inplace=True)
#df_long_all.to_csv('testsee1120.csv')
print(df_long_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
#df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=
|
pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
|
pandas.merge
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pathlib import Path
# function for loading data from disk
def load_data():
"""
this function is responsible for loading training data from disk
and performs some basic operations like
- one-hot encoding
- feature scaling
- reshaping data
Parameters:
(no-parameters)
Returns:
X : numpy array (contains all features of training data)
y : numpy array (contains all targets of training data)
"""
path = "../data/train.csv"
if(not Path(path).is_file()):
print("[util]: train data not found at '",path,"'")
#quit()
print("[util]: Loading '",path,"'")
train =
|
pd.read_csv(path)
|
pandas.read_csv
|
from .config import on_rtd
import os, re, sys
import warnings
import logging
if not on_rtd:
import pandas as pd
import numpy as np
from scipy.interpolate import LinearNDInterpolator as interpnd
import numpy.random as rand
import matplotlib.pyplot as plt
from astropy import constants as const
#Define useful constants
G = const.G.cgs.value
MSUN = const.M_sun.cgs.value
RSUN = const.R_sun.cgs.value
from .extinction import EXTINCTION, LAMBDA_EFF, extcurve, extcurve_0
from .interp import interp_value, interp_values
else:
G = 6.67e-11
MSUN = 1.99e33
RSUN = 6.96e10
from .config import ISOCHRONES
from .grid import ModelGrid
def get_ichrone(models, bands=None, default=False, **kwargs):
"""Gets Isochrone Object by name, or type, with the right bands
If `default` is `True`, then will set bands
to be the union of bands and default_bands
"""
if isinstance(models, Isochrone):
return models
def actual(bands, ictype):
if bands is None:
return list(ictype.default_bands)
elif default:
return list(set(bands).union(set(ictype.default_bands)))
else:
return bands
if type(models) is type(type):
ichrone = models(actual(bands, models))
elif models=='dartmouth':
from isochrones.dartmouth import Dartmouth_Isochrone
ichrone = Dartmouth_Isochrone(bands=actual(bands, Dartmouth_Isochrone), **kwargs)
elif models=='dartmouthfast':
from isochrones.dartmouth import Dartmouth_FastIsochrone
ichrone = Dartmouth_FastIsochrone(bands=actual(bands, Dartmouth_FastIsochrone), **kwargs)
elif models=='mist':
from isochrones.mist import MIST_Isochrone
ichrone = MIST_Isochrone(bands=actual(bands, MIST_Isochrone), **kwargs)
elif models=='padova':
from isochrones.padova import Padova_Isochrone
ichrone = Padova_Isochrone(bands=actual(bands, Padova_Isochrone), **kwargs)
elif models=='basti':
from isochrones.basti import Basti_Isochrone
ichrone = Basti_Isochrone(bands=actual(bands, Basti_Isochrone), **kwargs)
else:
raise ValueError('Unknown stellar models: {}'.format(models))
return ichrone
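# A hypothetical usage sketch for get_ichrone (not part of the original module). It assumes
# the MIST grid files are available; the isochrones package downloads them on first use.
if __name__ == '__main__':
    mist = get_ichrone('mist')
    # radius(mass [Msun], log10(age [yr]), [Fe/H]); roughly 1 Rsun for a solar analog
    print(mist.radius(1.0, 9.66, 0.0))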
class Isochrone(object):
"""
Basic isochrone class. Everything is a function of mass, log(age), Fe/H.
Can be instantiated directly, but will typically be used with a pre-defined
subclass, such as :class:`dartmouth.Dartmouth_Isochrone`. All parameters
must be array-like objects of the same length, with the exception of ``mags``,
which is a dictionary of such array-like objects.
:param m_ini:
Array of initial mass values [msun].
:type m_ini: array-like
:param age:
log10(age) [yr]
:param feh:
Metallicity [dex]
:param m_act:
Actual mass; same as m_ini if mass loss not implemented [msun]
:param logL:
log10(luminosity) [solar units]
:param Teff:
Effective temperature [K]
:param logg:
log10(surface gravity) [cgs]
:param mags:
Dictionary of absolute magnitudes in different bands
:type mags: ``dict``
:param tri:
Triangulation object used
to initialize the interpolation functions.
If pre-computed triangulation not provided, then the constructor
will calculate one. This might take several minutes, so be patient.
Much better to use pre-computed ones, as provided in, e.g.,
:class:`dartmouth.Dartmouth_Isochrone`.
:type tri: :class:`scipy.spatial.qhull.Delaunay`, optional
:param minage,maxage:
If desired, a minimum or maximum age can be manually entered.
"""
def __init__(self,m_ini,age,feh,m_act,logL,Teff,logg,mags,tri=None,
minage=None, maxage=None, ext_table=False):
"""Warning: if tri object not provided, this will be very slow to be created.
"""
self.minage = age.min()
self.maxage = age.max()
self.minmass = m_act.min()
self.maxmass = m_act.max()
self.minfeh = feh.min()
self.maxfeh = feh.max()
self.ext_table = ext_table
if minage is not None:
logging.warning("minage and maxage keywords are deprecated." + \
"Use instead the .set_bounds(age=(lo, hi)) attribute of StarModel.")
self.minage = minage
if maxage is not None:
logging.warning("minage and maxage keywords are deprecated." + \
"Use instead the .set_bounds(age=(lo, hi)) attribute of StarModel.")
self.maxage = maxage
L = 10**logL
if tri is None:
points = np.zeros((len(m_ini),3))
points[:,0] = m_ini
points[:,1] = age
points[:,2] = feh
fn = interpnd(points,m_act)
self.tri = fn.tri
else:
self.tri = tri
self.mass = interpnd(self.tri,m_act)
self._data = {'mass':m_act,
'logL':logL,
'logg':logg,
'logTeff':np.log10(Teff),
'mags':mags}
self._props = ['mass', 'logL', 'logg', 'logTeff']
self.bands = list(mags.keys())
self._mag = {band:interpnd(self.tri,mags[band]) for band in self.bands}
self.mag = {b : self._mag_fn(b) for b in self.bands}
def __getstate__(self):
odict = self.__dict__.copy()
del odict['mag'] # This can't be pickled
return odict
def __setstate__(self, odict):
self.__dict__ = odict
self.__dict__['mag'] = {b : self._mag_fn(b) for b in self.bands}
def _prop(self, prop, *args):
if prop not in self._props:
raise ValueError('Cannot call this function with {}.'.format(prop))
attr = '_{}'.format(prop)
if not hasattr(self, attr):
setattr(self, attr, interpnd(self.tri, self._data[prop]))
fn = getattr(self, attr)
return fn(*args)
def mass(self, *args):
return self._prop('mass', *args)
def logL(self, *args):
return self._prop('logL', *args)
def logg(self, *args):
return self._prop('logg', *args)
def logTeff(self, *args):
return self._prop('logTeff', *args)
def radius(self, *args):
return np.sqrt(G*self.mass(*args)*MSUN/10**self.logg(*args))/RSUN
def Teff(self, *args):
return 10**self.logTeff(*args)
def density(self, *args):
""" Mean density in g/cc
"""
M = self.mass(*args) * MSUN
V = 4./3 * np.pi * (self.radius(*args) * RSUN)**3
return M/V
def delta_nu(self, *args):
"""Returns asteroseismic delta_nu in uHz
reference: https://arxiv.org/pdf/1312.3853v1.pdf, Eq (2)
"""
return 134.88 * np.sqrt(self.mass(*args) / self.radius(*args)**3)
def nu_max(self, *args):
"""Returns asteroseismic nu_max in uHz
reference: https://arxiv.org/pdf/1312.3853v1.pdf, Eq (3)
"""
return 3120.* (self.mass(*args) /
(self.radius(*args)**2 * np.sqrt(self.Teff(*args)/5777.)))
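# Worked check of the two scaling relations above (sketch): for a solar analog with
# mass = 1 Msun, radius = 1 Rsun and Teff = 5777 K they reduce to
# delta_nu = 134.88 * sqrt(1 / 1**3) = 134.88 uHz and
# nu_max = 3120 * 1 / (1**2 * sqrt(5777 / 5777)) = 3120 uHz,
# i.e. the solar reference values the relations are normalized to.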
def _mag_fn(self, band):
def fn(mass, age, feh, distance=10, AV=0.0, x_ext=0., ext_table=self.ext_table):
if x_ext==0.:
ext = extcurve_0
else:
ext = extcurve(x_ext)
if ext_table:
A = AV*EXTINCTION[band]
else:
A = AV*ext(LAMBDA_EFF[band])
dm = 5*np.log10(distance) - 5
return self._mag[band](mass, age, feh) + dm + A
return fn
def __call__(self, mass, age, feh,
distance=None, AV=0.0,
return_df=True, bands=None):
"""
Returns all properties (or arrays of properties) at given mass, age, feh
:param mass, age, feh:
Mass, log(age), metallicity. Can be float or array_like.
:param distance:
Distance in pc. If passed, then mags will be converted to
apparent mags based on distance (and ``AV``).
:param AV:
V-band extinction (magnitudes).
:param return_df: (optional)
If ``True``, return :class:``pandas.DataFrame`` containing all model
parameters at each input value; if ``False``, return dictionary
of the same.
:param bands: (optional)
List of photometric bands in which to return magnitudes.
Must be subset of ``self.bands``. If not set, then will
default to returning all available bands.
:return:
Either a :class:`pandas.DataFrame` or a dictionary containing
model values evaluated at input points.
"""
# Broadcast inputs to the same shape
mass, age, feh = [np.array(a) for a in np.broadcast_arrays(mass, age, feh)]
args = (mass, age, feh)
Ms = self.mass(*args)*1
Rs = self.radius(*args)*1
logLs = self.logL(*args)*1
loggs = self.logg(*args)*1
Teffs = self.Teff(*args)*1
if bands is None:
bands = self.bands
if distance is not None:
args += (distance, AV)
mags = {band:1*self.mag[band](*args) for band in bands}
# if distance is not None:
# dm = 5*np.log10(distance) - 5
# for band in mags:
# A = AV*EXTINCTION[band]
# mags[band] = mags[band] + dm + A
props = {'age':age,'mass':Ms,'radius':Rs,'logL':logLs,
'logg':loggs,'Teff':Teffs,'mag':mags}
if not return_df:
return props
else:
d = {}
for key in props.keys():
if key=='mag':
for m in props['mag'].keys():
d['{}_mag'.format(m)] = props['mag'][m]
else:
d[key] = props[key]
df = pd.DataFrame(d)
return df
def agerange(self, m, feh=0.0):
"""
For a given mass and feh, returns the min and max allowed ages.
"""
ages = np.arange(self.minage, self.maxage, 0.01)
rs = self.radius(m, ages, feh)
w = np.where(np.isfinite(rs))[0]
return ages[w[0]],ages[w[-1]]
def evtrack(self,m,feh=0.0,minage=None,maxage=None,dage=0.02,
return_df=True):
"""
Returns evolution track for a single initial mass and feh.
:param m:
Initial mass of desired evolution track.
:param feh: (optional)
Metallicity of desired track. Default = 0.0 (solar)
:param minage, maxage: (optional)
Minimum and maximum log(age) of desired track. Will default
to min and max age of model isochrones.
:param dage: (optional)
Spacing in log(age) at which to evaluate models. Default = 0.02
:param return_df: (optional)
Whether to return a ``DataFrame`` or dictionary. Default is ``True``.
:return:
Either a :class:`pandas.DataFrame` or dictionary
representing the evolution
track---fixed mass, sampled at chosen range of ages.
"""
if minage is None:
minage = self.minage
if maxage is None:
maxage = self.maxage
ages = np.arange(minage,maxage,dage)
Ms = self.mass(m,ages,feh)
Rs = self.radius(m,ages,feh)
logLs = self.logL(m,ages,feh)
loggs = self.logg(m,ages,feh)
Teffs = self.Teff(m,ages,feh)
mags = {band:self.mag[band](m,ages,feh) for band in self.bands}
props = {'age':ages,'mass':Ms,'radius':Rs,'logL':logLs,
'logg':loggs, 'Teff':Teffs, 'mag':mags}
if not return_df:
return props
else:
d = {}
for key in props.keys():
if key=='mag':
for m in props['mag'].keys():
d['{}_mag'.format(m)] = props['mag'][m]
else:
d[key] = props[key]
try:
df = pd.DataFrame(d)
except ValueError:
df = pd.DataFrame(d, index=[0])
return df
def isochrone(self,age,feh=0.0,minm=None,maxm=None,dm=0.02,
return_df=True,distance=None,AV=0.0):
"""
Returns stellar models at constant age and feh, for a range of masses
:param age:
log10(age) of desired isochrone.
:param feh: (optional)
Metallicity of desired isochrone (default = 0.0)
:param minm, maxm: (optional)
Mass range of desired isochrone (will default to max and min available)
:param dm: (optional)
Spacing in mass of desired isochrone. Default = 0.02 Msun.
:param return_df: (optional)
Whether to return a :class:``pandas.DataFrame`` or dictionary. Default is ``True``.
:param distance:
Distance in pc. If passed, then mags will be converted to
apparent mags based on distance (and ``AV``).
:param AV:
V-band extinction (magnitudes).
:return:
:class:`pandas.DataFrame` or dictionary containing results.
"""
if minm is None:
minm = self.minmass
if maxm is None:
maxm = self.maxmass
ms = np.arange(minm,maxm,dm)
ages = np.ones(ms.shape)*age
Ms = self.mass(ms,ages,feh)
Rs = self.radius(ms,ages,feh)
logLs = self.logL(ms,ages,feh)
loggs = self.logg(ms,ages,feh)
Teffs = self.Teff(ms,ages,feh)
mags = {band:self.mag[band](ms,ages,feh) for band in self.bands}
#for band in self.bands:
# mags[band] = self.mag[band](ms,ages)
if distance is not None:
dm = 5*np.log10(distance) - 5
for band in mags:
A = AV*EXTINCTION[band]
mags[band] = mags[band] + dm + A
props = {'M':Ms,'R':Rs,'logL':logLs,'logg':loggs,
'Teff':Teffs,'mag':mags}
if not return_df:
return props
else:
d = {}
for key in props.keys():
if key=='mag':
for m in props['mag'].keys():
d['{}_mag'.format(m)] = props['mag'][m]
else:
d[key] = props[key]
try:
df = pd.DataFrame(d)
except ValueError:
df =
|
pd.DataFrame(d, index=[0])
|
pandas.DataFrame
|
import torch
import pandas as pd
import numpy as np
import os
#os.environ['CUDA_VISIBLE_DEVICES'] = '2'
import transformers as ppb
from transformers import BertForSequenceClassification, AdamW, BertConfig
from torch.utils.data import TensorDataset,DataLoader, RandomSampler, SequentialSampler
from keras.utils import to_categorical
import time
import datetime
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('agg')
import seaborn as sns
def format_time(elapsed):
elapsed_rounded = int(round((elapsed)))
return str(datetime.timedelta(seconds=elapsed_rounded))
def get_accuracy(preds, labels):
pred_flat = np.argmax(preds, axis=1).flatten()
labels_flat = labels.flatten()
return np.sum(pred_flat == labels_flat) / len(labels_flat)
def tokenize_data(tokenizer, sentences, max_len):
# Tokenize all of the sentences and map the tokens to thier word IDs.
input_ids = []
attention_masks = []
# For every sentence...
for sent in sentences:
# `encode_plus` will:
# (1) Tokenize the sentence.
# (2) Prepend the `[CLS]` token to the start.
# (3) Append the `[SEP]` token to the end.
# (4) Map tokens to their IDs.
# (5) Pad or truncate the sentence to `max_length`
# (6) Create attention masks for [PAD] tokens.
encoded_dict = tokenizer.encode_plus(
sent, # Sentence to encode.
add_special_tokens = True, # Add '[CLS]' and '[SEP]'
max_length = max_len, # Pad & truncate all sentences.
pad_to_max_length = True,
return_attention_mask = True, # Construct attn. masks.
return_tensors = 'pt', # Return pytorch tensors.
)
# Add the encoded sentence to the list.
input_ids.append(encoded_dict['input_ids'])
# And its attention mask (simply differentiates padding from non-padding).
attention_masks.append(encoded_dict['attention_mask'])
# Convert the lists into tensors.
return torch.cat(input_ids, dim=0), torch.cat(attention_masks, dim=0)
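# Illustrative helper (sketch, not called anywhere in this script): shows the expected
# inputs and outputs of tokenize_data. The checkpoint name is an assumption for demo purposes.
def demo_tokenize():
    demo_tokenizer = ppb.BertTokenizer.from_pretrained('bert-base-multilingual-cased')
    ids, masks = tokenize_data(demo_tokenizer, ["a toxic comment", "a friendly comment"], 16)
    print(ids.shape, masks.shape) # torch.Size([2, 16]) torch.Size([2, 16])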
if torch.cuda.is_available():
# Tell PyTorch to use the GPU.
device = torch.device("cuda")
print('There are %d GPU(s) available.' % torch.cuda.device_count())
print('We will use the GPU:', torch.cuda.get_device_name(0))
# If not...
else:
print('No GPU available, using the CPU instead.')
device = torch.device("cpu")
# data
train = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-toxic-comment-train.csv')
validation = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/validation.csv')
test =
|
pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/test.csv')
|
pandas.read_csv
|
"""
dataset = AbstractDataset()
"""
from collections import OrderedDict, defaultdict
import json
from pathlib import Path
import numpy as np
import pandas as pd
from tqdm import tqdm
import random
def make_perfect_forecast(prices, horizon):
prices = np.array(prices).reshape(-1, 1)
forecast = np.hstack([np.roll(prices, -i) for i in range(0, horizon)])
return forecast[:-(horizon-1), :]
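# Worked example (sketch): make_perfect_forecast([1, 2, 3, 4], horizon=2) stacks the price
# series with itself rolled forward one step ([1,2,3,4] and [2,3,4,1]) and drops the
# wrapped-around last row, giving
# array([[1, 2],
#        [2, 3],
#        [3, 4]])
# i.e. each row holds the next `horizon` prices known with perfect foresight.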
def load_episodes(path):
# pass in list of filepaths
if isinstance(path, list):
if isinstance(path[0], pd.DataFrame):
# list of dataframes?
return path
else:
# list of paths
episodes = [Path(p) for p in path]
print(f'loading {len(episodes)} from list')
csvs = [pd.read_csv(p, index_col=0) for p in tqdm(episodes) if p.suffix == '.csv']
parquets = [
|
pd.read_parquet(p)
|
pandas.read_parquet
|
import numpy as np
from pandas import DataFrame, Series
from scipy import stats
from Common.Measures.Portfolio.AbstractPortfolioMeasure import AbstractPortfolioMeasure
from Common.StockMarketIndex.AbstractStockMarketIndex import AbstractStockMarketIndex
class PortfolioLinearReg(AbstractPortfolioMeasure):
_index: AbstractStockMarketIndex
_alpha: float = -1.1
_beta: float = -1.1
_r_val: float = -1.1
_p_val: float = -1.1
_std_err: float = -1.1
def __init__(self, an_index: AbstractStockMarketIndex, portfolio_df_returns: DataFrame = DataFrame()):
self._index = an_index
index_returns: Series = an_index.Data.iloc[:, 0].pct_change() + 1
index_returns[np.isnan(index_returns)] = 1
nb_col: int = len(portfolio_df_returns.columns)
portfolio_returns: Series = portfolio_df_returns.iloc[:, 0:nb_col].sum(axis=1) / nb_col
(self._beta, self._alpha, self._r_val, self._p_val, self._std_err) = self._getValues(index_returns, portfolio_returns)
self._beta = round(self._beta, 5)
self._alpha = round(self._alpha, 5)
def _getValues(self, index_returns: Series = Series(), portfolio_returns: Series =
|
Series()
|
pandas.Series
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
# mixed floating and integer coexist in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values('a', axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float('nan')])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([('a', value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = (DataFrame(data)
.sort_values((11, 21))
.sort_values(('a', value), axis=1))
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(3, 1\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(2, 2\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
msg = ("If using all scalar "
"values, you must pass "
"an index")
with pytest.raises(ValueError, match=msg):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame({col: dict(compat.iteritems(val))
for col, val in compat.iteritems(data)})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame['B'].dtype == np.float64
assert frame['A'].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame['B'].dtype == np.object_
assert frame['A'].dtype == np.float64
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame['A'].dtype == np.object_
assert frame['B'].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
assert isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(result, expected, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
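# mixing a scalar key with tuple keys (in both columns and rows) falls back
# to a plain Index instead of a MultiIndex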
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
# list of periods
df = pd.DataFrame({'a': a.astype(object).tolist(),
'b': b.astype(object).tolist()})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
def test_nested_dict_frame_constructor(self):
rng = pd.period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
# mat: 2-D array with shape (2, 3) to pass in; empty is a factory that
# makes appropriately sized objects
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
assert len(frame.index) == 3
assert len(frame.columns) == 1
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, index=[1, 2])
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
assert len(frame.index) == 0
frame = DataFrame(empty((3, 0)))
assert len(frame.columns) == 0
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert 1.0 == frame['A'][1]
assert 2.0 == frame['C'][2]
# an all-masked frame is all-NaN, so it never compares equal to itself
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
assert frame.values.dtype == np.float64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'][1]
assert 2 == frame['C'][2]
# masked np.datetime64 stays (use NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert isna(frame).values.all()
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
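# the assigned integers become datetime64 values; compare through the i8 view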
assert 1 == frame['A'].view('i8')[1]
assert 2 == frame['C'].view('i8')[2]
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
assert frame.values.dtype == object
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert frame['A'][1] is True
assert frame['C'][2] is False
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
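# iterate over a copy (arrays[:]) because masked variants are appended to
# arrays inside the loop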
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill the masked arrays so the expected frame is built from concrete values
comb = {k: (v.filled() if hasattr(v, 'filled') else v)
for k, v in comb}
expected = DataFrame(comb, columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result, expected)
# specify columns
expected = DataFrame(comb, columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
expected = DataFrame(comb, columns=names, index=[1, 2])
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
@pytest.mark.parametrize("data, index, columns, dtype, expected", [
(None, lrange(10), ['a', 'b'], object, np.object_),
(None, None, ['a', 'b'], 'int64', np.dtype('int64')),
(None, lrange(10), ['a', 'b'], int, np.dtype('float64')),
({}, None, ['foo', 'bar'], None, np.object_),
({'b': 1}, lrange(10), list('abc'), int, np.dtype('float64'))
])
def test_constructor_dtype(self, data, index, columns, dtype, expected):
df = DataFrame(data, index, columns, dtype)
assert df.values.dtype == expected
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
assert df['int'].dtype == np.int64
assert df['bool'].dtype == np.bool_
assert df['float'].dtype == np.float64
assert df['complex'].dtype == np.complex128
assert df['object'].dtype == np.object_
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
tm.assert_frame_equal(df, exp)
with pytest.raises(ValueError, match='must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
tm.assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
assert dm.values.ndim == 2
arr = randn(0)
dm = DataFrame(arr)
assert dm.values.ndim == 2
assert dm.values.ndim == 2
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
assert dm.values.shape == (10, 2)
dm = DataFrame(columns=['A', 'B'])
assert dm.values.shape == (0, 2)
dm = DataFrame(index=np.arange(10))
assert dm.values.shape == (10, 0)
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with pytest.raises(ValueError, match='cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
'B': np.ones(10, dtype=np.float64)},
index=np.arange(10))
assert len(dm.columns) == 2
assert dm.values.dtype == np.float64
def test_constructor_empty_list(self):
df = DataFrame([], index=[])
expected = DataFrame(index=[])
tm.assert_frame_equal(df, expected)
# GH 9939
df = DataFrame([], columns=['A', 'B'])
expected = DataFrame({}, columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
# Empty generator: list(empty_gen()) == []
def empty_gen():
return
yield
df = DataFrame(empty_gen(), columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
df = DataFrame(data=[[1, 'a'], [2, 'b']], columns=["num", "str"])
assert is_integer_dtype(df['num'])
assert df['str'].dtype == np.object_
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({0: np.arange(10)})
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
# collections.Sequence-like
class DummyContainer(compat.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
def __len__(self):
return self._lst.__len__()
lst_containers = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]
columns = ["num", "str"]
result = DataFrame(lst_containers, columns=columns)
expected = DataFrame([[1, 'a'], [2, 'b']], columns=columns)
tm.assert_frame_equal(result, expected, check_dtype=False)
# GH 4297
# support array.array
import array
result = DataFrame({'A': array.array('i', range(10))})
expected = DataFrame({'A': list(range(10))})
tm.assert_frame_equal(result, expected, check_dtype=False)
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([array.array('i', range(10)),
array.array('i', range(10))])
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_iterable(self):
# GH 21987
class Iter():
def __iter__(self):
for i in range(10):
yield [1, 2, 3]
expected = DataFrame([[1, 2, 3]] * 10)
result = DataFrame(Iter())
tm.assert_frame_equal(result, expected)
def test_constructor_iterator(self):
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([range(10), range(10)])
tm.assert_frame_equal(result, expected)
def test_constructor_generator(self):
# related #2305
gen1 = (i for i in range(10))
gen2 = (i for i in range(10))
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([gen1, gen2])
tm.assert_frame_equal(result, expected)
gen = ([i, 'a'] for i in range(10))
result = DataFrame(gen)
expected = DataFrame({0: range(10), 1: 'a'})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_list_of_dicts(self):
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
result = DataFrame(data)
expected = DataFrame.from_dict(dict(zip(range(len(data)), data)),
orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result = DataFrame([{}])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_preserve_order(self):
# see gh-13304
expected = DataFrame([[2, 1]], columns=['b', 'a'])
data = OrderedDict()
data['b'] = [2]
data['a'] = [1]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
data = OrderedDict()
data['b'] = 2
data['a'] = 1
result = DataFrame([data])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_conflicting_orders(self):
# the first dict element sets the ordering for the DataFrame,
# even if there are conflicting orders from subsequent ones
row_one = OrderedDict()
row_one['b'] = 2
row_one['a'] = 1
row_two = OrderedDict()
row_two['a'] = 1
row_two['b'] = 2
row_three = {'b': 2, 'a': 1}
expected = DataFrame([[2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two])
tm.assert_frame_equal(result, expected)
expected = DataFrame([[2, 1], [2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two, row_three])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series(self):
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(['x', 'y'], data))
idx = Index(['a', 'b', 'c'])
# all named
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx, name='y')]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
# some unnamed
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
sdict = OrderedDict(zip(['x', 'Unnamed 0'], data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result.sort_index(), expected)
# none named
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
data = [Series(d) for d in data]
result = DataFrame(data)
sdict = OrderedDict(zip(range(len(data)), data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result2 = DataFrame(data, index=np.arange(6))
tm.assert_frame_equal(result, result2)
result = DataFrame([Series({})])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(range(len(data)), data))
idx = Index(['a', 'b', 'c'])
data2 = [Series([1.5, 3, 4], idx, dtype='O'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series_aligned_index(self):
series = [pd.Series(i, index=['b', 'a', 'c'], name=str(i))
for i in range(3)]
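# the Series names become the frame's index; the shared Series index
# becomes the columns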
result = pd.DataFrame(series)
expected = pd.DataFrame({'b': [0, 1, 2],
'a': [0, 1, 2],
'c': [0, 1, 2]},
columns=['b', 'a', 'c'],
index=['0', '1', '2'])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_derived_dicts(self):
class CustomDict(dict):
pass
d = {'a': 1.5, 'b': 3}
data_custom = [CustomDict(d)]
data = [d]
result_custom = DataFrame(data_custom)
result = DataFrame(data)
tm.assert_frame_equal(result, result_custom)
def test_constructor_ragged(self):
data = {'A': randn(10),
'B': randn(8)}
with pytest.raises(ValueError, match='arrays must all be same length'):
DataFrame(data)
def test_constructor_scalar(self):
idx = Index(lrange(3))
df = DataFrame({"a": 0}, index=idx)
expected = DataFrame({"a": [0, 0, 0]}, index=idx)
tm.assert_frame_equal(df, expected, check_dtype=False)
def test_constructor_Series_copy_bug(self):
df = DataFrame(self.frame['A'], index=self.frame.index, columns=['A'])
df.copy()
def test_constructor_mixed_dict_and_Series(self):
data = {}
data['A'] = {'foo': 1, 'bar': 2, 'baz': 3}
data['B'] = Series([4, 3, 2, 1], index=['bar', 'qux', 'baz', 'foo'])
result = DataFrame(data)
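# the index is the sorted union of the dict keys and the Series index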
assert result.index.is_monotonic
# ordering ambiguous, raise exception
with pytest.raises(ValueError, match='ambiguous ordering'):
DataFrame({'A': ['a', 'b'], 'B': {'a': 'a', 'b': 'b'}})
# this is OK though
result = DataFrame({'A': ['a', 'b'],
'B': Series(['a', 'b'], index=['a', 'b'])})
expected = DataFrame({'A': ['a', 'b'], 'B': ['a', 'b']},
index=['a', 'b'])
tm.assert_frame_equal(result, expected)
def test_constructor_tuples(self):
result = DataFrame({'A': [(1, 2), (3, 4)]})
expected = DataFrame({'A': Series([(1, 2), (3, 4)])})
tm.assert_frame_equal(result, expected)
def test_constructor_namedtuples(self):
# GH11181
from collections import namedtuple
named_tuple = namedtuple("Pandas", list('ab'))
tuples = [named_tuple(1, 3), named_tuple(2, 4)]
expected = DataFrame({'a': [1, 2], 'b': [3, 4]})
result = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
# with columns
expected = DataFrame({'y': [1, 2], 'z': [3, 4]})
result = DataFrame(tuples, columns=['y', 'z'])
tm.assert_frame_equal(result, expected)
def test_constructor_orient(self):
data_dict = self.mixed_frame.T._series
recons = DataFrame.from_dict(data_dict, orient='index')
expected = self.mixed_frame.sort_index()
tm.assert_frame_equal(recons, expected)
# dict of sequence
a = {'hi': [32, 3, 3],
'there': [3, 5, 3]}
rs = DataFrame.from_dict(a, orient='index')
xp = DataFrame.from_dict(a).T.reindex(list(a.keys()))
tm.assert_frame_equal(rs, xp)
def test_from_dict_columns_parameter(self):
# GH 18529
# Test new columns parameter for from_dict that was added to make
# from_items(..., orient='index', columns=[...]) easier to replicate
result = DataFrame.from_dict(OrderedDict([('A', [1, 2]),
('B', [4, 5])]),
orient='index', columns=['one', 'two'])
expected = DataFrame([[1, 2], [4, 5]], index=['A', 'B'],
columns=['one', 'two'])
tm.assert_frame_equal(result, expected)
msg = "cannot use columns parameter with orient='columns'"
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
orient='columns', columns=['one', 'two'])
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
columns=['one', 'two'])
def test_constructor_Series_named(self):
a = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
df = DataFrame(a)
assert df.columns[0] == 'x'
tm.assert_index_equal(df.index, a.index)
# ndarray like
arr = np.random.randn(10)
s = Series(arr, name='x')
df = DataFrame(s)
expected = DataFrame(dict(x=s))
tm.assert_frame_equal(df, expected)
s = Series(arr, index=range(3, 13))
df = DataFrame(s)
expected = DataFrame({0: s})
tm.assert_frame_equal(df, expected)
pytest.raises(ValueError, DataFrame, s, columns=[1, 2])
# #2234
a = Series([], name='x')
df = DataFrame(a)
assert df.columns[0] == 'x'
# series with name and w/o
s1 = Series(arr, name='x')
df = DataFrame([s1, arr]).T
expected = DataFrame({'x': s1, 'Unnamed 0': arr},
columns=['x', 'Unnamed 0'])
tm.assert_frame_equal(df, expected)
# this is a bit non-intuitive here; the series collapse down to arrays
df = DataFrame([arr, s1]).T
expected = DataFrame({1: s1, 0: arr}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
def test_constructor_Series_named_and_columns(self):
# GH 9232 validation
s0 = Series(range(5), name=0)
s1 = Series(range(5), name=1)
# matching name and column gives standard frame
tm.assert_frame_equal(pd.DataFrame(s0, columns=[0]),
s0.to_frame())
tm.assert_frame_equal(pd.DataFrame(s1, columns=[1]),
s1.to_frame())
# non-matching produces empty frame
assert pd.DataFrame(s0, columns=[1]).empty
assert pd.DataFrame(s1, columns=[0]).empty
def test_constructor_Series_differently_indexed(self):
# name
s1 = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
# no name
s2 = Series([1, 2, 3], index=['a', 'b', 'c'])
other_index = Index(['a', 'b'])
df1 = DataFrame(s1, index=other_index)
exp1 = DataFrame(s1.reindex(other_index))
assert df1.columns[0] == 'x'
tm.assert_frame_equal(df1, exp1)
df2 = DataFrame(s2, index=other_index)
exp2 = DataFrame(s2.reindex(other_index))
assert df2.columns[0] == 0
tm.assert_index_equal(df2.index, other_index)
tm.assert_frame_equal(df2, exp2)
def test_constructor_manager_resize(self):
index = list(self.frame.index[:5])
columns = list(self.frame.columns[:3])
result = DataFrame(self.frame._data, index=index,
columns=columns)
tm.assert_index_equal(result.index, Index(index))
tm.assert_index_equal(result.columns, Index(columns))
def test_constructor_from_items(self):
items = [(c, self.frame[c]) for c in self.frame.columns]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(items)
tm.assert_frame_equal(recons, self.frame)
# pass some columns
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(items, columns=['C', 'B', 'A'])
tm.assert_frame_equal(recons, self.frame.loc[:, ['C', 'B', 'A']])
# orient='index'
row_items = [(idx, self.mixed_frame.xs(idx))
for idx in self.mixed_frame.index]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
tm.assert_frame_equal(recons, self.mixed_frame)
assert recons['A'].dtype == np.float64
msg = "Must pass columns with orient='index'"
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items(row_items, orient='index')
# orient='index', but thar be tuples
arr = construct_1d_object_array_from_listlike(
[('bar', 'baz')] * len(self.mixed_frame))
self.mixed_frame['foo'] = arr
row_items = [(idx, list(self.mixed_frame.xs(idx)))
for idx in self.mixed_frame.index]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
tm.assert_frame_equal(recons, self.mixed_frame)
assert isinstance(recons['foo'][0], tuple)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
rs = DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
orient='index',
columns=['one', 'two', 'three'])
xp = DataFrame([[1, 2, 3], [4, 5, 6]], index=['A', 'B'],
columns=['one', 'two', 'three'])
tm.assert_frame_equal(rs, xp)
def test_constructor_from_items_scalars(self):
# GH 17312
msg = (r'The value in each \(key, value\) '
'pair must be an array, Series, or dict')
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', 1), ('B', 4)])
msg = (r'The value in each \(key, value\) '
'pair must be an array, Series, or dict')
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', 1), ('B', 2)], columns=['col1'],
orient='index')
def test_from_items_deprecation(self):
# GH 17320
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
columns=['col1', 'col2', 'col3'],
orient='index')
def test_constructor_mix_series_nonseries(self):
df = DataFrame({'A': self.frame['A'],
'B': list(self.frame['B'])}, columns=['A', 'B'])
tm.assert_frame_equal(df, self.frame.loc[:, ['A', 'B']])
msg = 'does not match index length'
with pytest.raises(ValueError, match=msg):
DataFrame({'A': self.frame['A'], 'B': list(self.frame['B'])[:-2]})
def test_constructor_miscast_na_int_dtype(self):
df = DataFrame([[np.nan, 1], [1, 0]], dtype=np.int64)
expected = DataFrame([[np.nan, 1], [1, 0]])
tm.assert_frame_equal(df, expected)
def test_constructor_column_duplicates(self):
# it works! #2079
df = DataFrame([[8, 5]], columns=['a', 'a'])
edf = DataFrame([[8, 5]])
edf.columns = ['a', 'a']
tm.assert_frame_equal(df, edf)
idf = DataFrame.from_records([(8, 5)],
columns=['a', 'a'])
tm.assert_frame_equal(idf, edf)
pytest.raises(ValueError, DataFrame.from_dict,
OrderedDict([('b', 8), ('a', 5), ('a', 6)]))
def test_constructor_empty_with_string_dtype(self):
# GH 9428
expected = DataFrame(index=[0, 1], columns=[0, 1], dtype=object)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=str)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.str_)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.unicode_)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype='U5')
tm.assert_frame_equal(df, expected)
def test_constructor_single_value(self):
# expecting single value upcasting here
df = DataFrame(0., index=[1, 2, 3], columns=['a', 'b', 'c'])
tm.assert_frame_equal(df,
DataFrame(np.zeros(df.shape).astype('float64'),
df.index, df.columns))
df = DataFrame(0, index=[1, 2, 3], columns=['a', 'b', 'c'])
tm.assert_frame_equal(df, DataFrame(np.zeros(df.shape).astype('int64'),
df.index, df.columns))
df = DataFrame('a', index=[1, 2], columns=['a', 'c'])
tm.assert_frame_equal(df, DataFrame(np.array([['a', 'a'], ['a', 'a']],
dtype=object),
index=[1, 2], columns=['a', 'c']))
pytest.raises(ValueError, DataFrame, 'a', [1, 2])
pytest.raises(ValueError, DataFrame, 'a', columns=['a', 'c'])
msg = 'incompatible data and dtype'
with pytest.raises(TypeError, match=msg):
DataFrame('a', [1, 2], ['a', 'c'], float)
def test_constructor_with_datetimes(self):
intname = np.dtype(np.int_).name
floatname = np.dtype(np.float_).name
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
# single item
df = DataFrame({'A': 1, 'B': 'foo', 'C': 'bar',
'D': Timestamp("20010101"),
'E': datetime(2001, 1, 2, 0, 0)},
index=np.arange(10))
result = df.get_dtype_counts()
expected = Series({'int64': 1, datetime64name: 2, objectname: 2})
result.sort_index()
expected.sort_index()
tm.assert_series_equal(result, expected)
# check with ndarray construction ndim==0 (e.g. we are passing a ndim 0
# ndarray with a dtype specified)
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
floatname: np.array(1., dtype=floatname),
intname: np.array(1, dtype=intname)},
index=np.arange(10))
result = df.get_dtype_counts()
expected = {objectname: 1}
if intname == 'int64':
expected['int64'] = 2
else:
expected['int64'] = 1
expected[intname] = 1
if floatname == 'float64':
expected['float64'] = 2
else:
expected['float64'] = 1
expected[floatname] = 1
result = result.sort_index()
expected = Series(expected).sort_index()
tm.assert_series_equal(result, expected)
# check with ndarray construction ndim>0
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
floatname: np.array([1.] * 10, dtype=floatname),
intname: np.array([1] * 10, dtype=intname)},
index=np.arange(10))
result = df.get_dtype_counts()
result = result.sort_index()
tm.assert_series_equal(result, expected)
# GH 2809
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
datetime_s = Series(datetimes)
assert datetime_s.dtype == 'M8[ns]'
df = DataFrame({'datetime_s': datetime_s})
result = df.get_dtype_counts()
expected = Series({datetime64name: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
# GH 2810
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
dates = [ts.date() for ts in ind]
df = DataFrame({'datetimes': datetimes, 'dates': dates})
result = df.get_dtype_counts()
expected = Series({datetime64name: 1, objectname: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
# GH 7594
# don't coerce tz-aware
import pytz
tz = pytz.timezone('US/Eastern')
dt = tz.localize(datetime(2012, 1, 1))
df = DataFrame({'End Date': dt}, index=[0])
assert df.iat[0, 0] == dt
tm.assert_series_equal(df.dtypes, Series(
{'End Date': 'datetime64[ns, US/Eastern]'}))
df = DataFrame([{'End Date': dt}])
assert df.iat[0, 0] == dt
tm.assert_series_equal(df.dtypes, Series(
{'End Date': 'datetime64[ns, US/Eastern]'}))
# tz-aware (UTC and other tz's)
# GH 8411
dr = date_range('20130101', periods=3)
df = DataFrame({'value': dr})
assert df.iat[0, 0].tz is None
dr = date_range('20130101', periods=3, tz='UTC')
df = DataFrame({'value': dr})
assert str(df.iat[0, 0].tz) == 'UTC'
dr = date_range('20130101', periods=3, tz='US/Eastern')
df = DataFrame({'value': dr})
assert str(df.iat[0, 0].tz) == 'US/Eastern'
# GH 7822
# preserve an index with a tz on dict construction
i = date_range('1/1/2011', periods=5, freq='10s', tz='US/Eastern')
expected = DataFrame(
{'a': i.to_series(keep_tz=True).reset_index(drop=True)})
df = DataFrame()
df['a'] = i
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': i})
tm.assert_frame_equal(df, expected)
# multiples
i_no_tz = date_range('1/1/2011', periods=5, freq='10s')
df = DataFrame({'a': i, 'b': i_no_tz})
expected = DataFrame({'a': i.to_series(keep_tz=True)
.reset_index(drop=True), 'b': i_no_tz})
tm.assert_frame_equal(df, expected)
def test_constructor_datetimes_with_nulls(self):
# gh-15869
for arr in [np.array([None, None, None, None,
datetime.now(), None]),
np.array([None, None, datetime.now(), None])]:
result = DataFrame(arr).get_dtype_counts()
expected = Series({'datetime64[ns]': 1})
tm.assert_series_equal(result, expected)
def test_constructor_for_list_with_dtypes(self):
# TODO(wesm): unused
intname = np.dtype(np.int_).name # noqa
floatname = np.dtype(np.float_).name # noqa
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
# test list of lists/ndarrays
df = DataFrame([np.arange(5) for x in range(5)])
result = df.get_dtype_counts()
expected = Series({'int64': 5})
df = DataFrame([np.array(np.arange(5), dtype='int32')
for x in range(5)])
result = df.get_dtype_counts()
expected = Series({'int32': 5})
# overflow issue? (we always expect int64 upcasting here)
df = DataFrame({'a': [2 ** 31, 2 ** 31 + 1]})
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
# GH #2751 (construction with no index specified), make sure we cast to
# platform values
df = DataFrame([1, 2])
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame([1., 2.])
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': [1, 2]})
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': [1., 2.]})
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': 1}, index=lrange(3))
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': 1.}, index=lrange(3))
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
# with object list
df = DataFrame({'a': [1, 2, 4, 7], 'b': [1.2, 2.3, 5.1, 6.3],
'c': list('abcd'),
'd': [datetime(2000, 1, 1) for i in range(4)],
'e': [1., 2, 4., 7]})
result = df.get_dtype_counts()
expected = Series(
{'int64': 1, 'float64': 2, datetime64name: 1, objectname: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_constructor_frame_copy(self):
cop = DataFrame(self.frame, copy=True)
cop['A'] = 5
assert (cop['A'] == 5).all()
assert not (self.frame['A'] == 5).all()
def test_constructor_ndarray_copy(self):
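# without copy=True the frame can share memory with the passed ndarray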
df = DataFrame(self.frame.values)
self.frame.values[5] = 5
assert (df.values[5] == 5).all()
df = DataFrame(self.frame.values, copy=True)
self.frame.values[6] = 6
assert not (df.values[6] == 6).all()
def test_constructor_series_copy(self):
series = self.frame._series
df = DataFrame({'A': series['A']})
df['A'][:] = 5
assert not (series['A'] == 5).all()
def test_constructor_with_nas(self):
# GH 5016
# na's in indices
def check(df):
for i in range(len(df.columns)):
df.iloc[:, i]
indexer = np.arange(len(df.columns))[isna(df.columns)]
# No NaN found -> error
if len(indexer) == 0:
def f():
df.loc[:, np.nan]
pytest.raises(TypeError, f)
# single nan should result in Series
elif len(indexer) == 1:
tm.assert_series_equal(df.iloc[:, indexer[0]],
df.loc[:, np.nan])
# multiple nans should result in DataFrame
else:
tm.assert_frame_equal(df.iloc[:, indexer],
df.loc[:, np.nan])
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[1, np.nan])
check(df)
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[1.1, 2.2, np.nan])
check(df)
df = DataFrame([[0, 1, 2, 3], [4, 5, 6, 7]],
columns=[np.nan, 1.1, 2.2, np.nan])
check(df)
df = DataFrame([[0.0, 1, 2, 3.0], [4, 5, 6, 7]],
columns=[np.nan, 1.1, 2.2, np.nan])
check(df)
# GH 21428 (non-unique columns)
df = DataFrame([[0.0, 1, 2, 3.0], [4, 5, 6, 7]],
columns=[np.nan, 1, 2, 2])
check(df)
def test_constructor_lists_to_object_dtype(self):
# from #1074
d = DataFrame({'a': [np.nan, False]})
assert d['a'].dtype == np.object_
assert not d['a'][1]
def test_constructor_categorical(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# to_frame
s = Series(list('abc'), dtype='category')
result = s.to_frame()
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name='foo')
expected = Series(list('abc'), dtype='category', name='foo')
tm.assert_series_equal(result['foo'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# ndim != 1
df = DataFrame([Categorical(list('abc'))])
expected = DataFrame({0: Series(list('abc'), dtype='category')})
tm.assert_frame_equal(df, expected)
from distutils.version import LooseVersion
from warnings import catch_warnings
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
import pandas as pd
from pandas import (
DataFrame,
HDFStore,
Index,
MultiIndex,
Series,
_testing as tm,
bdate_range,
concat,
date_range,
isna,
read_hdf,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
tables,
)
from pandas.io.pytables import Term
pytestmark = pytest.mark.single
def test_select_columns_in_where(setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_select_with_dups(setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
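# compare by internal blocks; duplicate column labels make column-wise
# comparison ambiguous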
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_select(setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select("df", [("columns=['A', 'B']")])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# all a data columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["C", "D"])
expected = df[df.A > 0].reindex(columns=["C", "D"])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(setup_path):
with ensure_clean_store(setup_path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(
{
"ts": bdate_range("2012-01-01", periods=300),
"A": np.random.randn(300),
}
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A"])
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=["A", "B"])
df["object"] = "foo"
df.loc[4:5, "object"] = "bar"
df["boolv"] = df["A"] > 0
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa
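# True can be spelled as a bool, the string 'true', or the integer 1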
for v in [True, "true", 1]:
result = store.select("df", f"boolv == {v}", columns=["A", "boolv"])
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa
for v in [False, "false", 0]:
result = store.select("df", f"boolv == {v}", columns=["A", "boolv"])
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame({"A": np.random.rand(20), "B": np.random.rand(20)})
_maybe_remove(store, "df_int")
store.append("df_int", df)
result = store.select("df_int", "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(
{
"A": np.random.rand(20),
"B": np.random.rand(20),
"index": np.arange(20, dtype="f8"),
}
)
_maybe_remove(store, "df_float")
store.append("df_float", df)
result = store.select("df_float", "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(setup_path) as store:
# floats w/o NaN
df = DataFrame({"cols": range(11), "values": range(11)}, dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
store.append("df1", df, data_columns=True)
result = store.select("df1", where="values>2.0")
expected = df[df["values"] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df["values"] > 2.0]
store.append("df2", df, data_columns=True, index=False)
result = store.select("df2", where="values>2.0")
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
# not in first position float with NaN ok too
df = DataFrame({"cols": range(11), "values": range(11)}, dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df["values"] > 2.0]
store.append("df4", df, data_columns=True)
result = store.select("df4", where="values>2.0")
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
expected = df[df["A"] > 0]
store.append("df", df, data_columns=True)
np_zero = np.float64(0) # noqa
result = store.select("df", where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"ts": bdate_range("2012-01-01", periods=300),
"A": np.random.randn(300),
"B": range(300),
"users": ["a"] * 50
+ ["b"] * 50
+ ["c"] * 100
+ [f"a{i:03d}" for i in range(100)],
}
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A", "B", "users"])
# regular select
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select("df", "ts>=Timestamp('2012-02-01') & users=['a','b','c']")
expected = df[
(df.ts >= Timestamp("2012-02-01")) & df.users.isin(["a", "b", "c"])
]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = ["a", "b", "c"] + [f"a{i:03d}" for i in range(60)]
result = store.select("df", "ts>=Timestamp('2012-02-01') and users=selector")
expected = df[(df.ts >= Timestamp("2012-02-01")) & df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
result = store.select("df", "B=selector")
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select("df", "ts=selector")
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
def test_select_iterator(setup_path):
# single table
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, "df")
store.append("df", df)
expected = store.select("df")
results = list(store.select("df", iterator=True))
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=100))
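# 500 rows with chunksize=100 -> 5 chunks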
assert len(results) == 5
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=150))
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df_non_table")
msg = "can only use an iterator or chunksize on a table"
with pytest.raises(TypeError, match=msg):
read_hdf(path, "df_non_table", chunksize=100)
with pytest.raises(TypeError, match=msg):
read_hdf(path, "df_non_table", iterator=True)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df", format="table")
results = list(read_hdf(path, "df", chunksize=100))
result = concat(results)
assert len(results) == 5
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path, "df"))
# multiple
with ensure_clean_store(setup_path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append("df1", df1, data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(columns="{}_2".format)
df2["foo"] = "bar"
store.append("df2", df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(["df1", "df2"], selector="df1")
results = list(
store.select_as_multiple(["df1", "df2"], selector="df1", chunksize=150)
)
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_complete_8014(setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# no iterator
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/o iteration and no where clause works
result = store.select("df")
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, begin
# of range, works
where = f"index >= '{beg_dt}'"
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, end
# of range, works
where = f"index <= '{end_dt}'"
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, inclusive range,
# works
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# with iterator, full range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/iterator and no where clause works
results = list(store.select("df", chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, begin of range
where = f"index >= '{beg_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, end of range
where = f"index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, inclusive range
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_non_complete_8014(setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# with iterator, non complete range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[1]
end_dt = expected.index[-2]
# select w/iterator and where clause, single term, begin of range
where = f"index >= '{beg_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = f"index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[(expected.index >= beg_dt) & (expected.index <= end_dt)]
tm.assert_frame_equal(rexpected, result)
# with iterator, empty where
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
end_dt = expected.index[-1]
# select w/iterator and where clause, single term, begin of range
where = f"index > '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
assert 0 == len(results)
def test_select_iterator_many_empty_frames(setup_path):
# GH 8014
# using iterator and where clause can return many empty
# frames.
chunksize = 10_000
# with iterator, range limited to the first chunk
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100000, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[chunksize - 1]
# select w/iterator and where clause, single term, begin of range
where = f"index >= '{beg_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = f"index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
assert len(results) == 1
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
# the inclusive range should fit in exactly one chunk
assert len(results) == 1
result = concat(results)
rexpected = expected[(expected.index >= beg_dt) & (expected.index <= end_dt)]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause which selects
# *nothing*.
#
# To be consistent with Python idiom I suggest this should
# return [] e.g. `for e in []: print True` never prints
# True.
where = f"index <= '{beg_dt}' & index >= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
# should be []
assert len(results) == 0
def test_frame_select(setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
date = df.index[len(df) // 2]
crit1 = Term("index>=date")
assert crit1.env.scope["date"] == date
crit2 = "columns=['A', 'D']"
crit3 = "columns=A"
result = store.select("frame", [crit1, crit2])
expected = df.loc[date:, ["A", "D"]]
tm.assert_frame_equal(result, expected)
result = store.select("frame", [crit3])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# invalid terms
df = tm.makeTimeDataFrame()
store.append("df_time", df)
msg = "could not convert string to Timestamp"
with pytest.raises(ValueError, match=msg):
store.select("df_time", "index>0")
# can't select if not written as table
# store['frame'] = df
# with pytest.raises(ValueError):
# store.select('frame', [crit1, crit2])
def test_frame_select_complex(setup_path):
# select via complex criteria
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[df.index[0:4], "string"] = "bar"
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table", data_columns=["string"])
# empty
result = store.select("df", 'index>df.index[3] & string="bar"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select("df", 'index>df.index[3] & string="foo"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "foo")]
tm.assert_frame_equal(result, expected)
# or
result = store.select("df", 'index>df.index[3] | string="bar"')
expected = df.loc[(df.index > df.index[3]) | (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select(
"df", '(index>df.index[3] & index<=df.index[6]) | string="bar"'
)
expected = df.loc[
((df.index > df.index[3]) & (df.index <= df.index[6]))
| (df.string == "bar")
]
tm.assert_frame_equal(result, expected)
# invert
result = store.select("df", 'string!="bar"')
expected = df.loc[df.string != "bar"]
tm.assert_frame_equal(result, expected)
# invert not implemented in numexpr :(
msg = "cannot use an invert condition when passing to numexpr"
with pytest.raises(NotImplementedError, match=msg):
store.select("df", '~(string="bar")')
# invert ok for filters
result = store.select("df", "~(columns=['A','B'])")
expected = df.loc[:, df.columns.difference(["A", "B"])]
tm.assert_frame_equal(result, expected)
# in
result = store.select("df", "index>df.index[3] & columns in ['A','B']")
expected = df.loc[df.index > df.index[3]].reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_frame_select_complex2(setup_path):
with ensure_clean_path(["parms.hdf", "hist.hdf"]) as paths:
pp, hh = paths
# use non-trivial selection criteria
parms = DataFrame({"A": [1, 1, 2, 2, 3]})
parms.to_hdf(pp, "df", mode="w", format="table", data_columns=["A"])
selection = read_hdf(pp, "df", where="A=[2,3]")
hist = DataFrame(
np.random.randn(25, 1),
columns=["data"],
index=MultiIndex.from_tuples(
[(i, j) for i in range(5) for j in range(5)], names=["l1", "l2"]
),
)
hist.to_hdf(hh, "df", mode="w", format="table")
expected = read_hdf(hh, "df", where="l1=[2, 3, 4]")
# scope with list like
l = selection.index.tolist() # noqa
store = HDFStore(hh)
result = store.select("df", where="l1=l")
tm.assert_frame_equal(result, expected)
store.close()
result = read_hdf(hh, "df", where="l1=l")
tm.assert_frame_equal(result, expected)
# index
index = selection.index # noqa
result = read_hdf(hh, "df", where="l1=index")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=selection.index")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=selection.index.tolist()")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=list(selection.index)")
tm.assert_frame_equal(result, expected)
# scope with index
store = HDFStore(hh)
result = store.select("df", where="l1=index")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=selection.index")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=selection.index.tolist()")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=list(selection.index)")
tm.assert_frame_equal(result, expected)
store.close()
def test_invalid_filtering(setup_path):
# can't use more than one filter (atm)
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
msg = "unable to collapse Joint Filters"
# not implemented
with pytest.raises(NotImplementedError, match=msg):
store.select("df", "columns=['A'] | columns=['B']")
# in theory we could deal with this
with pytest.raises(NotImplementedError, match=msg):
store.select("df", "columns=['A','B'] & columns=['C']")
def test_string_select(setup_path):
# GH 2973
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
# test string ==/!=
df["x"] = "none"
df.loc[df.index[2:7], "x"] = ""
store.append("df", df, data_columns=["x"])
result = store.select("df", "x=none")
expected = df[df.x == "none"]
tm.assert_frame_equal(result, expected)
result = store.select("df", "x!=none")
expected = df[df.x != "none"]
tm.assert_frame_equal(result, expected)
df2 = df.copy()
df2.loc[df2.x == "", "x"] = np.nan
store.append("df2", df2, data_columns=["x"])
result = store.select("df2", "x!=none")
expected = df2[isna(df2.x)]
tm.assert_frame_equal(result, expected)
# int ==/!=
df["int"] = 1
df.loc[df.index[2:7], "int"] = 2
store.append("df3", df, data_columns=["int"])
result = store.select("df3", "int=2")
expected = df[df.int == 2]
tm.assert_frame_equal(result, expected)
result = store.select("df3", "int!=2")
expected = df[df.int != 2]
tm.assert_frame_equal(result, expected)
def test_select_as_multiple(setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df2["foo"] = "bar"
with
|
ensure_clean_store(setup_path)
|
pandas.tests.io.pytables.common.ensure_clean_store
|
import pandas as pd
df1 = pd.DataFrame({
'student_id': ['S1', 'S2', 'S3'],
'name': ['Jan', 'Christian', 'Marius'],
'marks': [100, 90, 80]
})
df2 = pd.DataFrame({
'student_id': ['S4', 'S5', 'S6'],
'name': ['Marco', 'Anne', 'Dennis'],
'marks': [95, 100, 90]
})
print(df1)
print(df2)
print(
|
pd.concat([df1, df2], axis=1)
|
pandas.concat
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 20 09:34:37 2018
@author: SilverDoe
"""
'''
========================== DataFrame ==========================================
pandas.DataFrame( data, index, columns, dtype, copy)
Parameters :
============
1. data : data takes various forms like ndarray, series, map, lists, dict, constants
and also another DataFrame.
2. index : For the row labels, the Index to be used for the resulting frame.
Optional; defaults to np.arange(n) if no index is passed.
3. columns : For column labels, the optional default is np.arange(n).
This is only true if no column labels are passed.
4. dtype : Data type of each column.
5. copy : Whether to copy data from the inputs. Defaults to False
(a short sketch of this parameter follows below).
'''
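#=============== Note on the 'copy' parameter ====================================
# The examples below exercise data, index, columns and dtype but not 'copy'.
# A minimal, hedged sketch of what 'copy' controls (assumes a plain homogeneous
# ndarray; behaviour may differ under copy-on-write in newer pandas versions).
import pandas as pd
import numpy as np
arr = np.array([[1, 2], [3, 4]])
df_view = pd.DataFrame(arr, copy=False)  # may share memory with arr
df_copy = pd.DataFrame(arr, copy=True)   # always owns its own data
arr[0, 0] = 99
print(df_view.iloc[0, 0])  # typically reflects the change (99)
print(df_copy.iloc[0, 0])  # still 1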
#=============== Empty DataFrame =================================================
import pandas as pd
df = pd.DataFrame()
print(df)
#============= DataFrame from Lists ============================================
# no index passed, no column names given
import pandas as pd
data = [1,2,3,4,5]
df = pd.DataFrame(data)
print(df)
# no index passed, column names given
import pandas as pd
data = [['Natsu',13],['Lisanna',9],['Happy',1]]
df = pd.DataFrame(data,columns=['Name','Age'])
print(df)
# no index passed, column names given, datatype passed
import pandas as pd
data = [['Natsu',13],['Lisanna',8],['Happy',1]]
df = pd.DataFrame(data,columns=['Name','Age'],dtype=float)
print(df)
#========== Dataframe from Dictionary of ndarrays/lists ============================================
'''
>> All the ndarrays must be of the same length. If an index is passed, then the length
of the index should equal the length of the arrays.
>> To preserve the order of the columns (a short sketch follows below):
1. use an ordered dictionary, since plain dictionaries may not preserve insertion order.
2. use the columns argument while creating the dataframe.
3. reorder the columns afterwards by using df = df[list of column names in the order you want]
'''
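# A short sketch of options 2 and 3 above for controlling column order; the
# 'Name'/'Age' columns are only illustrative.
import pandas as pd
data = {'Age': [15, 20], 'Name': ['Lisanna', 'Natsu']}
df = pd.DataFrame(data, columns=['Name', 'Age'])  # option 2: fix the order at creation
print(df)
df = pd.DataFrame(data)
df = df[['Name', 'Age']]  # option 3: reorder afterwards by selecting columns in the desired order
print(df)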
# using arrays, No index given.
import pandas as pd
data = {'Name':['Lisanna', 'Natsu', 'Erza', 'Gray'],'Age':[15,20,23,20]}
df = pd.DataFrame(data)
print(df)
# using arrays, Index given.
import pandas as pd
data = {'Name':['Lisanna', 'Natsu', 'Erza', 'Gray'],'Age':[15,20,23,20]}
df = pd.DataFrame(data, index=['rank1','rank2','rank3','rank4'])
print(df)
'''
>> A list of dictionaries can be passed as input data to create a DataFrame. The
dictionary keys are by default taken as column names.
>> NaN (Not a Number) is filled in where a key is present in some dictionaries but missing in others.
'''
# using lists, no index given
import pandas as pd
data = [{'a': 1, 'b': 2},{'a': 5, 'b': 10, 'c': 20}]
df = pd.DataFrame(data)
print(df)
# using lists,index given
import pandas as pd
data = [{'a': 1, 'b': 2},{'a': 5, 'b': 10, 'c': 20}]
df =
|
pd.DataFrame(data, index=['first', 'second'])
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
from sklearn import preprocessing
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--cell_line', nargs=1, type=str, help='cell line to run on')
parser.add_argument('--name', nargs=1, type=str, help='name of dataset')
args = parser.parse_args()
cl = args.cell_line[0]
name = args.name[0]
network = np.load('/gpfs_home/spate116/data/spate116/GCN/%s/%s_GRN_1_STD.npy' % (cl, name), allow_pickle=True)
import networkx as nx
G = nx.DiGraph()
for target in network:
for index, row in target[1].iterrows():
G.add_edge(row['TF'], row['target'], weight=row['importance'])
RPKM = pd.read_csv('/gpfs_home/spate116/data/spate116/GCN/57epigenomes.RPKM.pc', sep='\t', header=0)
columns = RPKM.columns[1:len(RPKM.columns)]
RPKM = RPKM.drop('E128', axis=1)
RPKM = RPKM.set_axis(columns, axis=1)
x = RPKM.values #returns a numpy array
robust_scaler = preprocessing.RobustScaler()
x_scaled = robust_scaler.fit_transform(x)
RPKM =
|
pd.DataFrame(x_scaled, columns=RPKM.columns, index=RPKM.index)
|
pandas.DataFrame
|
import json
import pandas as pd
import pytest
from pandas_profiling import ProfileReport
@pytest.fixture
def data():
return
|
pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
|
pandas.DataFrame
|
#!/usr/bin/env python3
import unittest
import os
import pathlib
import pandas as pd
import matplotlib.pyplot as plt
import logging
from neuralprophet import NeuralProphet
log = logging.getLogger("nprophet.test")
log.setLevel("WARNING")
log.parent.setLevel("WARNING")
DIR = pathlib.Path(__file__).parent.parent.absolute()
DATA_DIR = os.path.join(DIR, "example_data")
PEYTON_FILE = os.path.join(DATA_DIR, "wp_log_peyton_manning.csv")
AIR_FILE = os.path.join(DATA_DIR, "air_passengers.csv")
EPOCHS = 5
class IntegrationTests(unittest.TestCase):
plot = False
def test_names(self):
log.info("testing: names")
m = NeuralProphet()
m._validate_column_name("hello_friend")
def test_train_eval_test(self):
log.info("testing: Train Eval Test")
m = NeuralProphet(
n_lags=14,
n_forecasts=7,
ar_sparsity=0.1,
epochs=EPOCHS,
)
df = pd.read_csv(PEYTON_FILE)
df_train, df_test = m.split_df(df, valid_p=0.1, inputs_overbleed=True)
metrics = m.fit(df_train, freq="D", validate_each_epoch=True, valid_p=0.1)
val_metrics = m.test(df_test)
log.debug("Metrics: train/eval: \n {}".format(metrics.to_string(float_format=lambda x: "{:6.3f}".format(x))))
log.debug("Metrics: test: \n {}".format(val_metrics.to_string(float_format=lambda x: "{:6.3f}".format(x))))
def test_trend(self):
log.info("testing: Trend")
df = pd.read_csv(PEYTON_FILE)
m = NeuralProphet(
growth="linear",
n_changepoints=100,
changepoints_range=0.9,
trend_reg=1,
trend_reg_threshold=False,
yearly_seasonality=False,
weekly_seasonality=False,
daily_seasonality=False,
epochs=EPOCHS,
)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, periods=60, n_historic_predictions=len(df))
forecast = m.predict(df=future)
if self.plot:
m.plot(forecast)
# m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_no_trend(self):
log.info("testing: No-Trend")
df =
|
pd.read_csv(PEYTON_FILE)
|
pandas.read_csv
|
#!/usr/bin/env python
# coding: utf-8
# ## Phase 1: Clean-up of CRDC data files. This notebook cleans and merges the 8 individual CRDC files for school characteristics, school support, school expenditures, AP, IB, SAT_ACT, Algebra 1 and enrollment into two files: one for reading and the other for math.
# ### Loading necessary libraries
# In[298]:
import pandas
pandas.__version__
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().run_line_magic('matplotlib', 'inline')
# In[299]:
cd /Users/dansa/Documents/GitHub/Phase1/Data/CRDC
# ### 1. Cleaning school characteristics file
# In[300]:
Sch_char = pandas.read_csv("School Characteristics.csv",encoding='cp1252')
Sch_char.head()
# In[301]:
Sch_char['SCHID'] = Sch_char['SCHID'].apply(lambda x: '{0:0>5}'.format(x))
# In[302]:
Sch_char['LEAID'] = Sch_char['LEAID'].apply(lambda x: '{0:0>7}'.format(x))
# In[303]:
Sch_char.columns
# #### Dropping unnecessary columns
# In[304]:
Sch_char.drop(Sch_char.columns[[7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,30,31]], axis=1, inplace=True)
# In[305]:
Sch_char.shape
# In[306]:
#Sch_char.head()
# ##### Since we do not have NCESSCH ID we can impute it using the LEAID and SCHID.
# ##### Note: Unique NCES public school ID is generated based on the (7-digit NCES agency ID (LEAID) + 5-digit NCES school ID (SCHID). See https://nces.ed.gov/ccd/data/txt/psu10play.txt for more info
# In[307]:
cols = ['LEAID', 'SCHID']
Sch_char['NCESSCH'] = Sch_char[cols].apply(lambda row: ''.join(row.values.astype(str)), axis=1)
# In[308]:
Sch_char['NCESSCH'].is_unique
# #### Renaming columns
# In[309]:
Sch_char.rename(columns={'SCH_STATUS_SPED':'Special_ed_schl','SCH_STATUS_MAGNET':'Magnet_schl','SCH_STATUS_CHARTER':'Charter_Schl','SCH_STATUS_ALT':'Alternate_schl'}, inplace=True)
# In[310]:
Sch_char.head()
# In[311]:
count = Sch_char['Charter_Schl'].value_counts()
print(count)
# ##### Recoding string Y/N values to integers 1/0
# In[312]:
Sch_char['Special_ed_schl_new'] = Sch_char['Special_ed_schl'].replace(['Yes','No'],['1','0'])
# In[313]:
Sch_char['Magnet_schl_new'] = Sch_char['Magnet_schl'].replace(['Yes','No'],['1','0'])
# In[314]:
Sch_char['Charter_Schl_new'] = Sch_char['Charter_Schl'].replace(['Yes','No'],['1','0'])
# In[315]:
Sch_char['Alternate_schl_new'] = Sch_char['Alternate_schl'].replace(['Yes','No'],['1','0'])
# In[316]:
Sch_char[['Special_ed_schl_new', 'Magnet_schl_new','Charter_Schl_new','Alternate_schl_new']] = Sch_char[['Special_ed_schl_new', 'Magnet_schl_new','Charter_Schl_new','Alternate_schl_new']].astype(int)
# #### Checking for missing or null values
# In[317]:
sns.heatmap(Sch_char.isnull(),yticklabels=False,cbar=True,cmap='viridis')
# In[318]:
Sch_char.describe()
# In[319]:
Sch_char.to_csv (r'/Users/dansa/Documents/GitHub/Phase1/Data/CRDC/Clean_crdc_schlcharacteristics.csv', index = False, header=True)
# ### 2. Cleaning school expenditure file
# In[320]:
Sch_exp = pandas.read_csv("School Expenditures.csv", encoding='cp1252')
Sch_exp.tail()
# In[321]:
Sch_exp['SCHID'] = Sch_exp['SCHID'].apply(lambda x: '{0:0>5}'.format(x))
# In[322]:
Sch_exp['LEAID'] = Sch_exp['LEAID'].apply(lambda x: '{0:0>7}'.format(x))
# In[323]:
Sch_exp.columns
# In[324]:
Sch_exp.drop(Sch_exp.columns[[7,8,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26]], axis=1, inplace=True)
# In[325]:
Sch_exp.head()
# ##### Since we do not have NCESSCH ID we can impute it using the LEAID and SCHID.
# In[326]:
cols = ['LEAID', 'SCHID']
Sch_exp['NCESSCH'] = Sch_exp[cols].apply(lambda row: ''.join(row.values.astype(str)), axis=1)
# In[327]:
Sch_exp.shape
# In[328]:
Sch_exp['NCESSCH'].is_unique
# ##### Renaming columns
# In[329]:
Sch_exp.rename(columns={'SCH_FTE_TEACH_WOFED':'FTE_teachers_count','SCH_SAL_TEACH_WOFED':'SalaryforTeachers'}, inplace=True)
# In[330]:
Sch_exp.head()
# In[331]:
#Sch_exp['Teacher_salary_ratio'] = (Sch_exp['SalaryforTeachers'] / Sch_exp['FTE_teachers_count'])
# #### Checking for missing or null values
# In[332]:
sns.heatmap(Sch_exp.isnull(),yticklabels=False,cbar=True,cmap='viridis')
# In[333]:
Sch_exp.describe()
# #### Dropping columns with less than zero FTE Teacher counts and Salary expenditures
# In[334]:
Sch_expTC= Sch_exp[Sch_exp.FTE_teachers_count > 0]
# In[335]:
Sch_exp_clean= Sch_expTC[Sch_expTC.SalaryforTeachers > 0]
# In[336]:
Sch_exp_clean.shape
# In[337]:
Sch_exp_clean.describe()
# In[338]:
Sch_exp_clean.head()
# In[339]:
Sch_exp_clean.hist()
# In[340]:
Sch_exp_clean.to_csv (r'/Users/dansa/Documents/GitHub/Phase1/Data/CRDC/Clean_crdc_schlexpenses.csv', index = False, header=True)
# ### 3. Cleaning school support file
# In[341]:
Sch_sup= pandas.read_csv("School Support.csv",encoding='cp1252')
Sch_sup.head()
# In[342]:
Sch_sup['SCHID'] = Sch_sup['SCHID'].apply(lambda x: '{0:0>5}'.format(x))
# In[343]:
Sch_sup['LEAID'] = Sch_sup['LEAID'].apply(lambda x: '{0:0>7}'.format(x))
# In[344]:
Sch_sup.columns
# In[345]:
Sch_sup.head()
# ##### Dropping irrelevant columns
# In[346]:
Sch_sup.drop(Sch_sup.columns[[7,11,12,13,14,15,16,17,18,19,20,21]], axis=1, inplace=True)
# In[347]:
Sch_sup.head()
# ##### Since we do not have NCESSCH ID we can impute it using the LEAID and SCHID.
# In[348]:
cols = ['LEAID', 'SCHID']
Sch_sup['NCESSCH'] = Sch_sup[cols].apply(lambda row: ''.join(row.values.astype(str)), axis=1)
# In[349]:
Sch_sup.shape
# In[350]:
Sch_sup['NCESSCH'].is_unique
# #### Checking for missing or null values
# In[351]:
sns.heatmap(Sch_sup.isnull(),yticklabels=False,cbar=True,cmap='viridis')
# In[352]:
Sch_sup.describe()
# #### Filtering FTE count greater than 1 and Cert count greater than -5
# In[353]:
Sch_sup_FTEGT1= Sch_sup[Sch_sup.SCH_FTETEACH_TOT > 1]
# In[354]:
Sch_sup_clean= Sch_sup_FTEGT1[Sch_sup_FTEGT1.SCH_FTETEACH_CERT > -5]
# In[355]:
Sch_sup_clean.describe()
# In[356]:
Sch_sup_clean.shape
# In[357]:
Sch_sup_clean.head()
# In[358]:
Sch_sup_clean.describe()
# In[359]:
Sch_sup_clean.hist()
# In[360]:
Sch_sup_clean.to_csv (r'/Users/dansa/Documents/GitHub/Phase1/Data/CRDC/Clean_crdc_schlsupport.csv', index = False, header=True)
# ### 4. Cleaning SAT and ACT file
# In[361]:
SAT_ACT = pandas.read_csv("SAT and ACT.csv", encoding='cp1252')
SAT_ACT.head()
# In[362]:
SAT_ACT['LEAID'] = SAT_ACT['LEAID'].apply(lambda x: '{0:0>7}'.format(x))
# In[363]:
SAT_ACT['SCHID'] = SAT_ACT['SCHID'].apply(lambda x: '{0:0>5}'.format(x))
# In[364]:
SAT_ACT.columns
# In[365]:
SAT_ACT.shape
# In[366]:
SAT_ACT.drop(SAT_ACT.columns[[7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,24,25,26,27]], axis=1, inplace=True)
# In[367]:
SAT_ACT.head()
# In[368]:
cols = ['LEAID', 'SCHID']
SAT_ACT['NCESSCH'] = SAT_ACT[cols].apply(lambda row: ''.join(row.values.astype(str)), axis=1)
# #### Adding total count of male and female participation on ACT and SAT
# In[369]:
SAT_ACT.rename(columns={'TOT_SATACT_M':'Male_part_count','TOT_SATACT_F':'Female_part_count'}, inplace=True)
# In[370]:
SAT_ACT.describe()
# In[371]:
SAT_ACTGT0= SAT_ACT.loc[SAT_ACT['Male_part_count'] > 0]
# In[372]:
SAT_ACTGT0.describe()
# In[373]:
SAT_ACTGT0['Total_SAT_ACT_students'] = (SAT_ACTGT0['Male_part_count'] + SAT_ACTGT0['Female_part_count'])
# In[374]:
SAT_ACTGT0.describe()
# In[375]:
SAT_ACTGT0.shape
# #### Keeping total counts greater than 0
# In[376]:
SAT_ACT_clean= SAT_ACTGT0[SAT_ACTGT0.Total_SAT_ACT_students > 0]
# In[377]:
SAT_ACT_clean.shape
# In[378]:
SAT_ACT_clean.head()
# #### Checking for missing or null values
# In[379]:
sns.heatmap(SAT_ACT.isnull(),yticklabels=False,cbar=True,cmap='viridis')
# In[380]:
SAT_ACT_clean.describe()
# In[381]:
SAT_ACT_clean.hist()
# In[382]:
SAT_ACT_clean.to_csv (r'/Users/dansa/Documents/GitHub/Phase1/Data/CRDC/Clean_crdc_SAT_ACT.csv', index = False, header=True)
# ### 5. Cleaning IB file
# In[383]:
IB= pandas.read_csv("International Baccalaureate.csv",encoding='cp1252')
IB.head()
# In[384]:
IB['SCHID'] = IB['SCHID'].apply(lambda x: '{0:0>5}'.format(x))
# In[385]:
IB['LEAID'] = IB['LEAID'].apply(lambda x: '{0:0>7}'.format(x))
# In[386]:
IB.columns
# In[387]:
IB.shape
# In[388]:
IB.drop(IB.columns[[7,9,10,11,12,13,14,15,16,17,18,19,20,21,22,25,26,27,28]], axis=1, inplace=True)
# In[389]:
IB.head()
# In[390]:
cols = ['LEAID', 'SCHID']
IB['NCESSCH'] = IB[cols].apply(lambda row: ''.join(row.values.astype(str)), axis=1)
# In[391]:
IB.rename(columns={'TOT_IBENR_M':'Male_enroll_count','TOT_IBENR_F':'Female_enroll_count'}, inplace=True)
# In[392]:
IB.describe()
# #### Recoding missing values as zero so that total counts can be calculated later
# In[393]:
IB['Male_enroll_count'] = IB['Male_enroll_count'].replace(-9,0)
# In[394]:
IB['Female_enroll_count'] = IB['Female_enroll_count'].replace(-9,0)
# In[395]:
IB.describe()
# In[396]:
IB['Total_IB_students'] = (IB['Male_enroll_count'] + IB['Female_enroll_count'])
# In[397]:
IB.describe()
# In[398]:
IB.shape
# #### Keeping IB program indicator with Y/N
# In[399]:
IB_clean= IB[IB.SCH_IBENR_IND != '-9']
# In[400]:
IB_clean.shape
# In[401]:
IB_clean.dtypes
# ##### Recoding string Y/N values to integers 1/0
# In[402]:
IB_clean['SCH_IBENR_IND_new'] = IB_clean['SCH_IBENR_IND'].replace(['Yes','No'],['1','0'])
# In[403]:
IB_clean[['SCH_IBENR_IND_new']]=IB_clean[['SCH_IBENR_IND_new']].astype(int)
# In[404]:
IB_clean.head()
# #### Checking for missing or null values
# In[405]:
sns.heatmap(IB_clean.isnull(),yticklabels=False,cbar=True,cmap='viridis')
# In[406]:
IB_clean.describe()
# ##### Filtering out negative values
# In[407]:
IB_clean1= IB_clean[IB_clean.SCH_IBENR_IND != '-6']
# In[408]:
IB_clean2= IB_clean1[IB_clean1.SCH_IBENR_IND != '-5']
# In[409]:
IB_clean2.describe()
# In[410]:
IB_clean2.hist()
# In[411]:
IB_clean2.to_csv (r'/Users/dansa/Documents/GitHub/Phase1/Data/CRDC/Clean_crdc_IB.csv', index = False, header=True)
# ### 6. Cleaning AP file
# In[412]:
AP = pandas.read_csv("Advanced Placement.csv",encoding='cp1252')
AP.head()
# In[413]:
AP['SCHID'] = AP['SCHID'].apply(lambda x: '{0:0>5}'.format(x))
# In[414]:
AP['LEAID'] = AP['LEAID'].apply(lambda x: '{0:0>7}'.format(x))
# In[415]:
AP.columns
# In[416]:
AP.shape
# In[417]:
AP=AP[['LEA_STATE', 'LEA_STATE_NAME', 'LEAID', 'LEA_NAME', 'SCHID', 'SCH_NAME','COMBOKEY','SCH_APENR_IND','SCH_APCOURSES','SCH_APMATHENR_IND','TOT_APMATHENR_M','TOT_APMATHENR_F','SCH_APOTHENR_IND','TOT_APOTHENR_M','TOT_APOTHENR_F','TOT_APEXAM_ONEORMORE_M','TOT_APEXAM_ONEORMORE_F']]
# In[418]:
AP.shape
# In[419]:
AP.head()
# In[420]:
cols = ['LEAID', 'SCHID']
AP['NCESSCH'] = AP[cols].apply(lambda row: ''.join(row.values.astype(str)), axis=1)
# In[421]:
AP.rename(columns={'TOT_APMATHENR_M':'Male_enroll_math_count','TOT_APMATHENR_F':'Female_enroll_math_count','TOT_APOTHENR_M':'Male_enroll_other_count','TOT_APOTHENR_F':'Female_enroll_other_count'}, inplace=True)
# In[422]:
AP.describe()
# In[423]:
AP.shape
# In[424]:
AP= AP[AP.SCH_APENR_IND.isin(['Yes','No'])]
# In[425]:
AP.shape
# In[426]:
AP.describe()
# ##### If the AP enrollment indicator is No, then the corresponding columns for courses and student counts are marked as -9. So let's replace -9 with 0 counts for schools whose AP enrollment indicator is No
# In[427]:
AP['SCH_APCOURSES'] = AP['SCH_APCOURSES'].replace(-9,0)
# In[428]:
AP['Male_enroll_math_count'] = AP['Male_enroll_math_count'].replace(-9,0)
# In[429]:
AP['Female_enroll_math_count'] = AP['Female_enroll_math_count'].replace(-9,0)
# In[430]:
AP['Male_enroll_other_count'] = AP['Male_enroll_other_count'].replace(-9,0)
# In[431]:
AP['Female_enroll_other_count'] = AP['Female_enroll_other_count'].replace(-9,0)
# In[432]:
AP['TOT_APEXAM_ONEORMORE_M'] = AP['TOT_APEXAM_ONEORMORE_M'].replace(-9,0)
# In[433]:
AP['TOT_APEXAM_ONEORMORE_F'] = AP['TOT_APEXAM_ONEORMORE_F'].replace(-9,0)
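# The same -9 recoding can equivalently be done in a single call across all of the
# affected columns; a minimal sketch using the column names above:
ap_minus9_cols = ['SCH_APCOURSES', 'Male_enroll_math_count', 'Female_enroll_math_count',
                  'Male_enroll_other_count', 'Female_enroll_other_count',
                  'TOT_APEXAM_ONEORMORE_M', 'TOT_APEXAM_ONEORMORE_F']
AP[ap_minus9_cols] = AP[ap_minus9_cols].replace(-9, 0)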
# Total counts of M and F
# In[434]:
AP['Total_AP_math_students'] = (AP['Male_enroll_math_count'] + AP['Female_enroll_math_count'])
# In[435]:
AP['Total_AP_other_students'] = (AP['Male_enroll_other_count'] + AP['Female_enroll_other_count'])
# In[436]:
AP['Total_students_tookAP'] = (AP['TOT_APEXAM_ONEORMORE_M'] + AP['TOT_APEXAM_ONEORMORE_F'])
# In[437]:
AP.columns
# In[438]:
AP_math=AP[['LEA_STATE', 'LEA_STATE_NAME', 'LEAID', 'LEA_NAME', 'SCHID', 'SCH_NAME','COMBOKEY','NCESSCH','SCH_APENR_IND', 'SCH_APCOURSES', 'SCH_APMATHENR_IND',
'Total_AP_math_students','Total_students_tookAP']]
# In[439]:
AP_math.describe()
# #### Filtering out any lingering negative values that indicate missing values, not N/As
# In[440]:
AP_math_clean= AP_math.loc[AP_math['SCH_APCOURSES'] > -1]
# In[441]:
AP_math_clean= AP_math_clean.loc[AP_math_clean['Total_AP_math_students'] > -1]
# In[442]:
AP_math_clean= AP_math_clean.loc[AP_math_clean['Total_students_tookAP'] > -1]
# In[443]:
AP_math_clean.describe()
# In[444]:
AP_math_clean.shape
# In[445]:
AP_math_clean.dtypes
# ##### Recoding string Y/N values to integers 1/0
# In[446]:
AP_math_clean['SCH_APENR_IND_new'] = AP_math_clean['SCH_APENR_IND'].replace(['Yes','No'],['1','0'])
# In[447]:
AP_math_clean[['SCH_APENR_IND_new']] = AP_math_clean[['SCH_APENR_IND_new']].astype(int)
# In[448]:
AP_math_clean['SCH_APMATHENR_IND_new'] = AP_math_clean['SCH_APMATHENR_IND'].replace(['Yes','No','-9'],['1','0','0'])
# In[449]:
AP_math_clean[['SCH_APMATHENR_IND_new']] = AP_math_clean[['SCH_APMATHENR_IND_new']].astype(int)
# In[450]:
AP_math_clean.dtypes
# #### Checking for missing or null values
# In[451]:
sns.heatmap(AP_math_clean.isnull(),yticklabels=False,cbar=True,cmap='viridis')
# In[452]:
AP_math_clean.describe()
# In[453]:
AP_math_clean.hist()
# In[454]:
AP_math_clean.to_csv (r'/Users/dansa/Documents/GitHub/Phase1/Data/CRDC/Clean_crdc_AP_math.csv', index = False, header=True)
# In[455]:
AP_other=AP[['LEA_STATE', 'LEA_STATE_NAME', 'LEAID', 'LEA_NAME', 'SCHID', 'SCH_NAME','COMBOKEY','NCESSCH','SCH_APENR_IND', 'SCH_APCOURSES', 'SCH_APOTHENR_IND',
'Total_AP_other_students', 'Total_students_tookAP']]
# In[456]:
AP_other_clean= AP_other[AP_other.SCH_APENR_IND.isin(['Yes','No'])]
# In[457]:
AP_other_clean.shape
# In[458]:
AP_other_clean.dtypes
# In[459]:
count_other = AP_other_clean['SCH_APOTHENR_IND'].value_counts()
print(count_other)
# ##### Recoding string Y/N values to integers 1/0
# In[460]:
AP_other_clean['SCH_APENR_IND_new'] = AP_other_clean['SCH_APENR_IND'].replace(['Yes','No'],['1','0'])
# In[461]:
AP_other_clean[['SCH_APENR_IND_new']] = AP_other_clean[['SCH_APENR_IND_new']].astype(int)
# In[462]:
AP_other_clean['SCH_APOTHENR_IND_new'] = AP_other_clean['SCH_APOTHENR_IND'].replace(['Yes','No','-9'],['1','0','0'])
# In[463]:
AP_other_clean[['SCH_APOTHENR_IND_new']] = AP_other_clean[['SCH_APOTHENR_IND_new']].astype(int)
# In[464]:
AP_other_clean.dtypes
# In[465]:
sns.heatmap(AP_other_clean.isnull(),yticklabels=False,cbar=True,cmap='viridis')
# In[466]:
AP_other_clean.describe()
# In[467]:
AP_other_clean1= AP_other_clean.loc[AP_other_clean['SCH_APCOURSES'] > -1]
# In[468]:
AP_other_clean2= AP_other_clean1.loc[AP_other_clean1['Total_students_tookAP'] > -1]
# In[469]:
AP_other_clean2.describe()
# In[470]:
AP_other_clean.hist()
# In[471]:
AP_other_clean.to_csv (r'/Users/dansa/Documents/GitHub/Phase1/Data/CRDC/Clean_crdc_AP_other.csv', index = False, header=True)
# ### 7. Cleaning Algebra 1 file
# In[472]:
Alg1 = pandas.read_csv("Algebra I.csv",encoding='cp1252')
Alg1.head()
# In[473]:
Alg1['SCHID'] = Alg1['SCHID'].apply(lambda x: '{0:0>5}'.format(x))
# In[474]:
Alg1['LEAID'] = Alg1['LEAID'].apply(lambda x: '{0:0>7}'.format(x))
# In[475]:
Alg1.columns
# In[476]:
Alg1.shape
# In[477]:
Alg1=Alg1[['LEA_STATE', 'LEA_STATE_NAME', 'LEAID', 'LEA_NAME', 'SCHID', 'SCH_NAME','COMBOKEY','SCH_MATHCLASSES_ALG','SCH_MATHCERT_ALG','TOT_ALGENR_GS0910_M',
'TOT_ALGENR_GS0910_F','TOT_ALGENR_GS1112_M','TOT_ALGENR_GS1112_F','TOT_ALGPASS_GS0910_M','TOT_ALGPASS_GS0910_F','TOT_ALGPASS_GS1112_M','TOT_ALGPASS_GS1112_F']]
# In[478]:
Alg1.shape
# In[479]:
Alg1.head()
# In[480]:
cols = ['LEAID', 'SCHID']
Alg1['NCESSCH'] = Alg1[cols].apply(lambda row: ''.join(row.values.astype(str)), axis=1)
# In[481]:
Alg1.rename(columns={'TOT_ALGENR_GS0910_M':'Male_enroll_9to10_count','TOT_ALGENR_GS0910_F':'Female_enroll_9to10_count','TOT_ALGENR_GS1112_M':'Male_enroll_11to12_count',
'TOT_ALGENR_GS1112_F':'Female_enroll_11to12_count','TOT_ALGPASS_GS0910_M':'Male_pass_9to10_count','TOT_ALGPASS_GS0910_F':'Female_pass_9to10_count',
'TOT_ALGPASS_GS1112_M':'Male_pass_11to12_count','TOT_ALGPASS_GS1112_F':'Female_pass_11to12_count'}, inplace=True)
# In[482]:
Alg1.columns
# In[483]:
Alg1.describe()
# ##### Let's replace -9 with 0 in the enrollment counts so we can total values later
# In[484]:
Alg1['Male_enroll_9to10_count'] = Alg1['Male_enroll_9to10_count'].replace(-9,0)
# In[485]:
Alg1['Female_enroll_9to10_count'] = Alg1['Female_enroll_9to10_count'].replace(-9,0)
# In[486]:
Alg1['Male_enroll_11to12_count'] = Alg1['Male_enroll_11to12_count'].replace(-9,0)
# In[487]:
Alg1['Female_enroll_11to12_count'] = Alg1['Female_enroll_11to12_count'].replace(-9,0)
# In[488]:
Alg1['Male_pass_9to10_count'] = Alg1['Male_pass_9to10_count'].replace(-9,0)
# In[489]:
Alg1['Female_pass_9to10_count'] = Alg1['Female_pass_9to10_count'].replace(-9,0)
# In[490]:
Alg1['Male_pass_11to12_count'] = Alg1['Male_pass_11to12_count'].replace(-9,0)
# In[491]:
Alg1['Female_pass_11to12_count'] = Alg1['Female_pass_11to12_count'].replace(-9,0)
# Total counts of M and F
# In[492]:
Alg1['Total_Alg1_enroll_students'] = (Alg1['Male_enroll_9to10_count'] + Alg1['Female_enroll_9to10_count'] + Alg1['Male_enroll_11to12_count'] + Alg1['Female_enroll_11to12_count'])
# In[493]:
Alg1['Total_Alg1_pass_students'] = (Alg1['Male_pass_9to10_count'] + Alg1['Female_pass_9to10_count'] + Alg1['Male_pass_11to12_count'] + Alg1['Female_pass_11to12_count'])
# In[494]:
Alg1=Alg1[['LEA_STATE', 'LEA_STATE_NAME', 'LEAID', 'LEA_NAME', 'SCHID', 'SCH_NAME',
'COMBOKEY', 'SCH_MATHCLASSES_ALG', 'SCH_MATHCERT_ALG', 'NCESSCH',
'Total_Alg1_enroll_students', 'Total_Alg1_pass_students']]
# In[495]:
Alg1_clean= Alg1[Alg1.SCH_MATHCLASSES_ALG > 0]
# In[496]:
Alg1_clean.shape
# In[497]:
Alg1_clean.describe()
# #### Checking for missing or null values
# In[498]:
sns.heatmap(Alg1_clean.isnull(),yticklabels=False,cbar=True,cmap='viridis')
# In[499]:
Alg1_clean.describe()
# In[500]:
Alg1_clean.hist()
# In[501]:
Alg1_clean.to_csv (r'/Users/dansa/Documents/GitHub/Phase1/Data/CRDC/Clean_crdc_Alg1.csv', index = False, header=True)
# ### 8. Cleaning Enrollment file
# In[502]:
Enroll = pandas.read_csv("Enrollment.csv",encoding='cp1252')
Enroll.head()
# In[503]:
Enroll['SCHID'] = Enroll['SCHID'].apply(lambda x: '{0:0>5}'.format(x))
# In[504]:
Enroll['LEAID'] = Enroll['LEAID'].apply(lambda x: '{0:0>7}'.format(x))
# In[505]:
Enroll.columns
# In[506]:
Enroll.shape
# In[507]:
Enroll=Enroll[['LEA_STATE', 'LEA_STATE_NAME', 'LEAID', 'LEA_NAME', 'SCHID', 'SCH_NAME','COMBOKEY','TOT_ENR_M','TOT_ENR_F']]
# In[508]:
Enroll.shape
# In[509]:
cols = ['LEAID', 'SCHID']
Enroll['NCESSCH'] = Enroll[cols].apply(lambda row: ''.join(row.values.astype(str)), axis=1)
# In[510]:
Enroll['Total_enroll_students'] = (Enroll['TOT_ENR_M'] + Enroll['TOT_ENR_F'])
# In[511]:
Enroll.columns
# In[512]:
Enroll=Enroll[['LEA_STATE', 'LEA_STATE_NAME', 'LEAID', 'LEA_NAME', 'SCHID', 'SCH_NAME','COMBOKEY','NCESSCH','Total_enroll_students']]
# #### Excluding schools with 0 enrollment counts
# In[513]:
Enroll_clean=Enroll[Enroll.Total_enroll_students > 0]
# In[514]:
Enroll_clean.shape
# #### Checking for missing or null values
# In[515]:
sns.heatmap(Enroll_clean.isnull(),yticklabels=False,cbar=True,cmap='viridis')
# In[516]:
Enroll_clean.describe()
# In[517]:
Enroll_clean.hist()
# In[518]:
Enroll_clean.to_csv (r'/Users/dansa/Documents/GitHub/Phase1/Data/CRDC/Clean_crdc_enrollment.csv', index = False,header=True)
# #### 9. Merge CRDC school characteristics with CCD directory to extract only high schools
# In[519]:
cd /Users/dansa/Documents/GitHub/Phase1/Data/CCD
# In[520]:
ccd_directory=
|
pandas.read_csv("Clean_ccd_directory.csv")
|
pandas.read_csv
|
import pandas as pd
from multiprocessing import Process
from release_manager import *
from rq_run import *
# import plotly.graph_objs as go
# import plotly.io as pio
import copy
import shutil
"""
sk
"""
# def getMetric(df, metric):
#
# # df['train_bugs'] = df['train_changes'] * df['train_Bug_Per']/100
# #
# # df = df.sort_values(by='train_Bug_Per', ascending=False)
#
# metricValue = df[metric].values.tolist()
#
# return metricValue
yearMap = {}
MAX_YEAR = 8
def computeProjectYearMap():
projectYearMap = {}
for p in getProjectNames():
releaselist = release_manager.getProject(p).getReleases()
for startYear in range(1, MAX_YEAR):
onlyReleasesToConsider = []
releaseObjects = getReleasesFromYear(releaselist, startYear)
onlyReleasesToConsider += [y.getReleaseDate() for y in releaseObjects]
projectYearMap[p+"_RELEASES_IN_YEAR_"+str(startYear)] = onlyReleasesToConsider
return projectYearMap
projectYearMap = None #computeProjectYearMap()
#
# print(" pyear map ",projectYearMap)
# def getSelectionRules():
#
# # resampling = []
# #
# # testChanges = [1,2,3]
# #
# # for samples in [100]:
# #
# # for split in [40]:
# #
# # if samples == len(testChanges):
# # samplesStr = 'tCg'
# # else:
# # samplesStr = str(samples)
# #
# # longLabelApprch = '_Smp_' + str(samplesStr) + "Sp" + "_" + str(
# # split) + "_Test"
# #
# # resampling.append(longLabelApprch)
# #
# # return resampling + ['Train_ALL_Test_', 'RESAMPLING_SAMPLE_FROM_Recent_Smp_', 'Train_3months', 'Train_6months']
#
# df = pd.read_csv('./results/project_active_merchant_results.csv')
# print(df['trainAppraoch'].unique().tolist())
# return df['trainAppraoch'].unique().tolist()
def readable(selRule):
if "YEAR:" in selRule:
return selRule.replace("YEAR:", "Y")
elif "3MONTHS" in selRule:
return selRule.replace('3MONTHS', 'M3')
elif "6MONTHS" in selRule:
return selRule.replace('6MONTHS', 'M6')
elif "RECENT:RELEASE" in selRule:
return selRule.replace('RECENT:RELEASE', 'RR')
else:
return selRule
#
#
# if True:
# return selRule #.replace("RESAMPLE_", '')
# else:
# if selRule == 'RESAMPLE_EARLY':
# return 'MICRO_Early'
# elif selRule == 'RESAMPLE_RANDOM':
# return 'MICRO_Random'
# elif selRule == 'RESAMPLE_RECENT':
# return 'MICRO_Recent'
# elif selRule == 'RESAMPLING_SAMPLE_FROM_':
# return 'MICRO_Specific'
# elif selRule == 'Train_ALL_Test_':
# return 'SMOTE_All'
# elif selRule == 'Train_3months_Test_':
# return 'SMOTE_Recent_3MONTH'
# elif selRule == 'Train_6months_Test_':
# return 'SMOTE_Recent_6MONTH'
# elif selRule == 'Early_60_Per':
# return "Early_60%"
# elif selRule == 'ALL':
# return 'SMOTE_ALL'
# else:
# return 'error'
def getRQ1a():
policies = []
for samples in [12, 25, 50]:#, 100]:
for buggyPercentage in [10, 20, 30, 40, 50, 60, 70, 80, 90]:
buggySamples = round(buggyPercentage * samples / 100)
nonBuggySamples = abs(samples - buggySamples)
if buggySamples > 5 and nonBuggySamples > 5:
# print(buggySamples, nonBuggySamples)
policies.append('RESAMPLE_'+str(buggySamples)+"_"+str(nonBuggySamples))
policies.append('ALL')
# winMap = {'RESAMPLE_50_50_LR': 0, 'RESAMPLE_40_60_LR': 0, 'RESAMPLE_25_25_LR': 0, 'RESAMPLE_60_40_LR': 0, 'ALL_SVM': 0,
# 'RESAMPLE_20_30_LR': -1, 'RESAMPLE_30_70_LR': -1, 'RESAMPLE_50_50_SVM': -2, 'RESAMPLE_12_13_LR': -2, 'ALL_LR': -2,
# 'RESAMPLE_25_25_SVM': -2, 'RESAMPLE_40_60_SVM': -2, 'RESAMPLE_25_25_KNN': -2, 'RESAMPLE_30_20_LR': -2,
# 'ALL_NB': -2, 'RESAMPLE_15_10_LR': -2, 'RESAMPLE_30_70_SVM': -2, 'RESAMPLE_15_35_SVM': -2, 'RESAMPLE_20_80_LR': -2,}
#
# # winMap = {'RESAMPLE_50_50_LR': 0 }
return policies, 'rq1a'
def getRq4():
# Early(cfs), TCA +, Bellwether(cfs) and Early
# Bellwether(cfs)
return ['TCAPLUS', 'BELLWETHER', '150_25_25_random_None', '150_25_25_BELLWETHER_random_None'], 'rq4'
def getRq5():
# Early(cfs), Early(2) and Early Bellwether(2)
return ['150_25_25_random_None', '150_25_25_random__la_lt_',
'150_25_25_BELLWETHER_random__la_lt_'], 'rq5'
def getrqX():
sp = []
for k,v in CLASSIFIER_SAMPLING_POLICY_MAP.items():
sp += v
return list(set(sp)), 'rqx'
def getrqfuture():
policies = []
policies.append('RESAMPLE_150_20_30')
policies.append('RESAMPLE_CPDP_150_20_30')
return policies, 'rqfuture'
def getTempRQ():
policies = []
policies.append('ALL')
policies.append('150_25_25_random_None')
policies.append('150_25_25_random__la_lt_')
return policies, 'rqtemp'
def getRQ4():
policies = []
# policies.append('RESAMPLE_YEAR_1_40_60')
# RESAMPLE_300_20_30
# policies.append('RESAMPLE_75_20_30')
policies.append('RESAMPLE_150_20_30')
policies.append('RESAMPLE_300_20_30')
policies.append('RESAMPLE_600_20_30')
policies.append('RESAMPLE_1200_20_30')
policies.append('ALL')
# policies.append('RESAMPLE_2400_20_30')
# policies.append('3MONTHS')
# policies.append('RECENT_RELEASE')
# policies.append('6MONTHS')
return policies, 'rq4'
def getRQ5():
policies = []
# policies.append('RESAMPLE_YEAR_1_40_60')
# RESAMPLE_300_20_30
# policies.append('RESAMPLE_75_20_30')
# policies.append('RESAMPLE_300_20_30')
# policies.append('RESAMPLE_600_20_30')
# policies.append('RESAMPLE_1200_20_30')
# policies.append('RESAMPLE_2400_20_30')
policies.append('RESAMPLE_150_20_30')
policies.append('3MONTHS')
policies.append('RECENT_RELEASE')
policies.append('6MONTHS')
policies.append('ALL')
return policies, 'rq5'
def getSelectionRules():
resamplingPolicies = []
if True:
# resamplingPolicies.append('RESAMPLE_EARLY')
# resamplingPolicies.append('RESAMPLE_RANDOM')
# resamplingPolicies.append('RESAMPLE_RECENT')
# resamplingPolicies.append('EARLY')
# resamplingPolicies.append('RANDOM')
# resamplingPolicies.append('RECENT')
# resamplingPolicies.append('RESAMPLE_EARLY_RECENT')
# resamplingPolicies.append('RESAMPLE_EARLY_RANDOM_RECENT')
# resamplingPolicies.append('RESAMPLE_EARLY_RANDOM')
# resamplingPolicies.append('RESAMPLE_RANDOM_RECENT')
#
#
# resamplingPolicies.append('SMOTE_EARLY_RECENT')
# resamplingPolicies.append('SMOTE_EARLY_RANDOM_RECENT')
# resamplingPolicies.append('SMOTE_EARLY_RANDOM')
# resamplingPolicies.append('SMOTE_RANDOM_RECENT')
resamplingPolicies.append('RESAMPLE_RANDOM12')
resamplingPolicies.append('RANDOM12')
resamplingPolicies.append('ALL')
resamplingPolicies.append('3MONTHS')
resamplingPolicies.append('6MONTHS')
elif False:
resamplingPolicies.append('ALL')
# resamplingPolicies.append('RESAMPLE_RANDOM_50_200')
resamplingPolicies.append('RESAMPLE_RANDOM_40_100')
# resamplingPolicies.append('RESAMPLE_RECENT_40_100')
# elif proof:
#
# resamplingPolicies.append('ALL')
#
# if year == 1:
# resamplingPolicies.append('RESAMPLE_RANDOM_40_200')
# elif year == 2:
# resamplingPolicies.append('RESAMPLE_RANDOM_40_100')
# elif year == 3:
# resamplingPolicies.append('RESAMPLE_RECENT_40_100')
# elif year == 4:
# resamplingPolicies.append('RESAMPLE_RANDOM_40_100')
# elif year == 5:
# resamplingPolicies.append('RESAMPLE_RANDOM_40_200')
# elif year == 6:
# resamplingPolicies.append('RESAMPLE_RANDOM_40_50')
# elif year == 7:
# resamplingPolicies.append('RESAMPLE_RECENT_50_100')
# else:
# float("opps error!!!")
#
# else:
# for split in [20, 40, 50, 60, 80]:
#
# for samples in [12, 25, 50, 100, 200]:
#
# info = "_" + str(split) + "_" + str(samples)
#
# resamplingPolicies.append("RESAMPLE_EARLY" + info)
# resamplingPolicies.append("RESAMPLE_RANDOM" + info)
# resamplingPolicies.append("RESAMPLE_RECENT"+info)
#
# resamplingPolicies.append('ALL')
# resamplingPolicies.append('Early60')
return resamplingPolicies
def getRQ1b():
# policies = []
# policies.append('ALL')
# policies.append('RESAMPLE_YEAR_1_20_30')
# policies.append('RESAMPLE_YEAR_2_20_30')
# policies.append('RESAMPLE_YEAR_3_20_30')
#
# # policies.append('RESAMPLE_YEAR_1_40_60')
# # policies.append('RESAMPLE_YEAR_2_40_60')
# # policies.append('RESAMPLE_YEAR_3_40_60')
# return policies, 'rq1b'
#
# policies.append('RESAMPLE_150_20_30')
# policies.append('RESAMPLE_300_20_30')
# policies.append('RESAMPLE_600_20_30')
# policies.append('RESAMPLE_1200_20_30')
# policies.append('ALL')
winMap = {'RESAMPLE_1200_20_30_LR': -1, 'RESAMPLE_600_20_30_LR': -1, 'RESAMPLE_300_20_30_LR': -1, 'ALL_LR': -1,
'RESAMPLE_150_20_30_LR': -1, 'ALL_SVM': -1}
# , 'ALL_KNN': -2, 'RESAMPLE_1200_20_30_SVM': -3,
# 'RESAMPLE_600_20_30_SVM': -3, 'ALL_NB': -4, 'RESAMPLE_1200_20_30_KNN': -4, 'RESAMPLE_300_20_30_KNN': -4,
# 'RESAMPLE_600_20_30_KNN': -4, 'RESAMPLE_150_20_30_KNN': -4, 'RESAMPLE_300_20_30_SVM': -4, 'ALL_RF': -4,
# 'ALL_DT': -5, 'RESAMPLE_150_20_30_SVM': -5, 'RESAMPLE_600_20_30_RF': -5, 'RESAMPLE_300_20_30_RF': -5,
# 'RESAMPLE_1200_20_30_NB': -6, 'RESAMPLE_1200_20_30_RF': -6, 'RESAMPLE_150_20_30_RF': -6,
# 'RESAMPLE_1200_20_30_DT': -7, 'RESAMPLE_600_20_30_DT': -7, 'RESAMPLE_600_20_30_NB': -7,
# 'RESAMPLE_300_20_30_DT': -7, 'RESAMPLE_150_20_30_DT': -7, 'RESAMPLE_300_20_30_NB': -7, 'RESAMPLE_150_20_30_NB': -9}
#
return list(winMap.keys()), 'rq1b'
def convert(v, metric):
return [float(vv) for vv in v]
# vv= []
#
# if v is not None:
#
# for vvv in v:
#
# try:
# converted = float(vvv)
# if str(converted).lower() != 'nan':
# vv.append(converted)
# elif metric in ['g-score']:
# vv.append(0)
# except:
# continue
#
# return vv
def splitFullPolicy(samplingPolicy):
spArr = samplingPolicy.split('_')
classifier = spArr[len(spArr) - 1]
rawPolicy = samplingPolicy.replace('_'+classifier, '')
rr = rawPolicy
if rawPolicy == 'M6':
rr = '6MONTHS'
elif rawPolicy == 'M3':
rr = '3MONTHS'
return rr, classifier
def filterDFWins(df, samplingPolicies):
releaseList = []
for samplingPolicy in samplingPolicies:
rawPolicy, classifier = splitFullPolicy(samplingPolicy)
samplingReleaseList = df[ (df['trainApproach'].str.strip() == rawPolicy) & (df['classifier'].str.strip() == classifier)]['testReleaseDate'].values.tolist()
# print('trying ', "["+rawPolicy+"]", "["+classifier+"]" , ' found ', len(samplingReleaseList), ' releases common ', samplingReleaseList )
if samplingReleaseList is None or len(samplingReleaseList) == 0:
return []
else:
releaseList.append(samplingReleaseList)
testReleaseSet = None
for releases in releaseList:
if testReleaseSet is None:
testReleaseSet = list(set(releases))
continue
else:
testReleaseSet = list(set(testReleaseSet) & set(releases))
return testReleaseSet
def filterDF(df, samplingPolicies):
releaseList = []
for samplingPolicy in samplingPolicies:
samplingReleaseList = df[ df['trainApproach'] == samplingPolicy ]['testReleaseDate'].values.tolist()
# print(samplingPolicy, len(samplingReleaseList))
if samplingReleaseList is None or len(samplingReleaseList) == 0:
return []
else:
releaseList.append(samplingReleaseList)
testReleaseSet = None
for releases in releaseList:
if testReleaseSet is None:
testReleaseSet = list(set(releases))
continue
else:
testReleaseSet = list( set(testReleaseSet) & set(releases) )
return testReleaseSet
def toBoxLabel(selRule, rq):
boxLabel = selRule.replace('RESAMPLE_', '')
# boxLabel = boxLabel.replace('_LR', '')
boxLabel = boxLabel.replace('_', ":")
boxLabel = boxLabel.replace('CPDP', "C")
boxLabel = boxLabel.replace('150:25:25', 'E[0,150]')
boxLabel = boxLabel.replace('300:20:30', 'E[0,300]')
boxLabel = boxLabel.replace('600:20:30', 'E[0,600]')
boxLabel = boxLabel.replace('1200:20:30', 'E[0,1200]')
if '25:25' == boxLabel:
boxLabel = 'E'
if (rq == 'rq1a' and ( 'ALL' in boxLabel or '20:30' in boxLabel)) or (rq == 'rq1b' and ('ALL' in boxLabel or 'E[0,150]' in boxLabel)) or \
(rq == 'rq2' and 'E[0,150]' in boxLabel) or (rq == 'rqfuture' and 'E[0,150]' in boxLabel):
if rq == 'rq1a':
boxLabel = '<b>'+boxLabel+' ←</b>'
else:
boxLabel = '<b>' + boxLabel + '</b>'
return readable(boxLabel)
# def getColor(rqText):
#
# if rqText == 'rq1a':
# color = green_light
# elif rqText == 'rq1b' or rqText == 'rq4':
# color = green_light
# elif rqText == 'rq2':
# color = green_light
# else:
# float("error")
#
# return color
def getRank(rq, metric, selRule):
df = pd.read_csv('./results/a_' + rq + '/sk/z' + metric + ".csv")
# minRank = df['rank'].min()
# maxRank = df['rank'].max()
df = df[df['policy'].str.strip() == selRule]
rank = int(df['rank'].values.tolist()[0])
return rank
def getWinRank(rq, metric, fullSelectionRule):
df = pd.read_csv('./results/a_' + rq + '/sk/z' + metric + ".csv")
# minRank = df['rank'].min()
# maxRank = df['rank'].max()
df = df[df['policy'].str.strip() == fullSelectionRule]
rank = int(df['rank'].values.tolist()[0])
return rank
# if maxRank == minRank:
# return 0
#
# return (rank - minRank) / (maxRank - minRank)
def getDefaultColor(metric):
if metric.lower() in ['brier', 'ifa', 'd2h', 'pf']:
return dark_orange
else:
return dark_green
def getMediumColor(metric):
if metric.lower() in ['brier', 'ifa', 'd2h', 'pf']:
return medium_orange
else:
return medium_green
def getLightColor(metric):
if metric.lower() in ['brier', 'ifa', 'd2h', 'pf']:
return light_orange
else:
return light_green
# def plotWins(metric):
#
# write = False
#
# for rqm in [ getrq2 ]:
#
# samplingPolicies , rq = rqm()
#
# print(samplingPolicies)
#
# boxplot_data = []
#
# labelRankMap = {}
#
# for fullSelectionRule in samplingPolicies: #[ 'RESAMPLE_EARLY', 'ALL' , 'RESAMPLE_RANDOM','RESAMPLE_RECENT']:
#
# rawPolicy, classifier = splitFullPolicy(fullSelectionRule)
#
# print( ' rawPolicy, classifier ', rawPolicy, classifier)
#
# metricValues = []
#
# count = 0
#
# for pType in [ 'All_projects' ]:
#
# projectsSkipped = 0
#
# for p in getProjectNames(pType):
#
# df = pd.read_csv('./results/a_'+rq+'/project_' + p + '_results.csv')
# # df = df[df['classifier'] == classifier]
#
# # print("sending ",p)
# commonReleases = filterDFWins(df, samplingPolicies)
#
# count += len(commonReleases)
#
# # print("commonReleases ",len(commonReleases))
#
# if len(df) > 0:
# sDF = df[ (df['testReleaseDate'].isin(commonReleases) ) & ( df['trainApproach'] == rawPolicy ) & (df['classifier'] == classifier)]
# else:
# projectsSkipped += 1
# continue
#
# v = sDF[metric].values.tolist()
#
# if len(commonReleases) != len(v):
# print('**** not equal \t\t', p, rawPolicy, metric, commonReleases, v, sDF['testReleaseDate'].values.tolist())
#
# before = len(v)
# v = convert(v)
# if before - len(v) > 0:
# print("Loss = ", before - len(v))
#
# metricValues += v
#
# print(projectsSkipped, ' for ' , rawPolicy)
#
# boxLabel = toBoxLabel(fullSelectionRule, rq)
#
# print(fullSelectionRule, ' population size = ', len(metricValues))
# boxplot_data.append(go.Box(fillcolor=white, marker=dict(color=black),
# y=metricValues, name=boxLabel, showlegend=False,
# orientation="v",
# line=dict(width=1.5)))
#
# labelRankMap[boxLabel] = getWinRank(rq, metric, fullSelectionRule)
#
# sortByMedian(boxplot_data, metric.lower() in ['brier', 'ifa', 'd2h', 'pf'])
#
# if metric == 'roc_auc':
# axis = 'AUC'
# else:
# axis = metric.upper()
#
# previousRank = None
# previousColor = None
#
# mediumUsed = False
# for b in range(0, len(boxplot_data)):
#
# currentRank = labelRankMap[boxplot_data[b].name]
#
# if previousRank is None:
# boxplot_data[b].fillcolor = getDefaultColor(metric)
# previousColor = getDefaultColor(metric)
# elif abs(previousRank - currentRank) == 0:
# boxplot_data[b].fillcolor = previousColor
# elif abs(previousRank - currentRank) >= 1:
# if not mediumUsed:
# boxplot_data[b].fillcolor = getMediumColor(metric)
# previousColor = getMediumColor(metric)
# mediumUsed = True
# else:
# boxplot_data[b].fillcolor = getLightColor(metric)
# previousColor = getLightColor(metric)
# else:
# float('error')
#
# previousRank = currentRank
#
# # elif currentRank != previousRank:
# # previousRank = currentRank
# # if previousColor == white:
# # boxplot_data[b].fillcolor = getColor(rq)
# # previousColor = getColor(rq)
# # else:
# # boxplot_data[b].fillcolor = white
# # previousColor = white
# # elif currentRank == previousRank:
# # boxplot_data[b].fillcolor = previousColor
# # else:
# # float('error')
#
# if not write:
# plot_boxes(boxplot_data, rq+"_"+metric, '', axis, rq)
def get_common_test_releases(projectDF, sampling_policies, classifiers):
common_releases = None
for sp in sampling_policies:
for c in classifiers:
if sp not in CLASSIFIER_SAMPLING_POLICY_MAP[c]:
continue
# if c in ['TUNED_DT', 'TUNED_LR'] and sp != 'FIRST_150':
# continue
# print('Project lines # : ',len(projectDF))
curr_common_releases = projectDF[(projectDF['trainApproach'] == sp) & (projectDF['classifier'] == c)][
'testReleaseDate'].values.tolist()
# print(len(curr_common_releases), sp, c)
if common_releases is None:
common_releases = curr_common_releases
# print('\t once = ', common_releases)
continue
else:
common_releases = list(set(common_releases) & set(curr_common_releases))
# print('\t reset = ', common_releases)
# print('\t ', sp, c, len(common_releases))
if common_releases is None:
return []
return common_releases
def getExpClassifiers():
c = []
for k, v in CLASSIFIER_SAMPLING_POLICY_MAP.items():
c.append(k)
return list(set(c))
metric_srule_size = {}
def aggregate_eval_measures(metric, projectStartDateMap):
commonReleasesMap = {}
f = open('./results/sk/z' + metric + '.txt', "a+")
classifiers = getExpClassifiers()
# classifiers = ['LogisticRegression']
for rqm in [ getrqX ]:
samplingPolicies , rq = rqm()
print(' >> ', samplingPolicies,rq)
for classifier in classifiers:
for selRule in samplingPolicies: #[ 'RESAMPLE_EARLY', 'ALL' , 'RESAMPLE_RANDOM','RESAMPLE_RECENT']:
# if classifier not in ['LR_None'] and selRule == '150_25_25_random_None':
# continue
metricValues = []
count = 0
for pType in [ 'All_projects' ]:
projectsSkipped = 0
for p in getProjectNames():
if p == BELLWETHER_PROJECT:
continue
try:
projectdf =
|
pd.read_csv('./results/project_' + p + '_results.csv')
|
pandas.read_csv
|
#coding:utf-8
import pandas as pd
import numpy as np
# Read personal (agg) information
train_agg = pd.read_csv('../data/train_agg.csv',sep='\t')
test_agg = pd.read_csv('../data/test_agg.csv',sep='\t')
agg = pd.concat([train_agg,test_agg],copy=False)
# Log information
train_log = pd.read_csv('../data/train_log.csv',sep='\t')
test_log = pd.read_csv('../data/test_log.csv',sep='\t')
log = pd.concat([train_log,test_log],copy=False)
log['EVT_LBL_1'] = log['EVT_LBL'].apply(lambda x:x.split('-')[0])
log['EVT_LBL_2'] = log['EVT_LBL'].apply(lambda x:x.split('-')[1])
log['EVT_LBL_3'] = log['EVT_LBL'].apply(lambda x:x.split('-')[2])  # third segment (assumes EVT_LBL has the form a-b-c)
# Unique user identifiers
train_flg = pd.read_csv('../data/train_flg.csv',sep='\t')
test_flg =
|
pd.read_csv('../data/submit_sample.csv',sep='\t')
|
pandas.read_csv
|
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : wangbing
# @FILE : DeepSCP.py
# @Time : 9/20/2021 9:29 PM
# @Desc : DeepSCP: utilizing deep learning to boost single-cell proteome coverage
import numpy as np
import pandas as pd
import scipy as sp
import lightgbm as lgb
import networkx as nx
import matplotlib.pyplot as plt
from copy import deepcopy
from time import time
from joblib import Parallel, delayed
from scipy.stats.mstats import gmean
from bayes_opt import BayesianOptimization
from triqler.qvality import getQvaluesFromScores
from sklearn.linear_model import ElasticNet
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import cross_val_score
from sklearn.metrics import roc_auc_score, auc, roc_curve
from sklearn.model_selection import StratifiedKFold, train_test_split
import torch
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader
import argparse
import warnings
warnings.filterwarnings('ignore')
plt.rcParams['font.sans-serif'] = 'Arial'
def showcols(df):
cols = df.columns.tolist()
cols = cols + (10 - len(cols) % 10) % 10 * ['None']
cols = np.array(cols).reshape(-1, 10)
return pd.DataFrame(data=cols, columns=range(1, 11))
def Prob2PEP(y, y_prob):
label = np.array(deepcopy(y))
score = np.array(deepcopy(y_prob))
srt_idx = np.argsort(-score)
label_srt = label[srt_idx]
score_srt = score[srt_idx]
targets = score_srt[label_srt == 1]
decoys = score_srt[label_srt != 1]
_, pep = getQvaluesFromScores(targets, decoys, includeDecoys=True)
return pep[np.argsort(srt_idx)]
def Score2Qval(y0, y_score):
y = np.array(deepcopy(y0)).flatten()
y_score = np.array(y_score).flatten()
y[y != 1] = 0
srt_idx = np.argsort(-y_score)
y_srt = y[srt_idx]
cum_targets = y_srt.cumsum()
cum_decoys = np.abs(y_srt - 1).cumsum()
FDR = np.divide(cum_decoys, cum_targets)
qvalue = np.zeros(len(FDR))
qvalue[-1] = FDR[-1]
qvalue[0] = FDR[0]
for i in range(len(FDR) - 2, 0, -1):
qvalue[i] = min(FDR[i], qvalue[i + 1])
qvalue[qvalue > 1] = 1
return qvalue[np.argsort(srt_idx)]
def GroupProteinPEP2Qval(data, file_column, protein_column, target_column, pep_column):
data['Protein_label'] = data[protein_column] + '_' + data[target_column].map(str)
df = []
for i, j in data.groupby(file_column):
df_pro = j[['Protein_label', target_column, pep_column]].sort_values(pep_column).drop_duplicates(
subset='Protein_label', keep='first')
df_pro['protein_qvalue'] = Score2Qval(df_pro[target_column].values, -df_pro[pep_column].values)
df.append(pd.merge(j,
df_pro[['Protein_label', 'protein_qvalue']],
on='Protein_label',
how='left'))
return pd.concat(df, axis=0).drop('Protein_label', axis=1)
class SampleRT:
def __init__(self, r=3):
self.r = r
def fit_tranform(self, data, file_column, peptide_column, RT_column, score_column,target_column):
self.file_column = file_column
self.peptide_column = peptide_column
self.RT_column = RT_column
self.score_column = score_column
self.target_column = target_column
data['File_Pep'] = data[self.file_column] + '+' + data[self.peptide_column]
dftagraw = data[data[self.target_column] == 1]
dfrevraw = data[data[self.target_column] != 1]
dftag1 = self.makePeptied_File(self.Repeat3(dftagraw))
dfrev1 =self.makePeptied_File(self.Repeat3(dfrevraw))
dftag2 = self.makeRTfeature(dftag1)
dfrev2 = self.makeRTfeature(dfrev1)
reg = ElasticNet()
pred_tag_tag = pd.DataFrame(data=np.zeros_like(dftag1.values), columns=dftag1.columns, index=dftag1.index)
pred_rev_tag = pd.DataFrame(data=np.zeros_like(dfrev1.values), columns=dfrev1.columns, index=dfrev1.index)
pred_rev_rev = pd.DataFrame(data=np.zeros_like(dfrev1.values), columns=dfrev1.columns, index=dfrev1.index)
pred_tag_rev = pd.DataFrame(data=np.zeros_like(dftag1.values), columns=dftag1.columns, index=dftag1.index)
scores_tag_tag = []
scores_rev_tag = []
scores_tag_rev = []
scores_rev_rev = []
samples = dftag1.columns.tolist()
for sample in samples:
y_tag = dftag1[~dftag1[sample].isna()][sample]
X_tag = dftag2.loc[dftag2.index.isin(y_tag.index)]
y_rev = dfrev1[~dfrev1[sample].isna()][sample]
X_rev = dfrev2.loc[dfrev2.index.isin(y_rev.index)]
reg_tag = reg
reg_tag.fit(X_tag, y_tag)
scores_rev_tag.append(reg_tag.score(X_rev, y_rev))
scores_tag_tag.append(reg_tag.score(X_tag, y_tag))
pred_rev_tag.loc[dfrev2.index.isin(y_rev.index), sample] = reg_tag.predict(X_rev)
pred_tag_tag.loc[dftag2.index.isin(y_tag.index), sample] = reg_tag.predict(X_tag)
reg_rev = reg
reg_rev.fit(X_rev, y_rev)
scores_rev_rev.append(reg_rev.score(X_rev, y_rev))
scores_tag_rev.append(reg_rev.score(X_tag, y_tag))
pred_rev_rev.loc[dfrev2.index.isin(y_rev.index), sample] = reg_rev.predict(X_rev)
pred_tag_rev.loc[dftag2.index.isin(y_tag.index), sample] = reg_rev.predict(X_tag)
pred_rev_tag[pred_rev_tag == 0.0] = np.nan
pred_tag_tag[pred_tag_tag == 0.0] = np.nan
pred_rev_rev[pred_rev_rev == 0.0] = np.nan
pred_tag_rev[pred_tag_rev == 0.0] = np.nan
self.cmp_scores = pd.DataFrame({'score': scores_tag_tag + scores_rev_tag + scores_tag_rev + scores_rev_rev,
'type': ['RT(tag|tag)'] * len(scores_tag_tag) + ['RT(rev|tag)'] * len(scores_rev_tag) +
['RT(tag|rev)'] * len(scores_tag_rev) + ['RT(rev|rev)'] * len(scores_tag_rev)})
pred_rev = pd.merge(self.makeRTpred(pred_rev_rev, 'RT(*|rev)'),
self.makeRTpred(pred_rev_tag, 'RT(*|tag)'), on='File_Pep')
dfrevraw = pd.merge(dfrevraw, pred_rev, on='File_Pep', how='left')
pred_tag = pd.merge(self.makeRTpred(pred_tag_rev, 'RT(*|rev)'),
self.makeRTpred(pred_tag_tag, 'RT(*|tag)'), on='File_Pep')
dftagraw = pd.merge(dftagraw, pred_tag, on='File_Pep', how='left')
df = pd.concat([dftagraw, dfrevraw], axis=0)
df['DeltaRT'] = ((df[self.RT_column] - df['RT(*|rev)']).apply(abs) +
(df[self.RT_column] - df['RT(*|tag)']).apply(abs))
df['DeltaRT'] = df['DeltaRT'].apply(lambda x: np.log2(x + 1))
return df
def Repeat3(self, data):
df = pd.Series([i.split('+')[1] for i in data['File_Pep'].unique()]).value_counts()
return data[data[self.peptide_column].isin(df[df >= self.r].index)]
def makePeptied_File(self, data):
data1 = data.sort_values(self.score_column, ascending=False).drop_duplicates(subset='File_Pep',
keep='first')[
[self.file_column, self.peptide_column, self.RT_column]]
temp1 = list(zip(data1.iloc[:, 0], data1.iloc[:, 1], data1.iloc[:, 2]))
G = nx.Graph()
G.add_weighted_edges_from(temp1)
df0 = nx.to_pandas_adjacency(G)
df = df0[df0.index.isin(data1[self.peptide_column].unique())][data1[self.file_column].unique()]
df.index.name = self.peptide_column
df[df == 0.0] = np.nan
return df
def makeRTfeature(self, data):
df_median = data.median(1)
df_gmean = data.apply(lambda x: gmean(x.values[~np.isnan(x.values)]), axis=1)
df_mean = data.mean(1)
df_std = data.std(1)
df_cv = df_std / df_mean * 100
df_skew = data.apply(lambda x: x.skew(), axis=1)
df = pd.concat([df_median, df_gmean, df_mean, df_std, df_cv, df_skew], axis=1)
df.columns = ['Median', 'Gmean', 'Mean', 'Std', 'CV', 'Skew']
return df
def makeRTpred(self, data, name):
m, n = data.shape
cols = np.array(data.columns)
inds = np.array(data.index)
df_index = np.tile(inds.reshape(-1, 1), n).flatten()
df_columns = np.tile(cols.reshape(1, -1), m).flatten()
values = data.values.flatten()
return pd.DataFrame({'File_Pep': df_columns + '+' + df_index, name: values})
class MQ_SampleRT:
def __init__(self, r=3, filter_PEP=False):
self.r = r
self.filter_PEP = filter_PEP
def fit_tranform(self, data):
if self.filter_PEP:
data = data[data['PEP'] <= self.filter_PEP]
dftagraw = self.get_tag(data)
dfrevraw = self.get_rev(data)
df = pd.concat([self.get_tag(data), self.get_rev(data)], axis=0)
sampleRT = SampleRT(r=self.r)
dfdb = sampleRT.fit_tranform(df,
file_column='Experiment',
peptide_column='Modified sequence',
RT_column='Retention time',
score_column='Score',
target_column='label')
self.cmp_scores = sampleRT.cmp_scores
dfdb['PEPRT'] = dfdb['DeltaRT'] * (1 + dfdb['PEP'])
dfdb['ScoreRT'] = dfdb['Score'] / (1 + dfdb['DeltaRT'])
return dfdb
def get_tag(self, df):
df = df[~df['Proteins'].isna()]
        df_tag = df[(~df['Proteins'].str.contains('CON_')) &
                    (~df['Leading razor protein'].str.contains('REV__'))].copy()
        df_tag['label'] = 1
return df_tag
def get_rev(self, df):
        df_rev = df[(df['Leading razor protein'].str.contains('REV__')) &
                    (~df['Leading razor protein'].str.contains('CON__'))].copy()
        df_rev['label'] = 0
return df_rev
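    # Label convention: 1 marks target PSMs (no decoy hit), 0 marks decoy PSMs; MaxQuant prefixes
    # reversed-database proteins with 'REV__' and contaminants with 'CON__'.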
class IonCoding:
def __init__(self, bs=1000, n_jobs=-1):
ino = [
'{0}{2};{1}{2};{0}{2}(2+);{1}{2}(2+);{0}{2}-NH3;{1}{2}-NH3;{0}{2}(2+)-NH3;{1}{2}(2+)-NH3;{0}{2}-H20;{1}{2}-H20;{0}{2}(2+)-H20;{1}{2}(2+)-H20'.
format('b', 'y', i) for i in range(1, 47)]
ino = np.array([i.split(';') for i in ino]).flatten()
self.MI0 = pd.DataFrame({'MT': ino})
self.bs = bs
self.n_jobs = n_jobs
def fit_transfrom(self, data):
        print('++++++++++++++++OneHotEncoder CMS (Charge + Modified sequence)++++++++++++++')
t0 = time()
x = self.onehotfeature(data['CMS']).reshape(data.shape[0], -1, 30)
        print('elapsed time', time() - t0)
print('x shape: ', x.shape)
print('++++++++++++++++++++++++++Construct Ion Intensities Array++++++++++++++++++++')
t0 = time()
y = self.ParallelinoIY(data[['Matches', 'Intensities']])
        print('elapsed time', time() - t0)
print('y shape: ', y.shape)
return x, y
def onehotfeature(self, df0, s=48):
df = df0.apply(lambda x: x + (s - len(x)) * 'Z')
# B: '_(ac)M(ox)'; 'J': '_(ac)'; 'O': 'M(ox)'; 'Z': None
aminos = '123456ABCDEFGHIJKLMNOPQRSTVWYZ'
enc = OneHotEncoder(handle_unknown='ignore')
enc.fit(np.repeat(np.array(list(aminos)), s).reshape(-1, s))
seqs = np.array(list(df.apply(list)))
return enc.transform(seqs).toarray().reshape(df.shape[0], -1, 30)
def ParallelinoIY(self, data):
datasep = [data.iloc[i * self.bs: (i + 1) * self.bs] for i in range(data.shape[0] // self.bs + 1)]
paraY = Parallel(n_jobs=self.n_jobs)(delayed(self.dataapp)(i) for i in datasep)
return np.vstack(paraY).reshape(data.shape[0], -1, 12)
def inoIY(self, x):
MI = pd.DataFrame({'MT0': x[0].split(';'), 'IY': [float(i) for i in x[1].split(';')]})
dk = pd.merge(self.MI0, MI, left_on='MT', right_on='MT0', how='left').drop('MT0', axis=1)
dk.loc[dk.IY.isna(), 'IY'] = 0
dk['IY'] = dk['IY'] / dk['IY'].max()
return dk['IY'].values
def dataapp(self, data):
return np.array(list(data.apply(self.inoIY, axis=1)))
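    # The intensity target built here has 46 fragment positions x 12 ion types per spectrum
    # (b/y ions at charge 1+ and 2+, each also with NH3 and H2O neutral-loss variants), matching
    # the 'MT' template constructed in __init__ and the (N, 46, 12) reshape in ParallelinoIY.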
class CNN_BiLSTM(nn.Module):
def __init__(self, input_dim, hidden_dim, layer_dim, output_dim):
super(CNN_BiLSTM, self).__init__()
self.cov = nn.Sequential(
nn.Conv1d(in_channels=input_dim,
out_channels=64,
kernel_size=3),
nn.ReLU(),
nn.Dropout(0.5))
self.lstm = nn.LSTM(input_size=64,
hidden_size=hidden_dim,
num_layers=layer_dim,
batch_first=True,
bidirectional=True,
dropout=0.5)
self.fc = nn.Sequential(nn.Linear(hidden_dim * 2, output_dim),
nn.Sigmoid())
def forward(self, x):
x = self.cov(x.permute(0, 2, 1))
l_out, (l_hn, l_cn) = self.lstm(x.permute(0, 2, 1), None)
x = self.fc(l_out)
return x
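    # Shape sketch with the configuration used in DeepSpec.fit (input_dim=30, hidden_dim=256,
    # layer_dim=2, output_dim=12): x (batch, 48, 30) one-hot sequence -> Conv1d over length
    # (batch, 64, 46) -> BiLSTM (batch, 46, 512) -> Linear + Sigmoid (batch, 46, 12) ion intensities.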
class DeepSpec:
def __init__(self, model=None, seed=0, test_size=0.2, lr=1e-3, l2=0.0,
batch_size=1024, epochs=1000, nepoch=50, patience=50,
device=torch.device("cuda:0" if torch.cuda.is_available() else "cpu")):
self.test_size = test_size
self.seed = seed
self.batch_size = batch_size
self.device = device
self.patience = patience
self.lr = lr
self.l2 = l2
self.epochs = epochs
self.nepoch = nepoch
self.model = model
def fit(self, bkmsms):
print('+++++++++++++++++++++++++++Loading Trainset+++++++++++++++++++++')
bkmsms['CMS'] = bkmsms['Charge'].map(str) + bkmsms['Modified sequence']
bkmsms['CMS'] = bkmsms['CMS'].apply(lambda x: x.replace('_(ac)M(ox)', 'B').replace(
'_(ac)', 'J').replace('M(ox)', 'O').replace('_', ''))
bkmsms1 = self.selectBestmsms(bkmsms, s=100)[['CMS', 'Matches', 'Intensities']]
x, y = IonCoding().fit_transfrom(bkmsms1)
x = torch.tensor(x, dtype=torch.float)
y = torch.tensor(y, dtype=torch.float)
x_train, x_val, y_train, y_val = train_test_split(x, y, test_size=self.test_size, random_state=self.seed)
y_true = np.array(y_val).reshape(y_val.shape[0], -1).tolist()
train_db = TensorDataset(x_train, y_train)
train_loader = DataLoader(train_db,
batch_size=self.batch_size,
num_workers=0,
shuffle=True)
val_db = TensorDataset(x_val, y_val)
val_loader = DataLoader(val_db,
batch_size=self.batch_size,
num_workers=0,
shuffle=False)
if self.model is None:
torch.manual_seed(self.seed)
self.model = CNN_BiLSTM(30, 256, 2, 12)
model = self.model.to(self.device)
loss_func = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=self.lr, weight_decay=self.l2)
val_losses = []
val_cosines = []
self.val_cosine_best = 0.0
counter = 0
print('+++++++++++++++++++DeepSpec Training+++++++++++++++++++++')
for epoch in range(1, self.epochs + 1):
for i, (x_batch, y_batch) in enumerate(train_loader):
model.train()
batch_x = x_batch.to(self.device)
batch_y = y_batch.to(self.device)
out = model(batch_x)
loss = loss_func(out, batch_y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
model.eval()
with torch.no_grad():
val_loss = 0
y_valpreds = []
for a, b in val_loader:
val_x = a.to(self.device)
val_y = b.to(self.device)
y_valpred = model(val_x)
y_valpreds.append(y_valpred)
val_loss += loss_func(y_valpred, val_y).item() / len(val_loader)
val_losses.append(val_loss)
y_valpreds = torch.cat([y_vp for y_vp in y_valpreds], dim=0)
y_pred = np.array(y_valpreds.cpu()).reshape(y_val.shape[0], -1).tolist()
val_cosine = self.cosine_similarity(y_true, y_pred)
val_cosines.append(val_cosine)
if val_cosine.mean() >= self.val_cosine_best:
counter = 0
self.val_cosine_best = val_cosine.mean()
self.val_loss_best = val_loss
self.bestepoch = epoch
torch.save(model, 'DeepSpec.pkl')
else:
counter += 1
if epoch % self.nepoch == 0 or epoch == self.epochs:
print(
'[{}|{}] val_loss: {} | val_cosine: {}'.format(epoch, self.epochs, val_loss, val_cosine.mean()))
if counter >= self.patience:
print('EarlyStopping counter: {}'.format(counter))
break
        print('best epoch [{}|{}] val_loss: {} | val_cosine: {}'.format(self.bestepoch, self.epochs,
                                                                        self.val_loss_best, self.val_cosine_best))
self.traininfor = {'val_losses': val_losses, 'val_cosines': val_cosines}
def predict(self, evidence, msms):
dfdb = deepcopy(evidence)
msms = deepcopy(msms).rename(columns={'id': 'Best MS/MS'})
dfdb['CMS'] = dfdb['Charge'].map(str) + dfdb['Modified sequence']
dfdb['CMS'] = dfdb['CMS'].apply(lambda x: x.replace('_(ac)M(ox)', 'B').replace(
'_(ac)', 'J').replace('M(ox)', 'O').replace('_', ''))
dfdb = pd.merge(dfdb, msms[['Best MS/MS', 'Matches', 'Intensities']], on='Best MS/MS', how='left')
dfdb1 = deepcopy(dfdb[(~dfdb['Matches'].isna()) &
(~dfdb['Intensities'].isna()) &
(dfdb['Length'] <= 47) &
(dfdb['Charge'] <= 6)])[['id', 'CMS', 'Matches', 'Intensities']]
print('after filter none Intensities data shape:', dfdb1.shape)
print('+++++++++++++++++++Loading Testset+++++++++++++++++++++')
x_test, y_test = IonCoding().fit_transfrom(dfdb1[['CMS', 'Matches', 'Intensities']])
self.db_test = {'Data': dfdb1, 'x_test': x_test, 'y_test': y_test}
x_test = torch.tensor(x_test, dtype=torch.float)
test_loader = DataLoader(x_test,
batch_size=self.batch_size,
num_workers=0,
shuffle=False)
print('+++++++++++++++++++DeepSpec Testing+++++++++++++++++++++')
y_testpreds = []
model = torch.load('DeepSpec.pkl').to(self.device)
model.eval()
with torch.no_grad():
for test_x in test_loader:
test_x = test_x.to(self.device)
y_testpreds.append(model(test_x))
y_testpred = torch.cat(y_testpreds, dim=0)
y_test = np.array(y_test).reshape(y_test.shape[0], -1)
y_testpred = np.array(y_testpred.cpu())
self.db_test['y_testpred'] = y_testpred
y_testpred = y_testpred.reshape(y_test.shape[0], -1)
CS = self.cosine_similarity(y_test, y_testpred)
self.db_test['Cosine'] = CS
output = pd.DataFrame({'id': dfdb1['id'].values, 'Cosine': CS})
dfdb2 = pd.merge(dfdb, output, on='id', how='left')
dfdb2['PEPCosine'] = dfdb2['Cosine'] / (1 + dfdb2['PEP'])
dfdb2['ScoreCosine'] = dfdb2['Score'] / (1 + dfdb2['Cosine'])
return dfdb2
def cosine_similarity(self, y, y_pred):
a, b = np.array(y), np.array(y_pred)
res = np.array([[sum(a[i] * b[i]), np.sqrt(sum(a[i] * a[i]) * sum(b[i] * b[i]))]
for i in range(a.shape[0])])
return np.divide(res[:, 0], res[:, 1]) # Cosine or DP
# return 1 - 2 * np.arccos(np.divide(res[:, 0], res[:, 1])) / np.pi # SA
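    # Per spectrum this is dot(y, y_pred) / (||y|| * ||y_pred||); the commented line above would
    # instead return the normalized spectral angle (SA).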
def selectBestmsms(self, df, lg=47, cg=6, s=100):
return df[(df['Reverse'] != '+') & (~df['Matches'].isna()) &
(~df['Intensities'].isna()) & (df['Length'] <= lg) &
(df['Charge'] <= cg) & (df['Type'].isin(['MSMS', 'MULTI-MSMS']))
& (df['Score'] > s)].sort_values(
'Score', ascending=False).drop_duplicates(
subset='CMS', keep='first')[['CMS', 'Matches', 'Intensities']]
def ValPlot(self):
val_losses = self.traininfor['val_losses']
val_cosines = self.traininfor['val_cosines']
fig = plt.figure(figsize=(6, 4))
ax = fig.add_subplot(111)
lns1 = ax.plot(range(1, len(val_losses) + 1), val_losses,
color='orange', label='Val Loss={}'.format(round(self.val_loss_best, 5)))
        lns2 = ax.axvline(x=self.bestepoch, ls="--", c="b", label='Best epoch')
plt.xticks(size=15)
plt.yticks(size=15)
ax2 = ax.twinx()
lns3 = ax2.plot(range(1, len(val_losses) + 1), [i.mean() for i in val_cosines],
color='red', label='Val Cosine={}'.format(round(self.val_cosine_best, 4)))
lns = lns1 + lns3
labs = [l.get_label() for l in lns]
plt.yticks(size=15)
ax.set_xlabel("Epoch", fontsize=18)
ax.set_ylabel("Val Loss", fontsize=18)
ax2.set_ylabel("Val Cosine", fontsize=18)
ax.legend(lns, labs, loc=10, fontsize=15)
plt.tight_layout()
class LGB_bayesianCV:
def __init__(self, params_init=dict({}), n_splits=3, seed=0):
self.n_splits = n_splits
self.seed = seed
self.params = {
'boosting_type': 'gbdt',
'objective': 'binary',
'n_jobs': -1,
'random_state': self.seed,
'is_unbalance': True,
'silent': True
}
self.params.update(params_init)
def fit(self, x, y):
self.skf = StratifiedKFold(n_splits=self.n_splits, shuffle=True, random_state=self.seed)
self.__x = np.array(x)
self.__y = np.array(y)
self.__lgb_bayesian()
self.model = lgb.LGBMClassifier(**self.params)
# self.cv_predprob = cross_val_predict(self.model, self.__x, self.__y,
# cv=self.skf, method="predict_proba")[:, 1]
self.model.fit(self.__x, self.__y)
# self.feature_importance = dict(zip(self.model.feature_name_, self.model.feature_importances_))
def predict(self, X):
return self.model.predict(np.array(X))
def predict_proba(self, X):
return self.model.predict_proba(np.array(X))
def __lgb_cv(self, n_estimators, learning_rate,
max_depth, num_leaves,
subsample, colsample_bytree,
min_split_gain, min_child_samples,
reg_alpha, reg_lambda):
        self.params.update({
            'n_estimators': int(n_estimators),            # number of trees (100, 1000)
            'learning_rate': float(learning_rate),        # learning rate (0.001, 0.3)
            'max_depth': int(max_depth),                  # max tree depth (3, 15)
            'num_leaves': int(num_leaves),                # max leaves per tree, default 31, (2, 2^max_depth) (5, 1000)
            'subsample': float(subsample),                # row subsample ratio (0.3, 0.9)
            'colsample_bytree': float(colsample_bytree),  # feature subsample ratio (0.3, 0.9)
            'min_split_gain': float(min_split_gain),      # minimum gain to split (0, 0.5)
            'min_child_samples': int(min_child_samples),  # minimum samples per leaf (5, 1000)
            'reg_alpha': float(reg_alpha),                # L1 regularization (0, 10)
            'reg_lambda': float(reg_lambda),              # L2 regularization (0, 10)
        })
model = lgb.LGBMClassifier(**self.params)
cv_score = cross_val_score(model, self.__x, self.__y, scoring="roc_auc", cv=self.skf).mean()
return cv_score
def __lgb_bayesian(self):
        lgb_bo = BayesianOptimization(self.__lgb_cv,
                                      {
                                          'n_estimators': (100, 1000),      # number of trees
                                          'learning_rate': (0.001, 0.3),    # learning rate
                                          'max_depth': (3, 15),             # max tree depth
                                          'num_leaves': (5, 1000),          # max leaves per tree, default 31, (2, 2^max_depth)
                                          'subsample': (0.3, 0.9),          # row subsample ratio
                                          'colsample_bytree': (0.3, 0.9),   # feature subsample ratio
                                          'min_split_gain': (0, 0.5),       # minimum gain to split
                                          'min_child_samples': (5, 200),    # minimum samples per leaf
                                          'reg_alpha': (0, 10),             # L1 regularization
                                          'reg_lambda': (0, 10),            # L2 regularization
                                      },
                                      random_state=self.seed,
                                      verbose=0)
lgb_bo.maximize()
self.best_auc = lgb_bo.max['target']
lgbbo_params = lgb_bo.max['params']
lgbbo_params['n_estimators'] = int(lgbbo_params['n_estimators'])
lgbbo_params['learning_rate'] = float(lgbbo_params['learning_rate'])
lgbbo_params['max_depth'] = int(lgbbo_params['max_depth'])
lgbbo_params['num_leaves'] = int(lgbbo_params['num_leaves'])
lgbbo_params['subsample'] = float(lgbbo_params['subsample'])
lgbbo_params['colsample_bytree'] = float(lgbbo_params['colsample_bytree'])
lgbbo_params['min_split_gain'] = float(lgbbo_params['min_split_gain'])
lgbbo_params['min_child_samples'] = int(lgbbo_params['min_child_samples'])
lgbbo_params['reg_alpha'] = float(lgbbo_params['reg_alpha'])
lgbbo_params['reg_lambda'] = float(lgbbo_params['reg_lambda'])
self.params.update(lgbbo_params)
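    # Minimal usage sketch (x and y below are placeholders for a feature matrix and binary labels,
    # not variables defined in this file):
    #   clf = LGB_bayesianCV(n_splits=3, seed=0)
    #   clf.fit(x, y)                       # Bayesian search over the bounds above, then refit
    #   proba = clf.predict_proba(x)[:, 1]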
class LgbBayes:
def __init__(self, out_cv=3, inner_cv=3, seed=0):
self.out_cv = out_cv
self.inner_cv = inner_cv
self.seed = seed
def fit_tranform(self, data, feature_columns, target_column, file_column, protein_column=None):
data_set = deepcopy(data)
x = deepcopy(data_set[feature_columns]).values
y = deepcopy(data_set[target_column]).values
skf = StratifiedKFold(n_splits=self.out_cv, shuffle=True, random_state=self.seed)
cv_index = np.zeros(len(y), dtype=int)
y_prob = np.zeros(len(y))
y_pep = np.zeros(len(y))
feature_importance_df =
|
pd.DataFrame()
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
import os
import pandas as pd
import re
import smtplib
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import COMMASPACE, formatdate
import csv
from datetime import datetime
import time
import sys
from multiprocessing import Process
import url_handler
import data_collector
import valuator
now = datetime.now()
expected_income_ratio = 8.12
stock_code_file = "./ual.csv" # Test data
columnlist = ['code', 'name', 'price', 'EV/CF', 'EPS', 'PBR', 'dividend', 'ROE%', 'ROA(8)%', 'ROIC(8)%', 'FCFA(8)%', 'P_MM', 'EBIT/EV', 'GPA', 'ROIC%', 'S-RIM\nprice', 'S-RIM\nmargin', 'Templeton\nprice', 'Templeton\nrank', 'P_FS', 'STA', 'SNOA', 'Moat']
def us_run(us_codes, proc_num, list_chunk = 300, login_id="", login_passwd="", result_file_header = "./result_data/us_choosen_"):
print ("proc_num = {}".format(proc_num))
try:
os.makedirs("./itooza/us/"+now.strftime("%Y%m%d")+"/")
print("Directory ", "./itooza/us/"+now.strftime("%Y%m%d")+"/", " Created ")
except FileExistsError:
print("Directory ", "./itooza/us/"+now.strftime("%Y%m%d")+"/", " Already exists")
stock_code_name_dict = {}
s_rim_buy = []
templeton_buy = []
alldfcontents = []
dfcontents = []
all_revisit_contents = []
revisit_contents = []
for us_code in us_codes:
try:
stock_code = us_code[0]
stock_code_name_dict[us_code[0]] = us_code[1]
print("\n\n\nus_code = {}, stock_name = {}".format(stock_code, us_code[1]))
s_rim_price = 0
templeton_price = 0
df_investing, df_financial, current_price, ev = data_collector.read_data_from_itooza_us(stock_code, login_id, login_passwd)
print(df_investing)
print(df_financial)
ev_cf_ratio = valuator.calculate_ev_cf_ratio(ev, df_financial)
print ("EV/CashFlow = {}".format(ev_cf_ratio))
sta, snoa = valuator.calculate_sta_snoa_probm_us(df_financial)
stock_name_to_print = "{}, {}".format(stock_code, stock_code_name_dict[stock_code])
# Get S-RIM Price
s_rim_price = valuator.s_rim_calculator_us(df_investing, expected_income_ratio, current_price)
if s_rim_price > current_price:
print ("S-RIM: BUY {}".format(stock_name_to_print))
s_rim_buy.append((stock_code, int(s_rim_price)))
else:
print ("S-RIM: DON'T BUY {}".format(stock_name_to_print))
# Get Templeton Price
templeton_price = valuator.templeton_price_calculator_us(df_investing)
if templeton_price > current_price:
print ("Templeton: Strong BUY {}".format(stock_name_to_print))
templeton_buy.append((stock_code, int(templeton_price)))
elif (templeton_price * 2) > current_price:
print ("Templeton: Consider BUY {}".format(stock_name_to_print))
templeton_buy.append((stock_code, int(templeton_price)))
else:
print ("Templeton: DON'T BUY {}".format(stock_name_to_print))
fs_score = valuator.calculate_fs_score_us(df_investing, df_financial)
if fs_score >= 7:
print ("FS score: GOOD {}".format(stock_name_to_print))
p_fs = fs_score/10
moat = valuator.is_economic_moat_us(df_investing, df_financial)
ebit_ev = valuator.get_ebit_ev_us(df_financial, ev)
gpa = valuator.get_gpa_us(df_financial)
roic = float(df_investing.loc[20][1].replace('%',''))
roa_8, roic_8, fcfa_8, p_mm = valuator.calc_economic_moat_us(df_investing, df_financial)
# Save data
df_investing.to_csv("./itooza/us/"+now.strftime("%Y%m%d")+"/"+stock_code+"_investing.csv", mode="w")
df_financial.to_csv("./itooza/us/"+now.strftime("%Y%m%d")+"/"+stock_code+"_financial.csv", mode="w")
dfcontents.append(stock_code)
dfcontents.append(stock_code_name_dict[stock_code])
dfcontents.append("{:.2f}".format(current_price))
dfcontents.append("{:.2f}".format(ev_cf_ratio)) # EV/CashFlow
dfcontents.append(df_investing.loc[1][1]) # EPS Consolidated
dfcontents.append(df_investing.loc[8][1]) # PBR
dfcontents.append(df_investing.loc[3][1]) # dividend
dfcontents.append(df_investing.loc[18][1]) # ROE
dfcontents.append("{:.2f}".format(roa_8*100)) # ROA(8)
dfcontents.append("{:.2f}".format(roic_8*100)) # ROIC(8)
dfcontents.append("{:.2f}".format(fcfa_8*100)) # FCFA(8)
dfcontents.append(p_mm) # MM
dfcontents.append(ebit_ev) # EBIT/EV
dfcontents.append(gpa) # GPA
dfcontents.append(roic) # ROIC
dfcontents.append("{:.2f}".format(s_rim_price))
dfcontents.append("{:.2f}".format(((s_rim_price-current_price)/current_price)*100)+"%")
dfcontents.append("{:.2f}".format(templeton_price))
if (templeton_price > current_price):
dfcontents.append(1)
elif ((templeton_price *2) > current_price):
dfcontents.append(2)
else:
dfcontents.append(99)
dfcontents.append(p_fs)
dfcontents.append('{:.2f}%'.format(sta))
dfcontents.append('{:.2f}%'.format(snoa))
dfcontents.append(moat)
if len(dfcontents) > 0:
alldfcontents.append(dfcontents)
dfcontents = []
        except (KeyboardInterrupt, Exception) as e:
            if isinstance(e, KeyboardInterrupt):
break
else:
revisit_contents.append(stock_code)
revisit_contents.append(stock_code_name_dict[stock_code])
revisit_contents.append(str(e))
all_revisit_contents.append(revisit_contents)
revisit_contents = []
continue
result_df = pd.DataFrame(columns=columnlist, data=alldfcontents)
print(result_df)
result_file = result_file_header
result_file = result_file + now.strftime("%Y%m%d") + "_" + str(proc_num) + ".csv"
print ("result_file = {}".format(result_file))
result_df.to_csv(result_file, mode="w")
    if len(all_revisit_contents) > 0 and len(all_revisit_contents[0]) >= 3:
revisit_columns = ['code', 'name', 'reason']
revisit_df = pd.DataFrame(columns=revisit_columns, data=all_revisit_contents)
revisit_df.to_csv('revisit_list_'+now.strftime("%Y%m%d") + "_" + str(proc_num) +'.csv', mode="w")
def us_run_from_files(us_codes, proc_num, data_location = "./itooza/us/20200207/", result_file_header = "./result_data/us_choosen_"):
print ("proc_num = {}".format(proc_num))
stock_code_name_dict = {}
s_rim_buy = []
templeton_buy = []
alldfcontents = []
dfcontents = []
all_revisit_contents = []
revisit_contents = []
for us_code in us_codes:
try:
stock_code = us_code[0]
s_rim_price = 0
templeton_price = 0
df_investing, df_financial, current_price, ev = data_collector.read_data_from_files_us(stock_code, data_location.split('/')[-2])
print(df_investing)
print(df_financial)
ev_cf_ratio = valuator.calculate_ev_cf_ratio(ev, df_financial)
print ("EV/CashFlow = {}".format(ev_cf_ratio))
sta, snoa = valuator.calculate_sta_snoa_probm_us(df_financial)
# Get S-RIM Price
s_rim_price = valuator.s_rim_calculator_us(df_investing, expected_income_ratio, current_price)
if s_rim_price > current_price:
print ("S-RIM: BUY {}".format(stock_code))
s_rim_buy.append((stock_code, int(s_rim_price)))
else:
print ("S-RIM: DON'T BUY {}".format(stock_code))
# Get Templeton Price
templeton_price = valuator.templeton_price_calculator_us(df_investing)
if templeton_price > current_price:
print ("Templeton: Strong BUY {}".format(stock_code))
templeton_buy.append((stock_code, int(templeton_price)))
elif (templeton_price * 2) > current_price:
print ("Templeton: Consider BUY {}".format(stock_code))
templeton_buy.append((stock_code, int(templeton_price)))
else:
print ("Templeton: DON'T BUY {}".format(stock_code))
fs_score = valuator.calculate_fs_score_us(df_investing, df_financial)
if fs_score >= 7:
print ("FS score: GOOD {}".format(stock_code))
p_fs = fs_score/10
moat = valuator.is_economic_moat_us(df_investing, df_financial)
ebit_ev = valuator.get_ebit_ev_us(df_financial, ev)
gpa = valuator.get_gpa_us(df_financial)
roic = float(df_investing.loc[20][1].replace('%',''))
roa_8, roic_8, fcfa_8, p_mm = valuator.calc_economic_moat_us(df_investing, df_financial)
# Save data
df_investing.to_csv("./itooza/us/"+now.strftime("%Y%m%d")+"/"+stock_code+"_investing.csv", mode="w")
df_financial.to_csv("./itooza/us/"+now.strftime("%Y%m%d")+"/"+stock_code+"_financial.csv", mode="w")
            dfcontents.append(stock_code)
            dfcontents.append(us_code[1])  # stock name (as in us_run), so the row matches columnlist's 'name' column
dfcontents.append("{:.2f}".format(current_price))
dfcontents.append("{:.2f}".format(ev_cf_ratio)) # EV/CashFlow
dfcontents.append(df_investing.loc[1][1]) # EPS Consolidated
dfcontents.append(df_investing.loc[8][1]) # PBR
dfcontents.append(df_investing.loc[3][1]) # dividend
dfcontents.append(df_investing.loc[18][1]) # ROE
dfcontents.append("{:.2f}".format(roa_8*100)) # ROA(8)
dfcontents.append("{:.2f}".format(roic_8*100)) # ROIC(8)
dfcontents.append("{:.2f}".format(fcfa_8*100)) # FCFA(8)
dfcontents.append(p_mm) # MM
dfcontents.append(ebit_ev) # EBIT/EV
dfcontents.append(gpa) # GPA
dfcontents.append(roic) # ROIC
dfcontents.append("{:.2f}".format(s_rim_price))
dfcontents.append("{:.2f}".format(((s_rim_price-current_price)/current_price)*100)+"%")
dfcontents.append("{:.2f}".format(templeton_price))
if (templeton_price > current_price):
dfcontents.append(1)
elif ((templeton_price *2) > current_price):
dfcontents.append(2)
else:
dfcontents.append(99)
dfcontents.append(p_fs)
dfcontents.append('{:.2f}%'.format(sta))
dfcontents.append('{:.2f}%'.format(snoa))
dfcontents.append(moat)
if len(dfcontents) > 0:
alldfcontents.append(dfcontents)
dfcontents = []
        except (KeyboardInterrupt, Exception) as e:
            if isinstance(e, KeyboardInterrupt):
break
else:
revisit_contents.append(stock_code)
revisit_contents.append(str(e))
all_revisit_contents.append(revisit_contents)
revisit_contents = []
continue
result_df =
|
pd.DataFrame(columns=columnlist, data=alldfcontents)
|
pandas.DataFrame
|
import matplotlib.pyplot as plt
import pandas as pd
from pipeline.modeling.data_utils import TransformDF
from pipeline.modeling.datasets import TimeSeriesDataset
from pipeline.common.function_utils import timeit
from pipeline.common.optimize_pandas import optimize
from pipeline.modeling.data_to_df import LoadDF
import math
class MakeTurnsDataset():
def __init__(self, train_sessions, val_sessions, fdf_path, features=["at","ang","head","perfectmatch"], shift=0) -> None:
self.fdf_path = fdf_path
self.shift = shift
df = self.get_df(train_sessions,"chris", features)
df.to_feather(fdf_path+".train")
df = self.get_df(val_sessions,"chris", features)
df.to_feather(fdf_path+".val")
df = self.get_df(range(1,16),"kalin", features)
df.to_feather(fdf_path+".test")
def get_df(self,sessions,dataset, features):
if dataset == "chris":
skip = [4,5,14,18,19]
else:
skip=[]
positions = ["right","left","center"]
sdfs, pcdfs, scdfs, at_mdfs, ang_mdfs = [], [], [], [], []
for session in sessions:
if session not in skip:
for person in positions:
looking_at, speaker_labels, sync, perf, gaze_angles = get_individuals_dataframes(session, person, "pose", "superlarge", dataset, self.shift)
at_multipliers = get_gazed_at_multiplier(person, looking_at)
ang_multipliers = get_gaze_angle_multiplier(person, gaze_angles)
sdfs.append(speaker_labels)
pcdfs.append(perf)
scdfs.append(sync)
at_mdfs.append(at_multipliers)
ang_mdfs.append(ang_multipliers)
dfs = {}
speaker_labels = pd.concat(sdfs, axis=0)
dfs["perfectmatch"] = pd.concat(pcdfs, axis=0)
dfs["syncnet"] = pd.concat(scdfs, axis=0)
dfs["at"] = pd.concat(at_mdfs, axis=0)
dfs["ang"] = pd.concat(ang_mdfs, axis=0)
to_concat = [speaker_labels]
for f in features:
to_concat.append(dfs[f])
df = pd.concat(to_concat, axis=1)
df.reset_index(inplace=True,drop=True)
return df
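    # Each (session, seat position) pass above contributes one block of rows; the assembled frame
    # holds the active-speaker labels followed by whichever feature columns were requested in `features`.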
def get_gazed_at_multiplier(person_of_interest, looking_at, add_to_multiply=.5, base_multiple=1):
"""Creates a 'per-frame' multiplier based on the # of people looking at the person_of_interest
frame_multiplier = base_multiple + (add_to_multiply * [# of people looking at that person])
note: Doesn't include the robot 'looking' at someone.
Args:
person_of_interest (str): subject under consideration as active speaker
looking_at (df): df with group members as the columns and who they are looking at in each row
add_to_multiply (float, optional): % to add for each person looking at the poi. Defaults to .5.
base_multiple (int, optional): multiplier if no one is looking at the poi. Defaults to 1.
Returns:
        [df]: a multiplier value for every frame
"""
# Set baseline multiple
looking_at["multiple_at"] = base_multiple
for p in ["left", "right", "center"]:
looking_at.loc[looking_at[p]==person_of_interest,"multiple_at"] += add_to_multiply
return looking_at[["multiple_at"]].copy()
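# Worked example (illustrative): with base_multiple=1 and add_to_multiply=.5, a frame in which two
# group members look at the person of interest gets multiple_at = 1 + 2 * 0.5 = 2.0.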
def get_gaze_angle_multiplier(person_of_interest, angles_df, lower_bound=1, upper_bound=2):
"""Creates a 'per-frame' multiplier based on the average delta between group members gaze
and the head of the person of interest.
The goal is to map the angles from 0 (directly at the person) to 75 (the outer edge of the fov)
to a range of max [2] to min [0].
deg_angle = mean_angle*180/pi
max = 0 * m + b
min = 75 * m + max
b=max
m=(min-max)/75
Args:
person_of_interest (str): subject under consideration as active speaker
angles_df (df): df of every combination of gaze-to-head angles. Columns are labeled
with ['person->subject'] headers such that the column contains the angle
between person's gaze the vector from the person's head to the subject's head
rad_thresh (float, optional): [description]. Defaults to .7.
Returns:
[type]: [description]
"""
angles_df["multiple_ang"] = lower_bound
columns_of_interest = [f"{p}->{person_of_interest}" for p in ["left", "right", "center"] if p != person_of_interest]
m = (lower_bound-upper_bound)/75
angles_df["multiple_ang"] = angles_df[columns_of_interest].mean(axis=1) * (180/math.pi) * m + upper_bound
# print(lower_bound, upper_bound)
# print(angles_df["multiple_ang"].min(),angles_df["multiple_ang"].max())
assert angles_df["multiple_ang"].max() <= upper_bound, "Check your math"
return angles_df[["multiple_ang"]].copy()
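# Worked example (illustrative): with lower_bound=1 and upper_bound=2, m = -1/75, so a mean gaze
# angle of 0 deg maps to a multiplier of 2.0, 37.5 deg to 1.5, and 75 deg to 1.0.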
def get_individuals_dataframes(session, person, direction_type, size, dataset, shift):
# Note: all csv's have headers so positions should be irrelevant
if dataset=="kalin":
base_path = "/home/chris/code/modeling-pipeline/data/active_speaker/"
# csv with a column for each speaker label with text labels for who they are gazing at
looking_at = pd.read_csv(f"{base_path}/kinect_pose/{session}G3_KINECT_DISCRETE_{size.upper()}.csv")
# csv with a column for each permutation of looker and subject with angle in radians
# e.g. "left->right" | "left->center" | "right->left" | etc.
gaze_angles = pd.read_csv(f"{base_path}/kinect_pose/{session}G3_KINECT_CONTINUOUS.csv")
# csv with a column for each speaker label with binary values for talking or not talking
turns = pd.read_csv(f"{base_path}/kinect_pose/{session}G3_VAD.csv")
# csv with a single columns labeled "Confidence" and values from syncnet output
if person == 'center':
sconfidences =
|
pd.read_csv(f"{base_path}/kinect_pose/{session}G3C_SYNCNET.csv")
|
pandas.read_csv
|
import os
import pandas as pd
from datetime import datetime
# from urllib.request import urlopen
# from io import BytesIO
# from zipfile import ZipFile
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
def update_log(file_name):
current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S (UTC+07:00)")
log = f'{file_name} downloaded at {current_time}\n'
with open('download-log.txt', 'a') as f:
f.write(log)
f.close()
def download_to_csv(date, symbol, periodical='monthly'):
url = f'https://data.binance.vision/data/spot/{periodical}/klines/{symbol}/1d/{symbol}-1d-{date}.zip'
file_name = f'{symbol}-1d-{date}'
df =
|
pd.DataFrame()
|
pandas.DataFrame
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Documentation
"""
import os
import sys
import pandas as pd
import numpy as np
import csv
import time
import requests
import uuid
from io import StringIO
import json
class Translate_App():
"""
Translate App
=============
The Translate_App class contains all the functions to run the translation app
"""
retry_counter = 0
def __init__(self, input_dict, **kwargs):
self.validate_translate_app(input_dict)
def validate_translate_app(self, input_dict):
"""
All processes in py_gfkdata require self to be created and validated.
"""
self.ERROR = []
self.LOG = []
self.run_type = "translate"
self.remove_autodetect = False
self.input_df_or_path_or_buffer = input_dict["input_df_or_path_or_buffer"]
self.output_file_path = input_dict["output_file_path"]
if "translate_cols_input" in input_dict:
if "," in input_dict["translate_cols_input"]:
input_dict["translate_cols_input"] = input_dict["translate_cols_input"].replace(",",";")
if "|" in input_dict["translate_cols_input"]:
input_dict["translate_cols_input"] = input_dict["translate_cols_input"].replace("|",";")
self.cols_list = input_dict["translate_cols_input"].split(";")
else:
err = "the parameter 'translate_cols_input' is not present, it is required for translate process"
self.ERROR.append(err)
if "delimiter" in input_dict:
self.delimiter = self.delimiter_validation(input_dict["delimiter"], raise_error=False)
if not hasattr(self,"delimiter"):
self.delimiter = "\t"
if "subscriptionKey" in input_dict:
self.subscriptionKey = input_dict["subscriptionKey"]
else:
err = "the parameter 'subscriptionKey' is not present, it is required for translate process"
self.ERROR.append(err)
if "tolang" in input_dict:
self.tolang = input_dict["tolang"]
else:
err = "the parameter 'tolang' is not present, it is required for translate process"
self.ERROR.append(err)
self.fromlang = "autodetect"
if "fromlang" in input_dict:
if input_dict["fromlang"]:
self.fromlang = input_dict["fromlang"]
if self.fromlang == "autodetect":
self.fromlang = None
if self.fromlang:
self.fromlang = input_dict["fromlang"]
self.url = self.get_constructed_url(self.tolang, self.fromlang)
self.create_id = False
if not "id_variable" in input_dict:
err = "the parameter 'id_variable' is not present, it is required for translate process"
self.ERROR.append(err)
if "id_variable" in input_dict:
if input_dict["id_variable"]:
self.id_variable = input_dict["id_variable"]
self.create_id = False
if "translated_suffix" in input_dict:
if input_dict["translated_suffix"]:
self.translated_suffix = input_dict["translated_suffix"]
if not hasattr(self, "translated_suffix"):
self.translated_suffix = "_Translated"
self.batch_translate_cols = []
self.dup_translate_cols = []
self.error_response = {"response" : ""}
self.translate_to_cols = []
for col in self.cols_list:
self.translate_to_cols.append("{0}{1}".format(col, self.translated_suffix))
self.source_translated_dfs = {}
self.to_translate_dfs = {}
self.translated_dfs = {}
for idx, col in enumerate(self.cols_list):
translate_col = self.translate_to_cols[idx]
cols = [self.id_variable, col, translate_col]
self.source_translated_dfs[col] =
|
pd.DataFrame(columns=cols)
|
pandas.DataFrame
|
import sys
import matplotlib.pyplot as plt
import pandas as pd
import seaborn
def print_as_comment(obj):
print("\n".join(f"# {line}" for line in str(obj).splitlines()))
if __name__ == "__main__":
sys.path.append("../..")
seaborn.set_style("whitegrid")
# ---
import pandas as pd
import epymetheus as ep
from epymetheus.benchmarks import dumb_strategy
# ---
my_strategy = ep.create_strategy(dumb_strategy, profit_take=20.0, stop_loss=-10.0)
# ---
from epymetheus.datasets import fetch_usstocks
universe = fetch_usstocks()
print(">>> universe.head()")
print_as_comment(universe.head())
print(">>> my_strategy.run(universe)")
my_strategy.run(universe)
# ---
df_history = my_strategy.history()
df_history.head()
print(">>> df_history.head()")
print_as_comment(df_history.head())
# ---
series_wealth = my_strategy.wealth()
print(">>> series_wealth.head()")
print_as_comment(series_wealth.head())
plt.figure(figsize=(16, 4))
plt.plot(series_wealth, linewidth=1)
plt.xlabel("date")
plt.ylabel("wealth [USD]")
plt.title("Wealth")
plt.savefig("wealth.png", bbox_inches="tight", pad_inches=0.1)
# ---
print(">>> my_strategy.score('final_wealth')")
print_as_comment(my_strategy.score("final_wealth"))
print(">>> my_strategy.score('max_drawdown')")
print_as_comment(my_strategy.score("max_drawdown"))
# my_strategy.score("sharpe_ratio")
# ---
drawdown = my_strategy.drawdown()
exposure = my_strategy.net_exposure()
plt.figure(figsize=(16, 4))
plt.plot(
|
pd.Series(drawdown, index=universe.index)
|
pandas.Series
|
import numpy as np
import pytest
from pandas import DataFrame, Series, concat, isna, notna
import pandas._testing as tm
import pandas.tseries.offsets as offsets
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs",
[
[np.mean, "mean", {}],
[np.nansum, "sum", {}],
[lambda x: np.isfinite(x).astype(float).sum(), "count", {}],
[np.median, "median", {}],
[np.min, "min", {}],
[np.max, "max", {}],
[lambda x: np.std(x, ddof=1), "std", {}],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}],
[lambda x: np.var(x, ddof=1), "var", {}],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}],
],
)
def test_series(series, compare_func, roll_func, kwargs):
result = getattr(series.rolling(50), roll_func)(**kwargs)
assert isinstance(result, Series)
tm.assert_almost_equal(result.iloc[-1], compare_func(series[-50:]))
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs",
[
[np.mean, "mean", {}],
[np.nansum, "sum", {}],
[lambda x: np.isfinite(x).astype(float).sum(), "count", {}],
[np.median, "median", {}],
[np.min, "min", {}],
[np.max, "max", {}],
[lambda x: np.std(x, ddof=1), "std", {}],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}],
[lambda x: np.var(x, ddof=1), "var", {}],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}],
],
)
def test_frame(raw, frame, compare_func, roll_func, kwargs):
result = getattr(frame.rolling(50), roll_func)(**kwargs)
assert isinstance(result, DataFrame)
tm.assert_series_equal(
result.iloc[-1, :],
frame.iloc[-50:, :].apply(compare_func, axis=0, raw=raw),
check_names=False,
)
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs, minp",
[
[np.mean, "mean", {}, 10],
[np.nansum, "sum", {}, 10],
[lambda x: np.isfinite(x).astype(float).sum(), "count", {}, 0],
[np.median, "median", {}, 10],
[np.min, "min", {}, 10],
[np.max, "max", {}, 10],
[lambda x: np.std(x, ddof=1), "std", {}, 10],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}, 10],
[lambda x: np.var(x, ddof=1), "var", {}, 10],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}, 10],
],
)
def test_time_rule_series(series, compare_func, roll_func, kwargs, minp):
win = 25
ser = series[::2].resample("B").mean()
series_result = getattr(ser.rolling(window=win, min_periods=minp), roll_func)(
**kwargs
)
last_date = series_result.index[-1]
prev_date = last_date - 24 * offsets.BDay()
trunc_series = series[::2].truncate(prev_date, last_date)
tm.assert_almost_equal(series_result[-1], compare_func(trunc_series))
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs, minp",
[
[np.mean, "mean", {}, 10],
[np.nansum, "sum", {}, 10],
[lambda x: np.isfinite(x).astype(float).sum(), "count", {}, 0],
[np.median, "median", {}, 10],
[np.min, "min", {}, 10],
[np.max, "max", {}, 10],
[lambda x: np.std(x, ddof=1), "std", {}, 10],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}, 10],
[lambda x: np.var(x, ddof=1), "var", {}, 10],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}, 10],
],
)
def test_time_rule_frame(raw, frame, compare_func, roll_func, kwargs, minp):
win = 25
frm = frame[::2].resample("B").mean()
frame_result = getattr(frm.rolling(window=win, min_periods=minp), roll_func)(
**kwargs
)
last_date = frame_result.index[-1]
prev_date = last_date - 24 * offsets.BDay()
trunc_frame = frame[::2].truncate(prev_date, last_date)
tm.assert_series_equal(
frame_result.xs(last_date),
trunc_frame.apply(compare_func, raw=raw),
check_names=False,
)
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs",
[
[np.mean, "mean", {}],
[np.nansum, "sum", {}],
[np.median, "median", {}],
[np.min, "min", {}],
[np.max, "max", {}],
[lambda x: np.std(x, ddof=1), "std", {}],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}],
[lambda x: np.var(x, ddof=1), "var", {}],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}],
],
)
def test_nans(compare_func, roll_func, kwargs):
obj = Series(np.random.randn(50))
obj[:10] = np.NaN
obj[-10:] = np.NaN
result = getattr(obj.rolling(50, min_periods=30), roll_func)(**kwargs)
tm.assert_almost_equal(result.iloc[-1], compare_func(obj[10:-10]))
# min_periods is working correctly
result = getattr(obj.rolling(20, min_periods=15), roll_func)(**kwargs)
assert isna(result.iloc[23])
assert not isna(result.iloc[24])
assert not isna(result.iloc[-6])
assert isna(result.iloc[-5])
obj2 = Series(np.random.randn(20))
result = getattr(obj2.rolling(10, min_periods=5), roll_func)(**kwargs)
assert
|
isna(result.iloc[3])
|
pandas.isna
|
import pandas as pd
import time
from preprocessing import dbpedia_utils as from_dbpedia
from preprocessing import wikidata_utils as from_wikidata
from caserec.utils.split_database import SplitDatabase
ml_small_path = "./datasets/ml-latest-small/interactions.csv"
original_ml_small_path = "./datasets/ml-latest-small/ratings.csv"
movies_ml_small_path = "./datasets/ml-latest-small/movies.csv"
link_ml_small_path = "./datasets/ml-latest-small/links.csv"
db_uri_ml_small_path = "./generated_files/dbpedia/ml-latest-small/uri_dbpedia_movielens_small.csv"
db_nan_ml_small_path = "./generated_files/dbpedia/ml-latest-small/nan_uri_dbpedia_movielens_small.csv"
db_final_ml_small_path = "./generated_files/dbpedia/ml-latest-small/final_uri_dbpedia_movielens_small.csv"
wikidata_props_ml_small = "./generated_files/wikidata/props_wikidata_movielens_small.csv"
dbpedia_props_ml_small = "./generated_files/dbpedia/ml-latest-small/props_dbpedia_movielens_small.csv"
def read_movie_info():
"""
Function that reads the name of the movies of the small movielens dataset
:return: pandas DataFrame with the movieId column as index and title as value
"""
return pd.read_csv(movies_ml_small_path, usecols=['movieId', 'title']).set_index(['movieId'])
def read_links_info():
"""
    Function that reads the IMDb ids of the movies of the small movielens dataset
:return: pandas DataFrame with the movieId column as index and imdbId as value
"""
return pd.read_csv(link_ml_small_path, usecols=['movieId', 'imdbId']).set_index(['movieId'])
def read_uri_info():
"""
Function that reads the name of the movies of the small movielens dataset with the uri from
the generate_movies_uri_dbpedia_dataset function
:return: pandas DataFrame with the movieId column as index and title and uri as value
"""
return pd.read_csv(db_uri_ml_small_path, ).set_index(['movieId'])
def read_nan_info():
"""
Function that reads the name of the movies of the small movielens dataset with the uri from
the generate_recovery_uri_dbpedia function
:return: pandas DataFrame with the movieId column as index and title as value
"""
return pd.read_csv(db_nan_ml_small_path).set_index(['movieId'])
def read_final_uri_dbpedia_dataset():
"""
Function that reads the name of the movies of the small movielens dataset
:return: pandas DataFrame with the movieId column as index and title as value
"""
return pd.read_csv(db_final_ml_small_path).set_index(['movieId'])
def read_user_item_interaction():
"""
Function that reads the user interactions with the movies of the small movielens dataset
:return: pandas DataFrame of the dataset
"""
return pd.read_csv(ml_small_path)
def user_artist_filter_interaction(n_inter: int, n_iter_flag=False):
"""
:param n_inter: minimum number of interactions for each user
:param n_iter_flag: flag to filter or not by number of interactions
:return: file
"""
interac = pd.read_csv(original_ml_small_path)
interac = interac.set_index('userId')
props = pd.read_csv(wikidata_props_ml_small)
filter_interactions = interac[interac['movieId'].isin(list(props['movieId'].unique()))]
implicit = pd.DataFrame()
if n_iter_flag:
for u in filter_interactions.index.unique():
u_set = filter_interactions.loc[u]
if len(u_set) >= n_inter:
implicit = pd.concat([implicit, u_set.reset_index()], ignore_index=True)
implicit.to_csv(ml_small_path, header=None, index=False)
return
filter_interactions.to_csv(ml_small_path, header=None, index=False)
def __get_movie_strings(full_name: str):
"""
    Function that, given a full title from the movielens dataset, first splits the string on " (" to
    drop the year, then extracts the primary name and any alternative (a.k.a. or parenthesised) names.
    Finally, trailing articles (pt, en, es, it, fr and de) are moved back to the beginning of each name.
    :param full_name: movie name on movielens dataset that follows the patterns "name (year)"; "name, The (year)";
    "name (a.k.a. second_name) (year)" and "name (second_name) (third_name) (year)" and combinations
    :return: a list with all possible movie names on dbpedia
"""
# remove year of string, if there is no year, then return the full name
try:
all_names = full_name.split(" (")
all_names = all_names[:-1]
format_names = [all_names[0]]
except IndexError:
return [full_name]
if len(all_names) > 1:
for i in range(1, len(all_names)):
# get names on a.k.a parenthesis, else get name between parenthesis
if all_names[i].find("a.k.a. ") != -1:
format_names.append(all_names[i].split("a.k.a. ")[1][:-1])
else:
format_names.append(all_names[i][:-1])
# place articles in front of strings
for i in range(0, len(format_names)):
fn = format_names[i]
has_coma = fn.split(", ")
if len(has_coma[-1]) <= 4:
fn = has_coma[-1] + ' ' + fn[:-5]
format_names[i] = fn
return format_names
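# Illustrative expected behaviour:
#   "Matrix, The (1999)"                                      -> ["The Matrix"]
#   "Crouching Tiger, Hidden Dragon (Wo hu cang long) (2000)" -> ["Crouching Tiger, Hidden Dragon", "Wo hu cang long"]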
def generate_movies_uri_dbpedia_dataset():
"""
Function that generates the dataset with movieId, title and uri from dbpedia by invoking, for every title of movie
the function get_movie_uri_from_dbpedia. Because the API can cause timeout, the DBPedia API is called 10 times until
it successfully works, if it does not, then the dataset is saved in disk with the current state of the dataset
:return: A DataFrame with movieId, title and uri from dbpedia
"""
movies = read_movie_info()
movies_uri = pd.DataFrame(index=movies.index, columns=['uri'])
print("Obtaining the URIs of the small MovieLens dataset")
for index, row in movies.iterrows():
names = __get_movie_strings(row[0])
for name in names:
uri_name = ""
n = 0
while True:
try:
uri_name = from_dbpedia.get_movie_uri_from_dbpedia(name)
except Exception as e:
n = n + 1
print("n:" + str(n) + " Exception: " + str(e))
if n == 10:
full_movies =
|
pd.concat([movies, movies_uri], axis=1)
|
pandas.concat
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 25 23:47:26 2017
@author: christopher.ottesen
"""
import pandas as pd
import fitbit_read as init
from datetime import date, timedelta
from datetime import datetime
import glob, os
import ast
import json
authd_client = init.authd_client
file_path_hr_readings = 'data/fitbit_heart_rate/'
def get_heart_rate(timestamp = "today"):
heartRates = authd_client.intraday_time_series("activities/heart", base_date=timestamp, detail_level='1sec', start_time=None, end_time=None)
heartRates = heartRates["activities-heart-intraday"]["dataset"]
heartRates = pd.DataFrame(heartRates)
return heartRates
def get_steps(timestamp = "today"):
steps = authd_client.intraday_time_series("activities/steps", base_date=timestamp, detail_level='1min')
steps = steps["activities-steps-intraday"]["dataset"]
steps = pd.DataFrame(steps)
return steps
def get_sleep(timestamp = "today"):
sleep = authd_client.sleep(date=timestamp)
print(sleep)
sleep = pd.DataFrame(sleep['sleep'])
return sleep
def steps_to_csv(df,name):
df.to_csv(file_path_hr_readings+name+".csv", index=False)
def make_Date_list(base_date = "2017-07-12"):
# the day I got the fitbit
today = datetime.now().strftime('%Y-%m-%d')
d1 = date(*map(int, base_date.split("-"))) # start date
d2 = date(*map(int, today.split("-"))) # end date
d1 = (d1 + timedelta(days=-1)) # re getting the latest day in case we fetched before the day had ended
delta = d2 - d1 # timedelta
date_range = []
for i in range(delta.days + 1):
t = (d1 + timedelta(days=i)).strftime('%Y-%m-%d')
date_range.append(t)
return date_range
def find_newest_date():
allFiles = glob.glob(file_path_hr_readings + "/*.csv")
date_list = []
for cur_date in allFiles:
print(cur_date)
try:
cur_date = cur_date.split("/")[2].split(".c")[0]
except Exception as e:
print(e)
cur_date = cur_date.split("\\")[1].split(".c")[0]
print(cur_date)
cur_date = date(*map(int, cur_date.split("-"))) # start date
date_list.append(cur_date)
    return max(date_list) # returns the newest date in the list
def get_date_range():
try:
start_date_from_newest = (find_newest_date()+timedelta(days=1)).strftime('%Y-%m-%d') # we need to add one day to not duplicate
except:
start_date_from_newest = "2017-07-12"
return make_Date_list(start_date_from_newest),start_date_from_newest
from time import sleep
def get_fitbit_data(date_range, func_type = "steps"):
if func_type == "steps":
getter = get_steps
if func_type == "heart":
getter = get_heart_rate
if func_type == 'sleep':
getter = get_sleep
for cur_date in date_range:
print("Getting date " + cur_date)
try:
temp_df = getter(str(cur_date))
temp_df["date"] = str(cur_date)
steps_to_csv(temp_df,str(cur_date))
except Exception as e:
print("Failed getting data for " + cur_date )
print(e)
print("sleeping for a few seconds")
sleep(5)
def import_fitbit_hr(file_path_hr_readings):
allFiles = glob.glob(file_path_hr_readings + "/*.csv")
list_ = []
for file_ in allFiles:
df = pd.read_csv(file_,index_col=None, header=0)
list_.append(df)
return list_
def join_all_csv(file_path='data/fitbit_heart_rate/'):
all_files = glob.glob(os.path.join(file_path, "*.csv"))
df_from_each_file = (
|
pd.read_csv(f)
|
pandas.read_csv
|
#!/usr/bin/python3
import argparse
import os
import sys
import webbrowser
from datetime import timedelta
import numpy as np
import pandas as pd
import pandas_datareader.data as web
import requests_cache
from plotly import graph_objs as go
from plotly.subplots import make_subplots
from tqdm import tqdm
from finance_benchmark import config
def get_asset_data(assets_ticker, startdate):
"""Retrieve assets data from yahoo finance
Args:
assets_ticker ([str]): list of assets to download
startdate (str): start date
"""
df = pd.DataFrame()
for ticker in tqdm(assets_ticker): # progress bar when downloading data
ticker = ticker.strip()
# cache data to avoid downloading them again when filtering
session = requests_cache.CachedSession(
cache_name="../cache", backend="sqlite", expire_after=timedelta(days=1)
)
try:
# Get daily closing price
data = web.DataReader(
ticker, data_source="yahoo", start=startdate, session=session
)["Close"]
except Exception:
print("Error fetching : " + ticker)
continue
data =
|
pd.DataFrame({"Date": data.index, ticker: data.values})
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
from os import path
from PIL import Image
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import matplotlib.pyplot as plt
def get_youtube_search(query,order,regionCode,channel_id = ''):
import os
import google_auth_oauthlib.flow
import googleapiclient.discovery
import googleapiclient.errors
#os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1"
scopes = ["https://www.googleapis.com/auth/youtube.readonly"]
api_service_name = "youtube"
api_version = "v3"
client_secrets_file = "client_secret.json"
# Get credentials and create an API client
flow = google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file(
client_secrets_file, scopes)
credentials = flow.run_console()
youtube = googleapiclient.discovery.build(
api_service_name, api_version, credentials=credentials)
if channel_id == '':
request = youtube.search().list(
part="snippet",
maxResults=50,
order=order,
q=query,
regionCode=regionCode
)
else:
request = youtube.search().list(
part="snippet",
maxResults=50,
channelId=channel_id,
order=order,
q=query,
regionCode=regionCode
)
response = request.execute()
return response
#-----------Россия 24-------------
search_result_1 = get_youtube_search(query = "Россия 24", order = "viewCount", regionCode = "RU")
search_result_1_df = pd.DataFrame(search_result_1['items'])
search_result_2 = get_youtube_search(query = "", order = "viewCount", regionCode = "RU",channel_id = 'UC_IEcnNeHc_bwd92Ber-lew')
search_result_2_df = pd.DataFrame(search_result_2['items'])
search_result_3 = get_youtube_search(query = "репортаж", order = "viewCount", regionCode = "RU",channel_id = 'UC_IEcnNeHc_bwd92Ber-lew')
search_result_3_df = pd.DataFrame(search_result_3['items'])
search_result_4 = get_youtube_search(query = "новости", order = "viewCount", regionCode = "RU",channel_id = 'UC_IEcnNeHc_bwd92Ber-lew')
search_result_4_df = pd.DataFrame(search_result_4['items'])
data_for_wordcloud = []
for i in range(2,search_result_1_df.shape[0]):
title = search_result_1_df.snippet[i]['title']
title = title.split(' - Россия 24')[0]
data_for_wordcloud.append(title)
for i in range(2,search_result_2_df.shape[0]):
title = search_result_2_df.snippet[i]['title']
title = title.split(' - Россия 24')[0]
data_for_wordcloud.append(title)
for i in range(1,search_result_3_df.shape[0]):
title = search_result_3_df.snippet[i]['title']
title = title.split(' Специальный репортаж')[0]
data_for_wordcloud.append(title)
for i in range(1,search_result_4_df.shape[0]):
title = search_result_4_df.snippet[i]['title']
title = title.split(' - Россия 24')[0]
data_for_wordcloud.append(title)
data_for_wordcloud_set = set(data_for_wordcloud)
data_for_wordcloud_preprocessed = []
for title in data_for_wordcloud_set:
title = title.lower()
title = title.replace('"','')
title = title.replace(' - россия 24','')
title = title.replace('россия 24','')
title = title.replace('специальный репортаж','')
title = title.replace('документальный фильм','')
title = title.replace('александра сладкова','')
title = title.replace('москва 24','')
title = title.replace('москва. кремль. кутин','')
title = title.replace('// . от 07.03.2021','')
title = title.replace('авторская программа <NAME>','')
title = title.replace('марата кримчеева','')
title = title.replace('фильм анны афанасьевой','')
title = title.replace('последние','')
title = title.replace('новости','')
title = title.replace('репортаж','')
title = title.replace('интервью','')
title = title.replace('программа','')
title = title.replace('эксклюзивный','')
title = title.replace('дежурная часть','')
title = title.replace('вести недели с дмитрием киселевым','')
title = title.replace(' на ',' ')
title = title.replace(' не ',' ')
title = title.replace(' из ',' ')
title = title.replace(' за ',' ')
title = title.replace(' для ',' ')
title = title.replace(' по ',' ')
title = title.replace(' от ',' ')
title = title.replace('путина','путин')
title = title.replace('россии','россия')
title = title.replace('китая','китай')
title = title.replace('донбассе','донбасс')
title = title.replace('карабахе','карабах')
data_for_wordcloud_preprocessed.append(title)
data_for_wordcloud_preprocessed_string_r24 = ''
for title in data_for_wordcloud_preprocessed:
data_for_wordcloud_preprocessed_string_r24 = data_for_wordcloud_preprocessed_string_r24 + title + ' '
wordcloud = WordCloud(width = 1000,
height = 500,
max_font_size=400,
max_words=200,
color_func=lambda *args, **kwargs: "white",
relative_scaling=0.5,
background_color="red").generate(data_for_wordcloud_preprocessed_string_r24)
wordcloud.to_file("Russia24_WordCloud.png")
#-----------RT на русском-------------
search_result_1 = get_youtube_search(query = "RT на русском", order = "viewCount", regionCode = "RU")
search_result_1_df = pd.DataFrame.from_dict(search_result_1['items'])
search_result_2 = get_youtube_search(query = "", order = "viewCount", regionCode = "RU", channel_id = 'UCFU30dGHNhZ-hkh0R10LhLw')
search_result_2_df = pd.DataFrame.from_dict(search_result_2['items'])
search_result_3 = get_youtube_search(query = "репортаж", order = "viewCount", regionCode = "RU", channel_id = 'UCFU30dGHNhZ-hkh0R10LhLw')
search_result_3_df = pd.DataFrame.from_dict(search_result_3['items'])
search_result_4 = get_youtube_search(query = "новости", order = "viewCount", regionCode = "RU", channel_id = 'UCFU30dGHNhZ-hkh0R10LhLw')
search_result_4_df = pd.DataFrame.from_dict(search_result_4['items'])
data_for_wordcloud = []
for i in range(1,search_result_1_df.shape[0]):
title = search_result_1_df.snippet[i]['title']
title = title.split(' / ')[0]
data_for_wordcloud.append(title)
for i in range(1,search_result_2_df.shape[0]):
title = search_result_2_df.snippet[i]['title']
title = title.split(' / ')[0]
data_for_wordcloud.append(title)
for i in range(1,search_result_3_df.shape[0]):
title = search_result_3_df.snippet[i]['title']
title = title.split(' / ')[0]
data_for_wordcloud.append(title)
for i in range(1,search_result_4_df.shape[0]):
title = search_result_4_df.snippet[i]['title']
title = title.split(' / ')[0]
data_for_wordcloud.append(title)
data_for_wordcloud_set = set(data_for_wordcloud)
data_for_wordcloud_preprocessed = []
for title in data_for_wordcloud_set:
title = title.lower()
title = title.replace('"','')
title = title.replace('специальный репортаж','')
title = title.replace('документальный фильм','')
title = title.replace('последние','')
title = title.replace('новости','')
title = title.replace('новости','')
title = title.replace('репортаж','')
title = title.replace('репортаж','')
title = title.replace(' на ',' ')
title = title.replace(' не ',' ')
title = title.replace(' из ',' ')
title = title.replace(' за ',' ')
title = title.replace(' для ',' ')
title = title.replace(' по ',' ')
title = title.replace(' от ',' ')
title = title.replace('россии','россия')
title = title.replace('путина','путин')
title = title.replace('путин:','путин')
title = title.replace('владимира','владимир')
title = title.replace('коронавируса','коронавирус')
title = title.replace('украинские','украина')
title = title.replace('украинской','украина')
title = title.replace('украине','украина')
title = title.replace('украину','украина')
title = title.replace('украины','украина')
title = title.replace(': ',' ')
data_for_wordcloud_preprocessed.append(title)
data_for_wordcloud_preprocessed_string_rt = ''
for title in data_for_wordcloud_preprocessed:
data_for_wordcloud_preprocessed_string_rt = data_for_wordcloud_preprocessed_string_rt + title + ' '
wordcloud = WordCloud(width = 1000,
height = 500,
max_font_size=400,
max_words=200,
color_func=lambda *args, **kwargs: "white",
relative_scaling=0.5,
background_color="green").generate(data_for_wordcloud_preprocessed_string_rt)
wordcloud.to_file("RT_WordCloud.png")
#-----------<NAME>-------------
search_result_1 = get_youtube_search(query = "<NAME>", order = "viewCount", regionCode = "RU")
search_result_1_df = pd.DataFrame.from_dict(search_result_1['items'])
search_result_2 = get_youtube_search(query = "", order = "viewCount", regionCode = "RU", channel_id = 'UCdubelOloxR3wzwJG9x8YqQ')
search_result_2_df = pd.DataFrame.from_dict(search_result_2['items'])
search_result_3 = get_youtube_search(query = "репортаж", order = "viewCount", regionCode = "RU", channel_id = 'UCdubelOloxR3wzwJG9x8YqQ')
search_result_3_df = pd.DataFrame.from_dict(search_result_3['items'])
search_result_4 = get_youtube_search(query = "новости", order = "viewCount", regionCode = "RU", channel_id = 'UCdubelOloxR3wzwJG9x8YqQ')
search_result_4_df = pd.DataFrame.from_dict(search_result_4['items'])
data_for_wordcloud = []
for i in range(1,search_result_1_df.shape[0]):
title = search_result_1_df.snippet[i]['title']
title = title.split(' /')[0]
data_for_wordcloud.append(title)
for i in range(1,search_result_2_df.shape[0]):
title = search_result_2_df.snippet[i]['title']
title = title.split(' /')[0]
data_for_wordcloud.append(title)
for i in range(1,search_result_3_df.shape[0]):
title = search_result_3_df.snippet[i]['title']
title = title.split(' /')[0]
data_for_wordcloud.append(title)
for i in range(1,search_result_4_df.shape[0]):
title = search_result_4_df.snippet[i]['title']
title = title.split(' /')[0]
data_for_wordcloud.append(title)
data_for_wordcloud_set = set(data_for_wordcloud)
data_for_wordcloud_preprocessed = []
for title in data_for_wordcloud_set:
title = title.lower()
title = title.replace('"','')
title = title.replace('специальный репортаж','')
title = title.replace('документальный фильм','')
title = title.replace('последние','')
    title = title.replace('новости','')
    title = title.replace('репортаж','')
title = title.replace(' на ',' ')
title = title.replace(' не ',' ')
title = title.replace(' из ',' ')
title = title.replace(' за ',' ')
title = title.replace(' для ',' ')
title = title.replace(' по ',' ')
title = title.replace(' от ',' ')
title = title.replace('россии','россия')
title = title.replace('путина','путин')
title = title.replace('навального','навальный')
title = title.replace('владимира','владимир')
title = title.replace('коронавируса','коронавирус')
title = title.replace('беларуси','беларусь')
title = title.replace(': ',' ')
data_for_wordcloud_preprocessed.append(title)
data_for_wordcloud_preprocessed_string_rain = ''
for title in data_for_wordcloud_preprocessed:
data_for_wordcloud_preprocessed_string_rain = data_for_wordcloud_preprocessed_string_rain + title + ' '
wordcloud = WordCloud(width = 1000,
height = 500,
max_font_size=400,
max_words=200,
color_func=lambda *args, **kwargs: "white",
relative_scaling=0.5,
background_color="purple").generate(data_for_wordcloud_preprocessed_string_rain)
wordcloud.to_file("TvRain_WordCloud.png")
#-----------DW на русском-------------
search_result_1 = get_youtube_search(query = "DW на русском", order = "viewCount", regionCode = "RU")
search_result_1_df = pd.DataFrame.from_dict(search_result_1['items'])
search_result_2 = get_youtube_search(query = "", order = "viewCount", regionCode = "RU", channel_id = 'UCXoAjrdHFa2hEL3Ug8REC1w')
search_result_2_df = pd.DataFrame.from_dict(search_result_2['items'])
search_result_3 = get_youtube_search(query = "репортаж", order = "viewCount", regionCode = "RU", channel_id = 'UCXoAjrdHFa2hEL3Ug8REC1w')
search_result_3_df = pd.DataFrame.from_dict(search_result_3['items'])
search_result_4 = get_youtube_search(query = "новости", order = "viewCount", regionCode = "RU", channel_id = 'UCXoAjrdHFa2hEL3Ug8REC1w')
search_result_4_df = pd.DataFrame.from_dict(search_result_4['items'])
data_for_wordcloud = []
for i in range(1,search_result_1_df.shape[0]):
title = search_result_1_df.snippet[i]['title']
title = title.split('DW Новости')[0]
data_for_wordcloud.append(title)
for i in range(1,search_result_2_df.shape[0]):
title = search_result_2_df.snippet[i]['title']
title = title.split('DW Новости')[0]
data_for_wordcloud.append(title)
for i in range(1,search_result_3_df.shape[0]):
title = search_result_3_df.snippet[i]['title']
title = title.split('DW Новости')[0]
data_for_wordcloud.append(title)
for i in range(1,search_result_4_df.shape[0]):
title = search_result_4_df.snippet[i]['title']
title = title.split('DW Новости')[0]
data_for_wordcloud.append(title)
data_for_wordcloud_set = set(data_for_wordcloud)
data_for_wordcloud_preprocessed = []
for title in data_for_wordcloud_set:
title = title.lower()
title = title.replace('"','')
title = title.replace('специальный репортаж','')
title = title.replace('документальный фильм','')
title = title.replace('последние','')
    title = title.replace('новости','')
    title = title.replace('репортаж','')
title = title.replace('dw','')
title = title.replace(' на ',' ')
title = title.replace(' не ',' ')
title = title.replace(' из ',' ')
title = title.replace(' за ',' ')
title = title.replace(' для ',' ')
title = title.replace(' по ',' ')
title = title.replace(' от ',' ')
title = title.replace('россии','россия')
title = title.replace('путина','путин')
title = title.replace('путину','путин')
title = title.replace('москве','москва')
title = title.replace('москву','москва')
title = title.replace('навального','навальный')
title = title.replace('навальным','навальный')
title = title.replace('владимира','владимир')
title = title.replace('коронавируса','коронавирус')
title = title.replace('беларуси','беларусь')
title = title.replace('германии','германия')
title = title.replace('германию','германия')
title = title.replace('кремля','кремль')
title = title.replace('санкций','санкции')
title = title.replace(': ',' ')
data_for_wordcloud_preprocessed.append(title)
data_for_wordcloud_preprocessed_string_dw = ''
for title in data_for_wordcloud_preprocessed:
data_for_wordcloud_preprocessed_string_dw = data_for_wordcloud_preprocessed_string_dw + title + ' '
wordcloud = WordCloud(width = 1000,
height = 500,
max_font_size=400,
max_words=200,
color_func=lambda *args, **kwargs: "white",
relative_scaling=0.5,
background_color="blue").generate(data_for_wordcloud_preprocessed_string_dw)
wordcloud.to_file("DW_WordCloud.png")
#-----------Настоящее Время-------------
search_result_1 = get_youtube_search(query = "Настоящее Время", order = "viewCount", regionCode = "RU", channel_id = 'UCBG57608Hukev3d0d-gvLhQ')
search_result_1_df = pd.DataFrame.from_dict(search_result_1['items'])
search_result_2 = get_youtube_search(query = "", order = "viewCount", regionCode = "RU", channel_id = 'UCBG57608Hukev3d0d-gvLhQ')
search_result_2_df = pd.DataFrame.from_dict(search_result_2['items'])
search_result_3 = get_youtube_search(query = "репортаж", order = "viewCount", regionCode = "RU", channel_id = 'UCBG57608Hukev3d0d-gvLhQ')
search_result_3_df = pd.DataFrame.from_dict(search_result_3['items'])
search_result_4 = get_youtube_search(query = "новости", order = "viewCount", regionCode = "RU", channel_id = 'UCBG57608Hukev3d0d-gvLhQ')
search_result_4_df = pd.DataFrame.from_dict(search_result_4['items'])
data_for_wordcloud = []
for i in range(1,search_result_1_df.shape[0]):
title = search_result_1_df.snippet[i]['title']
title = title.split('|')[0]
data_for_wordcloud.append(title)
for i in range(1,search_result_2_df.shape[0]):
title = search_result_2_df.snippet[i]['title']
title = title.split('|')[0]
data_for_wordcloud.append(title)
for i in range(1,search_result_3_df.shape[0]):
title = search_result_3_df.snippet[i]['title']
title = title.split('|')[0]
data_for_wordcloud.append(title)
for i in range(1,search_result_4_df.shape[0]):
title = search_result_4_df.snippet[i]['title']
title = title.split('|')[0]
data_for_wordcloud.append(title)
data_for_wordcloud_set = set(data_for_wordcloud)
data_for_wordcloud_preprocessed = []
for title in data_for_wordcloud_set:
title = title.lower()
title = title.replace('"','')
title = title.replace('специальный репортаж','')
title = title.replace('документальный фильм','')
title = title.replace('последние','')
    title = title.replace('новости','')
    title = title.replace('репортаж','')
title = title.replace('dw','')
title = title.replace(' на ',' ')
title = title.replace(' не ',' ')
title = title.replace(' из ',' ')
title = title.replace(' за ',' ')
title = title.replace(' для ',' ')
title = title.replace(' по ',' ')
title = title.replace(' от ',' ')
title = title.replace('россии','россия')
title = title.replace('путина','путин')
title = title.replace('путину','путин')
title = title.replace('москве','москва')
title = title.replace('москву','москва')
title = title.replace('навального','навальный')
title = title.replace('навальным','навальный')
title = title.replace('владимира','владимир')
title = title.replace('коронавируса','коронавирус')
title = title.replace('беларуси','беларусь')
title = title.replace('германии','германия')
title = title.replace('германию','германия')
title = title.replace('кремля','кремль')
title = title.replace('санкций','санкции')
title = title.replace('таджикистана','таджикистан')
title = title.replace('таджикистане','таджикистан')
title = title.replace('казахстана','казахстан')
title = title.replace('казахстане','казахстан')
title = title.replace('кыргызстана','кыргызстан')
title = title.replace('кыргызстане','кыргызстан')
title = title.replace('хабаровского','хабаровск')
title = title.replace('хабаровске','хабаровск')
title = title.replace(': ',' ')
title = title.replace('.',' ')
data_for_wordcloud_preprocessed.append(title)
data_for_wordcloud_preprocessed_string_nt = ''
for title in data_for_wordcloud_preprocessed:
data_for_wordcloud_preprocessed_string_nt = data_for_wordcloud_preprocessed_string_nt + title + ' '
wordcloud = WordCloud(width = 1000,
height = 500,
max_font_size=400,
max_words=200,
color_func=lambda *args, **kwargs: "darkblue",
relative_scaling=0.5,
background_color="white").generate(data_for_wordcloud_preprocessed_string_nt)
wordcloud.to_file("NT_WordCloud.png")
#-----------Новости на Первом канале-------------
search_result_1 = get_youtube_search(query = "", order = "viewCount", regionCode = "RU", channel_id = 'UCKonxxVHzDl55V7a9n_Nlgg')
search_result_1_df = pd.DataFrame.from_dict(search_result_1['items'])
search_result_2 = get_youtube_search(query = "репортаж", order = "viewCount", regionCode = "RU", channel_id = 'UCKonxxVHzDl55V7a9n_Nlgg')
search_result_2_df =
|
pd.DataFrame.from_dict(search_result_2['items'])
|
pandas.DataFrame.from_dict
|
""" test the scalar Timedelta """
from datetime import timedelta
import numpy as np
import pytest
from pandas._libs import lib
from pandas._libs.tslibs import (
NaT,
iNaT,
)
import pandas as pd
from pandas import (
Timedelta,
TimedeltaIndex,
offsets,
to_timedelta,
)
import pandas._testing as tm
class TestTimedeltaUnaryOps:
def test_unary_ops(self):
td = Timedelta(10, unit="d")
# __neg__, __pos__
assert -td == Timedelta(-10, unit="d")
assert -td == Timedelta("-10d")
assert +td == Timedelta(10, unit="d")
# __abs__, __abs__(__neg__)
assert abs(td) == td
assert abs(-td) == td
assert abs(-td) == Timedelta("10d")
class TestTimedeltas:
@pytest.mark.parametrize(
"unit, value, expected",
[
("us", 9.999, 9999),
("ms", 9.999999, 9999999),
("s", 9.999999999, 9999999999),
],
)
def test_rounding_on_int_unit_construction(self, unit, value, expected):
# GH 12690
result = Timedelta(value, unit=unit)
assert result.value == expected
result = Timedelta(str(value) + unit)
assert result.value == expected
def test_total_seconds_scalar(self):
# see gh-10939
rng = Timedelta("1 days, 10:11:12.100123456")
expt = 1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456.0 / 1e9
tm.assert_almost_equal(rng.total_seconds(), expt)
rng = Timedelta(np.nan)
assert np.isnan(rng.total_seconds())
def test_conversion(self):
for td in [Timedelta(10, unit="d"), Timedelta("1 days, 10:11:12.012345")]:
pydt = td.to_pytimedelta()
assert td == Timedelta(pydt)
assert td == pydt
assert isinstance(pydt, timedelta) and not isinstance(pydt, Timedelta)
assert td == np.timedelta64(td.value, "ns")
td64 = td.to_timedelta64()
assert td64 == np.timedelta64(td.value, "ns")
assert td == td64
assert isinstance(td64, np.timedelta64)
# this is NOT equal and cannot be roundtripped (because of the nanos)
td = Timedelta("1 days, 10:11:12.012345678")
assert td != td.to_pytimedelta()
def test_fields(self):
def check(value):
# that we are int
assert isinstance(value, int)
# compat to datetime.timedelta
rng = to_timedelta("1 days, 10:11:12")
assert rng.days == 1
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 0
assert rng.nanoseconds == 0
msg = "'Timedelta' object has no attribute '{}'"
with pytest.raises(AttributeError, match=msg.format("hours")):
rng.hours
with pytest.raises(AttributeError, match=msg.format("minutes")):
rng.minutes
with pytest.raises(AttributeError, match=msg.format("milliseconds")):
rng.milliseconds
# GH 10050
check(rng.days)
check(rng.seconds)
check(rng.microseconds)
check(rng.nanoseconds)
td = Timedelta("-1 days, 10:11:12")
assert abs(td) == Timedelta("13:48:48")
assert str(td) == "-1 days +10:11:12"
assert -td == Timedelta("0 days 13:48:48")
assert -Timedelta("-1 days, 10:11:12").value == 49728000000000
assert Timedelta("-1 days, 10:11:12").value == -49728000000000
rng = to_timedelta("-1 days, 10:11:12.100123456")
assert rng.days == -1
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 100 * 1000 + 123
assert rng.nanoseconds == 456
msg = "'Timedelta' object has no attribute '{}'"
with pytest.raises(AttributeError, match=msg.format("hours")):
rng.hours
with pytest.raises(AttributeError, match=msg.format("minutes")):
rng.minutes
with pytest.raises(AttributeError, match=msg.format("milliseconds")):
rng.milliseconds
# components
tup = to_timedelta(-1, "us").components
assert tup.days == -1
assert tup.hours == 23
assert tup.minutes == 59
assert tup.seconds == 59
assert tup.milliseconds == 999
assert tup.microseconds == 999
assert tup.nanoseconds == 0
# GH 10050
check(tup.days)
check(tup.hours)
check(tup.minutes)
check(tup.seconds)
check(tup.milliseconds)
check(tup.microseconds)
check(tup.nanoseconds)
tup = Timedelta("-1 days 1 us").components
assert tup.days == -2
assert tup.hours == 23
assert tup.minutes == 59
assert tup.seconds == 59
assert tup.milliseconds == 999
assert tup.microseconds == 999
assert tup.nanoseconds == 0
def test_iso_conversion(self):
# GH #21877
expected = Timedelta(1, unit="s")
assert to_timedelta("P0DT0H0M1S") == expected
def test_nat_converters(self):
result = to_timedelta("nat").to_numpy()
assert result.dtype.kind == "M"
assert result.astype("int64") == iNaT
result = to_timedelta("nan").to_numpy()
assert result.dtype.kind == "M"
assert result.astype("int64") == iNaT
@pytest.mark.parametrize(
"unit, np_unit",
[(value, "W") for value in ["W", "w"]]
+ [(value, "D") for value in ["D", "d", "days", "day", "Days", "Day"]]
+ [
(value, "m")
for value in [
"m",
"minute",
"min",
"minutes",
"t",
"Minute",
"Min",
"Minutes",
"T",
]
]
+ [
(value, "s")
for value in [
"s",
"seconds",
"sec",
"second",
"S",
"Seconds",
"Sec",
"Second",
]
]
+ [
(value, "ms")
for value in [
"ms",
"milliseconds",
"millisecond",
"milli",
"millis",
"l",
"MS",
"Milliseconds",
"Millisecond",
"Milli",
"Millis",
"L",
]
]
+ [
(value, "us")
for value in [
"us",
"microseconds",
"microsecond",
"micro",
"micros",
"u",
"US",
"Microseconds",
"Microsecond",
"Micro",
"Micros",
"U",
]
]
+ [
(value, "ns")
for value in [
"ns",
"nanoseconds",
"nanosecond",
"nano",
"nanos",
"n",
"NS",
"Nanoseconds",
"Nanosecond",
"Nano",
"Nanos",
"N",
]
],
)
@pytest.mark.parametrize("wrapper", [np.array, list, pd.Index])
def test_unit_parser(self, unit, np_unit, wrapper):
# validate all units, GH 6855, GH 21762
# array-likes
expected = TimedeltaIndex(
[np.timedelta64(i, np_unit) for i in np.arange(5).tolist()]
)
result = to_timedelta(wrapper(range(5)), unit=unit)
tm.assert_index_equal(result, expected)
result = TimedeltaIndex(wrapper(range(5)), unit=unit)
tm.assert_index_equal(result, expected)
str_repr = [f"{x}{unit}" for x in np.arange(5)]
result = to_timedelta(wrapper(str_repr))
tm.assert_index_equal(result, expected)
result = to_timedelta(wrapper(str_repr))
tm.assert_index_equal(result, expected)
# scalar
expected = Timedelta(np.timedelta64(2, np_unit).astype("timedelta64[ns]"))
result = to_timedelta(2, unit=unit)
assert result == expected
result = Timedelta(2, unit=unit)
assert result == expected
result = to_timedelta(f"2{unit}")
assert result == expected
result = Timedelta(f"2{unit}")
assert result == expected
@pytest.mark.parametrize("unit", ["Y", "y", "M"])
def test_unit_m_y_raises(self, unit):
msg = "Units 'M', 'Y', and 'y' are no longer supported"
with pytest.raises(ValueError, match=msg):
Timedelta(10, unit)
with pytest.raises(ValueError, match=msg):
to_timedelta(10, unit)
with pytest.raises(ValueError, match=msg):
to_timedelta([1, 2], unit)
def test_numeric_conversions(self):
assert Timedelta(0) == np.timedelta64(0, "ns")
assert Timedelta(10) == np.timedelta64(10, "ns")
assert Timedelta(10, unit="ns") == np.timedelta64(10, "ns")
assert Timedelta(10, unit="us") == np.timedelta64(10, "us")
assert Timedelta(10, unit="ms") == np.timedelta64(10, "ms")
assert Timedelta(10, unit="s") == np.timedelta64(10, "s")
assert Timedelta(10, unit="d") == np.timedelta64(10, "D")
def test_timedelta_conversions(self):
assert Timedelta(timedelta(seconds=1)) == np.timedelta64(1, "s").astype(
"m8[ns]"
)
assert Timedelta(timedelta(microseconds=1)) == np.timedelta64(1, "us").astype(
"m8[ns]"
)
assert Timedelta(timedelta(days=1)) == np.timedelta64(1, "D").astype("m8[ns]")
def test_to_numpy_alias(self):
# GH 24653: alias .to_numpy() for scalars
td = Timedelta("10m7s")
assert td.to_timedelta64() == td.to_numpy()
@pytest.mark.parametrize(
"freq,s1,s2",
[
# This first case has s1, s2 being the same as t1,t2 below
(
"N",
Timedelta("1 days 02:34:56.789123456"),
Timedelta("-1 days 02:34:56.789123456"),
),
(
"U",
Timedelta("1 days 02:34:56.789123000"),
Timedelta("-1 days 02:34:56.789123000"),
),
(
"L",
Timedelta("1 days 02:34:56.789000000"),
Timedelta("-1 days 02:34:56.789000000"),
),
("S", Timedelta("1 days 02:34:57"), Timedelta("-1 days 02:34:57")),
("2S", Timedelta("1 days 02:34:56"), Timedelta("-1 days 02:34:56")),
("5S", Timedelta("1 days 02:34:55"), Timedelta("-1 days 02:34:55")),
("T", Timedelta("1 days 02:35:00"), Timedelta("-1 days 02:35:00")),
("12T", Timedelta("1 days 02:36:00"), Timedelta("-1 days 02:36:00")),
("H", Timedelta("1 days 03:00:00"), Timedelta("-1 days 03:00:00")),
("d", Timedelta("1 days"), Timedelta("-1 days")),
],
)
def test_round(self, freq, s1, s2):
t1 = Timedelta("1 days 02:34:56.789123456")
t2 = Timedelta("-1 days 02:34:56.789123456")
r1 = t1.round(freq)
assert r1 == s1
r2 = t2.round(freq)
assert r2 == s2
def test_round_invalid(self):
t1 = Timedelta("1 days 02:34:56.789123456")
for freq, msg in [
("Y", "<YearEnd: month=12> is a non-fixed frequency"),
("M", "<MonthEnd> is a non-fixed frequency"),
("foobar", "Invalid frequency: foobar"),
]:
with pytest.raises(ValueError, match=msg):
t1.round(freq)
def test_round_implementation_bounds(self):
# See also: analogous test for Timestamp
# GH#38964
result = Timedelta.min.ceil("s")
expected = Timedelta.min + Timedelta(seconds=1) - Timedelta(145224193)
assert result == expected
result = Timedelta.max.floor("s")
expected = Timedelta.max - Timedelta(854775807)
assert result == expected
with pytest.raises(OverflowError, match="value too large"):
Timedelta.min.floor("s")
# the second message here shows up in windows builds
msg = "|".join(
["Python int too large to convert to C long", "int too big to convert"]
)
with pytest.raises(OverflowError, match=msg):
Timedelta.max.ceil("s")
@pytest.mark.parametrize("n", range(100))
@pytest.mark.parametrize(
"method", [Timedelta.round, Timedelta.floor, Timedelta.ceil]
)
def test_round_sanity(self, method, n, request):
val = np.random.randint(iNaT + 1, lib.i8max, dtype=np.int64)
td = Timedelta(val)
assert method(td, "ns") == td
res = method(td, "us")
nanos = 1000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "ms")
nanos = 1_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "s")
nanos = 1_000_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "min")
nanos = 60 * 1_000_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "h")
nanos = 60 * 60 * 1_000_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "D")
nanos = 24 * 60 * 60 * 1_000_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
def test_contains(self):
# Checking for any NaT-like objects
# GH 13603
td = to_timedelta(range(5), unit="d") + offsets.Hour(1)
for v in [NaT, None, float("nan"), np.nan]:
assert not (v in td)
td = to_timedelta([NaT])
for v in [NaT, None, float("nan"), np.nan]:
assert v in td
def test_identity(self):
td = Timedelta(10, unit="d")
assert isinstance(td, Timedelta)
assert isinstance(td, timedelta)
def test_short_format_converters(self):
def conv(v):
return v.astype("m8[ns]")
assert Timedelta("10") == np.timedelta64(10, "ns")
assert Timedelta("10ns") == np.timedelta64(10, "ns")
assert Timedelta("100") == np.timedelta64(100, "ns")
assert Timedelta("100ns") == np.timedelta64(100, "ns")
assert Timedelta("1000") == np.timedelta64(1000, "ns")
assert Timedelta("1000ns") == np.timedelta64(1000, "ns")
assert Timedelta("1000NS") == np.timedelta64(1000, "ns")
assert Timedelta("10us") == np.timedelta64(10000, "ns")
assert Timedelta("100us") == np.timedelta64(100000, "ns")
assert Timedelta("1000us") == np.timedelta64(1000000, "ns")
assert Timedelta("1000Us") == np.timedelta64(1000000, "ns")
assert Timedelta("1000uS") == np.timedelta64(1000000, "ns")
assert Timedelta("1ms") == np.timedelta64(1000000, "ns")
assert Timedelta("10ms") == np.timedelta64(10000000, "ns")
assert Timedelta("100ms") == np.timedelta64(100000000, "ns")
assert Timedelta("1000ms") == np.timedelta64(1000000000, "ns")
assert Timedelta("-1s") == -np.timedelta64(1000000000, "ns")
assert Timedelta("1s") == np.timedelta64(1000000000, "ns")
assert Timedelta("10s") == np.timedelta64(10000000000, "ns")
assert Timedelta("100s") == np.timedelta64(100000000000, "ns")
assert Timedelta("1000s") == np.timedelta64(1000000000000, "ns")
assert Timedelta("1d") == conv(np.timedelta64(1, "D"))
assert Timedelta("-1d") == -conv(np.timedelta64(1, "D"))
assert Timedelta("1D") == conv(np.timedelta64(1, "D"))
assert Timedelta("10D") == conv(np.timedelta64(10, "D"))
assert Timedelta("100D") == conv(np.timedelta64(100, "D"))
assert Timedelta("1000D") == conv(np.timedelta64(1000, "D"))
assert Timedelta("10000D") == conv(np.timedelta64(10000, "D"))
# space
assert Timedelta(" 10000D ") == conv(np.timedelta64(10000, "D"))
assert Timedelta(" - 10000D ") == -conv(np.timedelta64(10000, "D"))
# invalid
msg = "invalid unit abbreviation"
with pytest.raises(ValueError, match=msg):
Timedelta("1foo")
msg = "unit abbreviation w/o a number"
with pytest.raises(ValueError, match=msg):
|
Timedelta("foo")
|
pandas.Timedelta
|
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for Period dtype
import operator
import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import Period, PeriodIndex, Series, period_range
from pandas.core import ops
from pandas.core.arrays import TimedeltaArray
import pandas.util.testing as tm
from pandas.tseries.frequencies import to_offset
# ------------------------------------------------------------------
# Comparisons
class TestPeriodArrayLikeComparisons:
# Comparison tests for PeriodDtype vectors fully parametrized over
# DataFrame/Series/PeriodIndex/PeriodArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, box_with_array):
# GH#26689 make sure we unbox zero-dimensional arrays
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000", periods=4)
other = np.array(pi.to_numpy()[0])
pi = tm.box_expected(pi, box_with_array)
result = pi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
class TestPeriodIndexComparisons:
# TODO: parameterize over boxes
@pytest.mark.parametrize("other", ["2017", 2017])
def test_eq(self, other):
idx = PeriodIndex(["2017", "2017", "2018"], freq="D")
expected = np.array([True, True, False])
result = idx == other
tm.assert_numpy_array_equal(result, expected)
def test_pi_cmp_period(self):
idx = period_range("2007-01", periods=20, freq="M")
result = idx < idx[10]
exp = idx.values < idx.values[10]
tm.assert_numpy_array_equal(result, exp)
# TODO: moved from test_datetime64; de-duplicate with version below
def test_parr_cmp_period_scalar2(self, box_with_array):
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000-01-01", periods=10, freq="D")
val = Period("2000-01-04", freq="D")
expected = [x > val for x in pi]
ser = tm.box_expected(pi, box_with_array)
expected = tm.box_expected(expected, xbox)
result = ser > val
tm.assert_equal(result, expected)
val = pi[5]
result = ser > val
expected = [x > val for x in pi]
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_period_scalar(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
per = Period("2011-02", freq=freq)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == per, exp)
tm.assert_equal(per == base, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != per, exp)
tm.assert_equal(per != base, exp)
exp = np.array([False, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > per, exp)
tm.assert_equal(per < base, exp)
exp = np.array([True, False, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < per, exp)
tm.assert_equal(per > base, exp)
exp = np.array([False, True, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= per, exp)
tm.assert_equal(per <= base, exp)
exp = np.array([True, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= per, exp)
tm.assert_equal(per >= base, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
# TODO: could also box idx?
idx = PeriodIndex(["2011-02", "2011-01", "2011-03", "2011-05"], freq=freq)
exp = np.array([False, False, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == idx, exp)
exp = np.array([True, True, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != idx, exp)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > idx, exp)
exp = np.array([True, False, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < idx, exp)
exp = np.array([False, True, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= idx, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= idx, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi_mismatched_freq_raises(self, freq, box_with_array):
# GH#13200
# different base freq
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
msg = "Input has different freq=A-DEC from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="A")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="A") >= base
# TODO: Could parametrize over boxes for idx?
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="A")
rev_msg = (
r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=A-DEC\)"
)
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
# Different frequency
msg = "Input has different freq=4M from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="4M")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="4M") >= base
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="4M")
rev_msg = r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=4M\)"
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
result = idx1 > Period("2011-02", freq=freq)
exp = np.array([False, False, False, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("2011-02", freq=freq) < idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 == Period("NaT", freq=freq)
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) == idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 != Period("NaT", freq=freq)
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) != idx1
tm.assert_numpy_array_equal(result, exp)
idx2 = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq=freq)
result = idx1 < idx2
exp = np.array([True, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx2
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx2
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx1
exp = np.array([True, True, False, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx1
exp = np.array([False, False, True, False])
tm.assert_numpy_array_equal(result, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat_mismatched_freq_raises(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
diff = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq="4M")
msg = "Input has different freq=4M from Period(Array|Index)"
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 > diff
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 == diff
# TODO: De-duplicate with test_pi_cmp_nat
@pytest.mark.parametrize("dtype", [object, None])
def test_comp_nat(self, dtype):
left = pd.PeriodIndex(
[pd.Period("2011-01-01"), pd.NaT, pd.Period("2011-01-03")]
)
right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period("2011-01-03")])
if dtype is not None:
left = left.astype(dtype)
right = right.astype(dtype)
result = left == right
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = left != right
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == right, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(left != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != left, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > left, expected)
class TestPeriodSeriesComparisons:
def test_cmp_series_period_series_mixed_freq(self):
# GH#13200
base = Series(
[
Period("2011", freq="A"),
Period("2011-02", freq="M"),
Period("2013", freq="A"),
Period("2011-04", freq="M"),
]
)
ser = Series(
[
Period("2012", freq="A"),
Period("2011-01", freq="M"),
Period("2013", freq="A"),
Period("2011-05", freq="M"),
]
)
exp = Series([False, False, True, False])
tm.assert_series_equal(base == ser, exp)
exp = Series([True, True, False, True])
tm.assert_series_equal(base != ser, exp)
exp = Series([False, True, False, False])
tm.assert_series_equal(base > ser, exp)
exp = Series([True, False, False, True])
tm.assert_series_equal(base < ser, exp)
exp = Series([False, True, True, False])
tm.assert_series_equal(base >= ser, exp)
exp = Series([True, False, True, True])
tm.assert_series_equal(base <= ser, exp)
class TestPeriodIndexSeriesComparisonConsistency:
""" Test PeriodIndex and Period Series Ops consistency """
# TODO: needs parametrization+de-duplication
def _check(self, values, func, expected):
# Test PeriodIndex and Period Series Ops consistency
idx = pd.PeriodIndex(values)
result = func(idx)
# check that we don't pass an unwanted type to tm.assert_equal
assert isinstance(expected, (pd.Index, np.ndarray))
tm.assert_equal(result, expected)
s = pd.Series(values)
result = func(s)
exp = pd.Series(expected, name=values.name)
tm.assert_series_equal(result, exp)
def test_pi_comp_period(self):
idx = PeriodIndex(
["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"
)
f = lambda x: x == pd.Period("2011-03", freq="M")
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period("2011-03", freq="M")
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") != x
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, True, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > pd.Period("2011-03", freq="M")
exp = np.array([False, False, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, True, True, False], dtype=np.bool)
self._check(idx, f, exp)
def test_pi_comp_period_nat(self):
idx = PeriodIndex(
["2011-01", "NaT", "2011-03", "2011-04"], freq="M", name="idx"
)
f = lambda x: x == pd.Period("2011-03", freq="M")
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") == x
self._check(idx, f, exp)
f = lambda x: x == pd.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period("2011-03", freq="M")
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") != x
self._check(idx, f, exp)
f = lambda x: x != pd.NaT
exp = np.array([True, True, True, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT != x
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x < pd.Period("2011-03", freq="M")
exp = np.array([True, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > pd.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT >= x
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
# ------------------------------------------------------------------
# Arithmetic
class TestPeriodFrameArithmetic:
def test_ops_frame_period(self):
# GH#13043
df = pd.DataFrame(
{
"A": [pd.Period("2015-01", freq="M"), pd.Period("2015-02", freq="M")],
"B": [pd.Period("2014-01", freq="M"), pd.Period("2014-02", freq="M")],
}
)
assert df["A"].dtype == "Period[M]"
assert df["B"].dtype == "Period[M]"
p = pd.Period("2015-03", freq="M")
off = p.freq
# dtype will be object because of original dtype
exp = pd.DataFrame(
{
"A": np.array([2 * off, 1 * off], dtype=object),
"B": np.array([14 * off, 13 * off], dtype=object),
}
)
tm.assert_frame_equal(p - df, exp)
tm.assert_frame_equal(df - p, -1 * exp)
df2 = pd.DataFrame(
{
"A": [pd.Period("2015-05", freq="M"), pd.Period("2015-06", freq="M")],
"B": [pd.Period("2015-05", freq="M"), pd.Period("2015-06", freq="M")],
}
)
assert df2["A"].dtype == "Period[M]"
assert df2["B"].dtype == "Period[M]"
exp = pd.DataFrame(
{
"A": np.array([4 * off, 4 * off], dtype=object),
"B": np.array([16 * off, 16 * off], dtype=object),
}
)
tm.assert_frame_equal(df2 - df, exp)
tm.assert_frame_equal(df - df2, -1 * exp)
class TestPeriodIndexArithmetic:
# ---------------------------------------------------------------
# __add__/__sub__ with PeriodIndex
# PeriodIndex + other is defined for integers and timedelta-like others
# PeriodIndex - other is defined for integers, timedelta-like others,
# and PeriodIndex (with matching freq)
def test_parr_add_iadd_parr_raises(self, box_with_array):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="D", periods=5)
# TODO: parametrize over boxes for other?
rng = tm.box_expected(rng, box_with_array)
# An earlier implementation of PeriodIndex addition performed
# a set operation (union). This has since been changed to
# raise a TypeError. See GH#14164 and GH#13077 for historical
# reference.
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
rng += other
def test_pi_sub_isub_pi(self):
# GH#20049
# For historical reference see GH#14164, GH#13077.
# PeriodIndex subtraction originally performed set difference,
# then changed to raise TypeError before being implemented in GH#20049
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="D", periods=5)
off = rng.freq
expected = pd.Index([-5 * off] * 5)
result = rng - other
tm.assert_index_equal(result, expected)
rng -= other
tm.assert_index_equal(rng, expected)
def test_pi_sub_pi_with_nat(self):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = rng[1:].insert(0, pd.NaT)
assert other[1:].equals(rng[1:])
result = rng - other
off = rng.freq
expected = pd.Index([pd.NaT, 0 * off, 0 * off, 0 * off, 0 * off])
tm.assert_index_equal(result, expected)
def test_parr_sub_pi_mismatched_freq(self, box_with_array):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="H", periods=5)
# TODO: parametrize over boxes for other?
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(IncompatibleFrequency):
rng - other
@pytest.mark.parametrize("n", [1, 2, 3, 4])
def test_sub_n_gt_1_ticks(self, tick_classes, n):
# GH 23878
p1_d = "19910905"
p2_d = "19920406"
p1 = pd.PeriodIndex([p1_d], freq=tick_classes(n))
p2 = pd.PeriodIndex([p2_d], freq=tick_classes(n))
expected = pd.PeriodIndex([p2_d], freq=p2.freq.base) - pd.PeriodIndex(
[p1_d], freq=p1.freq.base
)
tm.assert_index_equal((p2 - p1), expected)
@pytest.mark.parametrize("n", [1, 2, 3, 4])
@pytest.mark.parametrize(
"offset, kwd_name",
[
(pd.offsets.YearEnd, "month"),
(pd.offsets.QuarterEnd, "startingMonth"),
(pd.offsets.MonthEnd, None),
(pd.offsets.Week, "weekday"),
],
)
def test_sub_n_gt_1_offsets(self, offset, kwd_name, n):
# GH 23878
kwds = {kwd_name: 3} if kwd_name is not None else {}
p1_d = "19910905"
p2_d = "19920406"
freq = offset(n, normalize=False, **kwds)
p1 = pd.PeriodIndex([p1_d], freq=freq)
p2 = pd.PeriodIndex([p2_d], freq=freq)
result = p2 - p1
expected = pd.PeriodIndex([p2_d], freq=freq.base) - pd.PeriodIndex(
[p1_d], freq=freq.base
)
tm.assert_index_equal(result, expected)
# -------------------------------------------------------------
# Invalid Operations
@pytest.mark.parametrize("other", [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize("op", [operator.add, ops.radd, operator.sub, ops.rsub])
def test_parr_add_sub_float_raises(self, op, other, box_with_array):
dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], freq="D")
pi = dti.to_period("D")
pi = tm.box_expected(pi, box_with_array)
with pytest.raises(TypeError):
op(pi, other)
@pytest.mark.parametrize(
"other",
[
# datetime scalars
pd.Timestamp.now(),
pd.Timestamp.now().to_pydatetime(),
pd.Timestamp.now().to_datetime64(),
# datetime-like arrays
pd.date_range("2016-01-01", periods=3, freq="H"),
pd.date_range("2016-01-01", periods=3, tz="Europe/Brussels"),
pd.date_range("2016-01-01", periods=3, freq="S")._data,
pd.date_range("2016-01-01", periods=3, tz="Asia/Tokyo")._data,
# Miscellaneous invalid types
],
)
def test_parr_add_sub_invalid(self, other, box_with_array):
# GH#23215
rng = pd.period_range("1/1/2000", freq="D", periods=3)
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
other + rng
with pytest.raises(TypeError):
rng - other
with pytest.raises(TypeError):
other - rng
# -----------------------------------------------------------------
# __add__/__sub__ with ndarray[datetime64] and ndarray[timedelta64]
def test_pi_add_sub_td64_array_non_tick_raises(self):
rng = pd.period_range("1/1/2000", freq="Q", periods=3)
tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
with pytest.raises(IncompatibleFrequency):
rng + tdarr
with pytest.raises(IncompatibleFrequency):
tdarr + rng
with pytest.raises(IncompatibleFrequency):
rng - tdarr
with pytest.raises(TypeError):
tdarr - rng
def test_pi_add_sub_td64_array_tick(self):
# PeriodIndex + Timedelta-like is allowed only with
# tick-like frequencies
rng = pd.period_range("1/1/2000", freq="90D", periods=3)
tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = pd.period_range("12/31/1999", freq="90D", periods=3)
result = rng + tdi
tm.assert_index_equal(result, expected)
result = rng + tdarr
tm.assert_index_equal(result, expected)
result = tdi + rng
tm.assert_index_equal(result, expected)
result = tdarr + rng
tm.assert_index_equal(result, expected)
expected = pd.period_range("1/2/2000", freq="90D", periods=3)
result = rng - tdi
tm.assert_index_equal(result, expected)
result = rng - tdarr
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
tdarr - rng
with pytest.raises(TypeError):
tdi - rng
# -----------------------------------------------------------------
# operations with array/Index of DateOffset objects
@pytest.mark.parametrize("box", [np.array, pd.Index])
def test_pi_add_offset_array(self, box):
# GH#18849
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("2016Q2")])
offs = box(
[
pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12),
]
)
expected = pd.PeriodIndex([pd.Period("2015Q2"), pd.Period("2015Q4")])
with tm.assert_produces_warning(PerformanceWarning):
res = pi + offs
tm.assert_index_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = offs + pi
tm.assert_index_equal(res2, expected)
unanchored = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
# addition/subtraction ops with incompatible offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
pi + unanchored
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
unanchored + pi
@pytest.mark.parametrize("box", [np.array, pd.Index])
def test_pi_sub_offset_array(self, box):
# GH#18824
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("2016Q2")])
other = box(
[
pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12),
]
)
expected = PeriodIndex([pi[n] - other[n] for n in range(len(pi))])
with tm.assert_produces_warning(PerformanceWarning):
res = pi - other
tm.assert_index_equal(res, expected)
anchored = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
pi - anchored
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
anchored - pi
def test_pi_add_iadd_int(self, one):
# Variants of `one` for #19012
rng = pd.period_range("2000-01-01 09:00", freq="H", periods=10)
result = rng + one
expected = pd.period_range("2000-01-01 10:00", freq="H", periods=10)
tm.assert_index_equal(result, expected)
rng += one
tm.assert_index_equal(rng, expected)
def test_pi_sub_isub_int(self, one):
"""
PeriodIndex.__sub__ and __isub__ with several representations of
the integer 1, e.g. int, np.int64, np.uint8, ...
"""
rng = pd.period_range("2000-01-01 09:00", freq="H", periods=10)
result = rng - one
expected = pd.period_range("2000-01-01 08:00", freq="H", periods=10)
tm.assert_index_equal(result, expected)
rng -= one
|
tm.assert_index_equal(rng, expected)
|
pandas.util.testing.assert_index_equal
|
from datetime import timezone
from functools import lru_cache, wraps
from typing import List, Optional
import numpy as np
from pandas import Index, MultiIndex, Series, set_option
from pandas.core import algorithms
from pandas.core.arrays import DatetimeArray, datetimes
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
from pandas.core.base import IndexOpsMixin
from pandas.core.dtypes import common
from pandas.core.dtypes.cast import maybe_cast_to_datetime
from pandas.core.dtypes.common import is_dtype_equal
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.inference import is_array_like
from pandas.core.dtypes.missing import na_value_for_dtype
import pandas.core.internals.construction
from pandas.core.internals.construction import DtypeObj, lib, Scalar
from pandas.core.reshape.merge import _MergeOperation, _should_fill
def nan_to_none_return(func):
"""Decorate to replace returned NaN-s with None-s."""
@wraps(func)
def wrapped_nan_to_none_return(*args, **kwargs):
r = func(*args, **kwargs)
if r != r:
return None
return r
return wrapped_nan_to_none_return
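# Illustrative usage sketch (not part of the original module): wrap a pandas reduction
# so a NaN result comes back as None; patch_pandas() below applies this to
# IndexOpsMixin.min/.max as .nonemin/.nonemax, e.g.
#
#     nonemax = nan_to_none_return(Series.max)
#     nonemax(Series([1.0, 2.0]))        # -> 2.0
#     nonemax(Series([], dtype=float))   # -> None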
def patch_pandas():
"""
Patch pandas internals to increase performance on small DataFrame-s.
Look: Pandas sucks. I mean it. Every minor release breaks the public API, performance is awful,
maintainers are ignorant, etc. But we don't have an alternative given our human resources.
So:
- Patch certain functions to improve the performance for our use-cases.
- Backport bugs.
- Dream about a better package with a similar API.
"""
set_option("mode.chained_assignment", "raise")
obj_dtype = np.dtype("O")
    # not required for pandas 1.3.0+
# backport https://github.com/pandas-dev/pandas/pull/34414
_MergeOperation._maybe_add_join_keys = _maybe_add_join_keys
def _convert_object_array(
content: List[Scalar], coerce_float: bool = False, dtype: Optional[DtypeObj] = None,
) -> List[Scalar]:
# safe=True avoids converting nullable integers to floats
def convert(arr):
if dtype != obj_dtype:
arr = lib.maybe_convert_objects(arr, try_float=coerce_float, safe=True)
arr = maybe_cast_to_datetime(arr, dtype)
return arr
arrays = [convert(arr) for arr in content]
return arrays
pandas.core.internals.construction._convert_object_array = _convert_object_array
IndexOpsMixin.nonemin = nan_to_none_return(IndexOpsMixin.min)
IndexOpsMixin.nonemax = nan_to_none_return(IndexOpsMixin.max)
common.pandas_dtype = lru_cache()(common.pandas_dtype)
datetimes.pandas_dtype = common.pandas_dtype
common.is_dtype_equal = lru_cache()(common.is_dtype_equal)
datetimes.is_dtype_equal = common.is_dtype_equal
DatetimeTZDtype.utc = DatetimeTZDtype(tz=timezone.utc)
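    # Cache the very common tz=UTC dtype as a singleton so repeated
    # DatetimeTZDtype(tz=timezone.utc) constructions reuse one object.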
def cached_utc_new(cls, *args, **kwargs):
if not args and not kwargs:
return object.__new__(cls)
if not args and kwargs == {"tz": timezone.utc}:
return cls.utc
obj = object.__new__(cls)
obj.__init__(*args, **kwargs)
return obj
DatetimeTZDtype.__new__ = cached_utc_new
original_take = DatetimeLikeArrayMixin.take
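    # Only negative indices can trigger the fill path; when there are none,
    # force the cheaper allow_fill=False branch of the original take().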
def fast_take(self, indices, allow_fill=False, fill_value=None):
if len(indices) and indices.min() < 0:
return original_take(self, indices, allow_fill=allow_fill, fill_value=fill_value)
return original_take(self, indices, allow_fill=False)
DatetimeLikeArrayMixin.take = fast_take
original_tz_convert = DatetimeArray.tz_convert
def fast_tz_convert(self, tz):
if tz is None:
return self
return original_tz_convert(self, tz)
DatetimeArray.tz_convert = fast_tz_convert
original_get_take_nd_function = algorithms._get_take_nd_function
cached_get_take_nd_function = lru_cache()(algorithms._get_take_nd_function)
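    # The kernel lookup is cached for the common unmasked case; mask_info holds an
    # ndarray (unhashable), so masked calls fall through to the original function.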
def _get_take_nd_function(ndim: int, arr_dtype, out_dtype, axis: int = 0, mask_info=None):
if mask_info is None or not mask_info[1]:
return cached_get_take_nd_function(ndim, arr_dtype, out_dtype, axis)
return original_get_take_nd_function(ndim, arr_dtype, out_dtype, axis, mask_info)
algorithms._get_take_nd_function = _get_take_nd_function
datetimes._validate_dt64_dtype = lru_cache()(datetimes._validate_dt64_dtype)
# https://github.com/pandas-dev/pandas/issues/35768
original_series_take = Series.take
def safe_take(self, indices, axis=0, is_copy=None, **kwargs) -> Series:
kwargs.pop("fill_value", None)
kwargs.pop("allow_fill", None)
return original_series_take(self, indices, axis=axis, is_copy=is_copy, **kwargs)
Series.take = safe_take
def _maybe_add_join_keys(self, result, left_indexer, right_indexer):
left_has_missing = None
right_has_missing = None
keys = zip(self.join_names, self.left_on, self.right_on)
for i, (name, lname, rname) in enumerate(keys):
if not _should_fill(lname, rname):
continue
take_left, take_right = None, None
if name in result:
if left_indexer is not None and right_indexer is not None:
if name in self.left:
if left_has_missing is None:
left_has_missing = (left_indexer == -1).any()
if left_has_missing:
take_right = self.right_join_keys[i]
if not is_dtype_equal(
result[name].dtype, self.left[name].dtype,
):
take_left = self.left[name]._values
elif name in self.right:
if right_has_missing is None:
right_has_missing = (right_indexer == -1).any()
if right_has_missing:
take_left = self.left_join_keys[i]
if not is_dtype_equal(
result[name].dtype, self.right[name].dtype,
):
take_right = self.right[name]._values
elif left_indexer is not None and is_array_like(self.left_join_keys[i]):
take_left = self.left_join_keys[i]
take_right = self.right_join_keys[i]
if take_left is not None or take_right is not None:
if take_left is None:
lvals = result[name]._values
else:
lfill = na_value_for_dtype(take_left.dtype)
lvals = algorithms.take_1d(take_left, left_indexer, fill_value=lfill)
if take_right is None:
rvals = result[name]._values
else:
rfill =
|
na_value_for_dtype(take_right.dtype)
|
pandas.core.dtypes.missing.na_value_for_dtype
|
import argparse
from PyQt5.QtCore import qChecksum
from numpy.core.numeric import False_
import finplot as fplt
import pandas as pd
import numpy as np
from collections import defaultdict
from matplotlib.markers import MarkerStyle as MS
import datetime
from functools import lru_cache
from PyQt5.QtWidgets import QComboBox, QCheckBox, QWidget
from pyqtgraph import QtGui
import pyqtgraph as pg
from copy import deepcopy
from . import observer_plot
dashboard_data = {}
ax, axo, ctrl_panel = '', '', ''
#kline_column_names = ["open_time", "open", "high", "low", "close", "volume", "close_time","quote_asset_volume",
# "nbum_of_trades", "taker_buy_base_ast_vol", "taker_buy_quote_ast_vol", "ignore"]
def calc_volume_profile(df, period, bins):
'''
Calculate a poor man's volume distribution/profile by "pinpointing" each kline volume to a certain
    price and placing them into N buckets. (IRL volume would be something like "trade-bins" per candle.)
The output format is a matrix, where each [period] time is a row index, and even columns contain
start (low) price and odd columns contain volume (for that price and time interval). See
finplot.horiz_time_volume() for more info.
'''
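    # bin edges are computed once over the whole series so every period shares the
    # same price buckets; each period's candles are then assigned to those buckets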
data = []
df['hlc3'] = (df.high + df.low + df.close) / 3 # assume this is volume center per each 1m candle
_,all_bins = pd.cut(df.hlc3, bins, right=False, retbins=True)
for _,g in df.groupby(pd.Grouper(key='open_time', freq=period)):
t = g['open_time'].iloc[0]
volbins =
|
pd.cut(g.hlc3, all_bins, right=False)
|
pandas.cut
|
import requests
import pandas as pd
import re
import datetime as dt
from bs4 import BeautifulSoup as bs
# list comprehension for the season years
years = [str(2000 + i) for i in range(5,19)]
this_year = '2019'
# print(years)
# multiple functions for cleaning data
# regex for finding round names
pattern = re.compile("^(Round|Week|Semifinal|Final|Qualifiers|Semis)(\ \d{1,2})?.*$")
def parse_date(date):
date = dt.datetime.strptime(date, '%d %b %Y')
return date
def outcome(f):
'''game outcome for home team V: victory L: loss D: draw'''
if f > 0:
return 'V'
elif f < 0:
return 'L'
elif f == 0:
return 'D'
else:
return 'D'
def fix_round(f):
'''extract round number or final type'''
if f[:4] == 'Week':
return f[5:7]
elif f[:5] == 'Round':
return f[6:8]
elif f[:10] == 'Qualifiers' or f[:13] == 'Quarterfinals':
return 'QF' # quarter final
elif f[:6] == 'Finals' or f == 'Semifinals' or f == 'Semis' or f == 'Semifinal':
return 'SF' # semi final
elif f[:6] == 'Final ' or f == 'Final':
return 'GF' # grand final
else:
return f.strip()
def team_loc(team):
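    '''return the conference country code (au, nz or sa) a team belongs to'''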
au = ['Brumbies','Rebels','Reds','Sunwolves','Waratahs','Western Force']
nz = ['Blues','Chiefs','Crusaders','Highlanders','Hurricanes']
sa = ['Bulls','Jaguares','Lions','Sharks','Stormers','Cheetahs','Kings']
x = ''
if any(team in s for s in au):
x = 'au'
if any(team in s for s in nz):
x = 'nz'
if any(team in s for s in sa):
x = 'sa'
return x
def name_fixer(name):
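    '''normalise shortened, misspelled or renamed team names to their canonical form'''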
if 'High' in name:
name = 'Highlanders'
if 'Wara' in name:
name = 'Waratahs'
if 'Storm' in name:
name = 'Stormers'
if 'Sunw' in name:
name = 'Sunwolves'
if 'Force' in name:
name = 'Western Force'
if 'Hurr' in name:
name = 'Hurricanes'
if 'Warra' in name:
name = 'Waratahs'
if 'Cheet' in name:
name = 'Cheetahs'
if 'Cats' in name:
name = 'Lions' # Team name change in 2005
return name
def data_nice(year):
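    '''parse the saved HTML results table for one season into a tidy dataframe of games'''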
table_nice = []
table_round = []
with open('data/data_' + year + '.txt') as f:
data = bs(f.read(), features='lxml')
rows = data.find_all('tr')
for row in rows:
cols = row.find_all('td')
cols_nice = [ele.text.strip() for ele in cols]
cols_round = [x.text.strip() for x in cols if pattern.match(x.text.strip())]
table_nice.append([ele for ele in cols_nice if ele]) # Get rid of empty values
table_round.append([ele for ele in cols_round if ele]) # Get rid of empty values
df1 = pd.DataFrame(table_nice)
df2 = pd.DataFrame(table_round).fillna(method='ffill')
df = pd.concat([df1, df2], axis=1).dropna()
df['year'] = year
df.columns = ['date','teams','location','time','score','round','year']
df['date'] = df['date'] + ' ' + df['year']
df['home'] = df['teams'].str.split(' v ').str[0]
df['away'] = df['teams'].str.split(' v ').str[1]
df['home'] = df['home'].str.strip()
df['away'] = df['away'].str.strip()
df['home'] = [name_fixer(str(x)) for x in df['home']]
df['away'] = [name_fixer(str(x)) for x in df['away']]
df['home_loc'] = [team_loc(str(x)) for x in df['home']]
df['away_loc'] = [team_loc(str(x)) for x in df['away']]
df['fthp'] = df['score'].str.split('-').str[0].astype('int') # full time home points
df['ftap'] = df['score'].str.split('-').str[1].astype('int') # full time away points
df['ftr'] = [outcome(x) for x in df['fthp'] - df['ftap']] # home outcome ftr (full time result)
df['round'] = [fix_round(x) for x in df['round']]
remove_columns = ['teams','score','year','location','time']
df = df.drop(columns=remove_columns)
return df
# creating dataframes, cleaning up data:
df_2005 = data_nice('2005')
df_2006 = data_nice('2006')
df_2007 = data_nice('2007')
df_2008 = data_nice('2008')
df_2009 = data_nice('2009')
df_2010 = data_nice('2010')
df_2011 = data_nice('2011')
df_2012 = data_nice('2012')
df_2013 = data_nice('2013')
df_2014 = data_nice('2014')
df_2015 = data_nice('2015')
df_2016 = data_nice('2016')
df_2017 = data_nice('2017')
df_2018 = data_nice('2018')
df_2019 = data_nice('2019')
print(df_2010.columns)
# more fixes for data inconsistencies
df_2005.loc[(df_2005['date'] == '28 May 2005'), 'round'] = "GF" # 2005 no final fixed
df_2006.drop(5, inplace=True) # remove bogus final data from 2006
df_2018.drop(10, inplace=True) # remove bogus final data from 2018
# List of series missing from each year
'''
au = ['Brumbies','Rebels','Reds','Sunwolves','Waratahs','Western Force']
nz = ['Blues','Chiefs','Crusaders','Highlanders','Hurricanes']
sa = ['Bulls','Jaguares','Lions','Sharks','Stormers','Cheetahs','Kings']
'''
missing_games_2007 = [pd.Series(['12 May 2007', 'SF', 'Sharks', 'Blues', 'sa', 'nz', 34, 18, 'V'], index=df_2007.columns ) ,
pd.Series(['12 May 2007', 'SF', 'Bulls', 'Crusaders', 'sa', 'nz', 27, 12, 'V'], index=df_2007.columns )]
missing_games_2008 = [pd.Series(['31 May 2008', 'GF', 'Crusaders', 'Waratahs', 'nz','au', 20, 12, 'V'], index=df_2008.columns ) ,
pd.Series(['24 May 2008', 'SF', 'Waratahs', 'Sharks', 'au' ,'sa', 28, 13, 'V'], index=df_2008.columns ),
pd.Series(['24 May 2008', 'SF', 'Crusaders', 'Hurricanes', 'nz','nz',33, 22, 'V'], index=df_2008.columns )]
missing_games_2017 = [pd.Series(['21 Jul 2017', 'QF', 'Brumbies', 'Hurricanes', 'au','nz',16, 35, 'L'], index=df_2017.columns ) ,
pd.Series(['22 Jul 2017', 'QF', 'Crusaders', 'Highlanders', 'nz','nz',17, 0, 'V'], index=df_2017.columns ),
pd.Series(['23 Jul 2017', 'QF', 'Lions', 'Sharks', 'sa','sa', 23, 21, 'V'], index=df_2017.columns ),
pd.Series(['23 Jul 2017', 'QF', 'Stormers', 'Chiefs', 'sa','nz',11, 17, 'L'], index=df_2017.columns )]
# Pass a list of Series to append() to add the missing rows to each affected year
df_2007 = df_2007.append(missing_games_2007 , ignore_index=True)
df_2008 = df_2008.append(missing_games_2008 , ignore_index=True)
df_2017 = df_2017.append(missing_games_2017 , ignore_index=True)
df_2009.at[6, 'home'] = 'Chiefs'
df_2009.at[7, 'home'] = 'Bulls'
df_2009.at[8, 'home'] = 'Bulls'
df_2010.at[4, 'home'] = 'Bulls'
df_2013.at[2, 'home'] = 'Crusaders'
df_2013.at[3, 'home'] = 'Brumbies'
df_2013.at[4, 'home'] = 'Chiefs'
df_2013.at[5, 'home'] = 'Bulls'
df_2013.at[6, 'home'] = 'Chiefs'
df_2014.at[6, 'round'] = 'GF'
df_2014.at[2, 'round'] = 'QF'
df_2014.at[3, 'round'] = 'QF'
df_2015.at[2, 'round'] = 'QF'
df_2015.at[3, 'round'] = 'QF'
df_2015.at[6, 'round'] = 'GF'
df_2016.at[2, 'round'] = 'QF'
df_2016.at[3, 'round'] = 'QF'
df_2016.at[4, 'round'] = 'QF'
df_2016.at[5, 'round'] = 'QF'
df_2016.at[8, 'round'] = 'GF'
df_2016.at[6, 'home'] = 'Crusaders'
df_2016.at[7, 'home'] = 'Lions'
df_2018.at[152, 'round'] = 'QF'
df_2018.at[153, 'round'] = 'QF'
df_2018.at[154, 'round'] = 'QF'
df_2018.at[155, 'round'] = 'QF'
# parse dates and sort, reset indexes
df_2005.date = df_2005.date.apply(parse_date)
df_2006.date = df_2006.date.apply(parse_date)
df_2007.date = df_2007.date.apply(parse_date)
df_2008.date = df_2008.date.apply(parse_date)
df_2009.date = df_2009.date.apply(parse_date)
df_2010.date = df_2010.date.apply(parse_date)
df_2011.date = df_2011.date.apply(parse_date)
df_2012.date = df_2012.date.apply(parse_date)
df_2013.date = df_2013.date.apply(parse_date)
df_2014.date = df_2014.date.apply(parse_date)
df_2015.date = df_2015.date.apply(parse_date)
df_2016.date = df_2016.date.apply(parse_date)
df_2017.date = df_2017.date.apply(parse_date)
df_2018.date = df_2018.date.apply(parse_date)
# reset indexes
df_2005 = df_2005.sort_values(by=['date']).reset_index(drop=True)
df_2006 = df_2006.sort_values(by=['date']).reset_index(drop=True)
df_2007 = df_2007.sort_values(by=['date']).reset_index(drop=True)
df_2008 = df_2008.sort_values(by=['date']).reset_index(drop=True)
df_2009 = df_2009.sort_values(by=['date']).reset_index(drop=True)
df_2010 = df_2010.sort_values(by=['date']).reset_index(drop=True)
df_2011 = df_2011.sort_values(by=['date']).reset_index(drop=True)
df_2012 = df_2012.sort_values(by=['date']).reset_index(drop=True)
df_2013 = df_2013.sort_values(by=['date']).reset_index(drop=True)
df_2014 = df_2014.sort_values(by=['date']).reset_index(drop=True)
df_2015 = df_2015.sort_values(by=['date']).reset_index(drop=True)
df_2016 = df_2016.sort_values(by=['date']).reset_index(drop=True)
df_2017 = df_2017.sort_values(by=['date']).reset_index(drop=True)
df_2018 = df_2018.sort_values(by=['date']).reset_index(drop=True)
# get running sum of points and points conceded by round for home and away teams
# need to be up to that point/game (hence minus x:)
def get_cum_points(df):
# home team points scored htps
df['htps'] = df.groupby(['home'])['fthp'].apply(lambda x: x.cumsum() - x)
# home team points conceded htpc
df['htpc'] = df.groupby(['home'])['ftap'].apply(lambda x: x.cumsum() - x)
# away team points scored atps
df['atps'] = df.groupby(['away'])['ftap'].apply(lambda x: x.cumsum() - x)
# away team points conceded atpc
df['atpc'] = df.groupby(['away'])['fthp'].apply(lambda x: x.cumsum() - x)
return df
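# Illustrative check (not part of the scrape): cumsum() - x yields, for each game, the total
# from that team's previous games, because subtracting the current row excludes the game itself.
_demo = pd.Series([10, 20, 30])                            # hypothetical points in games 1..3
assert (_demo.cumsum() - _demo).tolist() == [0, 10, 30]    # totals *before* each game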
# Apply to each dataset
df_2005 = get_cum_points(df_2005)
df_2006 = get_cum_points(df_2006)
df_2007 = get_cum_points(df_2007)
df_2008 = get_cum_points(df_2008)
df_2009 = get_cum_points(df_2009)
df_2010 = get_cum_points(df_2010)
df_2011 = get_cum_points(df_2011)
df_2012 = get_cum_points(df_2012)
df_2013 = get_cum_points(df_2013)
df_2014 = get_cum_points(df_2014)
df_2015 = get_cum_points(df_2015)
df_2016 = get_cum_points(df_2016)
df_2017 = get_cum_points(df_2017)
df_2018 = get_cum_points(df_2018)
def get_home_points(ftr):
    '''The most common bonus-point system is:
    4 points for winning a match,
    2 points for drawing a match,
    0 points for losing a match,
    1 losing bonus point for losing by 7 points (or fewer),
    1 try bonus point for scoring (at least) 3 tries more than the opponent.
    Only the 4/2/0 win/draw/loss points are applied here, since try counts are not
    scraped (a hedged sketch of the losing bonus follows get_away_points below).'''
points = 0
if ftr == 'V':
points += 4
elif ftr == 'D':
points += 2
else:
points += 0
return points
def get_away_points(ftr):
if ftr == 'V':
return 0
elif ftr == 'D':
return 2
else:
return 4
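# Sketch only (hypothetical helper, not used by the pipeline): the docstring above lists the
# full bonus-point system, but try counts are not scraped, so at most the losing bonus
# (losing by 7 or fewer) could be derived from the score alone.
def get_home_points_with_losing_bonus(fthp, ftap):
    margin = fthp - ftap
    if margin > 0:
        return 4                          # win
    if margin == 0:
        return 2                          # draw
    return 1 if margin >= -7 else 0       # losing bonus point for a narrow loss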
def get_cumcomp_points(df):
df['homepoint'] = [get_home_points(x) for x in df['ftr']]
df['awaypoint'] = [get_away_points(x) for x in df['ftr']]
df['htp'] = df.groupby(['home'])['homepoint'].apply(lambda x: x.cumsum() - x)
df['atp'] = df.groupby(['away'])['awaypoint'].apply(lambda x: x.cumsum() - x)
remove_columns = ['homepoint','awaypoint']
df = df.drop(columns=remove_columns)
return df
df_2005 = get_cumcomp_points(df_2005)
df_2006 = get_cumcomp_points(df_2006)
df_2007 = get_cumcomp_points(df_2007)
df_2008 = get_cumcomp_points(df_2008)
df_2009 = get_cumcomp_points(df_2009)
df_2010 = get_cumcomp_points(df_2010)
df_2011 = get_cumcomp_points(df_2011)
df_2012 = get_cumcomp_points(df_2012)
df_2013 = get_cumcomp_points(df_2013)
df_2014 = get_cumcomp_points(df_2014)
df_2015 = get_cumcomp_points(df_2015)
df_2016 = get_cumcomp_points(df_2016)
df_2017 = get_cumcomp_points(df_2017)
df_2018 = get_cumcomp_points(df_2018)
def opp_res(x):
if x == 'V':
return 'L'
elif x == 'L':
return 'V'
else:
return 'D'
def get_form(df):
''' gets last game result for last 5 games'''
# home form
df['hm1'] = df.groupby(['home'])['ftr'].shift(1).fillna('M')
df['hm2'] = df.groupby(['home'])['ftr'].shift(2).fillna('M')
df['hm3'] = df.groupby(['home'])['ftr'].shift(3).fillna('M')
df['hm4'] = df.groupby(['home'])['ftr'].shift(4).fillna('M')
df['hm5'] = df.groupby(['home'])['ftr'].shift(5).fillna('M')
    # away form: reverse the home-team result with opp_res to get the away team's own result
    df['am1'] = df.groupby(['away'])['ftr'].shift(1).map(opp_res, na_action='ignore').fillna('M')
    df['am2'] = df.groupby(['away'])['ftr'].shift(2).map(opp_res, na_action='ignore').fillna('M')
    df['am3'] = df.groupby(['away'])['ftr'].shift(3).map(opp_res, na_action='ignore').fillna('M')
    df['am4'] = df.groupby(['away'])['ftr'].shift(4).map(opp_res, na_action='ignore').fillna('M')
    df['am5'] = df.groupby(['away'])['ftr'].shift(5).map(opp_res, na_action='ignore').fillna('M')
return df
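# Illustrative check (hypothetical rows): shift(1) within each group returns the previous
# game's result for that team, and teams with no history yet get 'M' via fillna.
_form_demo = pd.DataFrame({'home': ['Blues', 'Blues', 'Chiefs'], 'ftr': ['V', 'L', 'D']})
assert _form_demo.groupby('home')['ftr'].shift(1).fillna('M').tolist() == ['M', 'V', 'M']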
# apply each dataset the form
df_2005 = get_form(df_2005)
df_2006 = get_form(df_2006)
df_2007 = get_form(df_2007)
df_2008 = get_form(df_2008)
df_2009 = get_form(df_2009)
df_2010 = get_form(df_2010)
df_2011 = get_form(df_2011)
df_2012 = get_form(df_2012)
df_2013 = get_form(df_2013)
df_2014 = get_form(df_2014)
df_2015 = get_form(df_2015)
df_2016 = get_form(df_2016)
df_2017 = get_form(df_2017)
df_2018 = get_form(df_2018)
# get round numbers for all rounds including quarters, semis, and finals
df_2005['rn'] = df_2005.groupby([True]*len(df_2005))['round'].transform(lambda x: pd.factorize(x)[0]+1)
df_2006['rn'] = df_2006.groupby([True]*len(df_2006))['round'].transform(lambda x: pd.factorize(x)[0]+1)
df_2007['rn'] = df_2007.groupby([True]*len(df_2007))['round'].transform(lambda x: pd.factorize(x)[0]+1)
df_2008['rn'] = df_2008.groupby([True]*len(df_2008))['round'].transform(lambda x: pd.factorize(x)[0]+1)
df_2009['rn'] = df_2009.groupby([True]*len(df_2009))['round'].transform(lambda x: pd.factorize(x)[0]+1)
df_2010['rn'] = df_2010.groupby([True]*len(df_2010))['round'].transform(lambda x:
|
pd.factorize(x)
|
pandas.factorize
|
import pandas as __pd
import datetime as __dt
from dateutil import relativedelta as __rd
from multiprocessing import Pool as __Pool
import multiprocessing as __mp
import requests as __requests
from seffaflik.__ortak.__araclar import make_requests as __make_requests
from seffaflik.__ortak import __dogrulama as __dogrulama
__first_part_url = "production/"
def santraller(tarih=__dt.datetime.now().strftime("%Y-%m-%d")):
"""
    Returns the YEKDEM power plant information registered in the EPİAŞ system for the given date.
    Parameter
    ----------
    tarih : date in YYYY-MM-DD format (default: today)
    Returns
    -----------------
    Power plant information (Id, Adı, EIC Kodu, Kısa Adı)
"""
if __dogrulama.__tarih_dogrulama(tarih):
try:
particular_url = __first_part_url + "renewable-sm-licensed-power-plant-list?period=" + tarih
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["powerPlantList"])
df.rename(index=str, columns={"id": "Id", "name": "Adı", "eic": "EIC Kodu",
"shortName": "Kısa Adı"}, inplace=True)
df = df[["Id", "Adı", "EIC Kodu", "Kısa Adı"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
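# Hypothetical usage (requires network access to the EPİAŞ Transparency Platform and a working
# seffaflik configuration); shown only as a sketch:
# santral_df = santraller(tarih="2021-01-01")
# print(santral_df.head())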
def kurulu_guc(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
    Returns the total installed capacity, by source, of the YEKDEM power plants registered in the
    EPİAŞ system for the months covered by the given date range.
    Parameters
    ------------
    baslangic_tarihi : start date in YYYY-MM-DD format (default: today)
    bitis_tarihi : end date in YYYY-MM-DD format (default: today)
    Returns
    -----------------
    Installed capacity information (Tarih, Kurulu Güç)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
ilk = __dt.datetime.strptime(baslangic_tarihi[:7], '%Y-%m')
son = __dt.datetime.strptime(bitis_tarihi[:7], '%Y-%m')
date_list = []
while ilk <= son and ilk <= __dt.datetime.today():
date_list.append(ilk.strftime("%Y-%m-%d"))
ilk = ilk + __rd.relativedelta(months=+1)
with __Pool(__mp.cpu_count()) as p:
df_list = p.map(__yekdem_kurulu_guc, date_list)
return
|
__pd.concat(df_list, sort=False)
|
pandas.concat
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from tqdm import tqdm
##############
# Data reading
##############
def original_data(thread_fp, nrows=None):
"""Read raw Twitter data"""
print("Loading threads from: %s" % thread_fp)
# reading in data
thread_df =
|
pd.read_csv(thread_fp, nrows=nrows)
|
pandas.read_csv
|
import numpy as np
import pandas as pd
from glob import glob
import matplotlib.pyplot as plt
'''
turbine-13_helihoist-1_tom_geometry_hammerhead_2019-11-09-12-22-51_2019-11-10-12-59-19
turbine-13_sbitroot_tom_acc-vel-pos_hammerhead_2019-11-09-12-12-04_2019-11-09-15-25-59
turbine-13_sbittip_tom_acc-vel-pos_hammerhead_2019-11-09-12-13-17_2019-11-10-13-04-29
turbine-13_helihoist-1_tom_acc-vel-pos_sbi1_2019-11-10-12-59-20_2019-11-10-13-33-08
turbine-13_helihoist-1_tom_acc-vel-pos_tnhb1_2019-11-10-13-33-08_2019-11-16-05-29-59
turbine-13_helihoist-1_tom_geometry_tnhb1_2019-11-10-13-33-08_2019-11-16-05-29-59
turbine-13_sbitroot_tom_acc-vel-pos_tnhb1_2019-11-15-17-14-58_2019-11-16-05-36-40
turbine-13_sbittip_tom_acc-vel-pos_tnhb1_2019-11-10-13-31-42_2019-11-15-22-27-35
wmb-sued-2019-11-09
wmb-sued-2019-11-10
wmb-sued-2019-11-11
wmb-sued-2019-11-12
wmb-sued-2019-11-13
wmb-sued-2019-11-14
wmb-sued-2019-11-15
wmb-sued-2019-11-16
lidar_2019_11_09
lidar_2019_11_10
lidar_2019_11_11
lidar_2019_11_12
lidar_2019_11_13
lidar_2019_11_14
lidar_2019_11_15
lidar_2019_11_16 is missing
'''
#loading data and filling it into an array of all dataframes
hammerhead = sorted(glob('Daten/hammerhead/hammerhead/turbine-13**.csv'))
sbi1 = sorted(glob('Daten/sbi1/sbi1/turbine-13**.csv'))
sbi2 = sorted(glob('Daten/sbi2/sbi2/turbine-13**.csv'))
tnhb1 = sorted(glob('Daten/tnhb1/tnhb1/turbine-13**.csv'))
data = []
helihoist_tele_hammerhead = pd.read_csv(hammerhead[0], delimiter = ',')
helihoist_geo_hammerhead = pd.read_csv(hammerhead[1], delimiter = ',')
sbitroot_hammerhead = pd.read_csv(hammerhead[2], delimiter = ',')
sbitip_hammerhead = pd.read_csv(hammerhead[3], delimiter = ',')
data.extend([helihoist_tele_hammerhead, helihoist_geo_hammerhead, sbitroot_hammerhead, sbitip_hammerhead])
helihoist_sbi1 = pd.read_csv(sbi1[0], delimiter = ',')
data.append(helihoist_sbi1)
helihoist_tnhb1 = pd.read_csv(tnhb1[0], delimiter = ',')
helihoist_geo_tnhb1 = pd.read_csv(tnhb1[1], delimiter = ',')
sbiroot_tnhb1 = pd.read_csv(tnhb1[2], delimiter = ',')
sbitip_tnhb1 = pd.read_csv(tnhb1[3], delimiter = ',')
data.extend([helihoist_tnhb1, helihoist_geo_tnhb1, sbiroot_tnhb1, sbitip_tnhb1])
wmb1= pd.read_csv('environment/environment/waves/wmb-sued/wmb-sued_2019-11-09.csv', delimiter = ' ')
wmb2= pd.read_csv('environment/environment/waves/wmb-sued/wmb-sued_2019-11-10.csv', delimiter = ' ')
wmb3= pd.read_csv('environment/environment/waves/wmb-sued/wmb-sued_2019-11-11.csv', delimiter = ' ')
wmb4= pd.read_csv('environment/environment/waves/wmb-sued/wmb-sued_2019-11-12.csv', delimiter = ' ')
wmb5= pd.read_csv('environment/environment/waves/wmb-sued/wmb-sued_2019-11-13.csv', delimiter = ' ')
wmb6= pd.read_csv('environment/environment/waves/wmb-sued/wmb-sued_2019-11-14.csv', delimiter = ' ')
wmb7= pd.read_csv('environment/environment/waves/wmb-sued/wmb-sued_2019-11-15.csv', delimiter = ' ')
wmb8= pd.read_csv('environment/environment/waves/wmb-sued/wmb-sued_2019-11-16.csv', delimiter = ' ')
# pay particular attention to the 11-12 and 11-15 files **TODO** remove
data.extend([wmb1, wmb2, wmb3, wmb4, wmb5, wmb6, wmb7, wmb8])
wmb_all = []
wmb_all.extend([wmb1, wmb2, wmb3, wmb4])
lidar1= pd.read_csv('environment/environment/wind/lidar/lidar_2019-11-09.csv', delimiter = ' ')
lidar2= pd.read_csv('environment/environment/wind/lidar/lidar_2019-11-10.csv', delimiter = ' ')
lidar3= pd.read_csv('environment/environment/wind/lidar/lidar_2019-11-11.csv', delimiter = ' ')
lidar4= pd.read_csv('environment/environment/wind/lidar/lidar_2019-11-12.csv', delimiter = ' ')
lidar5= pd.read_csv('environment/environment/wind/lidar/lidar_2019-11-13.csv', delimiter = ' ')
lidar6= pd.read_csv('environment/environment/wind/lidar/lidar_2019-11-14.csv', delimiter = ' ')
lidar7= pd.read_csv('environment/environment/wind/lidar/lidar_2019-11-15.csv', delimiter = ' ')
lidar_all =[]
lidar_all.extend([lidar1, lidar2, lidar3, lidar4, lidar5, lidar6, lidar7])
buffer1 = []
for i in wmb_all:
i.columns = (
'epoch', 'Tp', 'Dirp', 'Sprp', 'Tz', 'Hm0', 'TI', 'T1', 'Tc', 'Tdw2', 'Tdw1', 'Tpc', 'nu', 'eps', 'QP', 'Ss',
'TRef', 'TSea', 'Bat', 'Percentage', 'Hmax', 'Tmax', 'H(1/10)', 'T(1/10)', 'H(1/3)', 'T(1/3)', 'Hav', 'Tav', 'Eps',
'#Waves')
buffer1.append(i)
wmb = pd.concat(buffer1, axis=0)
wmb.columns = (
'epoch', 'Tp', 'Dirp', 'Sprp', 'Tz', 'Hm0', 'TI', 'T1', 'Tc', 'Tdw2', 'Tdw1', 'Tpc', 'nu', 'eps', 'QP', 'Ss',
'TRef', 'TSea', 'Bat', 'Percentage', 'Hmax', 'Tmax', 'H(1/10)', 'T(1/10)', 'H(1/3)', 'T(1/3)', 'Hav', 'Tav', 'Eps',
'#Waves')
buffer2 = []
for j in lidar_all:
j.columns = ('epoch', 'wind_speed_0', 'wind_dir_0', 'wind_dir_0_corr', 'height_0', 'wind_speed_1', 'wind_dir_1',
'wind_dir_1_corr', 'height_1', 'wind_speed_2', 'wind_dir_2', 'wind_dir_2_corr', 'height_2',
'wind_speed_3', 'wind_dir_3', 'wind_dir_3_corr', 'height_3', 'wind_speed_4', 'wind_dir_4',
'wind_dir_4_corr', 'height_4', 'wind_speed_5', 'wind_dir_5', 'wind_dir_5_corr', 'height_5',
'wind_speed_6', 'wind_dir_6', 'wind_dir_6_corr', 'height_6', 'wind_speed_7', 'wind_dir_7',
'wind_dir_7_corr', 'height_7', 'wind_speed_8', 'wind_dir_8', 'wind_dir_8_corr', 'height_8',
'wind_speed_9', 'wind_dir_9', 'wind_dir_9_corr', 'height_9', 'wind_speed_10', 'wind_dir_10',
'wind_dir_10_corr', 'height_10', 'heading')
buffer2.append(j)
lidar = pd.concat(buffer2, axis=0)
lidar.columns = ('epoch', 'wind_speed_0', 'wind_dir_0', 'wind_dir_0_corr', 'height_0', 'wind_speed_1', 'wind_dir_1',
'wind_dir_1_corr', 'height_1', 'wind_speed_2', 'wind_dir_2', 'wind_dir_2_corr', 'height_2',
'wind_speed_3', 'wind_dir_3', 'wind_dir_3_corr', 'height_3', 'wind_speed_4', 'wind_dir_4',
'wind_dir_4_corr', 'height_4', 'wind_speed_5', 'wind_dir_5', 'wind_dir_5_corr', 'height_5',
'wind_speed_6', 'wind_dir_6', 'wind_dir_6_corr', 'height_6', 'wind_speed_7', 'wind_dir_7',
'wind_dir_7_corr', 'height_7', 'wind_speed_8', 'wind_dir_8', 'wind_dir_8_corr', 'height_8',
'wind_speed_9', 'wind_dir_9', 'wind_dir_9_corr', 'height_9', 'wind_speed_10', 'wind_dir_10',
'wind_dir_10_corr', 'height_10', 'heading')
UTC = []
for k in range(len(wmb)):
UTC.append(pd.Timestamp.fromtimestamp(wmb.iloc[k, 0]))
wmb['epoch'] = UTC
wmb.index = wmb['epoch']
del wmb['epoch']
wmb = wmb.resample('3S', label='left').mean().pad() / 1800
wmb = wmb
UTC = []
for k in range(len(lidar)):
UTC.append(pd.Timestamp.fromtimestamp(lidar.iloc[k, 0]))
lidar['epoch'] = UTC
lidar.index = lidar['epoch']
del lidar['epoch']
lidar = lidar.resample('3S', label='left').mean().pad()
lidar = lidar
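# Note (sketch, not applied): the per-row epoch conversions below could also be done vectorised,
# e.g. pd.to_datetime(df.iloc[:, 0], unit='s'); fromtimestamp() uses the local timezone whereas
# unit='s' yields naive UTC, so the two only agree up to the timezone offset.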
#generating timestamps for every dataframe
counter = 0
for df in data:
UTC = []
for k in range(len(df)):
UTC.append(
|
pd.Timestamp.fromtimestamp(df.iloc[k, 0])
|
pandas.Timestamp.fromtimestamp
|
from tqdm import tqdm
import pandas as pd
import numpy as np
from IPython import embed
np.random.seed(0)
def get_arrythmias(arrythmias_path):
'''
read labels
    :param arrythmias_path: file path
:return: list
'''
with open(arrythmias_path, "r") as f:
data = f.readlines()
arrythmias = [d.strip() for d in data]
print(len(arrythmias))
return arrythmias
def get_dict(arrythmias_path):
'''
build a dictionary for conversion
    :param arrythmias_path: file path
    :return: (str2ids, id2strs) dicts mapping label name to id and back
'''
arrythmias = get_arrythmias(arrythmias_path)
str2ids = {}
id2strs = {}
for i, a in enumerate(arrythmias):
str2ids[a] = i
id2strs[i] = a
return str2ids, id2strs
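# Illustrative check (hypothetical vocabulary): each record's labels become a multi-hot vector
# over the full arrhythmia vocabulary, exactly as built in get_train_label below.
_demo_str2ids = {'A': 0, 'B': 1, 'C': 2, 'D': 3}
_demo_vec = np.zeros(len(_demo_str2ids),)
for _name in ('B', 'D'):
    _demo_vec[_demo_str2ids[_name]] = 1
assert _demo_vec.tolist() == [0.0, 1.0, 0.0, 1.0]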
def get_train_label(label_path, str2ids, train_csv_path, validation_csv_path,
trainval_csv_path, train_len):
'''
    get train labels
    :param label_path: label file path
    :param str2ids: label-name-to-id mapping (from get_dict)
    :param train_csv_path, validation_csv_path, trainval_csv_path: output csv paths
    :param train_len: int
'''
with open(label_path, "r", encoding='UTF-8') as f:
data = f.readlines()
labels = [d.strip() for d in data]
label_dicts = {}
label_dicts["index"] = []
label_dicts["age"] = []
label_dicts["sex"] = []
label_dicts["one_label"] = []
i = 0
for l in tqdm(labels):
i += 1
ls = l.split("\t")
if len(ls) <= 1:
continue
label_dicts["index"].append(ls[0])
label_dicts["age"].append(ls[1])
label_dicts["sex"].append(ls[2])
one_label = np.zeros(len(str2ids),)
for ls1 in ls[3:]:
one_label[str2ids[ls1]] = 1
label_dicts["one_label"].append(list(one_label))
df =
|
pd.DataFrame(label_dicts)
|
pandas.DataFrame
|
import requests
from typing import List
import re
# from nciRetriever.updateFC import updateFC
# from nciRetriever.csvToArcgisPro import csvToArcgisPro
# from nciRetriever.geocode import geocodeSites
# from nciRetriever.createRelationships import createRelationships
# from nciRetriever.zipGdb import zipGdb
# from nciRetriever.updateItem import update
# from nciRetriever.removeTables import removeTables
from datetime import date
import pandas as pd
import logging
from urllib.parse import urljoin
import json
import time
import sys
import os
from pprint import pprint
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', '%Y-%m-%d %H:%M:%S')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
today = date.today()
# nciThesaurus = pd.read_csv('thesaurus.csv')
# uniqueMainDiseasesDf = pd.read_csv('nciUniqueMainDiseasesReference.csv')
# uniqueSubTypeDiseasesDf = pd.read_csv('nciUniqueSubTypeDiseasesReference.csv')
# uniqueDiseasesWithoutSynonymsDf = pd.read_csv('nciUniqueDiseasesWithoutSynonymsReference.csv')
def createTrialDict(trial: dict) -> dict:
trialDict = {'nciId': trial['nci_id'],
'protocolId': trial['protocol_id'],
'nctId': trial['nct_id'],
'detailDesc': trial['detail_description'],
'officialTitle': trial['official_title'],
'briefTitle': trial['brief_title'],
'briefDesc': trial['brief_summary'],
'phase': trial['phase'],
'leadOrg': trial['lead_org'],
'amendmentDate': trial['amendment_date'],
'primaryPurpose': trial['primary_purpose'],
'currentTrialStatus': trial['current_trial_status'],
'startDate': trial['start_date']}
if 'completion_date' in trial.keys():
trialDict.update({'completionDate': trial['completion_date']})
if 'active_sites_count' in trial.keys():
trialDict.update({'activeSitesCount': trial['active_sites_count']})
if 'max_age_in_years' in trial['eligibility']['structured'].keys():
trialDict.update({'maxAgeInYears': int(trial['eligibility']['structured']['max_age_in_years'])})
if 'min_age_in_years' in trial['eligibility']['structured'].keys():
trialDict.update({'minAgeInYears': int(trial['eligibility']['structured']['min_age_in_years']) if trial['eligibility']['structured']['min_age_in_years'] is not None else None})
if 'gender' in trial['eligibility']['structured'].keys():
trialDict.update({'gender': trial['eligibility']['structured']['gender']})
if 'accepts_healthy_volunteers' in trial['eligibility']['structured'].keys():
trialDict.update({'acceptsHealthyVolunteers': trial['eligibility']['structured']['accepts_healthy_volunteers']})
if 'study_source' in trial.keys():
trialDict.update({'studySource': trial['study_source']})
if 'study_protocol_type' in trial.keys():
trialDict.update({'studyProtocolType': trial['study_protocol_type']})
if 'record_verification_date' in trial.keys():
trialDict.update({'recordVerificationDate': trial['record_verification_date']})
return trialDict
def createSiteDict(trial:dict, site:dict) -> dict:
siteDict = {'nciId': trial['nci_id'],
'orgStateOrProvince': site['org_state_or_province'],
'contactName': site['contact_name'],
'contactPhone': site['contact_phone'],
'recruitmentStatusDate': site['recruitment_status_date'],
'orgAddressLine1': site['org_address_line_1'],
'orgAddressLine2': site['org_address_line_2'],
'orgVa': site['org_va'],
'orgTty': site['org_tty'],
'orgFamily': site['org_family'],
'orgPostalCode': site['org_postal_code'],
'contactEmail': site['contact_email'],
'recruitmentStatus': site['recruitment_status'],
'orgCity': site['org_city'],
'orgEmail': site['org_email'],
'orgCountry': site['org_country'],
'orgFax': site['org_fax'],
'orgPhone': site['org_phone'],
'orgName': site['org_name']
}
# if 'org_coordinates' in site.keys():
# siteDict['lat'] = site['org_coordinates']['lat']
# siteDict['long'] = site['org_coordinates']['lon']
return siteDict
def createBiomarkersDicts(trial:dict, marker:dict) -> List[dict]:
parsedBiomarkers = []
for name in [*marker['synonyms'], marker['name']]:
biomarkerDict = {
'nciId': trial['nci_id'],
'nciThesaurusConceptId': marker['nci_thesaurus_concept_id'],
'name': name,
'assayPurpose': marker['assay_purpose']
}
if 'eligibility_criterion' in marker.keys():
biomarkerDict.update({'eligibilityCriterion': marker['eligibility_criterion']})
if 'inclusion_indicator' in marker.keys():
biomarkerDict.update({'inclusionIndicator': marker['inclusion_indicator']})
parsedBiomarkers.append(biomarkerDict)
return parsedBiomarkers
def createMainBiomarkersDict(trial:dict, marker:dict) -> dict:
parsedBiomarker = {
'nciId': trial['nci_id'],
'nciThesaurusConceptId': marker['nci_thesaurus_concept_id'],
'name': marker['name'],
'assayPurpose': marker['assay_purpose'],
}
if 'eligibility_criterion' in marker.keys():
parsedBiomarker.update({'eligibilityCriterion': marker['eligibility_criterion']})
if 'inclusion_indicator' in marker.keys():
parsedBiomarker.update({'inclusionIndicator': marker['inclusion_indicator']})
return parsedBiomarker
def createDiseasesDicts(trial:dict, disease:dict) -> List[dict]:
parsedDiseases = []
try:
names = [disease['name']]
if 'synonyms' in disease.keys():
names.extend(disease['synonyms'])
except KeyError:
logger.error(f'Invalid key for diseases. Possible keys: {disease.keys()}')
return parsedDiseases
for name in names:
diseaseDict = {
'inclusionIndicator': disease['inclusion_indicator'],
'isLeadDisease': disease['is_lead_disease'],
'name': name,
'nciThesaurusConceptId': disease['nci_thesaurus_concept_id'],
'nciId': trial['nci_id']
}
parsedDiseases.append(diseaseDict)
return parsedDiseases
def createMainToSubTypeRelDicts(trial:dict, disease:dict) -> List[dict]:
if 'subtype' not in disease['type']:
return []
relDicts = []
for parent in disease['parents']:
relDicts.append({
'maintype': parent,
'subtype': disease['nci_thesaurus_concept_id']
})
return relDicts
def createDiseasesWithoutSynonymsDict(trial:dict, disease:dict) -> dict:
# diseaseDict = {
# 'nciId': trial['nci_id'],
# 'inclusionIndicator': disease['inclusion_indicator'],
# 'isLeadDisease': disease['is_lead_disease'],
# 'nciThesaurusConceptId': disease['nci_thesaurus_concept_id']
# }
# correctDisease = uniqueDiseasesWithoutSynonymsDf.loc[uniqueDiseasesWithoutSynonymsDf['nciThesaurusConceptId'] == disease['nci_thesaurus_concept_id']]
# if correctDisease.empty:
# logger.error('Disease not found in full reference. Aborting insertion...')
# return {}
# # logger.debug(correctDisease['name'].values[0])
# # time.sleep(2)
# diseaseDict.update({
# 'name': correctDisease['name'].values[0]
# })
# return diseaseDict
try:
return {
'nciId': trial['nci_id'],
'name': disease['name'],
'isLeadDisease': disease['is_lead_disease'],
'nciThesaurusConceptId': disease['nci_thesaurus_concept_id'],
'inclusionIndicator': disease['inclusion_indicator']
}
except KeyError:
logger.error('Invalid key for main diseases. Not adding to list...')
return {}
def createMainDiseasesDict(trial:dict, disease:dict) -> dict:
# diseaseDict = {
# 'nciId': trial['nci_id'],
# 'inclusionIndicator': disease['inclusion_indicator'],
# 'isLeadDisease': disease['is_lead_disease'],
# 'nciThesaurusConceptId': disease['nci_thesaurus_concept_id']
# }
# correctDisease = uniqueMainDiseasesDf.loc[uniqueMainDiseasesDf['nciThesaurusConceptId'] == disease['nci_thesaurus_concept_id']]
# if correctDisease.empty:
# return {}
# diseaseDict.update({
# 'name': correctDisease['name'].values[0]
# })
# return diseaseDict
# if 'type' not in disease.keys():
# return {}
if 'maintype' not in disease['type']:
return {}
try:
return {
'nciId': trial['nci_id'],
'name': disease['name'],
'isLeadDisease': disease['is_lead_disease'],
'nciThesaurusConceptId': disease['nci_thesaurus_concept_id'],
'inclusionIndicator': disease['inclusion_indicator']
}
except KeyError:
logger.error('Invalid key for main diseases. Not adding to list...')
return {}
def createSubTypeDiseasesDict(trial:dict, disease:dict) -> dict:
# diseaseDict = {
# 'nciId': trial['nci_id'],
# 'inclusionIndicator': disease['inclusion_indicator'],
# 'isLeadDisease': disease['is_lead_disease'],
# 'nciThesaurusConceptId': disease['nci_thesaurus_concept_id']
# }
# correctDisease = uniqueSubTypeDiseasesDf.loc[uniqueSubTypeDiseasesDf['nciThesaurusConceptId'] == disease['nci_thesaurus_concept_id']]
# if correctDisease.empty:
# return {}
# diseaseDict.update({
# 'name': correctDisease['name'].values[0]
# })
# return diseaseDict
# if 'type' not in disease.keys():
# return {}
if 'subtype' not in disease['type']:
return {}
try:
return {
'nciId': trial['nci_id'],
'name': disease['name'],
'isLeadDisease': disease['is_lead_disease'],
'nciThesaurusConceptId': disease['nci_thesaurus_concept_id'],
'inclusionIndicator': disease['inclusion_indicator']
}
except KeyError:
logger.error('Invalid key for subtype diseases. Not adding to list...')
return {}
def createArmsDict(trial:dict, arm:dict) -> dict:
parsedArm = re.sub(r'\(.+\)', '', arm['name'])
parsedArm = re.sub(r'\s+', '_', parsedArm.strip())
return {
'nciId': trial['nci_id'],
'name': arm['name'],
'nciIdWithName': f'{trial["nci_id"]}_{parsedArm}',
'description': arm['description'],
'type': arm['type']
}
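# Illustrative check: the two re.sub calls strip a parenthesised suffix and replace whitespace
# with underscores, e.g. 'Arm I (paclitaxel)' -> 'Arm_I', keeping the nciIdWithName keys stable.
assert re.sub(r'\s+', '_', re.sub(r'\(.+\)', '', 'Arm I (paclitaxel)').strip()) == 'Arm_I'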
def createInterventionsDicts(trial:dict, arm:dict) -> List[dict]:
parsedInterventions = []
parsedArm = re.sub(r'\(.+\)', '', arm['name'])
parsedArm = re.sub(r'\s+', '_', parsedArm.strip())
for intervention in arm['interventions']:
names = intervention['synonyms']
if 'name' in intervention.keys():
names.append(intervention['name'])
elif 'intervention_name' in intervention.keys():
names.append(intervention['intervention_name'])
for name in names:
try:
interventionDict = {
'nciId': trial['nci_id'],
'arm': arm['name'],
'nciIdWithArm': f'{trial["nci_id"]}_{parsedArm}',
'type': intervention['intervention_type'],
'inclusionIndicator': intervention['inclusion_indicator'],
'name': name,
'category': intervention['category'],
'nciThesaurusConceptId': intervention['intervention_code'],
'description': intervention['intervention_description']
}
except KeyError:
try:
interventionDict = {
'nciId': trial['nci_id'],
'arm': arm['name'],
'nciIdWithArm': f'{trial["nci_id"]}_{parsedArm}',
'type': intervention['type'],
'inclusionIndicator': intervention['inclusion_indicator'],
'name': name,
'category': intervention['category'],
'nciThesaurusConceptId': intervention['nci_thesaurus_concept_id'],
'description': intervention['description']
}
except KeyError as e:
logger.exception(e)
logger.error(f'Invalid intervention keys. Possible keys are: {intervention.keys()}')
continue
parsedInterventions.append(interventionDict)
return parsedInterventions
def createMainInterventionDicts(trial:dict, arm:dict) -> List[dict]:
parsedArm = re.sub(r'\(.+\)', '', arm['name'])
parsedArm = re.sub(r'\s+', '_', parsedArm.strip())
parsedMainInterventions = []
for intervention in arm['interventions']:
try:
mainInterventionDict = {
'nciId': trial['nci_id'],
'arm': arm['name'],
'nciIdWithArm': f'{trial["nci_id"]}_{parsedArm}',
'type': intervention['intervention_type'],
'inclusionIndicator': intervention['inclusion_indicator'],
'name': intervention['intervention_name'],
'category': intervention['category'],
'nciThesaurusConceptId': intervention['intervention_code'],
'description': intervention['intervention_description']
}
except KeyError:
try:
mainInterventionDict = {
'nciId': trial['nci_id'],
'arm': arm['name'],
'nciIdWithArm': f'{trial["nci_id"]}_{parsedArm}',
'type': intervention['type'],
'inclusionIndicator': intervention['inclusion_indicator'],
'name': intervention['name'],
'category': intervention['category'],
'nciThesaurusConceptId': intervention['nci_thesaurus_concept_id'],
'description': intervention['description']
}
except KeyError:
logger.error(f'Unexpected intervention keys: {intervention.keys()}. Not inserting...')
continue
parsedMainInterventions.append(mainInterventionDict)
return parsedMainInterventions
def deDuplicateTable(csvName:str, deduplicationList:List[str]):
df = pd.read_csv(csvName)
df.drop_duplicates(subset=deduplicationList, inplace=True)
df.to_csv(csvName, index=False)
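# Hypothetical usage sketch (the subset columns are illustrative, following the naming above):
# deDuplicateTable(f'nciSites{today}.csv', ['nciId', 'orgName'])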
def correctMainToSubTypeTable(today):
mainDf = pd.read_csv(f'nciUniqueMainDiseases{today}.csv')
subTypeDf = pd.read_csv(f'nciUniqueSubTypeDiseases{today}.csv')
relDf = pd.read_csv(f'MainToSubTypeRelTable{today}.csv')
for idx, row in relDf.iterrows():
parentId = row['maintype']
if parentId in mainDf['nciThesaurusConceptId'].values:
continue
elif parentId in subTypeDf['nciThesaurusConceptId'].values:
while True:
possibleMainTypesDf = relDf[relDf['subtype'] == parentId]
if possibleMainTypesDf.empty:
logger.error(f'Parent {parentId} not found in main diseases or subtype diseases')
parentId = ''
break
#setting the parentId value with the parent of the subtype found
for value in possibleMainTypesDf['maintype'].values:
if parentId == value:
continue
parentId = value
break
else:
logger.error(f'Parent {parentId} not found in main diseases or subtype diseases')
parentId = ''
break
# parentId = possibleMainTypesDf['maintype'].values[0]
if parentId in mainDf['nciThesaurusConceptId'].values:
break
if parentId == '':
continue
            relDf.at[idx, 'maintype'] = parentId  # .at writes back to relDf (chained iloc[][] would modify a copy)
else:
pass
relDf.to_csv(f'MainToSubTypeRelTable{today}.csv', index=False)
# logger.error(f'maintype id {parentId} is not found in main diseases or subtype diseases')
def createUniqueSitesCsv(today):
logger.debug('Reading sites...')
sitesDf = pd.read_csv(f'nciSites{today}.csv')
    logger.debug('Dropping duplicates and trial-dependent information...')
sitesDf.drop_duplicates(subset='orgName', inplace=True)
sitesDf.drop(['recruitmentStatusDate', 'recruitmentStatus', 'nciId'], axis=1, inplace=True)
logger.debug('Saving unique sites table...')
sitesDf.to_csv(f'nciUniqueSites{today}.csv', index=False)
def createUniqueDiseasesWithoutSynonymsCsv(today):
logger.debug('Reading diseases without synonyms...')
diseasesWithoutSynonymsDf = pd.read_csv(f'nciDiseasesWithoutSynonyms{today}.csv')
logger.debug('Dropping duplicates and trial-dependent information...')
diseasesWithoutSynonymsDf.drop_duplicates(subset='nciThesaurusConceptId', inplace=True)
diseasesWithoutSynonymsDf.drop(['isLeadDisease', 'inclusionIndicator', 'nciId'], axis=1, inplace=True)
    diseasesWithoutSynonymsDf.dropna(inplace=True)
logger.debug('Saving unique diseases table...')
diseasesWithoutSynonymsDf.to_csv(f'nciUniqueDiseasesWithoutSynonyms{today}.csv', index=False)
def createUniqueMainDiseasesCsv(today):
logger.debug('Reading main diseases...')
mainDiseasesDf = pd.read_csv(f'nciMainDiseases{today}.csv')
logger.debug('Dropping duplicates and trial-dependent information...')
mainDiseasesDf.drop_duplicates(subset='nciThesaurusConceptId', inplace=True)
mainDiseasesDf.drop(['isLeadDisease', 'inclusionIndicator', 'nciId'], axis=1, inplace=True)
    mainDiseasesDf.dropna(inplace=True)
logger.debug('Saving unique diseases table...')
mainDiseasesDf.to_csv(f'nciUniqueMainDiseases{today}.csv', index=False)
def createUniqueSubTypeDiseasesCsv(today):
logger.debug('Reading main diseases...')
subTypeDiseasesDf = pd.read_csv(f'nciSubTypeDiseases{today}.csv')
logger.debug('Dropping duplicates and trial-dependent information...')
subTypeDiseasesDf.drop_duplicates(subset='nciThesaurusConceptId', inplace=True)
subTypeDiseasesDf.drop(['isLeadDisease', 'inclusionIndicator', 'nciId'], axis=1, inplace=True)
    subTypeDiseasesDf.dropna(inplace=True)
logger.debug('Saving unique diseases table...')
subTypeDiseasesDf.to_csv(f'nciUniqueSubTypeDiseases{today}.csv', index=False)
def createUniqueBiomarkersCsv(today):
logger.debug('Reading main biomarkers...')
mainBiomarkersDf = pd.read_csv(f'nciMainBiomarkers{today}.csv')
logger.debug('Dropping duplicates and trial-dependent information...')
mainBiomarkersDf.drop_duplicates(subset='nciThesaurusConceptId', inplace=True)
mainBiomarkersDf.drop(['eligibilityCriterion', 'inclusionIndicator', 'assayPurpose', 'nciId'], axis=1, inplace=True)
    mainBiomarkersDf.dropna(inplace=True)
logger.debug('Saving unique biomarkers table...')
mainBiomarkersDf.to_csv(f'nciUniqueMainBiomarkers{today}.csv', index=False)
def createUniqueInterventionsCsv(today):
logger.debug('Reading main interventions...')
mainInterventionsDf = pd.read_csv(f'nciMainInterventions{today}.csv')
logger.debug('Dropping duplicates and trial-dependent information...')
mainInterventionsDf.drop_duplicates(subset='nciThesaurusConceptId', inplace=True)
mainInterventionsDf.drop(['nciId', 'inclusionIndicator', 'arm', 'nciIdWithArm'], axis=1, inplace=True)
    mainInterventionsDf.dropna(inplace=True)
logger.debug('Saving unique interventions table...')
mainInterventionsDf.to_csv(f'nciUniqueMainInterventions{today}.csv', index=False)
def retrieveToCsv():
baseUrl = r'https://clinicaltrialsapi.cancer.gov/api/v2/'
with open('./nciRetriever/secrets/key.txt', 'r') as f:
apiKey = f.read()
headers = {
'X-API-KEY': apiKey,
'Content-Type': 'application/json'
}
trialEndpoint = urljoin(baseUrl, 'trials')
logger.debug(trialEndpoint)
#sending initial request to get the total number of trials
trialsResponse = requests.get(trialEndpoint, headers=headers, params={'trial_status': 'OPEN'})
trialsResponse.raise_for_status()
trialJson = trialsResponse.json()
totalNumTrials = trialJson['total']
logger.debug(f'Total number of trials: {totalNumTrials}')
start = time.perf_counter()
createdTrialCsv = False
createdSiteCsv = False
createdEligibilityCsv = False
createdBiomarkerCsv = False
createdMainBiomarkerCsv = False
createdDiseaseCsv = False
createdMainToSubTypeRelTableCsv = False
createdDiseaseWithoutSynonymsCsv = False
createdMainDiseaseCsv = False
createdSubTypeDiseaseCsv = False
createdArmsCsv = False
createdInterventionCsv = False
createdMainInterventionCsv = False
for trialNumFrom in range(0, totalNumTrials, 50):
sectionStart = time.perf_counter()
#creating the dataframes again after every 50 trials to avoid using too much memory
trialsDf = pd.DataFrame(columns=['protocolId',
'nciId',
'nctId',
'detailDesc',
'officialTitle',
'briefTitle',
'briefDesc',
'phase',
'leadOrg',
'amendmentDate',
'primaryPurpose',
'activeSitesCount',
'currentTrialStatus',
'startDate',
'completionDate',
'maxAgeInYears',
'minAgeInYears',
'gender',
'acceptsHealthyVolunteers',
'studySource',
'studyProtocolType',
'recordVerificationDate'])
sitesDf = pd.DataFrame(columns=['nciId',
'orgStateOrProvince',
'contactName',
'contactPhone',
'recruitmentStatusDate',
'orgAddressLine1',
'orgAddressLine2',
'orgVa',
'orgTty',
'orgFamily',
'orgPostalCode',
'contactEmail',
'recruitmentStatus',
'orgCity',
'orgEmail',
                                        'orgCountry',
'orgFax',
'orgPhone',
'orgName'])
eligibilityDf = pd.DataFrame(columns=['nciId',
'inclusionIndicator',
'description'])
biomarkersDf = pd.DataFrame(columns=[
'nciId',
'eligibilityCriterion',
'inclusionIndicator',
'nciThesaurusConceptId',
'name',
'assayPurpose'
])
mainBiomarkersDf = pd.DataFrame(columns=[
'nciId',
'eligibilityCriterion',
'inclusionIndicator',
'nciThesaurusConceptId',
'name',
'assayPurpose'
])
diseasesDf = pd.DataFrame(columns=[
'nciId',
'inclusionIndicator',
'isLeadDisease',
'nciThesaurusConceptId',
'name'
])
mainToSubTypeRelsDf = pd.DataFrame(columns=[
'maintype',
'subtype'
])
mainDiseasesDf = pd.DataFrame(columns=[
'nciId',
'inclusionIndicator',
'isLeadDisease',
'nciThesaurusConceptId',
'name'
])
diseasesWithoutSynonymsDf = pd.DataFrame(columns=[
'nciId',
'inclusionIndicator',
'isLeadDisease',
'nciThesaurusConceptId',
'name'
])
subTypeDiseasesDf = pd.DataFrame(columns=[
'nciId',
'inclusionIndicator',
'isLeadDisease',
'nciThesaurusConceptId',
'name'
])
armsDf = pd.DataFrame(columns=[
'nciId',
'name',
'nciIdWithName',
'description',
'type'
])
interventionsDf = pd.DataFrame(columns=[
'nciId',
'arm',
'nciIdWithArm',
'type',
'inclusionIndicator',
'name',
'category',
'nciThesaurusConceptId',
'description'
])
mainInterventionsDf = pd.DataFrame(columns=[
'nciId',
'arm',
'nciIdWithArm',
'type',
'inclusionIndicator',
'name',
'category',
'nciThesaurusConceptId',
'description'
])
payload = {
'size': 50,
'trial_status': 'OPEN',
'from': trialNumFrom
}
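        # Pagination: the API is paged via 'from'/'size'; this request fetches up to 50 of the
        # totalNumTrials open trials starting at offset trialNumFrom.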
response = requests.get(trialEndpoint, headers=headers, params=payload)
response.raise_for_status()
sectionJson = response.json()
trials = []
for trial in sectionJson['data']:
trials.append(createTrialDict(trial))
if trial['eligibility']['unstructured'] is not None:
#parsing the unstructured eligibility information from the trial
eligibilityInfo = []
for condition in trial['eligibility']['unstructured']:
eligibilityInfo.append({
'nciId': trial['nci_id'],
'inclusionIndicator': condition['inclusion_indicator'],
'description': condition['description']
})
conditionDf = pd.DataFrame.from_records(eligibilityInfo)
eligibilityDf = pd.concat([eligibilityDf, conditionDf], verify_integrity=True, ignore_index=True)
if trial['sites'] is not None:
#parsing the sites associated with the trial
sites = []
for site in trial['sites']:
sites.append(createSiteDict(trial, site))
siteDf = pd.DataFrame.from_records(sites)
sitesDf = pd.concat([sitesDf, siteDf], ignore_index=True, verify_integrity=True)
if trial['biomarkers'] is not None:
#parsing the biomarkers associated with the trial
biomarkers = []
mainBiomarkers = []
for biomarker in trial['biomarkers']:
# biomarkers.extend(createBiomarkersDicts(trial, biomarker))
mainBiomarkersDict = createMainBiomarkersDict(trial, biomarker)
if mainBiomarkersDict != {}:
mainBiomarkers.append(mainBiomarkersDict)
# biomarkerDf = pd.DataFrame.from_records(biomarkers)
# biomarkersDf = pd.concat([biomarkersDf, biomarkerDf], ignore_index=True, verify_integrity=True)
mainBiomarkerDf =
|
pd.DataFrame.from_records(mainBiomarkers)
|
pandas.DataFrame.from_records
|
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
self.assertEqual(idx.resolution, expected)
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with tm.assertRaises(TypeError):
dti + dti
with tm.assertRaises(TypeError):
dti_tz + dti_tz
with tm.assertRaises(TypeError):
dti_tz + dti
with tm.assertRaises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
        # return subtraction -> TimedeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with tm.assertRaises(TypeError):
dti_tz - dti
with tm.assertRaises(TypeError):
dti - dti_tz
with tm.assertRaises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with tm.assertRaises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_comp_nat(self):
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
for tz in self.tz:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
tz=tz)
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False),
expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
tm.assertIn(idx[0], idx)
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H',
tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
# without freq
for tz in self.tz:
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], tz=tz, name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], tz=tz, name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S']:
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.DatetimeIndex([pd.NaT] * 5, name='x')
|
tm.assert_index_equal(result, exp)
|
pandas.util.testing.assert_index_equal
|
# -*- coding: utf-8 -*-
"""
Updated 6/5/2020
Modified from <NAME>'s original code to read flow and precipitation data and
compute information measures over the entire time window.
Primary metrics: mutual information, dominant lag time, threshold, specific information values.
Inputs: CPC gage-based gridded data for the CO Headwaters HUC4 basin region, USGS flow rate data.
Functions: compute_info_measures.py and compute_icrit.py for the IT computations.
Outputs:
@author: <NAME>, Mozhgan
"""
import pickle
import numpy as np
import pandas as pd
import time
from itertools import chain
from compute_icrit import compute_icrit as ci
from compute_info_measures import compute_info_measures as cim
start = time.time()
#designate number of bins for P (xbin) and Q (ybin); ybin can be varied (e.g. from 5 to 17)
xbin = 2
ybin = 5
#precipitation thresholds (mm per day)
thresholds = [0.3,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
#designate max lag
delay = 7
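# Illustrative sketch (an assumption, not part of the original pipeline): one plausible way to
# estimate mutual information between precipitation and lagged flow with simple 2-D histogram
# binning, and to report the lag with the highest MI as the dominant lag. The actual computations
# in this script are delegated to compute_info_measures / compute_icrit above; the helper name
# below (_sketch_lagged_mi) is hypothetical.
def _sketch_lagged_mi(p, q, max_lag, nbins_p, nbins_q):
    import numpy as np  # local import keeps the sketch self-contained
    p = np.asarray(p, dtype=float)
    q = np.asarray(q, dtype=float)
    best_lag, best_mi = 0, -np.inf
    for lag in range(max_lag + 1):
        x = p[:len(p) - lag] if lag else p      # precip at time t
        y = q[lag:]                             # flow at time t + lag
        counts, _, _ = np.histogram2d(x, y, bins=[nbins_p, nbins_q])
        pxy = counts / counts.sum()             # joint probability
        px = pxy.sum(axis=1, keepdims=True)     # marginal of precip
        py = pxy.sum(axis=0, keepdims=True)     # marginal of flow
        nz = pxy > 0
        mi = np.sum(pxy[nz] * np.log2(pxy[nz] / (px @ py)[nz]))
        if mi > best_mi:
            best_lag, best_mi = lag, mi
    return best_lag, best_mi
# e.g. (hypothetical call): _sketch_lagged_mi(precip_series, flow_series, delay, xbin, ybin)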
start_year1 = 1953
end_year1 = 2016
#create year, month, day, and season vectors
ndays_per_month = np.asfarray([31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31])
yearlist = []
monthlist = []
daylist = []
seasonlist = []
#define seasons
for y in range (start_year1-1,end_year1):
for m in range (0,12):
        if m == 1 and y in range(1952, 2018, 4):  # February of a leap year
length = int(29)
else:
length = int(ndays_per_month[m])
if m in range(0,2) or m ==11: #winter
s = 1
elif m in range(2,5):
s = 2
elif m in range(5,8):
s = 3
else:
s = 4
for d in range (1,length+1):
daylist.append(d)
monthlist.append(m+1)
yearlist.append(y)
seasonlist.append(s)
dfyear = pd.DataFrame(yearlist, columns = ['year'])
dfmonth = pd.DataFrame(monthlist, columns = ['month'])
dfday = pd.DataFrame(daylist, columns = ['day'])
dfseason = pd.DataFrame(seasonlist, columns = ['season'])
dfdate = pd.concat([dfyear, dfmonth, dfday, dfseason], axis = 1)
dateyrarray = np.array([[yearlist]])
#load streamflow data (either "COHW flow rates" or the "difference between COHW and Gunnison flow rates")
dfsf1 = pd.read_csv('COHW Gage Data 1952-2017.csv', usecols=[4])
dfsf1.rename(columns={'difference': 'flow'}, inplace=True)
dfsf1_list = dfsf1['flow']
dfsf1_list = dfsf1_list.tolist()
dfsf1_series = pd.Series(dfsf1_list)
dfsf1_array = np.array(dfsf1_series)
#load gridded rainfall data
f_myfile = open('CO_rainfall_data.pickle','rb')
Co_pptdata = pickle.load(f_myfile) #ppt_long_list = rainfall data [0] and percentiles [1]
lat = pickle.load(f_myfile)
lon = pickle.load(f_myfile)
f_myfile.close()
dfppt =
|
pd.DataFrame(Co_pptdata)
|
pandas.DataFrame
|
import unittest
import copy
import numpy as np
import numpy.testing as np_test
import pandas as pd
import pandas.testing as pd_test
import warnings
from pyblackscholesanalytics.market.market import MarketEnvironment
from pyblackscholesanalytics.options.options import PlainVanillaOption, DigitalOption
from pyblackscholesanalytics.utils.utils import scalarize
class TestPlainVanillaOption(unittest.TestCase):
"""Class to test public methods of PlainVanillaOption class"""
def setUp(self) -> None:
warnings.filterwarnings("ignore")
# common market environment
mkt_env = MarketEnvironment()
# option objects
self.call_opt = PlainVanillaOption(mkt_env)
self.put_opt = PlainVanillaOption(mkt_env, option_type="put")
# pricing parameters
S_scalar = 100
S_vector = [90, 100, 110]
t_scalar_string = "01-06-2020"
t_date_range = pd.date_range(start="2020-04-19", end="2020-12-21", periods=5)
# common pricing parameter setup
common_params = {"np_output": True, "minimization_method": "Least-Squares"}
# scalar parameters setup
self.scalar_params = copy.deepcopy(common_params)
self.scalar_params["S"] = S_scalar
self.scalar_params["t"] = t_scalar_string
# vector parameters setup
self.vector_params = copy.deepcopy(common_params)
self.vector_params["S"] = S_vector
self.vector_params["t"] = t_date_range
# complex pricing parameter setup
# (S scalar, K and t vector, sigma distributed as Kxt grid, r distributed as Kxt grid)
K_vector = [75, 85, 90, 95]
mK = len(K_vector)
n = 3
sigma_grid_K = np.array([0.1 * (1 + i) for i in range(mK * n)]).reshape(n, mK)
r_grid_K = np.array([0.01 * (1 + i) for i in range(mK * n)]).reshape(n, mK)
self.complex_params = {"S": S_vector[0],
"K": K_vector,
"t": pd.date_range(start="2020-04-19", end="2020-12-21", periods=n),
"sigma": sigma_grid_K,
"r": r_grid_K,
"np_output": False,
"minimization_method": "Least-Squares"}
def test_price_scalar(self):
"""Test price - scalar case"""
# call
test_call = scalarize(self.call_opt.price(**self.scalar_params))
expected_call = 7.548381716811839
self.assertEqual(test_call, expected_call)
# put
test_put = scalarize(self.put_opt.price(**self.scalar_params))
expected_put = 4.672730506407959
self.assertEqual(test_put, expected_put)
def test_price_vector_np(self):
"""Test price - np.ndarray output case"""
# call
test_call = self.call_opt.price(**self.vector_params)
expected_call = np.array([[3.48740247e+00, 8.42523213e+00, 1.55968082e+01],
[2.53045128e+00, 7.14167587e+00, 1.43217796e+01],
[1.56095778e+00, 5.72684668e+00, 1.29736886e+01],
[5.89165298e-01, 4.00605304e+00, 1.14939139e+01],
[7.21585753e-04, 1.38927959e+00, 1.01386434e+01]])
np_test.assert_allclose(test_call, expected_call)
# put
test_put = self.put_opt.price(**self.vector_params)
expected_put = np.array([[1.00413306e+01, 4.97916024e+00, 2.15073633e+00],
[9.90791873e+00, 4.51914332e+00, 1.69924708e+00],
[9.75553655e+00, 3.92142545e+00, 1.16826738e+00],
[9.62127704e+00, 3.03816479e+00, 5.26025639e-01],
[9.86382907e+00, 1.25238707e+00, 1.75090342e-03]])
np_test.assert_allclose(test_put, expected_put)
def test_price_vector_df(self):
"""Test price - pd.DataFrame output case"""
# request Pandas DataFrame as output format
self.vector_params["np_output"] = False
# call
test_call = self.call_opt.price(**self.vector_params)
expected_call = pd.DataFrame(data=[[3.48740247e+00, 8.42523213e+00, 1.55968082e+01],
[2.53045128e+00, 7.14167587e+00, 1.43217796e+01],
[1.56095778e+00, 5.72684668e+00, 1.29736886e+01],
[5.89165298e-01, 4.00605304e+00, 1.14939139e+01],
[7.21585753e-04, 1.38927959e+00, 1.01386434e+01]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_call.rename_axis("S", axis='columns', inplace=True)
expected_call.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_call, expected_call)
# put
test_put = self.put_opt.price(**self.vector_params)
expected_put = pd.DataFrame(data=[[1.00413306e+01, 4.97916024e+00, 2.15073633e+00],
[9.90791873e+00, 4.51914332e+00, 1.69924708e+00],
[9.75553655e+00, 3.92142545e+00, 1.16826738e+00],
[9.62127704e+00, 3.03816479e+00, 5.26025639e-01],
[9.86382907e+00, 1.25238707e+00, 1.75090342e-03]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_put.rename_axis("S", axis='columns', inplace=True)
expected_put.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_put, expected_put)
def test_PnL_scalar(self):
"""Test P&L - scalar case"""
# call
test_call = scalarize(self.call_opt.PnL(**self.scalar_params))
expected_call = 4.060979245868182
self.assertEqual(test_call, expected_call)
# put
test_put = scalarize(self.put_opt.PnL(**self.scalar_params))
expected_put = -5.368600081057167
self.assertEqual(test_put, expected_put)
def test_PnL_vector_np(self):
"""Test P&L - np.ndarray output case"""
# call
test_call = self.call_opt.PnL(**self.vector_params)
expected_call = np.array([[0., 4.93782966, 12.10940574],
[-0.95695119, 3.6542734, 10.83437716],
[-1.92644469, 2.2394442, 9.48628613],
[-2.89823717, 0.51865057, 8.00651142],
[-3.48668089, -2.09812288, 6.65124095]])
np_test.assert_allclose(test_call, expected_call)
# put
test_put = self.put_opt.PnL(**self.vector_params)
expected_put = np.array([[0., -5.06217034, -7.89059426],
[-0.13341186, -5.52218727, -8.3420835],
[-0.28579403, -6.11990513, -8.87306321],
[-0.42005355, -7.0031658, -9.51530495],
[-0.17750152, -8.78894351, -10.03957968]])
np_test.assert_allclose(test_put, expected_put)
def test_PnL_vector_df(self):
"""Test P&L - pd.DataFrame output case"""
# request Pandas DataFrame as output format
self.vector_params["np_output"] = False
# call
test_call = self.call_opt.PnL(**self.vector_params)
expected_call = pd.DataFrame(data=[[0., 4.93782966, 12.10940574],
[-0.95695119, 3.6542734, 10.83437716],
[-1.92644469, 2.2394442, 9.48628613],
[-2.89823717, 0.51865057, 8.00651142],
[-3.48668089, -2.09812288, 6.65124095]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_call.rename_axis("S", axis='columns', inplace=True)
expected_call.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_call, expected_call)
# put
test_put = self.put_opt.PnL(**self.vector_params)
expected_put = pd.DataFrame(data=[[0., -5.06217034, -7.89059426],
[-0.13341186, -5.52218727, -8.3420835],
[-0.28579403, -6.11990513, -8.87306321],
[-0.42005355, -7.0031658, -9.51530495],
[-0.17750152, -8.78894351, -10.03957968]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_put.rename_axis("S", axis='columns', inplace=True)
expected_put.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_put, expected_put)
def test_delta_scalar(self):
"""Test Delta - scalar case"""
# call
test_call = scalarize(self.call_opt.delta(**self.scalar_params))
expected_call = 0.6054075531684143
self.assertEqual(test_call, expected_call)
# put
test_put = scalarize(self.put_opt.delta(**self.scalar_params))
expected_put = -0.3945924468315857
self.assertEqual(test_put, expected_put)
def test_delta_vector_np(self):
"""Test Delta - np.ndarray output case"""
# call
test_call = self.call_opt.delta(**self.vector_params)
expected_call = np.array([[3.68466757e-01, 6.15283790e-01, 8.05697003e-01],
[3.20097309e-01, 6.00702480e-01, 8.18280131e-01],
[2.54167521e-01, 5.83663527e-01, 8.41522350e-01],
[1.49152172e-01, 5.61339299e-01, 8.91560577e-01],
[8.89758553e-04, 5.23098767e-01, 9.98343116e-01]])
np_test.assert_allclose(test_call, expected_call)
# put
test_put = self.put_opt.delta(**self.vector_params)
expected_put = np.array([[-0.63153324, -0.38471621, -0.194303],
[-0.67990269, -0.39929752, -0.18171987],
[-0.74583248, -0.41633647, -0.15847765],
[-0.85084783, -0.4386607, -0.10843942],
[-0.99911024, -0.47690123, -0.00165688]])
np_test.assert_allclose(test_put, expected_put, rtol=5e-6)
def test_delta_vector_df(self):
"""Test Delta - pd.DataFrame output case"""
# request Pandas DataFrame as output format
self.vector_params["np_output"] = False
# call
test_call = self.call_opt.delta(**self.vector_params)
expected_call = pd.DataFrame(data=[[3.68466757e-01, 6.15283790e-01, 8.05697003e-01],
[3.20097309e-01, 6.00702480e-01, 8.18280131e-01],
[2.54167521e-01, 5.83663527e-01, 8.41522350e-01],
[1.49152172e-01, 5.61339299e-01, 8.91560577e-01],
[8.89758553e-04, 5.23098767e-01, 9.98343116e-01]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_call.rename_axis("S", axis='columns', inplace=True)
expected_call.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_call, expected_call)
# put
test_put = self.put_opt.delta(**self.vector_params)
expected_put = pd.DataFrame(data=[[-0.63153324, -0.38471621, -0.194303],
[-0.67990269, -0.39929752, -0.18171987],
[-0.74583248, -0.41633647, -0.15847765],
[-0.85084783, -0.4386607, -0.10843942],
[-0.99911024, -0.47690123, -0.00165688]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_put.rename_axis("S", axis='columns', inplace=True)
expected_put.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_put, expected_put)
def test_gamma_scalar(self):
"""Test Gamma - scalar case"""
# call
test_call = scalarize(self.call_opt.gamma(**self.scalar_params))
expected_call = 0.025194958512498786
self.assertEqual(test_call, expected_call)
# put
test_put = scalarize(self.put_opt.gamma(**self.scalar_params))
expected_put = copy.deepcopy(expected_call)
self.assertEqual(test_put, expected_put)
# assert call and put gamma coincide
self.assertEqual(test_call, test_put)
def test_gamma_vector_np(self):
"""Test Gamma - np.ndarray output case"""
# call
test_call = self.call_opt.gamma(**self.vector_params)
expected_call = np.array([[0.02501273, 0.02281654, 0.01493167],
[0.02725456, 0.02648423, 0.01645793],
[0.02950243, 0.03231528, 0.01820714],
[0.02925862, 0.0446913, 0.01918121],
[0.00101516, 0.12030889, 0.00146722]])
np_test.assert_allclose(test_call, expected_call, rtol=5e-6)
# put
test_put = self.put_opt.gamma(**self.vector_params)
expected_put = copy.deepcopy(expected_call)
np_test.assert_allclose(test_put, expected_put, rtol=5e-6)
# assert call and put gamma coincide
np_test.assert_allclose(test_call, test_put)
def test_gamma_vector_df(self):
"""Test Gamma - pd.DataFrame output case"""
# request Pandas DataFrame as output format
self.vector_params["np_output"] = False
# call
test_call = self.call_opt.gamma(**self.vector_params)
expected_call = pd.DataFrame(data=[[0.02501273, 0.02281654, 0.01493167],
[0.02725456, 0.02648423, 0.01645793],
[0.02950243, 0.03231528, 0.01820714],
[0.02925862, 0.0446913, 0.01918121],
[0.00101516, 0.12030889, 0.00146722]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_call.rename_axis("S", axis='columns', inplace=True)
expected_call.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_call, expected_call)
# put
test_put = self.put_opt.gamma(**self.vector_params)
expected_put = copy.deepcopy(expected_call)
pd_test.assert_frame_equal(test_put, expected_put)
# assert call and put gamma coincide
pd_test.assert_frame_equal(test_call, test_put)
def test_vega_scalar(self):
"""Test Vega - scalar case"""
# call
test_call = scalarize(self.call_opt.vega(**self.scalar_params))
expected_call = 0.29405622811847903
self.assertEqual(test_call, expected_call)
# put
test_put = scalarize(self.put_opt.vega(**self.scalar_params))
expected_put = copy.deepcopy(expected_call)
self.assertEqual(test_put, expected_put)
# assert call and put vega coincide
self.assertEqual(test_call, test_put)
def test_vega_vector_np(self):
"""Test Vega - np.ndarray output case"""
# call
test_call = self.call_opt.vega(**self.vector_params)
expected_call = np.array([[0.28419942, 0.32005661, 0.2534375],
[0.23467293, 0.28153094, 0.21168961],
[0.17415326, 0.23550311, 0.16055207],
[0.09220072, 0.17386752, 0.09029355],
[0.00045056, 0.06592268, 0.00097279]])
np_test.assert_allclose(test_call, expected_call, rtol=1e-5)
# put
test_put = self.put_opt.vega(**self.vector_params)
expected_put = copy.deepcopy(expected_call)
np_test.assert_allclose(test_put, expected_put, rtol=1e-5)
# assert call and put vega coincide
np_test.assert_allclose(test_call, test_put)
def test_vega_vector_df(self):
"""Test Vega - pd.DataFrame output case"""
# request Pandas DataFrame as output format
self.vector_params["np_output"] = False
# call
test_call = self.call_opt.vega(**self.vector_params)
expected_call = pd.DataFrame(data=[[0.28419942, 0.32005661, 0.2534375],
[0.23467293, 0.28153094, 0.21168961],
[0.17415326, 0.23550311, 0.16055207],
[0.09220072, 0.17386752, 0.09029355],
[0.00045056, 0.06592268, 0.00097279]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_call.rename_axis("S", axis='columns', inplace=True)
expected_call.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_call, expected_call, check_less_precise=True)
# put
test_put = self.put_opt.vega(**self.vector_params)
expected_put = copy.deepcopy(expected_call)
pd_test.assert_frame_equal(test_put, expected_put, check_less_precise=True)
# assert call and put vega coincide
pd_test.assert_frame_equal(test_call, test_put)
def test_theta_scalar(self):
"""Test Theta - scalar case"""
# call
test_call = scalarize(self.call_opt.theta(**self.scalar_params))
expected_call = -0.021064685979455443
self.assertEqual(test_call, expected_call)
# put
test_put = scalarize(self.put_opt.theta(**self.scalar_params))
expected_put = -0.007759980665812141
self.assertEqual(test_put, expected_put)
def test_theta_vector_np(self):
"""Test Theta - np.ndarray output case"""
# call
test_call = self.call_opt.theta(**self.vector_params)
expected_call = np.array([[-0.01516655, -0.01977662, -0.01990399],
[-0.01569631, -0.02176239, -0.0212802],
[-0.01601397, -0.02491789, -0.02297484],
[-0.01474417, -0.03162919, -0.02457737],
[-0.00046144, -0.0728981, -0.01462746]])
np_test.assert_allclose(test_call, expected_call, rtol=5e-4)
# put
test_put = self.put_opt.theta(**self.vector_params)
expected_put = np.array([[-0.00193999, -0.00655005, -0.00667743],
[-0.00235693, -0.00842301, -0.00794082],
[-0.00256266, -0.01146658, -0.00952353],
[-0.00117813, -0.01806315, -0.01101133],
[0.01321844, -0.05921823, -0.00094758]])
np_test.assert_allclose(test_put, expected_put, rtol=1e-5)
def test_theta_vector_df(self):
"""Test Theta - pd.DataFrame output case"""
# request Pandas DataFrame as output format
self.vector_params["np_output"] = False
# call
test_call = self.call_opt.theta(**self.vector_params)
expected_call = pd.DataFrame(data=[[-0.01516655, -0.01977662, -0.01990399],
[-0.01569631, -0.02176239, -0.0212802],
[-0.01601397, -0.02491789, -0.02297484],
[-0.01474417, -0.03162919, -0.02457737],
[-0.00046144, -0.0728981, -0.01462746]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_call.rename_axis("S", axis='columns', inplace=True)
expected_call.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_call, expected_call, check_less_precise=True)
# put
test_put = self.put_opt.theta(**self.vector_params)
expected_put = pd.DataFrame(data=[[-0.00193999, -0.00655005, -0.00667743],
[-0.00235693, -0.00842301, -0.00794082],
[-0.00256266, -0.01146658, -0.00952353],
[-0.00117813, -0.01806315, -0.01101133],
[0.01321844, -0.05921823, -0.00094758]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_put.rename_axis("S", axis='columns', inplace=True)
expected_put.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_put, expected_put, check_less_precise=True)
def test_rho_scalar(self):
"""Test Rho - scalar case"""
# call
test_call = scalarize(self.call_opt.rho(**self.scalar_params))
expected_call = 0.309243166487844
self.assertEqual(test_call, expected_call)
# put
test_put = scalarize(self.put_opt.rho(**self.scalar_params))
expected_put = -0.2575372798733608
self.assertEqual(test_put, expected_put)
def test_rho_vector_np(self):
"""Test Rho - np.ndarray output case"""
# call
test_call = self.call_opt.rho(**self.vector_params)
expected_call = np.array([[2.08128741e-01, 3.72449469e-01, 5.12209444e-01],
[1.39670999e-01, 2.81318986e-01, 4.02292404e-01],
[7.76651463e-02, 1.91809707e-01, 2.90026614e-01],
[2.49657984e-02, 1.01399432e-01, 1.68411513e-01],
[2.17415573e-05, 1.39508485e-02, 2.73093423e-02]])
np_test.assert_allclose(test_call, expected_call, rtol=1e-5)
# put
test_put = self.put_opt.rho(**self.vector_params)
expected_put = np.array([[-4.69071412e-01, -3.04750685e-01, -1.64990710e-01],
[-3.77896910e-01, -2.36248923e-01, -1.15275505e-01],
[-2.80139757e-01, -1.65995197e-01, -6.77782897e-02],
[-1.67672008e-01, -9.12383748e-02, -2.42262934e-02],
[-2.73380139e-02, -1.34089069e-02, -5.04131783e-05]])
np_test.assert_allclose(test_put, expected_put, rtol=1e-5)
def test_rho_vector_df(self):
"""Test Theta - pd.DataFrame output case"""
# request Pandas DataFrame as output format
self.vector_params["np_output"] = False
# call
test_call = self.call_opt.rho(**self.vector_params)
expected_call = pd.DataFrame(data=[[2.08128741e-01, 3.72449469e-01, 5.12209444e-01],
[1.39670999e-01, 2.81318986e-01, 4.02292404e-01],
[7.76651463e-02, 1.91809707e-01, 2.90026614e-01],
[2.49657984e-02, 1.01399432e-01, 1.68411513e-01],
[2.17415573e-05, 1.39508485e-02, 2.73093423e-02]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_call.rename_axis("S", axis='columns', inplace=True)
expected_call.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_call, expected_call, check_less_precise=True)
# put
test_put = self.put_opt.rho(**self.vector_params)
expected_put = pd.DataFrame(data=[[-4.69071412e-01, -3.04750685e-01, -1.64990710e-01],
[-3.77896910e-01, -2.36248923e-01, -1.15275505e-01],
[-2.80139757e-01, -1.65995197e-01, -6.77782897e-02],
[-1.67672008e-01, -9.12383748e-02, -2.42262934e-02],
[-2.73380139e-02, -1.34089069e-02, -5.04131783e-05]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_put.rename_axis("S", axis='columns', inplace=True)
expected_put.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_put, expected_put, check_less_precise=True)
def test_Implied_Vol_scalar(self):
"""Test Implied Volatility - scalar case"""
# call
test_call = scalarize(self.call_opt.implied_volatility(**self.scalar_params))
expected_call = 0.2
self.assertAlmostEqual(test_call, expected_call)
# put
test_put = scalarize(self.put_opt.implied_volatility(**self.scalar_params))
expected_put = 0.2
self.assertAlmostEqual(test_put, expected_put)
def test_Implied_Vol_vector_np(self):
"""Test Implied Volatility - np.ndarray output case"""
# call
test_call = self.call_opt.implied_volatility(**self.vector_params)
expected_call = 0.2 + np.zeros_like(test_call)
np_test.assert_allclose(test_call, expected_call, rtol=1e-5)
# put
test_put = self.put_opt.implied_volatility(**self.vector_params)
expected_put = 0.2 + np.zeros_like(test_put)
np_test.assert_allclose(test_put, expected_put, rtol=1e-5)
def test_Implied_Vol_vector_df(self):
"""Test Implied Volatility - pd.DataFrame output case"""
# request Pandas DataFrame as output format
self.vector_params["np_output"] = False
# call
test_call = self.call_opt.implied_volatility(**self.vector_params)
expected_call = pd.DataFrame(data=0.2 + np.zeros_like(test_call),
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_call.rename_axis("S", axis='columns', inplace=True)
expected_call.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_call, expected_call, check_less_precise=True)
# put
test_put = self.put_opt.implied_volatility(**self.vector_params)
expected_put = pd.DataFrame(data=0.2 + np.zeros_like(test_put),
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_put.rename_axis("S", axis='columns', inplace=True)
expected_put.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_put, expected_put, check_less_precise=True)
def test_complex_parameters_setup(self):
"""
Test complex parameter setup:
(S scalar, K and t vector, sigma distributed as Kxt grid, r distributed as Kxt grid)
"""
# call
test_call_price = self.call_opt.price(**self.complex_params)
test_call_PnL = self.call_opt.PnL(**self.complex_params)
test_call_delta = self.call_opt.delta(**self.complex_params)
test_call_gamma = self.call_opt.gamma(**self.complex_params)
test_call_vega = self.call_opt.vega(**self.complex_params)
test_call_theta = self.call_opt.theta(**self.complex_params)
test_call_rho = self.call_opt.rho(**self.complex_params)
test_call_iv = self.call_opt.implied_volatility(**self.complex_params)
expected_call_price = pd.DataFrame(data=[[15.55231058, 9.40714796, 9.87150919, 10.97983523],
[20.05777231, 16.15277891, 16.02977848, 16.27588191],
[15.81433361, 8.75227505, 6.65476799, 5.19785143]],
index=self.complex_params["t"],
columns=self.complex_params["K"])
expected_call_price.rename_axis("K", axis='columns', inplace=True)
expected_call_price.rename_axis("t", axis='rows', inplace=True)
expected_call_PnL = pd.DataFrame(data=[[12.06490811, 5.91974549, 6.38410672, 7.49243276],
[16.57036984, 12.66537644, 12.54237601, 12.78847944],
[12.32693114, 5.26487258, 3.16736552, 1.71044896]],
index=self.complex_params["t"],
columns=self.complex_params["K"])
expected_call_PnL.rename_axis("K", axis='columns', inplace=True)
expected_call_PnL.rename_axis("t", axis='rows', inplace=True)
expected_call_delta = pd.DataFrame(data=[[0.98935079, 0.69453583, 0.58292013, 0.53579465],
[0.79256302, 0.65515368, 0.60705014, 0.57529078],
[0.90573251, 0.6717088, 0.54283905, 0.43788167]],
index=self.complex_params["t"],
columns=self.complex_params["K"])
expected_call_delta.rename_axis("K", axis='columns', inplace=True)
expected_call_delta.rename_axis("t", axis='rows', inplace=True)
expected_call_gamma = pd.DataFrame(data=[[0.00373538, 0.02325203, 0.01726052, 0.01317896],
[0.01053321, 0.01130107, 0.01011038, 0.0090151],
[0.01253481, 0.0242596, 0.02420515, 0.02204576]],
index=self.complex_params["t"],
columns=self.complex_params["K"])
expected_call_gamma.rename_axis("K", axis='columns', inplace=True)
expected_call_gamma.rename_axis("t", axis='rows', inplace=True)
expected_call_vega = pd.DataFrame(data=[[0.02122104, 0.26419398, 0.29417607, 0.29948378],
[0.15544424, 0.20013116, 0.20888592, 0.2128651],
[0.02503527, 0.05383637, 0.05908709, 0.05870816]],
index=self.complex_params["t"],
columns=self.complex_params["K"])
expected_call_vega.rename_axis("K", axis='columns', inplace=True)
expected_call_vega.rename_axis("t", axis='rows', inplace=True)
expected_call_theta = pd.DataFrame(data=[[-0.00242788, -0.01322973, -0.02073753, -0.02747845],
[-0.03624253, -0.0521798, -0.06237363, -0.07180046],
[-0.12885912, -0.28334665, -0.33769702, -0.36349655]],
index=self.complex_params["t"],
columns=self.complex_params["K"])
expected_call_theta.rename_axis("K", axis='columns', inplace=True)
expected_call_theta.rename_axis("t", axis='rows', inplace=True)
expected_call_rho = pd.DataFrame(data=[[0.51543152, 0.37243495, 0.29872256, 0.26120194],
[0.18683002, 0.15599644, 0.14066931, 0.12935721],
[0.01800044, 0.0141648, 0.01156185, 0.00937301]],
index=self.complex_params["t"],
columns=self.complex_params["K"])
expected_call_rho.rename_axis("K", axis='columns', inplace=True)
expected_call_rho.rename_axis("t", axis='rows', inplace=True)
expected_call_iv = pd.DataFrame(data=self.complex_params["sigma"],
index=self.complex_params["t"],
columns=self.complex_params["K"])
expected_call_iv.rename_axis("K", axis='columns', inplace=True)
expected_call_iv.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_call_price, expected_call_price)
pd_test.assert_frame_equal(test_call_PnL, expected_call_PnL)
pd_test.assert_frame_equal(test_call_delta, expected_call_delta)
pd_test.assert_frame_equal(test_call_gamma, expected_call_gamma)
pd_test.assert_frame_equal(test_call_vega, expected_call_vega)
pd_test.assert_frame_equal(test_call_theta, expected_call_theta)
pd_test.assert_frame_equal(test_call_rho, expected_call_rho)
pd_test.assert_frame_equal(test_call_iv, expected_call_iv)
# put
test_put_price = self.put_opt.price(**self.complex_params)
test_put_PnL = self.put_opt.PnL(**self.complex_params)
test_put_delta = self.put_opt.delta(**self.complex_params)
test_put_gamma = self.put_opt.gamma(**self.complex_params)
test_put_vega = self.put_opt.vega(**self.complex_params)
test_put_theta = self.put_opt.theta(**self.complex_params)
test_put_rho = self.put_opt.rho(**self.complex_params)
test_put_iv = self.put_opt.implied_volatility(**self.complex_params)
expected_put_price = pd.DataFrame(data=[[0.02812357, 3.22314287, 7.9975943, 13.35166847],
[3.70370639, 9.31459014, 13.76319167, 18.54654119],
[0.62962992, 3.51971706, 6.38394341, 9.88603552]],
index=self.complex_params["t"],
columns=self.complex_params["K"])
expected_put_price.rename_axis("K", axis='columns', inplace=True)
expected_put_price.rename_axis("t", axis='rows', inplace=True)
expected_put_PnL = pd.DataFrame(data=[[-10.01320701, -6.81818772, -2.04373628, 3.31033788],
[-6.3376242, -0.72674045, 3.72186108, 8.5052106],
[-9.41170067, -6.52161353, -3.65738717, -0.15529507]],
index=self.complex_params["t"],
columns=self.complex_params["K"])
expected_put_PnL.rename_axis("K", axis='columns', inplace=True)
expected_put_PnL.rename_axis("t", axis='rows', inplace=True)
expected_put_delta = pd.DataFrame(data=[[-0.01064921, -0.30546417, -0.41707987, -0.46420535],
[-0.20743698, -0.34484632, -0.39294986, -0.42470922],
[-0.09426749, -0.3282912, -0.45716095, -0.56211833]],
index=self.complex_params["t"],
columns=self.complex_params["K"])
expected_put_delta.rename_axis("K", axis='columns', inplace=True)
expected_put_delta.rename_axis("t", axis='rows', inplace=True)
expected_put_gamma = copy.deepcopy(expected_call_gamma)
expected_put_vega = copy.deepcopy(expected_call_vega)
expected_put_theta = pd.DataFrame(data=[[-0.00038744, -0.00863707, -0.01349429, -0.01735551],
[-0.02615404, -0.03850937, -0.04554804, -0.05157676],
[-0.11041151, -0.26012269, -0.31065535, -0.33236619]],
index=self.complex_params["t"],
columns=self.complex_params["K"])
expected_put_theta.rename_axis("K", axis='columns', inplace=True)
expected_put_theta.rename_axis("t", axis='rows', inplace=True)
expected_put_rho = pd.DataFrame(data=[[-0.00691938, -0.21542518, -0.31936724, -0.38666626],
[-0.08152366, -0.14703153, -0.17901683, -0.2068619],
[-0.00249691, -0.00905916, -0.01302149, -0.01656895]],
index=self.complex_params["t"],
columns=self.complex_params["K"])
expected_put_rho.rename_axis("K", axis='columns', inplace=True)
expected_put_rho.rename_axis("t", axis='rows', inplace=True)
expected_put_iv = pd.DataFrame(data=self.complex_params["sigma"],
index=self.complex_params["t"],
columns=self.complex_params["K"])
expected_put_iv.rename_axis("K", axis='columns', inplace=True)
expected_put_iv.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_put_price, expected_put_price)
pd_test.assert_frame_equal(test_put_PnL, expected_put_PnL)
pd_test.assert_frame_equal(test_put_delta, expected_put_delta)
pd_test.assert_frame_equal(test_put_gamma, expected_put_gamma)
pd_test.assert_frame_equal(test_put_vega, expected_put_vega)
pd_test.assert_frame_equal(test_put_theta, expected_put_theta, check_less_precise=True)
pd_test.assert_frame_equal(test_put_rho, expected_put_rho)
pd_test.assert_frame_equal(test_put_iv, expected_put_iv)
# test gamma and vega consistency
pd_test.assert_frame_equal(test_call_gamma, test_put_gamma)
pd_test.assert_frame_equal(test_call_vega, test_put_vega)
class TestDigitalOption(unittest.TestCase):
"""Class to test public methods of DigitalOption class"""
def setUp(self) -> None:
warnings.filterwarnings("ignore")
# common market environment
mkt_env = MarketEnvironment()
# option objects
self.call_opt = DigitalOption(mkt_env)
self.put_opt = DigitalOption(mkt_env, option_type="put")
# pricing parameters
S_scalar = 100
S_vector = [90, 100, 110]
t_scalar_string = "01-06-2020"
t_date_range = pd.date_range(start="2020-04-19", end="2020-12-21", periods=5)
# common pricing parameter setup
common_params = {"np_output": True, "minimization_method": "Least-Squares"}
# scalar parameters setup
self.scalar_params = copy.deepcopy(common_params)
self.scalar_params["S"] = S_scalar
self.scalar_params["t"] = t_scalar_string
# vector parameters setup
self.vector_params = copy.deepcopy(common_params)
self.vector_params["S"] = S_vector
self.vector_params["t"] = t_date_range
# complex pricing parameter setup
# (S scalar, K and t vector, sigma distributed as Kxt grid, r distributed as Kxt grid)
K_vector = [75, 85, 90, 95]
mK = len(K_vector)
n = 3
sigma_grid_K = np.array([0.1 * (1 + i) for i in range(mK * n)]).reshape(n, mK)
r_grid_K = np.array([0.01 * (1 + i) for i in range(mK * n)]).reshape(n, mK)
self.complex_params = {"S": S_vector[0],
"K": K_vector,
"t": pd.date_range(start="2020-04-19", end="2020-12-21", periods=n),
"sigma": sigma_grid_K,
"r": r_grid_K,
"np_output": False,
"minimization_method": "Least-Squares"}
def test_price_scalar(self):
"""Test price - scalar case"""
# call
test_call = scalarize(self.call_opt.price(**self.scalar_params))
expected_call = 0.529923736000296
self.assertEqual(test_call, expected_call)
# put
test_put = scalarize(self.put_opt.price(**self.scalar_params))
expected_put = 0.4413197518956652
self.assertEqual(test_put, expected_put)
def test_price_vector_np(self):
"""Test price - np.ndarray output case"""
# call
test_call = self.call_opt.price(**self.vector_params)
expected_call = np.array([[2.96746057e-01, 5.31031469e-01, 7.30298621e-01],
[2.62783065e-01, 5.29285722e-01, 7.56890348e-01],
[2.13141191e-01, 5.26395060e-01, 7.95937699e-01],
[1.28345302e-01, 5.21278768e-01, 8.65777496e-01],
[7.93566840e-04, 5.09205971e-01, 9.96790994e-01]])
np_test.assert_allclose(test_call, expected_call)
# put
test_put = self.put_opt.price(**self.vector_params)
expected_put = np.array([[0.66879322, 0.43450781, 0.23524066],
[0.71099161, 0.44448895, 0.21688433],
[0.7688046, 0.45555073, 0.18600809],
[0.86197582, 0.46904235, 0.12454362],
[0.99783751, 0.4894251, 0.00184008]])
np_test.assert_allclose(test_put, expected_put, rtol=1e-6)
def test_price_vector_df(self):
"""Test price - pd.DataFrame output case"""
# request Pandas DataFrame as output format
self.vector_params["np_output"] = False
# call
test_call = self.call_opt.price(**self.vector_params)
expected_call = pd.DataFrame(data=[[2.96746057e-01, 5.31031469e-01, 7.30298621e-01],
[2.62783065e-01, 5.29285722e-01, 7.56890348e-01],
[2.13141191e-01, 5.26395060e-01, 7.95937699e-01],
[1.28345302e-01, 5.21278768e-01, 8.65777496e-01],
[7.93566840e-04, 5.09205971e-01, 9.96790994e-01]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_call.rename_axis("S", axis='columns', inplace=True)
expected_call.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_call, expected_call)
# put
test_put = self.put_opt.price(**self.vector_params)
expected_put = pd.DataFrame(data=[[0.66879322, 0.43450781, 0.23524066],
[0.71099161, 0.44448895, 0.21688433],
[0.7688046, 0.45555073, 0.18600809],
[0.86197582, 0.46904235, 0.12454362],
[0.99783751, 0.4894251, 0.00184008]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_put.rename_axis("S", axis='columns', inplace=True)
expected_put.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_put, expected_put)
def test_PnL_scalar(self):
"""Test P&L - scalar case"""
# call
test_call = scalarize(self.call_opt.PnL(**self.scalar_params))
expected_call = 0.23317767915072352
self.assertEqual(test_call, expected_call)
# put
test_put = scalarize(self.put_opt.PnL(**self.scalar_params))
expected_put = -0.22747347241997717
self.assertEqual(test_put, expected_put)
def test_PnL_vector_np(self):
"""Test P&L - np.ndarray output case"""
# call
test_call = self.call_opt.PnL(**self.vector_params)
expected_call = np.array([[0., 0.23428541, 0.43355256],
[-0.03396299, 0.23253966, 0.46014429],
[-0.08360487, 0.229649, 0.49919164],
[-0.16840076, 0.22453271, 0.56903144],
[-0.29595249, 0.21245991, 0.70004494]])
np_test.assert_allclose(test_call, expected_call)
# put
test_put = self.put_opt.PnL(**self.vector_params)
expected_put = np.array([[0., -0.23428541, -0.43355256],
[0.04219839, -0.22430427, -0.4519089],
[0.10001137, -0.2132425, -0.48278514],
[0.19318259, -0.19975088, -0.5442496],
[0.32904428, -0.17936812, -0.66695314]])
np_test.assert_allclose(test_put, expected_put, rtol=1e-6)
def test_PnL_vector_df(self):
"""Test P&L - pd.DataFrame output case"""
# request Pandas DataFrame as output format
self.vector_params["np_output"] = False
# call
test_call = self.call_opt.PnL(**self.vector_params)
expected_call = pd.DataFrame(data=[[0., 0.23428541, 0.43355256],
[-0.03396299, 0.23253966, 0.46014429],
[-0.08360487, 0.229649, 0.49919164],
[-0.16840076, 0.22453271, 0.56903144],
[-0.29595249, 0.21245991, 0.70004494]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_call.rename_axis("S", axis='columns', inplace=True)
expected_call.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_call, expected_call)
# put
test_put = self.put_opt.PnL(**self.vector_params)
expected_put = pd.DataFrame(data=[[0., -0.23428541, -0.43355256],
[0.04219839, -0.22430427, -0.4519089],
[0.10001137, -0.2132425, -0.48278514],
[0.19318259, -0.19975088, -0.5442496],
[0.32904428, -0.17936812, -0.66695314]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_put.rename_axis("S", axis='columns', inplace=True)
expected_put.rename_axis("t", axis='rows', inplace=True)
|
pd_test.assert_frame_equal(test_put, expected_put)
|
pandas.testing.assert_frame_equal
|
from logging import log
import numpy as np
import pandas as pd
from scipy import interpolate
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg, NavigationToolbar2Tk)
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
from matplotlib.font_manager import FontProperties
from matplotlib.ticker import ScalarFormatter
from flask import Flask, render_template, request
from tkinter import *
from tkinter import ttk
import sys
import os
import shutil
import random
from matplotlib.ticker import MaxNLocator
from pathlib import Path
import math
import copy
#from decimal import Decimal, ROUND_HALF_UP
def readinput(filename):
csv_input = pd.read_csv(filepath_or_buffer=filename, encoding="utf_8", sep=",")
symbol = csv_input['Symbol']
value = csv_input['Value']
unit = csv_input['Unit']
valueDict = {}
unitDict = {}
for i, j, k in zip(symbol, value, unit):
valueDict[i] = float(j)
unitDict[i] = str(k)
return valueDict, unitDict
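# Note (assumed file layout, not taken from the original data files): readinput above expects a
# CSV whose columns are named Symbol, Value and Unit, e.g.
#   Symbol,Value,Unit
#   vDsgn,22,knot
#   tOpSch,20,year
# and it returns a value dictionary plus a matching unit dictionary, e.g.
#   valueDict, unitDict = readinput('parameters.csv')   # 'parameters.csv' is a placeholder name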
def CeqLHVFunc(filename,fuelName):
csv_input = pd.read_csv(filepath_or_buffer=filename, encoding="utf_8", sep=",")
fuelType = csv_input['Fuel type']
CeqLHV = csv_input['CeqLHV']
fuelDict = {}
for i, j in zip(fuelType, CeqLHV):
fuelDict[i] = float(j)
return fuelDict[fuelName]
def Cco2Func(filename,fuelName):
csv_input = pd.read_csv(filepath_or_buffer=filename, encoding="utf_8", sep=",")
fuelType = csv_input['Fuel type']
Cco2 = csv_input['Cco2']
Cco2Dict = {}
for i, j in zip(fuelType, Cco2):
Cco2Dict[i] = float(j)
return Cco2Dict[fuelName]
def initialFleetFunc(filename):
csv_input = pd.read_csv(filepath_or_buffer=filename, encoding="utf_8", sep=",")
year = csv_input['Year']
TEU = csv_input['TEU']
iniFleetDict = {}
k = 0
for i, j in zip(year, TEU):
iniFleetDict.setdefault(k,{})
iniFleetDict[k]['year'] = int(i)
iniFleetDict[k]['TEU'] = float(j)
k += 1
return iniFleetDict
def decisionListFunc(filename):
csv_input = pd.read_csv(filepath_or_buffer=filename, encoding="utf_8", sep=",").fillna(0)
Year = csv_input['Year']
Order = csv_input['Order']
fuelType = csv_input['Fuel type']
WPS = csv_input['WPS']
SPS = csv_input['SPS']
CCS = csv_input['CCS']
CAP = csv_input['CAP']
Speed = csv_input['Speed']
Fee = csv_input['Fee']
valueDict = {}
for i, j, k, l, m, n, o, p, q in zip(Year, Order, fuelType, WPS, SPS, CCS, CAP, Speed, Fee):
valueDict.setdefault(int(i),{})
valueDict[int(i)]['Order'] = int(j)
valueDict[int(i)]['fuelType'] = k
valueDict[int(i)]['WPS'] = int(l)
valueDict[int(i)]['SPS'] = int(m)
valueDict[int(i)]['CCS'] = int(n)
valueDict[int(i)]['CAP'] = float(o)
valueDict[int(i)]['Speed'] = float(p)
valueDict[int(i)]['Fee'] = float(q)
return valueDict
def fleetPreparationFunc(fleetAll,initialFleetFile,numCompany,startYear,lastYear,elapsedYear,tOpSch,tbid,valueDict,NShipFleet,parameterFile2,parameterFile12,parameterFile3,parameterFile5):
fleetAll.setdefault(numCompany,{})
fleetAll[numCompany].setdefault('total',{})
fleetAll[numCompany]['total']['sale'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['g'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['gTilde'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['costTilde'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['saleTilde'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['cta'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['overDi'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['costShipBasicHFO'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['costShip'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['costFuel'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['dcostFuel'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['costAdd'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['costAll'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['maxCta'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['rocc'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['costRfrb'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['dcostEco'] = np.zeros(lastYear-startYear+1)
#fleetAll[numCompany]['total']['dCostCnt'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['costCnt'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['nTransCnt'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['atOnce'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['mSubs'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['mTax'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['balance'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['demand'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['profit'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['profitSum'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['gSum'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['Idx'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['lastOrderFuel'] = 'HFO/Diesel'
fleetAll[numCompany]['total']['lastOrderCAP'] = 20000
initialFleets = initialFleetFunc(initialFleetFile)
for i in range(len(initialFleets)):
orderYear = initialFleets[i]['year'] - tbid
iniT = startYear - initialFleets[i]['year']
iniCAPcnt = initialFleets[i]['TEU']
fleetAll = orderShipFunc(fleetAll,numCompany,'HFO',0,0,0,iniCAPcnt,tOpSch,tbid,iniT,orderYear,elapsedYear,valueDict,NShipFleet,True,parameterFile2,parameterFile12,parameterFile3,parameterFile5)
return fleetAll
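# unitCostFuelFunc linearly interpolates the unit fuel price of the chosen fuel (and of HFO, used
# as the reference) over the Year column of the price file; for years before 2020 it falls back to
# the first tabulated value.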
def unitCostFuelFunc(filename,fuelName,year):
csv_input = pd.read_csv(filepath_or_buffer=filename, encoding="utf_8", sep=",")
measureYear = np.array(csv_input['Year'],dtype='float64')
measureHFO = np.array(csv_input['HFO'],dtype='float64')
measure = np.array(csv_input[fuelName],dtype='float64')
fittedHFO = interpolate.interp1d(measureYear, measureHFO)
fitted = interpolate.interp1d(measureYear, measure)
if year >= 2020:
interp = fitted(year)
interpHFO = fittedHFO(year)
else:
interp = measure[0]
interpHFO = measureHFO[0]
return interp, interpHFO
def rShipBasicFunc(filename,fuelName,CAPcnt):
csv_input = pd.read_csv(filepath_or_buffer=filename, encoding="utf_8", sep=",")
fuelType = csv_input['Fuel type']
rShipBasic = csv_input['rShipBasic']
fuelDict = {}
for i, j in zip(fuelType, rShipBasic):
fuelDict[i] = float(j)
return fuelDict[fuelName]
def wDWTFunc(kDWT1,CAPcnt,kDWT2):
wDWT = kDWT1*CAPcnt+kDWT2
return wDWT
def wFLDFunc(kFLD1,wDWT,kFLD2):
wFLD = kFLD1*wDWT+kFLD2
return wFLD
def dFunc(Dyear,Hday,v,Rrun):
d = Dyear*Hday*v*Rrun
return d
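# The two fuel-consumption helpers below share the same pattern: fShipFunc estimates main-engine
# fuel use from full-load weight (wFLD), deadweight (wDWT), occupancy (rocc), speed (v) and annual
# distance (d), scales it by the fuel's CeqLHV, and applies a (1-rWPS) reduction when a wind
# propulsion system is fitted; fAuxFunc does the same for auxiliary power, with an optional
# (1-rSPS) solar reduction. Both return the raw baseline value (fShipORG / fAuxORG) alongside the
# adjusted one.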
def fShipFunc(kShip1,kShip2,wDWT,wFLD,rocc,CNM2km,v,d,rWPS,windPr,CeqLHV):
fShipORG = (kShip1/1000)*(wFLD-(1-kShip2*rocc)*wDWT)*(wFLD**(-1/3))*((CNM2km*v)**2)*CNM2km*d
if windPr:
fShip = CeqLHV*fShipORG*(1-rWPS)
else:
fShip = CeqLHV*fShipORG
return fShipORG, fShip
def fAuxFunc(Dyear,Hday,Rrun,kAux1,kAux2,wDWT,rSPS,solar,CeqLHV):
fAuxORG = Dyear*Hday*Rrun*(kAux1+kAux2*wDWT)/1000
if solar:
fAux = CeqLHV*fAuxORG*(1-rSPS)
else:
fAux = CeqLHV*fAuxORG
return fAuxORG, fAux
def gFunc(Cco2ship,fShip,Cco2aux,fAux,rCCS,CCS):
gORG = Cco2ship*fShip+Cco2aux*fAux
if CCS:
g = gORG*(1-rCCS)
else:
g = gORG
return gORG, g
def maxCtaFunc(CAPcnt,d):
maxCta = CAPcnt*d
return maxCta
def ctaFunc(CAPcnt,rocc,d):
cta = CAPcnt*rocc*d
return cta
def costFuelFunc(unitCostFuelHFO, unitCostFuel, fShipORG, fAuxORG, fShip, fAux):
costFuelORG = unitCostFuelHFO*(fShipORG+fAuxORG)
costFuel = unitCostFuel*(fShip+fAux)
dcostFuel = costFuel - costFuelORG
return costFuelORG, costFuel, dcostFuel
def costShipFunc(kShipBasic1, CAPcnt, kShipBasic2, rShipBasic, dcostWPS, dcostSPS, dcostCCS, flagWPS, flagSPS, flagCCS):
costShipBasicHFO = kShipBasic1 * CAPcnt + kShipBasic2
costShipBasic = rShipBasic * costShipBasicHFO
cAdditionalEquipment = 0
    # additional-equipment cost factors accumulate, so WPS, SPS and CCS can be combined on one ship
    if flagWPS:
        cAdditionalEquipment += dcostWPS
    if flagSPS:
        cAdditionalEquipment += dcostSPS
    if flagCCS:
        cAdditionalEquipment += dcostCCS
costShipAdd = cAdditionalEquipment * costShipBasicHFO
costShip = costShipBasic + costShipAdd
return costShipBasicHFO, costShipBasic, costShipAdd, costShip
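# additionalShippingFeeFunc spreads the extra cost of a non-baseline ship over its scheduled
# operating life: while the ship is within tOpSch years of age, the fee adds the fuel-cost
# difference plus the (costShipAll - costShipBasicHFO) premium amortized over tOpSch; after that
# only the fuel-cost difference remains.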
def additionalShippingFeeFunc(tOp, tOpSch, dcostFuelAll, costShipAll, costShipBasicHFO):
if tOp <= tOpSch:
dcostShipping = dcostFuelAll + (costShipAll-costShipBasicHFO)/tOpSch
else:
dcostShipping = dcostFuelAll
return dcostShipping
def demandScenarioFunc(year,kDem1,kDem2,kDem3,kDem4):
Di = (kDem1*year**2 + kDem2*year + kDem3)*1000000000/kDem4
return Di
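# playOrderFunc sets the order in which the three companies act: lower cost acts earlier.
# Ties are broken randomly; when exactly two companies share a cost, the untied company keeps its
# deterministic slot (first if it is cheapest, last if it is most expensive) and the tied pair is
# shuffled, while a three-way tie shuffles all three.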
def playOrderFunc(cost,playOrder):
unique, counts = np.unique(cost, return_counts=True)
if np.amax(counts) == 1:
playOrderNew = playOrder[np.argsort(cost)]
elif np.amax(counts) == 2:
minCost = np.amin(cost)
maxCost = np.amax(cost)
if minCost == unique[counts == 1]:
playOrderNew = np.zeros(3)
playOrderNew[0] = playOrder[cost == minCost]
playOrderNew[1:3] = np.random.permutation(playOrder[cost!=minCost])
else:
playOrderNew = np.zeros(3)
playOrderNew[2] = playOrder[cost == maxCost]
playOrderNew[0:2] = np.random.permutation(playOrder[cost!=maxCost])
else:
playOrderNew = np.random.permutation(playOrder)
return playOrderNew
def rEEDIreqCurrentFunc(wDWT,rEEDIreq):
if wDWT >= 200000:
rEEDIreqCurrent = rEEDIreq[0]
elif wDWT >= 120000:
rEEDIreqCurrent = rEEDIreq[1]
else:
rEEDIreqCurrent = rEEDIreq[2]
return rEEDIreqCurrent
def EEDIreqFunc(kEEDI1,wDWT,kEEDI2,rEEDIreq):
EEDIref = kEEDI1*wDWT**kEEDI2
EEDIreq = (1-rEEDIreq)*EEDIref
return EEDIref, EEDIreq
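# EEDIattFunc estimates the attained EEXI/EEDI of a design: it sizes the main engine (MCRM) and
# auxiliary power (PA) from deadweight, applies the reduction factors of whichever of WPS / SPS /
# CCS are installed, and then lowers the reduced design speed vDsgnRed in steps of 1 (re-evaluating
# the attained value each time) until it meets EEDIreq or the speed reaches zero. It returns MCRM,
# PA, the attained EEDI and the reduced design speed.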
def EEDIattFunc(wDWT,wMCR,kMCR1,kMCR2,kMCR3,kPAE1,kPAE2,rCCS,vDsgn,rWPS,Cco2ship,SfcM,SfcA,rSPS,Cco2aux,EEDIreq,flagWPS,flagSPS,flagCCS):
if wDWT < wMCR:
MCRM = kMCR1*wDWT + kMCR2
else:
MCRM = kMCR3
PA = kPAE1*MCRM+kPAE2
def _EEDIcalc(vDsgnRed):
if flagWPS:
rWPStemp = rWPS
else:
rWPStemp = 0
if flagSPS:
rSPStemp = rSPS
else:
rSPStemp = 0
if flagCCS:
rCCStemp = rCCS
else:
rCCStemp = 0
return ((1-rCCStemp)/(0.7*wDWT*vDsgnRed))*((1-rWPStemp)*Cco2ship*0.75*MCRM*SfcM*(vDsgnRed/vDsgn)**3 + (1-rSPStemp)*Cco2aux*PA*SfcA)
vDsgnRed = vDsgn
EEDIatt = _EEDIcalc(vDsgnRed)
while EEDIatt > EEDIreq:
vDsgnRed -= 1
if vDsgnRed == 0:
break
EEDIatt = _EEDIcalc(vDsgnRed)
return MCRM, PA, EEDIatt, vDsgnRed
def regPreFunc(nDec):
regDec = {}
regDec['rEEDIreq'] = np.zeros((nDec,3))
regDec['Subsidy'] = np.zeros(nDec)
regDec['Ctax'] = np.zeros(nDec)
regDec['rEEDIreq'][0,0] = 0.5
regDec['rEEDIreq'][0,1] = 0.45
regDec['rEEDIreq'][0,2] = 0.35
return regDec
def regDecFunc(regDec,nReg,currentYear):
def _regDecGui1(regDec,nReg,currentYear):
def _buttonCommand(regDec,nReg,root):
if float(v1.get()) <= 100 and float(v2.get()) <= 100 and float(v3.get()) <= 100 and float(v1.get()) >= 0 and float(v2.get()) >= 0 and float(v3.get()) >= 0:
regDec['rEEDIreq'][nReg,0] = float(v1.get()) / 100
regDec['rEEDIreq'][nReg,1] = float(v2.get()) / 100
regDec['rEEDIreq'][nReg,2] = float(v3.get()) / 100
root.quit()
root.destroy()
else:
button['state'] = 'disabled'
def _buttonCommandCheck():
if float(v1.get()) <= 100 and float(v2.get()) <= 100 and float(v3.get()) <= 100 and float(v1.get()) >= 0 and float(v2.get()) >= 0 and float(v3.get()) >= 0:
button['state'] = 'normal'
else:
button['state'] = 'disabled'
root = Tk()
root.title('Regulator : Reduction Rate for EEXI / EEDI in '+str(currentYear))
width = 600
height = 300
placeX = root.winfo_screenwidth()/2 - width/2
placeY = root.winfo_screenheight()/2 - height/2
widgetSize = str(width)+'x'+str(height)+'+'+str(int(placeX))+'+'+str(int(placeY))
root.geometry(widgetSize)
root['bg'] = '#a3d6cc'
style = ttk.Style()
style.theme_use('default')
style.configure('new.TFrame', foreground='black', background='#a3d6cc')
style.configure('new.TLabel', foreground='black', background='#a3d6cc')
style.configure('new.TButton', foreground='black', background='#a3d6cc')
style.configure('new.TCheckbutton', foreground='black', background='#a3d6cc')
style.configure('new.TEntry', foreground='black', background='#a3d6cc')
# Frame
frame = ttk.Frame(root, style='new.TFrame', padding=20)
frame.pack()
# Checkbutton
v1 = StringVar()
if nReg == 0:
            v1.set('0') # initialize
        else:
            v1.set(str(100*regDec['rEEDIreq'][nReg-1,0])) # initialize with the previous decision
cb1 = ttk.Entry(frame, style='new.TEntry', textvariable=v1)
label1 = ttk.Label(frame, style='new.TLabel',text='wDWT >= 200,000', padding=(5, 2))
label11 = ttk.Label(frame, style='new.TLabel',text='% <= 100%', padding=(5, 2))
label111 = ttk.Label(frame, style='new.TLabel',text='0% <=', padding=(5, 2))
        labelExpl = ttk.Label(frame, style='new.TLabel', text='Guide: Input the reduction rates for EEXI / EEDI, then click "Check" & "Next".', padding=(5, 2))
# Checkbutton
v2 = StringVar()
if nReg == 0:
            v2.set('0') # initialize
        else:
            v2.set(str(100*regDec['rEEDIreq'][nReg-1,1])) # initialize with the previous decision
cb2 = ttk.Entry(frame, style='new.TEntry', textvariable=v2)
label2 = ttk.Label(frame, style='new.TLabel',text='120,000 <= wDWT < 200,000', padding=(5, 2))
label22 = ttk.Label(frame, style='new.TLabel',text='% <= 100%', padding=(5, 2))
label222 = ttk.Label(frame, style='new.TLabel',text='0% <=', padding=(5, 2))
# Checkbutton
v3 = StringVar()
if nReg == 0:
            v3.set('0') # initialize
        else:
            v3.set(str(100*regDec['rEEDIreq'][nReg-1,2])) # initialize with the previous decision
cb3 = ttk.Entry(frame, style='new.TEntry', textvariable=v3)
label3 = ttk.Label(frame, style='new.TLabel',text='wDWT < 120,000', padding=(5, 2))
label33 = ttk.Label(frame, style='new.TLabel',text='% <= 100%', padding=(5, 2))
label333 = ttk.Label(frame, style='new.TLabel',text='0% <=', padding=(5, 2))
# Button
button = ttk.Button(frame, style='new.TButton',text='Next', state='disabled', command=lambda: _buttonCommand(regDec,nReg,root))
button1 = ttk.Button(frame, style='new.TButton',text='Check', command=lambda: _buttonCommandCheck())
# Layout
label11.grid(row=0, column=3)
cb1.grid(row=0, column=2)
label111.grid(row=0, column=1)
label1.grid(row=0, column=0)
label22.grid(row=1, column=3)
cb2.grid(row=1, column=2)
label222.grid(row=1, column=1)
label2.grid(row=1, column=0)
label33.grid(row=2, column=3)
cb3.grid(row=2, column=2)
label333.grid(row=2, column=1)
label3.grid(row=2, column=0)
button.grid(row=3, column=3)
button1.grid(row=3, column=2)
labelExpl.grid(row=5, column=0, columnspan=3)
root.deiconify()
root.mainloop()
return regDec
def _regDecGui2(regDec,nReg,currentYear):
def _buttonCommand(regDec,nReg,root):
if float(v1.get()) <= 100 and float(v1.get()) >= 0 and float(v2.get()) >= 0:
regDec['Subsidy'][nReg] = float(v1.get()) / 100
regDec['Ctax'][nReg] = float(v2.get())
root.quit()
root.destroy()
else:
button['state'] = 'disabled'
def _buttonCommandCheck():
if float(v1.get()) <= 100 and float(v1.get()) >= 0 and float(v2.get()) >= 0:
button['state'] = 'normal'
else:
button['state'] = 'disabled'
root = Tk()
        root.title('Regulator : Subsidy & Carbon tax in '+str(currentYear))
width = 800
height = 300
placeX = root.winfo_screenwidth()/2 - width/2
placeY = root.winfo_screenheight()/2 - height/2
widgetSize = str(width)+'x'+str(height)+'+'+str(int(placeX))+'+'+str(int(placeY))
root.geometry(widgetSize)
root['bg'] = '#a3d6cc'
style = ttk.Style()
style.theme_use('default')
style.configure('new.TFrame', foreground='black', background='#a3d6cc')
style.configure('new.TLabel', foreground='black', background='#a3d6cc')
style.configure('new.TButton', foreground='black', background='#a3d6cc')
style.configure('new.TCheckbutton', foreground='black', background='#a3d6cc')
style.configure('new.TEntry', foreground='black', background='#a3d6cc')
# Frame
frame = ttk.Frame(root, style='new.TFrame', padding=20)
frame.pack()
# Checkbutton
v1 = StringVar()
if nReg == 0:
            v1.set('0') # initialize
        else:
            v1.set(str(int(100*regDec['Subsidy'][nReg-1]))) # initialize with the previous decision
cb1 = ttk.Entry(frame, style='new.TEntry', textvariable=v1)
label1 = ttk.Label(frame, style='new.TLabel', text='Subsidy rate', padding=(5, 2))
label11 = ttk.Label(frame, style='new.TLabel', text='% <= 100%', padding=(5, 2))
label111 = ttk.Label(frame, style='new.TLabel', text='0% <=', padding=(5, 2))
        labelExpl = ttk.Label(frame, style='new.TLabel', text='Guide: Input the subsidy rate and carbon tax, then click "Check" & "Next".', padding=(5, 2))
# Checkbutton
v2 = StringVar()
if nReg == 0:
            v2.set('0') # initialize
        else:
            v2.set(str(int(regDec['Ctax'][nReg-1]))) # initialize with the previous decision
cb2 = ttk.Entry(frame, style='new.TEntry', textvariable=v2)
label2 = ttk.Label(frame, style='new.TLabel', text='Carbon tax [$/ton]', padding=(5, 2))
#label22 = ttk.Label(frame, style='new.TLabel', text='% <= 100%', padding=(5, 2))
label222 = ttk.Label(frame, style='new.TLabel', text='0 <=', padding=(5, 2))
# Button
button = ttk.Button(frame, style='new.TButton', text='Next', state='disabled', command=lambda: _buttonCommand(regDec,nReg,root))
button1 = ttk.Button(frame, style='new.TButton', text='Check', command=lambda: _buttonCommandCheck())
# Layout
label11.grid(row=0, column=3)
cb1.grid(row=0, column=2)
label111.grid(row=0, column=1)
label1.grid(row=0, column=0)
#label22.grid(row=1, column=3)
cb2.grid(row=1, column=2)
label222.grid(row=1, column=1)
label2.grid(row=1, column=0)
button.grid(row=3, column=3)
button1.grid(row=3, column=2)
labelExpl.grid(row=5, column=0, columnspan=3)
root.deiconify()
root.mainloop()
return regDec
regDec = _regDecGui1(regDec,nReg,currentYear)
regDec = _regDecGui2(regDec,nReg,currentYear)
return regDec
def scrapRefurbishFunc(fleetAll,numCompany,elapsedYear,currentYear,valueDict,tOpSch,rEEDIreq):
def _scrapOrRefurbishGui(fleetAll,numCompany,tOpSch,valueDict,currentYear,rEEDIreq):
def _buttonCommandCheckButton(fleetAll,valueDict,rEEDIreq,v,Sys):
NumFleet = len(fleetAll[numCompany])
numAlive = 0
for keyFleet in range(1,NumFleet):
if fleetAll[numCompany][keyFleet]['delivery'] <= currentYear and fleetAll[numCompany][keyFleet]['tOp'] < tOpSch:
tOpTemp = fleetAll[numCompany][keyFleet]['tOp']
fleetAll[numCompany][keyFleet][Sys] = int(v[numAlive].get())
rEEDIreqCurrent = rEEDIreqCurrentFunc(fleetAll[numCompany][keyFleet]['wDWT'],rEEDIreq)
fleetAll[numCompany][keyFleet]['EEDIref'][tOpTemp], fleetAll[numCompany][keyFleet]['EEDIreq'][tOpTemp] = EEDIreqFunc(valueDict['kEEDI1'],fleetAll[numCompany][keyFleet]['wDWT'],valueDict['kEEDI2'],rEEDIreqCurrent)
fleetAll[numCompany][keyFleet]['MCRM'][tOpTemp], fleetAll[numCompany][keyFleet]['PA'][tOpTemp], fleetAll[numCompany][keyFleet]['EEDIatt'][tOpTemp], fleetAll[numCompany][keyFleet]['vDsgnRed'][tOpTemp] = EEDIattFunc(fleetAll[numCompany][keyFleet]['wDWT'],valueDict['wMCR'],valueDict['kMCR1'],valueDict['kMCR2'],valueDict['kMCR3'],valueDict['kPAE1'],valueDict['kPAE2'],valueDict['rCCS'],valueDict['vDsgn'],valueDict['rWPS'],fleetAll[numCompany][keyFleet]['Cco2ship'],valueDict['SfcM'],valueDict['SfcA'],valueDict['rSPS'],fleetAll[numCompany][keyFleet]['Cco2aux'],fleetAll[numCompany][keyFleet]['EEDIreq'][tOpTemp],fleetAll[numCompany][keyFleet]['WPS'],fleetAll[numCompany][keyFleet]['SPS'],fleetAll[numCompany][keyFleet]['CCS'])
label14[numAlive]['text'] = str(int(fleetAll[numCompany][keyFleet]['vDsgnRed'][tOpTemp]))
label15[numAlive]['text'] = str('{:.3g}'.format(fleetAll[numCompany][keyFleet]['EEDIreq'][tOpTemp]))
label16[numAlive]['text'] = str('{:.3g}'.format(fleetAll[numCompany][keyFleet]['EEDIatt'][tOpTemp]))
if valueDict['vMin'] < fleetAll[numCompany][keyFleet]['vDsgnRed'][tOpTemp]:
button2['state'] = 'normal'
numAlive += 1
#fleetAll[numCompany][keyFleet] = fleetAll[numCompany][keyFleet]
def _buttonCommandNext(root,fleetAll,numCompany,tOpSch):
NumFleet = len(fleetAll[numCompany])
j = 0
goAhead = True
for keyFleet in range(1,NumFleet):
if fleetAll[numCompany][keyFleet]['delivery'] <= currentYear and fleetAll[numCompany][keyFleet]['tOp'] < tOpSch:
tOpTemp = fleetAll[numCompany][keyFleet]['tOp']
if valueDict['vMin'] > fleetAll[numCompany][keyFleet]['vDsgnRed'][tOpTemp] and v4[j].get() != '1':
goAhead = False
j += 1
if goAhead:
j = 0
for keyFleet in range(1,NumFleet):
if fleetAll[numCompany][keyFleet]['delivery'] <= currentYear and fleetAll[numCompany][keyFleet]['tOp'] < tOpSch:
if v4[j].get() == '1':
fleetAll[numCompany][keyFleet]['tOp'] = tOpSch
j += 1
root.quit()
root.destroy()
else:
button2['state'] = 'disabled'
def _buttonCommandCheck(fleetAll,valueDict,rEEDIreq):
NumFleet = len(fleetAll[numCompany])
numAlive = 0
goAhead = True
for keyFleet in range(1,NumFleet):
if fleetAll[numCompany][keyFleet]['delivery'] <= currentYear and fleetAll[numCompany][keyFleet]['tOp'] < tOpSch:
tOpTemp = fleetAll[numCompany][keyFleet]['tOp']
fleetAll[numCompany][keyFleet]['WPS'] = int(v1[numAlive].get())
fleetAll[numCompany][keyFleet]['SPS'] = int(v2[numAlive].get())
fleetAll[numCompany][keyFleet]['CCS'] = int(v3[numAlive].get())
rEEDIreqCurrent = rEEDIreqCurrentFunc(fleetAll[numCompany][keyFleet]['wDWT'],rEEDIreq)
fleetAll[numCompany][keyFleet]['EEDIref'][tOpTemp], fleetAll[numCompany][keyFleet]['EEDIreq'][tOpTemp] = EEDIreqFunc(valueDict['kEEDI1'],fleetAll[numCompany][keyFleet]['wDWT'],valueDict['kEEDI2'],rEEDIreqCurrent)
fleetAll[numCompany][keyFleet]['MCRM'][tOpTemp], fleetAll[numCompany][keyFleet]['PA'][tOpTemp], fleetAll[numCompany][keyFleet]['EEDIatt'][tOpTemp], fleetAll[numCompany][keyFleet]['vDsgnRed'][tOpTemp] = EEDIattFunc(fleetAll[numCompany][keyFleet]['wDWT'],valueDict['wMCR'],valueDict['kMCR1'],valueDict['kMCR2'],valueDict['kMCR3'],valueDict['kPAE1'],valueDict['kPAE2'],valueDict['rCCS'],valueDict['vDsgn'],valueDict['rWPS'],fleetAll[numCompany][keyFleet]['Cco2ship'],valueDict['SfcM'],valueDict['SfcA'],valueDict['rSPS'],fleetAll[numCompany][keyFleet]['Cco2aux'],fleetAll[numCompany][keyFleet]['EEDIreq'][tOpTemp],fleetAll[numCompany][keyFleet]['WPS'],fleetAll[numCompany][keyFleet]['SPS'],fleetAll[numCompany][keyFleet]['CCS'])
if valueDict['vMin'] > fleetAll[numCompany][keyFleet]['vDsgnRed'][tOpTemp] and v4[numAlive].get() != '1':
goAhead = False
numAlive += 1
if goAhead:
button2['state'] = 'normal'
def _buttonCommandNext2(root):
root.quit()
root.destroy()
def _buttonCommandAtOnce(Sys):
NumFleet = len(fleetAll[numCompany])
j = 0
for keyFleet in range(1,NumFleet):
if fleetAll[numCompany][keyFleet]['delivery'] <= currentYear and fleetAll[numCompany][keyFleet]['tOp'] < tOpSch:
if Sys == 'WPS':
if label10[j].state() != ('disabled', 'selected'):
if v1[j].get() == '1':
v1[j].set('0')
elif v1[j].get() == '0':
v1[j].set('1')
fleetAll[numCompany][keyFleet][Sys] = int(v1[j].get())
elif Sys == 'SPS':
if label11[j].state() != ('disabled', 'selected'):
if v2[j].get() == '1':
v2[j].set('0')
elif v2[j].get() == '0':
v2[j].set('1')
fleetAll[numCompany][keyFleet][Sys] = int(v2[j].get())
elif Sys == 'CCS':
if label12[j].state() != ('disabled', 'selected') and label12[j].state() != ('disabled',):
if v3[j].get() == '1':
v3[j].set('0')
elif v3[j].get() == '0':
v3[j].set('1')
fleetAll[numCompany][keyFleet][Sys] = int(v3[j].get())
tOpTemp = fleetAll[numCompany][keyFleet]['tOp']
rEEDIreqCurrent = rEEDIreqCurrentFunc(fleetAll[numCompany][keyFleet]['wDWT'],rEEDIreq)
fleetAll[numCompany][keyFleet]['EEDIref'][tOpTemp], fleetAll[numCompany][keyFleet]['EEDIreq'][tOpTemp] = EEDIreqFunc(valueDict['kEEDI1'],fleetAll[numCompany][keyFleet]['wDWT'],valueDict['kEEDI2'],rEEDIreqCurrent)
fleetAll[numCompany][keyFleet]['MCRM'][tOpTemp], fleetAll[numCompany][keyFleet]['PA'][tOpTemp], fleetAll[numCompany][keyFleet]['EEDIatt'][tOpTemp], fleetAll[numCompany][keyFleet]['vDsgnRed'][tOpTemp] = EEDIattFunc(fleetAll[numCompany][keyFleet]['wDWT'],valueDict['wMCR'],valueDict['kMCR1'],valueDict['kMCR2'],valueDict['kMCR3'],valueDict['kPAE1'],valueDict['kPAE2'],valueDict['rCCS'],valueDict['vDsgn'],valueDict['rWPS'],fleetAll[numCompany][keyFleet]['Cco2ship'],valueDict['SfcM'],valueDict['SfcA'],valueDict['rSPS'],fleetAll[numCompany][keyFleet]['Cco2aux'],fleetAll[numCompany][keyFleet]['EEDIreq'][tOpTemp],fleetAll[numCompany][keyFleet]['WPS'],fleetAll[numCompany][keyFleet]['SPS'],fleetAll[numCompany][keyFleet]['CCS'])
label14[j]['text'] = str(int(fleetAll[numCompany][keyFleet]['vDsgnRed'][tOpTemp]))
label15[j]['text'] = str('{:.3g}'.format(fleetAll[numCompany][keyFleet]['EEDIreq'][tOpTemp]))
label16[j]['text'] = str('{:.3g}'.format(fleetAll[numCompany][keyFleet]['EEDIatt'][tOpTemp]))
if valueDict['vMin'] < fleetAll[numCompany][keyFleet]['vDsgnRed'][tOpTemp]:
button2['state'] = 'normal'
j += 1
root = Tk()
root.title('Company '+str(numCompany)+' : Scrap or Refurbish in '+str(currentYear))
width = 1000
height = 500
placeX = root.winfo_screenwidth()/2 - width/2
placeY = root.winfo_screenheight()/2 - height/2
widgetSize = str(width)+'x'+str(height)+'+'+str(int(placeX))+'+'+str(int(placeY))
root.geometry(widgetSize)
canvas = Canvas(root, width=width, height=height)
# Frame
style = ttk.Style()
style.theme_use('default')
if numCompany == 1:
color = '#ffcccc'
elif numCompany == 2:
color = '#ffedab'
elif numCompany == 3:
color = '#a4a8d4'
root['bg'] = color
style.configure('new.TFrame', foreground='black', background=color)
style.configure('new.TLabel', foreground='black', background=color)
style.configure('new.TButton', foreground='black', background=color)
style.configure('new.TCheckbutton', foreground='black', background=color)
frame = ttk.Frame(root, style='new.TFrame', padding=20)
frame.pack()
frame.bind("<Configure>", lambda e: canvas.configure(scrollregion=canvas.bbox("all")))
vbar = Scrollbar(root, orient="vertical")
vbar.config(command=canvas.yview)
vbar.pack(side=RIGHT,fill="y")
canvas['bg'] = color
canvas.create_window((placeX, placeY), window=frame, anchor=CENTER)
canvas.pack()
canvas.update_idletasks()
canvas.configure(yscrollcommand=vbar.set)
canvas.yview_moveto(0)
# Label
label0 = ttk.Label(frame, style='new.TLabel', text='No.', padding=(5, 2))
labelDeli = ttk.Label(frame, style='new.TLabel',text='Delivery year', padding=(5, 2))
label1 = ttk.Label(frame, style='new.TLabel',text='Fuel type', padding=(5, 2))
label2 = ttk.Label(frame, style='new.TLabel',text='Capacity [TEU]', padding=(5, 2))
label3 = ttk.Label(frame, style='new.TLabel',text='WPS', padding=(5, 2))
label4 = ttk.Label(frame, style='new.TLabel',text='SPS', padding=(5, 2))
label5 = ttk.Label(frame, style='new.TLabel',text='CCS', padding=(5, 2))
label7 = ttk.Label(frame, style='new.TLabel',text='Maximum speed [kt]', padding=(5, 2))
label152 = ttk.Label(frame, style='new.TLabel',text='EEXIreq [g/(ton*NM)]', padding=(5, 2))
label162 = ttk.Label(frame, style='new.TLabel',text='EEXIatt [g/(ton*NM)]', padding=(5, 2))
labelScrap = ttk.Label(frame, style='new.TLabel',text='Scrap', padding=(5, 2))
label00 = []
labelDeli1 = []
label8 = []
label9 = []
label10 = []
label11 = []
label12 = []
label14 = []
label15 = []
label16 = []
buttonScrap = []
v1 = []
v2 = []
v3 = []
v4 = []
NumFleet = len(fleetAll[numCompany])
for keyFleet in range(1,NumFleet):
if fleetAll[numCompany][keyFleet]['delivery'] <= currentYear and fleetAll[numCompany][keyFleet]['tOp'] < tOpSch:
labelDeli1.append(ttk.Label(frame, style='new.TLabel',text=str(fleetAll[numCompany][keyFleet]['delivery']), padding=(5, 2)))
label00.append(ttk.Label(frame, style='new.TLabel',text=str(keyFleet), padding=(5, 2)))
tOpTemp = fleetAll[numCompany][keyFleet]['tOp']
if fleetAll[numCompany][keyFleet]['fuelName'] == 'HFO':
label8.append(ttk.Label(frame, style='new.TLabel',text='HFO/Diesel', padding=(5, 2)))
else:
label8.append(ttk.Label(frame, style='new.TLabel',text=fleetAll[numCompany][keyFleet]['fuelName'], padding=(5, 2)))
label9.append(ttk.Label(frame, style='new.TLabel',text=str(int(fleetAll[numCompany][keyFleet]['CAPcnt'])), padding=(5, 2)))
v1.append(StringVar())
if fleetAll[numCompany][keyFleet]['WPS']:
v1[-1].set('1')
                    label10.append(ttk.Checkbutton(frame, style='new.TCheckbutton', padding=(10), state='disabled', command=lambda: _buttonCommandCheckButton(fleetAll,valueDict,rEEDIreq,v1,'WPS'),variable=v1[-1]))
else:
v1[-1].set('0')
label10.append(ttk.Checkbutton(frame, style='new.TCheckbutton',padding=(10), command=lambda: _buttonCommandCheckButton(fleetAll,valueDict,rEEDIreq,v1,'WPS'),variable=v1[-1]))
v2.append(StringVar())
if fleetAll[numCompany][keyFleet]['SPS']:
v2[-1].set('1')
                    label11.append(ttk.Checkbutton(frame, style='new.TCheckbutton',padding=(10), state='disabled', command=lambda: _buttonCommandCheckButton(fleetAll,valueDict,rEEDIreq,v2,'SPS'),variable=v2[-1]))
else:
v2[-1].set('0')
label11.append(ttk.Checkbutton(frame, style='new.TCheckbutton',padding=(10), command=lambda: _buttonCommandCheckButton(fleetAll,valueDict,rEEDIreq,v2,'SPS'),variable=v2[-1]))
v3.append(StringVar())
if fleetAll[numCompany][keyFleet]['CCS']:
v3[-1].set('1')
                    label12.append(ttk.Checkbutton(frame, style='new.TCheckbutton',padding=(10), state='disabled', command=lambda: _buttonCommandCheckButton(fleetAll,valueDict,rEEDIreq,v3,'CCS'),variable=v3[-1]))
elif currentYear < valueDict['addSysYear']+2:
v3[-1].set('0')
                    label12.append(ttk.Checkbutton(frame, style='new.TCheckbutton', padding=(10), state='disabled', command=lambda: _buttonCommandCheckButton(fleetAll,valueDict,rEEDIreq,v3,'CCS'),variable=v3[-1]))
else:
v3[-1].set('0')
label12.append(ttk.Checkbutton(frame, style='new.TCheckbutton',padding=(10), command=lambda: _buttonCommandCheckButton(fleetAll,valueDict,rEEDIreq,v3,'CCS'),variable=v3[-1]))
label14.append(ttk.Label(frame, style='new.TLabel',text=str(int(fleetAll[numCompany][keyFleet]['vDsgnRed'][tOpTemp])), padding=(5, 2)))
label15.append(ttk.Label(frame, style='new.TLabel',text='{:.3g}'.format(fleetAll[numCompany][keyFleet]['EEDIreq'][tOpTemp]), padding=(5, 2)))
label16.append(ttk.Label(frame, style='new.TLabel',text='{:.3g}'.format(fleetAll[numCompany][keyFleet]['EEDIatt'][tOpTemp]), padding=(5, 2)))
v4.append(StringVar())
buttonScrap.append(ttk.Checkbutton(frame, style='new.TCheckbutton',padding=(10), variable=v4[-1]))
        labelExpl = ttk.Label(frame, style='new.TLabel', text='Guide: Check the additional systems and the scrap box for any fleet you want, then click "Check" & "Next". You can toggle all boxes of one type at once with the "Check all ... at once" buttons.', padding=(5, 2))
labelExpl2 = ttk.Label(frame, style='new.TLabel', text='Guide: You have no fleet. Click "Next".', padding=(5, 2))
# Button
button1 = ttk.Button(frame, style='new.TButton', text='Check', command=lambda: _buttonCommandCheck(fleetAll,valueDict,rEEDIreq))
button2 = ttk.Button(frame, style='new.TButton', text='Next', state='disabled', command=lambda: _buttonCommandNext(root,fleetAll,numCompany,tOpSch))
buttonWPS = ttk.Button(frame, style='new.TButton', text='Check all WPS at once', command=lambda: _buttonCommandAtOnce('WPS'))
buttonSPS = ttk.Button(frame, style='new.TButton', text='Check all SPS at once', command=lambda: _buttonCommandAtOnce('SPS'))
buttonCCS = ttk.Button(frame, style='new.TButton', text='Check all CCS at once', command=lambda: _buttonCommandAtOnce('CCS'))
button22 = ttk.Button(frame, style='new.TButton',text='Next', command=lambda: _buttonCommandNext2(root))
# Layout
if len(label8) > 0:
label0.grid(row=0, column=0)
labelDeli.grid(row=0, column=1)
label1.grid(row=0, column=2)
label2.grid(row=0, column=3)
label3.grid(row=0, column=4)
label4.grid(row=0, column=5)
label5.grid(row=0, column=6)
label7.grid(row=0, column=7)
label152.grid(row=0, column=8)
label162.grid(row=0, column=9)
labelScrap.grid(row=0, column=10)
for i, j in enumerate(label8):
labelDeli1[i].grid(row=i+1, column=1, pady=0)
label00[i].grid(row=i+1, column=0, pady=0)
label8[i].grid(row=i+1, column=2, pady=0)
label9[i].grid(row=i+1, column=3, pady=0)
label10[i].grid(row=i+1, column=4, pady=0)
label11[i].grid(row=i+1, column=5, pady=0)
label12[i].grid(row=i+1, column=6, pady=0)
label14[i].grid(row=i+1, column=7, pady=0)
label15[i].grid(row=i+1, column=8, pady=0)
label16[i].grid(row=i+1, column=9, pady=0)
buttonScrap[i].grid(row=i+1, column=10, pady=0)
button1.grid(row=i+2, column=9)
button2.grid(row=i+2, column=10)
buttonWPS.grid(row=i+2, column=1)
buttonSPS.grid(row=i+2, column=2)
buttonCCS.grid(row=i+2, column=3)
labelExpl.grid(row=i+3, column=0, columnspan=10)
else:
labelExpl2.grid(row=0, column=0)
button22.grid(row=0, column=1)
root.deiconify()
root.mainloop()
return fleetAll
def _dcostCntGui(fleetAll,numCompany,elapsedYear):
def _buttonCommand(fleetAll,numCompany,elapsedYear,root,v):
fleetAll[numCompany]['total']['dcostCnt'][elapsedYear] = v.get()
root.destroy()
root.quit()
root = Tk()
root.title('Company '+str(numCompany)+' : Additional Shipping Fee Per Container in '+str(currentYear))
width = 500
height = 200
placeX = root.winfo_screenwidth()/2 - width/2
placeY = root.winfo_screenheight()/2 - height/2
widgetSize = str(width)+'x'+str(height)+'+'+str(int(placeX))+'+'+str(int(placeY))
root.geometry(widgetSize)
# Frame
style = ttk.Style()
style.theme_use('default')
if numCompany == 1:
color = '#ffcccc'
elif numCompany == 2:
color = '#ffedab'
elif numCompany == 3:
color = '#a4a8d4'
root['bg'] = color
style.configure('new.TFrame', foreground='black', background=color)
style.configure('new.TLabel', foreground='black', background=color)
style.configure('new.TButton', foreground='black', background=color)
style.configure('new.TCheckbutton', foreground='black', background=color)
style.configure('new.TEntry', foreground='black', background=color)
frame = ttk.Frame(root, style='new.TFrame', padding=20)
frame.pack()
v1 = StringVar()
if elapsedYear == 0:
v1.set('0')
else:
v1.set(str(int(fleetAll[numCompany]['total']['dcostCnt'][elapsedYear-1])))
cb1 = ttk.Entry(frame, style='new.TEntry', textvariable=v1)
label1 = ttk.Label(frame, style='new.TLabel', text='Additional container fee dC (-1000 <= dC <= 1000)', padding=(5, 2))
label2 = ttk.Label(frame, style='new.TLabel', text='Nominal shipping cost: 1500 $/container', padding=(5, 2))
label3 = ttk.Label(frame, style='new.TLabel', text='$', padding=(5, 2))
labelExpl = ttk.Label(frame, style='new.TLabel', text='Guide: Input additional shipping fee per container, and then click "Complete".', padding=(5, 2))
button = ttk.Button(frame, style='new.TButton', text='Complete', command=lambda: _buttonCommand(fleetAll,numCompany,elapsedYear,root,v1))
label1.grid(row=1, column=0)
label2.grid(row=0, column=0)
label3.grid(row=1, column=2)
cb1.grid(row=1, column=1)
button.grid(row=2, column=1)
labelExpl.grid(row=3, column=0,columnspan=5)
root.deiconify()
root.mainloop()
return fleetAll
# calculate EEDI
NumFleet = len(fleetAll[numCompany])
for keyFleet in range(1,NumFleet):
tOpTemp = fleetAll[numCompany][keyFleet]['tOp']
if fleetAll[numCompany][keyFleet]['delivery'] <= currentYear and fleetAll[numCompany][keyFleet]['tOp'] < tOpSch:
rEEDIreqCurrent = rEEDIreqCurrentFunc(fleetAll[numCompany][keyFleet]['wDWT'],rEEDIreq)
fleetAll[numCompany][keyFleet]['EEDIref'][tOpTemp], fleetAll[numCompany][keyFleet]['EEDIreq'][tOpTemp] = EEDIreqFunc(valueDict['kEEDI1'],fleetAll[numCompany][keyFleet]['wDWT'],valueDict['kEEDI2'],rEEDIreqCurrent)
fleetAll[numCompany][keyFleet]['MCRM'][tOpTemp], fleetAll[numCompany][keyFleet]['PA'][tOpTemp], fleetAll[numCompany][keyFleet]['EEDIatt'][tOpTemp], fleetAll[numCompany][keyFleet]['vDsgnRed'][tOpTemp] = EEDIattFunc(fleetAll[numCompany][keyFleet]['wDWT'],valueDict['wMCR'],valueDict['kMCR1'],valueDict['kMCR2'],valueDict['kMCR3'],valueDict['kPAE1'],valueDict['kPAE2'],valueDict['rCCS'],valueDict['vDsgn'],valueDict['rWPS'],fleetAll[numCompany][keyFleet]['Cco2ship'],valueDict['SfcM'],valueDict['SfcA'],valueDict['rSPS'],fleetAll[numCompany][keyFleet]['Cco2aux'],fleetAll[numCompany][keyFleet]['EEDIreq'][tOpTemp],fleetAll[numCompany][keyFleet]['WPS'],fleetAll[numCompany][keyFleet]['SPS'],fleetAll[numCompany][keyFleet]['CCS'])
# decide to scrap or refurbish currently alive fleet
fleetAll = _scrapOrRefurbishGui(fleetAll,numCompany,tOpSch,valueDict,currentYear,rEEDIreq)
for keyFleet in range(1,NumFleet):
tOpTemp = fleetAll[numCompany][keyFleet]['tOp']
if fleetAll[numCompany][keyFleet]['delivery'] <= currentYear and fleetAll[numCompany][keyFleet]['tOp'] < tOpSch:
cAdditionalEquipment = 0
            # charge each system that was newly added during refurbishment this year
            if fleetAll[numCompany][keyFleet]['lastWPS'] != fleetAll[numCompany][keyFleet]['WPS'] and fleetAll[numCompany][keyFleet]['WPS']:
                cAdditionalEquipment += valueDict['dcostWPS']
            if fleetAll[numCompany][keyFleet]['lastSPS'] != fleetAll[numCompany][keyFleet]['SPS'] and fleetAll[numCompany][keyFleet]['SPS']:
                cAdditionalEquipment += valueDict['dcostSPS']
            if fleetAll[numCompany][keyFleet]['lastCCS'] != fleetAll[numCompany][keyFleet]['CCS'] and fleetAll[numCompany][keyFleet]['CCS']:
                cAdditionalEquipment += valueDict['dcostCCS']
fleetAll[numCompany][keyFleet]['lastWPS'] = fleetAll[numCompany][keyFleet]['WPS']
fleetAll[numCompany][keyFleet]['lastSPS'] = fleetAll[numCompany][keyFleet]['SPS']
fleetAll[numCompany][keyFleet]['lastCCS'] = fleetAll[numCompany][keyFleet]['CCS']
fleetAll[numCompany][keyFleet]['costRfrb'][tOpTemp] = cAdditionalEquipment * fleetAll[numCompany][keyFleet]['costShipBasicHFO']
# decide additional shipping fee per container
#_dcostCntGui(fleetAll,numCompany,elapsedYear)
return fleetAll
def orderShipFunc(fleetAll,numCompany,fuelName,WPS,SPS,CCS,CAPcnt,tOpSch,tbid,iniT,currentYear,elapsedYear,valueDict,NShipFleet,ifIni,parameterFile2,parameterFile12,parameterFile3,parameterFile5):
NumFleet = len(fleetAll[numCompany])
fleetAll[numCompany].setdefault(NumFleet,{})
fleetAll[numCompany][NumFleet]['fuelName'] = fuelName
fleetAll[numCompany][NumFleet]['WPS'] = WPS
fleetAll[numCompany][NumFleet]['SPS'] = SPS
fleetAll[numCompany][NumFleet]['CCS'] = CCS
fleetAll[numCompany][NumFleet]['lastWPS'] = WPS
fleetAll[numCompany][NumFleet]['lastSPS'] = SPS
fleetAll[numCompany][NumFleet]['lastCCS'] = CCS
fleetAll[numCompany][NumFleet]['CAPcnt'] = float(CAPcnt)
fleetAll[numCompany][NumFleet]['wDWT'] = wDWTFunc(valueDict["kDWT1"],fleetAll[numCompany][NumFleet]['CAPcnt'],valueDict["kDWT2"])
fleetAll[numCompany][NumFleet]['wFLD'] = wFLDFunc(valueDict["kFLD1"],fleetAll[numCompany][NumFleet]['wDWT'],valueDict["kFLD2"])
fleetAll[numCompany][NumFleet]['CeqLHVship'] = CeqLHVFunc(parameterFile2,fleetAll[numCompany][NumFleet]['fuelName'])
fleetAll[numCompany][NumFleet]['CeqLHVaux'] = CeqLHVFunc(parameterFile12,fleetAll[numCompany][NumFleet]['fuelName'])
fleetAll[numCompany][NumFleet]['Cco2ship'] = Cco2Func(parameterFile3,fleetAll[numCompany][NumFleet]['fuelName'])
if fuelName == 'HFO':
fleetAll[numCompany][NumFleet]['Cco2aux'] = Cco2Func(parameterFile3,'Diesel')
else:
fleetAll[numCompany][NumFleet]['Cco2aux'] = Cco2Func(parameterFile3,fleetAll[numCompany][NumFleet]['fuelName'])
fleetAll[numCompany][NumFleet]['rShipBasic'] = rShipBasicFunc(parameterFile5,fleetAll[numCompany][NumFleet]['fuelName'],fleetAll[numCompany][NumFleet]['CAPcnt'])
fleetAll[numCompany][NumFleet]['delivery'] = currentYear+tbid
fleetAll[numCompany][NumFleet]['tOp'] = iniT
fleetAll[numCompany][NumFleet]['costShipBasicHFO'], fleetAll[numCompany][NumFleet]['costShipBasic'], fleetAll[numCompany][NumFleet]['costShipAdd'], fleetAll[numCompany][NumFleet]['costShip'] = costShipFunc(valueDict["kShipBasic1"], fleetAll[numCompany][NumFleet]["CAPcnt"], valueDict["kShipBasic2"], fleetAll[numCompany][NumFleet]['rShipBasic'], valueDict["dcostWPS"], valueDict["dcostSPS"], valueDict["dcostCCS"], fleetAll[numCompany][NumFleet]['WPS'], fleetAll[numCompany][NumFleet]['SPS'], fleetAll[numCompany][NumFleet]['CCS'])
if iniT == 0 and not ifIni:
fleetAll[numCompany]['total']['costShip'][elapsedYear+2] += NShipFleet * fleetAll[numCompany][NumFleet]['costShip']
fleetAll[numCompany]['total']['costShipBasicHFO'][elapsedYear+2] += NShipFleet * fleetAll[numCompany][NumFleet]['costShipBasicHFO']
fleetAll[numCompany][NumFleet]['v'] = np.zeros(tOpSch)
fleetAll[numCompany][NumFleet]['d'] = np.zeros(tOpSch)
fleetAll[numCompany][NumFleet]['fShipORG'] = np.zeros(tOpSch)
fleetAll[numCompany][NumFleet]['fAuxORG'] = np.zeros(tOpSch)
fleetAll[numCompany][NumFleet]['gORG'] = np.zeros(tOpSch)
fleetAll[numCompany][NumFleet]['costFuelORG'] = np.zeros(tOpSch)
fleetAll[numCompany][NumFleet]['costFuel'] = np.zeros(tOpSch)
fleetAll[numCompany][NumFleet]['dcostFuel'] = np.zeros(tOpSch)
fleetAll[numCompany][NumFleet]['fShip'] = np.zeros(tOpSch)
fleetAll[numCompany][NumFleet]['fAux'] = np.zeros(tOpSch)
fleetAll[numCompany][NumFleet]['g'] = np.zeros(tOpSch)
fleetAll[numCompany][NumFleet]['cta'] = np.zeros(tOpSch)
fleetAll[numCompany][NumFleet]['dcostShipping'] = np.zeros(tOpSch)
fleetAll[numCompany][NumFleet]['gTilde'] = np.zeros(tOpSch)
fleetAll[numCompany][NumFleet]['costRfrb'] = np.zeros(tOpSch)
fleetAll[numCompany][NumFleet]['EEDIref'] = np.zeros(tOpSch)
fleetAll[numCompany][NumFleet]['EEDIreq'] = np.zeros(tOpSch)
fleetAll[numCompany][NumFleet]['EEDIatt'] = np.zeros(tOpSch)
fleetAll[numCompany][NumFleet]['vDsgnRed'] = np.zeros(tOpSch)
fleetAll[numCompany][NumFleet]['MCRM'] = np.zeros(tOpSch)
fleetAll[numCompany][NumFleet]['PA'] = np.zeros(tOpSch)
fleetAll[numCompany][NumFleet]['year'] = np.zeros(tOpSch)
return fleetAll
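# Illustrative sketch (all arguments are assumed placeholders; the parameterFile* arguments
# are the same parameter-file paths used elsewhere in this module): ordering one 20,000 TEU
# HFO fleet without WPS/SPS/CCS, delivered tbid years after the current year.
def _demoOrderShip(fleetAll, valueDict, parameterFile2, parameterFile12, parameterFile3, parameterFile5):
    # positional values: tOpSch=20, tbid=2, iniT=0, currentYear=2025, elapsedYear=0,
    # NShipFleet=200, ifIni=True (all assumed)
    return orderShipFunc(fleetAll, 1, 'HFO', 0, 0, 0, 20000, 20, 2, 0,
                         2025, 0, valueDict, 200, True, parameterFile2,
                         parameterFile12, parameterFile3, parameterFile5)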
def orderPhaseFunc(fleetAll,numCompany,valueDict,elapsedYear,tOpSch,tbid,currentYear,rEEDIreq,NShipFleet,parameterFile2,parameterFile12,parameterFile3,parameterFile5):
def _orderShipGui(fleetAll,numCompany,valueDict,elapsedYear,tOpSch,tbid,currentYear,rEEDIreq,NShipFleet,parameterFile2,parameterFile12,parameterFile3,parameterFile5):
def _EEDIcalc(rEEDIreq,parameterFile3,valueDict):
fuelType = v1.get()
CAP = float(v2.get())
WPS = int(v3.get())
SPS = int(v4.get())
CCS = int(v5.get())
wDWT = wDWTFunc(valueDict['kDWT1'],CAP,valueDict['kDWT2'])
rEEDIreqCurrent = rEEDIreqCurrentFunc(wDWT,rEEDIreq)
if fuelType == 'HFO/Diesel':
Cco2ship = Cco2Func(parameterFile3,'HFO')
Cco2aux = Cco2Func(parameterFile3,'Diesel')
else:
Cco2ship = Cco2Func(parameterFile3,fuelType)
Cco2aux = Cco2Func(parameterFile3,fuelType)
_, EEDIreq = EEDIreqFunc(valueDict['kEEDI1'],wDWT,valueDict['kEEDI2'],rEEDIreqCurrent)
_, _, EEDIatt, vDsgnRed = EEDIattFunc(wDWT,valueDict['wMCR'],valueDict['kMCR1'],valueDict['kMCR2'],valueDict['kMCR3'],valueDict['kPAE1'],valueDict['kPAE2'],valueDict['rCCS'],valueDict['vDsgn'],valueDict['rWPS'],Cco2ship,valueDict['SfcM'],valueDict['SfcA'],valueDict['rSPS'],Cco2aux,EEDIreq,WPS,SPS,CCS)
return CAP, vDsgnRed, EEDIreq, EEDIatt
def _buttonCommandAnother(fleetAll,numCompany,tOpSch,tbid,currentYear,elapsedYear,NShipFleet,parameterFile2,parameterFile12,parameterFile3,parameterFile5):
CAP, vDsgnRed, EEDIreq, EEDIatt = _EEDIcalc(rEEDIreq,parameterFile3,valueDict)
if valueDict['vMin'] <= vDsgnRed and CAP >= 8000 and CAP <= 24000:
if v1.get() == 'HFO/Diesel':
fuelName = 'HFO'
else:
fuelName = v1.get()
fleetAll = orderShipFunc(fleetAll,numCompany,fuelName,int(v3.get()),int(v4.get()),int(v5.get()),float(v2.get()),tOpSch,tbid,0,currentYear,elapsedYear,valueDict,NShipFleet,False,parameterFile2,parameterFile12,parameterFile3,parameterFile5)
fleetAll[numCompany]['total']['lastOrderFuel'] = v1.get()
fleetAll[numCompany]['total']['lastOrderCAP'] = v2.get()
cb1.delete(0,"end")
cb1.insert(0, fleetAll[numCompany]['total']['lastOrderCAP'])
v3.set('0')
v4.set('0')
v5.set('0')
cb2.var = v3
cb3.var = v4
cb4.var = v5
label6['text'] = 'None'
label7['text'] = 'None'
label8['text'] = 'None'
button1['state'] = 'disabled'
button2['state'] = 'disabled'
else:
button1['state'] = 'disabled'
button2['state'] = 'disabled'
def _buttonCommandComplete(root,fleetAll,numCompany,tOpSch,tbid,currentYear,elapsedYear,NShipFleet,parameterFile2,parameterFile12,parameterFile3,parameterFile5):
CAP, vDsgnRed, EEDIreq, EEDIatt = _EEDIcalc(rEEDIreq,parameterFile3,valueDict)
if valueDict['vMin'] <= vDsgnRed and CAP >= 8000 and CAP <= 24000:
if v1.get() == 'HFO/Diesel':
fuelName = 'HFO'
else:
fuelName = v1.get()
fleetAll = orderShipFunc(fleetAll,numCompany,fuelName,int(v3.get()),int(v4.get()),int(v5.get()),float(v2.get()),tOpSch,tbid,0,currentYear,elapsedYear,valueDict,NShipFleet,False,parameterFile2,parameterFile12,parameterFile3,parameterFile5)
fleetAll[numCompany]['total']['lastOrderFuel'] = v1.get()
fleetAll[numCompany]['total']['lastOrderCAP'] = v2.get()
root.quit()
root.destroy()
else:
button1['state'] = 'disabled'
button2['state'] = 'disabled'
def _buttonCommandCheck(valueDict,parameterFile3,rEEDIreq):
CAP, vDsgnRed, EEDIreq, EEDIatt = _EEDIcalc(rEEDIreq,parameterFile3,valueDict)
label6['text'] = str(str(int(vDsgnRed)))
label7['text'] = str('{:.3g}'.format(EEDIreq))
label8['text'] = str('{:.3g}'.format(EEDIatt))
            if valueDict['vMin'] < vDsgnRed and 8000 <= CAP <= 24000:
                button1['state'] = 'normal'
                button2['state'] = 'normal'
def _buttonCommandNoOrder(root):
root.quit()
root.destroy()
root = Tk()
root.title('Company '+str(numCompany)+' : Order Ship in '+str(currentYear))
width = 1000
height = 300
placeX = root.winfo_screenwidth()/2 - width/2
placeY = root.winfo_screenheight()/2 - height/2
widgetSize = str(width)+'x'+str(height)+'+'+str(int(placeX))+'+'+str(int(placeY))
root.geometry(widgetSize)
# Frame
style = ttk.Style()
style.theme_use('default')
if numCompany == 1:
color = '#ffcccc'
elif numCompany == 2:
color = '#ffedab'
elif numCompany == 3:
color = '#a4a8d4'
root['bg'] = color
style.configure('new.TFrame', foreground='black', background=color)
style.configure('new.TLabel', foreground='black', background=color)
style.configure('new.TButton', foreground='black', background=color)
style.configure('new.TCheckbutton', foreground='black', background=color)
style.configure('new.TEntry', foreground='black', background=color)
style.configure('new.TCombobox', foreground='black', background=color)
frame = ttk.Frame(root, style='new.TFrame', padding=20)
frame.pack()
# Label
label1 = ttk.Label(frame, style='new.TLabel', text='Fuel type', padding=(5, 2))
label2 = ttk.Label(frame, style='new.TLabel', text='Capacity (8000<=Capacity<=24000) [TEU]', padding=(5, 2))
label3 = ttk.Label(frame, style='new.TLabel', text='Maximum speed [kt]', padding=(5, 2))
label4 = ttk.Label(frame, style='new.TLabel', text='EEDIreq [g/(ton*NM)]', padding=(5, 2))
label5 = ttk.Label(frame, style='new.TLabel', text='EEDIatt [g/(ton*NM)]', padding=(5, 2))
label6 = ttk.Label(frame, style='new.TLabel', text='None', padding=(5, 2))
label7 = ttk.Label(frame, style='new.TLabel', text='None', padding=(5, 2))
label8 = ttk.Label(frame, style='new.TLabel', text='None', padding=(5, 2))
label9 = ttk.Label(frame, style='new.TLabel', text='WPS', padding=(5, 2))
label10 = ttk.Label(frame, style='new.TLabel', text='SPS', padding=(5, 2))
label11 = ttk.Label(frame, style='new.TLabel', text='CCS', padding=(5, 2))
        labelExpl = ttk.Label(frame, style='new.TLabel', text='Guide: To order a fleet, select its configuration and click "Another fleet" or "Complete". Otherwise, click "No order".', padding=(5, 2))
# List box
if currentYear < valueDict['addSysYear']:
fuelTypeList = ['HFO/Diesel','LNG']
else:
fuelTypeList = ['HFO/Diesel','LNG','NH3','H2']
v1 = StringVar()
lb = ttk.Combobox(frame, style='new.TCombobox', textvariable=v1,values=fuelTypeList)
if elapsedYear == 0:
lb.set('HFO/Diesel')
else:
lb.set(fleetAll[numCompany]['total']['lastOrderFuel'])
# Entry
v2 = StringVar()
if elapsedYear == 0:
v2.set('20000')
else:
v2.set(str(fleetAll[numCompany]['total']['lastOrderCAP']))
cb1 = ttk.Entry(frame, style='new.TEntry', textvariable=v2)
# Checkbutton
v3 = StringVar()
        v3.set('0') # initialize
cb2 = ttk.Checkbutton(frame, style='new.TCheckbutton', padding=(10), text='WPS', variable=v3)
# Checkbutton
v4 = StringVar()
        v4.set('0') # initialize
cb3 = ttk.Checkbutton(frame, style='new.TCheckbutton', padding=(10), text='SPS', variable=v4)
# Checkbutton
v5 = StringVar()
if currentYear >= valueDict['addSysYear']:
            v5.set('0') # initialize
            cb4 = ttk.Checkbutton(frame, style='new.TCheckbutton', padding=(10), text='CCS', variable=v5)
        else:
            v5.set('0') # initialize
            cb4 = ttk.Checkbutton(frame, state='disabled', style='new.TCheckbutton', padding=(10), text='CCS', variable=v5)
# Button
button1 = ttk.Button(frame, style='new.TButton', text='Another fleet', state='disabled', command=lambda: _buttonCommandAnother(fleetAll,numCompany,tOpSch,tbid,currentYear,elapsedYear,NShipFleet,parameterFile2,parameterFile12,parameterFile3,parameterFile5))
button2 = ttk.Button(frame, style='new.TButton', text='Complete', state='disabled', command=lambda: _buttonCommandComplete(root,fleetAll,numCompany,tOpSch,tbid,currentYear,elapsedYear,NShipFleet,parameterFile2,parameterFile12,parameterFile3,parameterFile5))
button3 = ttk.Button(frame, style='new.TButton', text='EEDI check', command=lambda: _buttonCommandCheck(valueDict,parameterFile3,rEEDIreq))
button4 = ttk.Button(frame, style='new.TButton', text='No order', command=lambda: _buttonCommandNoOrder(root))
# Layout
label1.grid(row=0, column=0)
label2.grid(row=0, column=1)
label3.grid(row=2, column=1)
label4.grid(row=2, column=2)
label5.grid(row=2, column=3)
label6.grid(row=3, column=1)
label7.grid(row=3, column=2)
label8.grid(row=3, column=3)
label9.grid(row=0, column=2)
label10.grid(row=0, column=3)
label11.grid(row=0, column=4)
cb1.grid(row=1, column=1)
cb2.grid(row=1, column=2)
cb3.grid(row=1, column=3)
cb4.grid(row=1, column=4)
lb.grid(row=1, column=0)
button1.grid(row=4, column=2)
button2.grid(row=4, column=4)
button3.grid(row=4, column=1)
button4.grid(row=4, column=0)
labelExpl.grid(row=5, column=0, columnspan=5)
root.deiconify()
root.mainloop()
return fleetAll
fleetAll = _orderShipGui(fleetAll,numCompany,valueDict,elapsedYear,tOpSch,tbid,currentYear,rEEDIreq,NShipFleet,parameterFile2,parameterFile12,parameterFile3,parameterFile5)
return fleetAll
def yearlyCtaFunc(fleetAll,numCompany,startYear,elapsedYear,NShipFleet,tOpSch,v,valueDict):
NumFleet = len(fleetAll[numCompany])
j = 0
maxCta = 0
currentYear = startYear+elapsedYear
for keyFleet in range(1,NumFleet):
if fleetAll[numCompany][keyFleet]['delivery'] <= currentYear and fleetAll[numCompany][keyFleet]['tOp'] < tOpSch:
tOpTemp = fleetAll[numCompany][keyFleet]['tOp']
fleetAll[numCompany][keyFleet]['v'][tOpTemp] = float(v[j].get()) # input for each fleet
fleetAll[numCompany][keyFleet]['d'][tOpTemp] = dFunc(valueDict["Dyear"],valueDict["Hday"],fleetAll[numCompany][keyFleet]['v'][tOpTemp],valueDict["Rrun"])
maxCta += NShipFleet * maxCtaFunc(fleetAll[numCompany][keyFleet]['CAPcnt'],fleetAll[numCompany][keyFleet]['d'][tOpTemp])
j += 1
fleetAll[numCompany]['total']['maxCta'][elapsedYear] = maxCta
return fleetAll
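# Note on yearlyCtaFunc: for every fleet that has been delivered and is still within its
# scheduled life, the service speed chosen in the GUI fixes the yearly sailing distance
# (dFunc), and the fleet-wide annual transport capacity maxCta is accumulated as
# NShipFleet * maxCtaFunc(capacity, distance).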
def yearlyOperationFunc(fleetAll,numCompany,startYear,elapsedYear,NShipFleet,tOpSch,valueDict,rSubs,rTax,parameterFile4):
NumFleet = len(fleetAll[numCompany])
currentYear = startYear+elapsedYear
fleetAll[numCompany]['total']['costRfrb'][elapsedYear] = 0
fleetAll[numCompany]['total']['g'][elapsedYear] = 0
fleetAll[numCompany]['total']['cta'][elapsedYear] = 0
fleetAll[numCompany]['total']['costFuel'][elapsedYear] = 0
for keyFleet in range(1,NumFleet):
if fleetAll[numCompany][keyFleet]['delivery'] <= currentYear and fleetAll[numCompany][keyFleet]['tOp'] < tOpSch:
tOpTemp = fleetAll[numCompany][keyFleet]['tOp']
unitCostFuel, unitCostFuelHFO = unitCostFuelFunc(parameterFile4,fleetAll[numCompany][keyFleet]['fuelName'],currentYear)
fleetAll[numCompany][keyFleet]['cta'][tOpTemp] = ctaFunc(fleetAll[numCompany][keyFleet]['CAPcnt'],fleetAll[numCompany]['total']['rocc'][elapsedYear],fleetAll[numCompany][keyFleet]['d'][tOpTemp])
fleetAll[numCompany][keyFleet]['fShipORG'][tOpTemp], fleetAll[numCompany][keyFleet]['fShip'][tOpTemp] = fShipFunc(valueDict["kShip1"],valueDict["kShip2"],fleetAll[numCompany][keyFleet]['wDWT'],fleetAll[numCompany][keyFleet]['wFLD'],fleetAll[numCompany]['total']['rocc'][elapsedYear],valueDict["CNM2km"],fleetAll[numCompany][keyFleet]['v'][tOpTemp],fleetAll[numCompany][keyFleet]['d'][tOpTemp],valueDict["rWPS"],fleetAll[numCompany][keyFleet]['WPS'],fleetAll[numCompany][keyFleet]['CeqLHVship'])
fleetAll[numCompany][keyFleet]['fAuxORG'][tOpTemp], fleetAll[numCompany][keyFleet]['fAux'][tOpTemp] = fAuxFunc(valueDict["Dyear"],valueDict["Hday"],valueDict["Rrun"],valueDict["kAux1"],valueDict["kAux2"],fleetAll[numCompany][keyFleet]['wDWT'],valueDict["rSPS"],fleetAll[numCompany][keyFleet]['SPS'],fleetAll[numCompany][keyFleet]['CeqLHVaux'])
fleetAll[numCompany][keyFleet]['gORG'][tOpTemp], fleetAll[numCompany][keyFleet]['g'][tOpTemp] = gFunc(fleetAll[numCompany][keyFleet]['Cco2ship'],fleetAll[numCompany][keyFleet]['fShip'][tOpTemp],fleetAll[numCompany][keyFleet]['Cco2aux'],fleetAll[numCompany][keyFleet]['fAux'][tOpTemp],valueDict["rCCS"],fleetAll[numCompany][keyFleet]['CCS'])
fleetAll[numCompany][keyFleet]['costFuelORG'][tOpTemp], fleetAll[numCompany][keyFleet]['costFuel'][tOpTemp], fleetAll[numCompany][keyFleet]['dcostFuel'][tOpTemp] = costFuelFunc(unitCostFuelHFO, unitCostFuel, fleetAll[numCompany][keyFleet]['fShipORG'][tOpTemp], fleetAll[numCompany][keyFleet]['fAuxORG'][tOpTemp], fleetAll[numCompany][keyFleet]['fShip'][tOpTemp], fleetAll[numCompany][keyFleet]['fAux'][tOpTemp])
fleetAll[numCompany][keyFleet]['dcostShipping'][tOpTemp] = additionalShippingFeeFunc(tOpTemp, tOpSch, fleetAll[numCompany][keyFleet]['dcostFuel'][tOpTemp], fleetAll[numCompany][keyFleet]['costShip'], fleetAll[numCompany][keyFleet]['costShipBasicHFO'])
fleetAll[numCompany][keyFleet]['gTilde'][tOpTemp] = fleetAll[numCompany][keyFleet]['g'][tOpTemp] / fleetAll[numCompany][keyFleet]['cta'][tOpTemp]
fleetAll[numCompany]['total']['costRfrb'][elapsedYear] += NShipFleet * fleetAll[numCompany][keyFleet]['costRfrb'][tOpTemp]
fleetAll[numCompany]['total']['g'][elapsedYear] += NShipFleet * fleetAll[numCompany][keyFleet]['g'][tOpTemp]
fleetAll[numCompany]['total']['cta'][elapsedYear] += NShipFleet * fleetAll[numCompany][keyFleet]['cta'][tOpTemp]
fleetAll[numCompany]['total']['costFuel'][elapsedYear] += NShipFleet * fleetAll[numCompany][keyFleet]['costFuel'][tOpTemp]
fleetAll[numCompany]['total']['dcostFuel'][elapsedYear] += NShipFleet * fleetAll[numCompany][keyFleet]['dcostFuel'][tOpTemp]
for keyFleet in range(1,NumFleet):
if fleetAll[numCompany][keyFleet]['delivery'] <= currentYear and fleetAll[numCompany][keyFleet]['tOp'] < tOpSch:
fleetAll[numCompany][keyFleet]['year'][fleetAll[numCompany][keyFleet]['tOp']] = currentYear
fleetAll[numCompany][keyFleet]['tOp'] += 1
fleetAll[numCompany]['total']['costAll'][elapsedYear] = fleetAll[numCompany]['total']['costFuel'][elapsedYear] + fleetAll[numCompany]['total']['costShip'][elapsedYear] + fleetAll[numCompany]['total']['costRfrb'][elapsedYear]
fleetAll[numCompany]['total']['dcostEco'][elapsedYear] = fleetAll[numCompany]['total']['dcostFuel'][elapsedYear] + fleetAll[numCompany]['total']['costShip'][elapsedYear]-fleetAll[numCompany]['total']['costShipBasicHFO'][elapsedYear] + fleetAll[numCompany]['total']['costRfrb'][elapsedYear]
fleetAll[numCompany]['total']['nTransCnt'][elapsedYear] = fleetAll[numCompany]['total']['cta'][elapsedYear] / valueDict['dJPNA']
fleetAll[numCompany]['total']['costCnt'][elapsedYear] = (valueDict['costCntMax']-valueDict['costCntMin']) / (1+math.e**(-valueDict['aSgmd']*(fleetAll[numCompany]['total']['rocc'][elapsedYear]-valueDict['roccNom']))) + valueDict['costCntMin']
fleetAll[numCompany]['total']['sale'][elapsedYear] = fleetAll[numCompany]['total']['nTransCnt'][elapsedYear] * fleetAll[numCompany]['total']['costCnt'][elapsedYear]
fleetAll[numCompany]['total']['gTilde'][elapsedYear] = fleetAll[numCompany]['total']['g'][elapsedYear] / fleetAll[numCompany]['total']['cta'][elapsedYear]
fleetAll[numCompany]['total']['costTilde'][elapsedYear] = fleetAll[numCompany]['total']['costAll'][elapsedYear] / fleetAll[numCompany]['total']['cta'][elapsedYear]
fleetAll[numCompany]['total']['saleTilde'][elapsedYear] = fleetAll[numCompany]['total']['sale'][elapsedYear] / fleetAll[numCompany]['total']['cta'][elapsedYear]
fleetAll[numCompany]['total']['mSubs'][elapsedYear] = rSubs * fleetAll[numCompany]['total']['dcostEco'][elapsedYear]
fleetAll[numCompany]['total']['mTax'][elapsedYear] = rTax * fleetAll[numCompany]['total']['g'][elapsedYear]
fleetAll[numCompany]['total']['balance'][elapsedYear] = fleetAll[numCompany]['total']['mTax'][elapsedYear] - fleetAll[numCompany]['total']['mSubs'][elapsedYear]
fleetAll[numCompany]['total']['profit'][elapsedYear] = fleetAll[numCompany]['total']['sale'][elapsedYear] - fleetAll[numCompany]['total']['costAll'][elapsedYear] - fleetAll[numCompany]['total']['balance'][elapsedYear]
fleetAll[numCompany]['total']['profitSum'][elapsedYear] += fleetAll[numCompany]['total']['profit'][elapsedYear]
fleetAll[numCompany]['total']['gSum'][elapsedYear] += fleetAll[numCompany]['total']['g'][elapsedYear]
fleetAll[numCompany]['total']['Idx'][elapsedYear] = fleetAll[numCompany]['total']['profitSum'][elapsedYear] / fleetAll[numCompany]['total']['gSum'][elapsedYear]
return fleetAll
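# Summary of the yearly accounting in yearlyOperationFunc: costAll = costFuel + costShip +
# costRfrb; the freight rate costCnt follows a logistic curve in the occupancy rate rocc;
# sale = nTransCnt * costCnt; mSubs = rSubs * dcostEco and mTax = rTax * g, so
# balance = mTax - mSubs and profit = sale - costAll - balance; the company index
# Idx = profitSum / gSum is cumulative profit per cumulative ton of CO2.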
def decideSpeedFunc(fleetAll,numCompany,startYear,elapsedYear,NShipFleet,tOpSch,valueDict):
    def _serviceSpeedGui(fleetAll,numCompany,startYear,elapsedYear,NShipFleet,tOpSch,valueDict):
def _buttonCommandNext(root,fleetAll,numCompany,startYear,elapsedYear,NShipFleet,tOpSch,valueDict):
NumFleet = len(fleetAll[numCompany])
j = 0
goAhead = True
for keyFleet in range(1,NumFleet):
if fleetAll[numCompany][keyFleet]['delivery'] <= currentYear and fleetAll[numCompany][keyFleet]['tOp'] < tOpSch:
tOpTemp = fleetAll[numCompany][keyFleet]['tOp']
if float(v13[j].get()) < 12 or float(v13[j].get()) > fleetAll[numCompany][keyFleet]['vDsgnRed'][tOpTemp]:
goAhead = False
j += 1
if goAhead:
fleetAll = yearlyCtaFunc(fleetAll,numCompany,startYear,elapsedYear,NShipFleet,tOpSch,v13,valueDict)
fleetAll[numCompany]['total']['atOnce'][elapsedYear] = float(vAtOnce.get())
root.quit()
root.destroy()
else:
button2['state'] = 'disabled'
def _buttonCommandNext2(root):
root.quit()
root.destroy()
def _buttonCommandCalc(fleetAll,numCompany,startYear,elapsedYear,NShipFleet,tOpSch,valueDict):
fleetAll = yearlyCtaFunc(fleetAll,numCompany,startYear,elapsedYear,NShipFleet,tOpSch,v13,valueDict)
#labelRes4['text'] = str('{:.3g}'.format(fleetAll[numCompany]['total']['cta'][elapsedYear]))
#labelRes6['text'] = str('{:.4g}'.format(fleetAll[numCompany]['total']['rocc'][elapsedYear]))
#labelRes8['text'] = str('{:.3g}'.format(fleetAll[numCompany]['total']['costFuel'][elapsedYear]))
#labelRes10['text'] = str('{:.3g}'.format(fleetAll[numCompany]['total']['g'][elapsedYear]))
button2['state'] = 'normal'
NumFleet = len(fleetAll[numCompany])
j = 0
for keyFleet in range(1,NumFleet):
if fleetAll[numCompany][keyFleet]['delivery'] <= currentYear and fleetAll[numCompany][keyFleet]['tOp'] < tOpSch:
tOpTemp = fleetAll[numCompany][keyFleet]['tOp']
if float(v13[j].get()) < 12 or float(v13[j].get()) > fleetAll[numCompany][keyFleet]['vDsgnRed'][tOpTemp]:
button2['state'] = 'disabled'
j += 1
def _buttonCommandAtOnce():
NumFleet = len(fleetAll[numCompany])
j = 0
for keyFleet in range(1,NumFleet):
if fleetAll[numCompany][keyFleet]['delivery'] <= currentYear and fleetAll[numCompany][keyFleet]['tOp'] < tOpSch:
tOpTemp = fleetAll[numCompany][keyFleet]['tOp']
#if fleetAll[numCompany][keyFleet]['v'][tOpTemp-1] == 0:
# v13[j].set(str(int(min([float(vAtOnce.get()),fleetAll[numCompany][keyFleet]['vDsgnRed'][tOpTemp]]))))
#else:
# v13[j].set(str(int(min([float(vAtOnce.get()),fleetAll[numCompany][keyFleet]['v'][tOpTemp-1]]))))
v13[j].set(str(int(min([float(vAtOnce.get()),fleetAll[numCompany][keyFleet]['vDsgnRed'][tOpTemp]]))))
j += 1
button1['state'] = 'normal'
root = Tk()
root.title('Company '+str(numCompany)+' : Service Speed in '+str(startYear+elapsedYear))
width = 1100
height = 400
placeX = root.winfo_screenwidth()/2 - width/2
placeY = root.winfo_screenheight()/2 - height/2
widgetSize = str(width)+'x'+str(height)+'+'+str(int(placeX))+'+'+str(int(placeY))
root.geometry(widgetSize)
canvas = Canvas(root, width=width, height=height)
# Frame
style = ttk.Style()
style.theme_use('default')
if numCompany == 1:
color = '#ffcccc'
elif numCompany == 2:
color = '#ffedab'
elif numCompany == 3:
color = '#a4a8d4'
root['bg'] = color
style.configure('new.TFrame', foreground='black', background=color)
style.configure('new.TLabel', foreground='black', background=color)
style.configure('new.TButton', foreground='black', background=color)
style.configure('new.TCheckbutton', foreground='black', background=color)
style.configure('new.TEntry', foreground='black', background=color)
frame = ttk.Frame(root, style='new.TFrame', padding=20)
frame.pack()
frame.bind("<Configure>", lambda e: canvas.configure(scrollregion=canvas.bbox("all")))
vbar = Scrollbar(root, orient="vertical")
vbar.config(command=canvas.yview)
vbar.pack(side=RIGHT,fill="y")
canvas['bg'] = color
canvas.create_window((placeX, placeY), window=frame, anchor=CENTER)
canvas.pack()
canvas.update_idletasks()
canvas.configure(yscrollcommand=vbar.set)
canvas.yview_moveto(0)
# Label
labelAtOnce = ttk.Label(frame, style='new.TLabel', text='Input all service speeds at once (12<=) [kt]:', padding=(5, 2))
vAtOnce = StringVar()
if elapsedYear == 0:
vAtOnce.set('18')
else:
vAtOnce.set(str(int(fleetAll[numCompany]['total']['atOnce'][elapsedYear-1])))
labelAtOnce2 = ttk.Entry(frame, style='new.TEntry', textvariable=vAtOnce)
#labelRes1 = ttk.Label(frame, style='new.TLabel',text='Assigned demand [TEU*NM]:', padding=(5, 2))
#labelRes2 = ttk.Label(frame, style='new.TLabel',text=str('{:.3g}'.format(Di)), padding=(5, 2))
#labelRes3 = ttk.Label(frame, style='new.TLabel',text='Cargo trasnsport amount [TEU*NM]:', padding=(5, 2))
#labelRes4 = ttk.Label(frame, style='new.TLabel',text='None', padding=(5, 2))
#labelRes5 = ttk.Label(frame, style='new.TLabel',text='Occupancy rate [%]:', padding=(5, 2))
#labelRes6 = ttk.Label(frame, style='new.TLabel',text='None', padding=(5, 2))
#labelRes7 = ttk.Label(frame, style='new.TLabel',text='Fuel cost [$]:', padding=(5, 2))
#labelRes8 = ttk.Label(frame, style='new.TLabel',text='None', padding=(5, 2))
#labelRes9 = ttk.Label(frame, style='new.TLabel',text='CO2 [ton]:', padding=(5, 2))
#labelRes10 = ttk.Label(frame, style='new.TLabel',text='None', padding=(5, 2))
label0 = ttk.Label(frame, style='new.TLabel',text='No.', padding=(5, 2))
label1 = ttk.Label(frame, style='new.TLabel',text='Fuel type', padding=(5, 2))
label2 = ttk.Label(frame, style='new.TLabel',text='Capacity [TEU]', padding=(5, 2))
label3 = ttk.Label(frame, style='new.TLabel',text='WPS', padding=(5, 2))
label4 = ttk.Label(frame, style='new.TLabel',text='SPS', padding=(5, 2))
label5 = ttk.Label(frame, style='new.TLabel',text='CCS', padding=(5, 2))
label6 = ttk.Label(frame, style='new.TLabel',text='Service speed (12<=) [kt]', padding=(5, 2))
label7 = ttk.Label(frame, style='new.TLabel',text='Maximum speed [kt]', padding=(5, 2))
label00 = []
label8 = []
label9 = []
label10 = []
label11 = []
label12 = []
label13 = []
label14 = []
v13 = []
currentYear = startYear+elapsedYear
NumFleet = len(fleetAll[numCompany])
for keyFleet in range(1,NumFleet):
if fleetAll[numCompany][keyFleet]['delivery'] <= currentYear and fleetAll[numCompany][keyFleet]['tOp'] < tOpSch:
label00.append(ttk.Label(frame, style='new.TLabel',text=str(keyFleet), padding=(5, 2)))
tOpTemp = fleetAll[numCompany][keyFleet]['tOp']
if fleetAll[numCompany][keyFleet]['fuelName'] == 'HFO':
label8.append(ttk.Label(frame, style='new.TLabel',text='HFO/Diesel', padding=(5, 2)))
else:
label8.append(ttk.Label(frame, style='new.TLabel',text=fleetAll[numCompany][keyFleet]['fuelName'], padding=(5, 2)))
label9.append(ttk.Label(frame, style='new.TLabel',text=str(int(fleetAll[numCompany][keyFleet]['CAPcnt'])), padding=(5, 2)))
if fleetAll[numCompany][keyFleet]['WPS']:
label10.append(ttk.Label(frame, style='new.TLabel',text='Yes', padding=(5, 2)))
else:
label10.append(ttk.Label(frame, style='new.TLabel',text='No', padding=(5, 2)))
if fleetAll[numCompany][keyFleet]['SPS']:
label11.append(ttk.Label(frame, style='new.TLabel',text='Yes', padding=(5, 2)))
else:
label11.append(ttk.Label(frame, style='new.TLabel',text='No', padding=(5, 2)))
if fleetAll[numCompany][keyFleet]['CCS']:
label12.append(ttk.Label(frame, style='new.TLabel',text='Yes', padding=(5, 2)))
else:
label12.append(ttk.Label(frame, style='new.TLabel',text='No', padding=(5, 2)))
tOpTemp = fleetAll[numCompany][keyFleet]['tOp']
v13.append(StringVar())
if fleetAll[numCompany][keyFleet]['v'][tOpTemp-1] == 0:
#v13[-1].set(str(int(fleetAll[numCompany][keyFleet]['vDsgnRed'][tOpTemp])))
v13[-1].set(str(int(18)))
else:
v13[-1].set(str(int(fleetAll[numCompany][keyFleet]['v'][tOpTemp-1])))
#v13[-1].set('None')
label13.append(ttk.Entry(frame, style='new.TEntry',textvariable=v13[-1]))
label14.append(ttk.Label(frame, style='new.TLabel',text=str(int(fleetAll[numCompany][keyFleet]['vDsgnRed'][tOpTemp])), padding=(5, 2)))
        labelExpl = ttk.Label(frame, style='new.TLabel', text='Guide: Enter a service speed for all fleets first and click "Input", then adjust individual speeds if you want. After entering all values, click "Check" and then "Next".', padding=(5, 2))
labelExpl2 = ttk.Label(frame, style='new.TLabel', text='Guide: You have no fleet. Click "Next".', padding=(5, 2))
# Button
button1 = ttk.Button(frame, style='new.TButton',text='Check', state='disabled', command=lambda: _buttonCommandCalc(fleetAll,numCompany,startYear,elapsedYear,NShipFleet,tOpSch,valueDict))
button2 = ttk.Button(frame, style='new.TButton',text='Next', state='disabled', command=lambda: _buttonCommandNext(root,fleetAll,numCompany,startYear,elapsedYear,NShipFleet,tOpSch,valueDict))
button22 = ttk.Button(frame, style='new.TButton',text='Next', command=lambda: _buttonCommandNext2(root))
button3 = ttk.Button(frame, style='new.TButton',text='Input', command=lambda: _buttonCommandAtOnce())
# Layout
if len(label8) > 0:
#labelRes1.grid(row=0, column=1)
#labelRes2.grid(row=0, column=2)
#labelRes3.grid(row=0, column=1)
#labelRes4.grid(row=0, column=2)
#labelRes5.grid(row=1, column=1)
#labelRes6.grid(row=1, column=2)
#labelRes7.grid(row=1, column=4)
#labelRes8.grid(row=1, column=5)
#labelRes9.grid(row=2, column=1)
#labelRes10.grid(row=2, column=2)
label0.grid(row=3, column=0)
label1.grid(row=3, column=1)
label2.grid(row=3, column=2)
label3.grid(row=3, column=3)
label4.grid(row=3, column=4)
label5.grid(row=3, column=5)
label6.grid(row=3, column=6)
label7.grid(row=3, column=7)
for i, j in enumerate(label8):
label00[i].grid(row=i+4, column=0)
label8[i].grid(row=i+4, column=1)
label9[i].grid(row=i+4, column=2)
label10[i].grid(row=i+4, column=3)
label11[i].grid(row=i+4, column=4)
label12[i].grid(row=i+4, column=5)
label13[i].grid(row=i+4, column=6)
label14[i].grid(row=i+4, column=7)
labelAtOnce.grid(row=i+5, column=1)
labelAtOnce2.grid(row=i+5, column=2)
button3.grid(row=i+5, column=3)
button1.grid(row=i+5, column=6)
button2.grid(row=i+5, column=7)
labelExpl.grid(row=i+6, column=0,columnspan=8)
else:
labelExpl2.grid(row=0, column=0)
button22.grid(row=0, column=1)
root.deiconify()
root.mainloop()
return fleetAll
    fleetAll = _serviceSpeedGui(fleetAll,numCompany,startYear,elapsedYear,NShipFleet,tOpSch,valueDict)
return fleetAll
def outputGuiFunc(fleetAll,startYear,elapsedYear,lastYear,tOpSch,unitDict):
def _eachFrame(frame,fig,keyi,keyList,root):
'''
def _on_key_press(event):
#print("you pressed {}".format(event.key))
key_press_handler(event, canvas, toolbar)
'''
def _buttonCommandNext(root,fig):
for keyi in keyList:
if type(fleetAll[1]['total'][keyi]) is np.ndarray:
fig[keyi].clf()
plt.close(fig[keyi])
root.quit() # stops mainloop
            root.destroy() # this is necessary on Windows to prevent "Fatal Python Error: PyEval_RestoreThread: NULL tstate"
def _buttonCommandShow(frameShow):
frameShow.tkraise()
frameEach = frame[keyi]
frameEach.grid(row=0, column=0, sticky="nsew")
# Canvas
canvas = FigureCanvasTkAgg(fig[keyi], master=frameEach)
canvas.draw()
canvas.get_tk_widget().place(relx=0.03, rely=0.1)
'''
toolbar = NavigationToolbar2Tk(canvas, root)
toolbar.update()
canvas.get_tk_widget().grid(row=1, column=0)
canvas.mpl_connect("key_press_event", _on_key_press)
'''
# Button
button1 = Button(master=frameEach, text="Next Year", command=lambda: _buttonCommandNext(root,fig))
button1.place(relx=0.22, rely=0.9)
button2 = Button(master=frameEach, text="Show", command=lambda: _buttonCommandShow(frame[v.get()]))
button2.place(relx=0.59, rely=0.9)
# List box
v = StringVar()
lb = ttk.Combobox(frameEach,textvariable=v,values=keyList)
lb.set(keyi)
lb.place(relx=0.66, rely=0.9)
# Tkinter Class
root = Tk()
root.title('Result in '+str(startYear+elapsedYear))
root.geometry('800x600+300+200')
width = 800
height = 600
placeX = root.winfo_screenwidth()/2 - width/2
placeY = root.winfo_screenheight()/2 - height/2
widgetSize = str(width)+'x'+str(height)+'+'+str(int(placeX))+'+'+str(int(placeY))
root.geometry(widgetSize)
fig = {}
frame = {}
keyList = list(fleetAll[1]['total'].keys())
for keyi in keyList:
if type(fleetAll[1]['total'][keyi]) is np.ndarray:
fig[keyi] = outputAllCompany2Func(fleetAll,startYear,elapsedYear,keyi,unitDict)
frame[keyi] = ttk.Frame(root, height=height, width=width)
for keyi in keyList:
if type(fleetAll[1]['total'][keyi]) is np.ndarray:
_eachFrame(frame,fig,keyi,keyList,root)
frame[keyList[0]].tkraise()
# root
mainloop()
def outputGui2Func(fleetAll,valueDict,startYear,elapsedYear,lastYear,tOpSch,unitDict):
def _eachFrameCO2(frame,fig,keyi,ifTotal):
def _buttonCommandShow(totalOrTilde):
if totalOrTilde == 'Total':
frameTotal[keyi].tkraise()
else:
frameComp[keyi].tkraise()
frameEach = frame[keyi]
frameEach.grid(row=0, column=1, pady=0,sticky="nsew")
# Canvas
canvas = FigureCanvasTkAgg(fig[keyi], master=frameEach)
canvas.draw()
canvas.get_tk_widget().place(relx=0.03, rely=0.1)
# Button
v1 = StringVar()
button1 = ttk.Combobox(frameEach,textvariable=v1,values=['Total','Each company'])
if ifTotal:
button1.set('Total')
else:
button1.set('Each company')
button1.place(relx=0.45, rely=0.9)
button2 = Button(master=frameEach, text="Show", command=lambda: _buttonCommandShow(v1.get()))
button2.place(relx=0.8, rely=0.9)
def _eachFrameProfit(frame,fig,keyi):
frameEach = frame[keyi]
frameEach.grid(row=0, column=0, pady=0,sticky="nsew")
# Canvas
canvas = FigureCanvasTkAgg(fig[keyi], master=frameEach)
canvas.draw()
canvas.get_tk_widget().place(relx=0.03, rely=0.1)
def _eachFrameIndex(frame,fig,keyi):
frameEach = frame[keyi]
frameEach.grid(row=1, column=0, pady=0,sticky="nsew")
# Canvas
canvas = FigureCanvasTkAgg(fig[keyi], master=frameEach)
canvas.draw()
canvas.get_tk_widget().place(relx=0.03, rely=0.1)
def _eachFrameSel(frame,fig,keyi,keyList,ifSelTotal):
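        # selectable plot panel: one combobox picks the metric, a second toggles between the total and per-company figures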
def _buttonCommandShow(keyi,ifTotal):
if ifTotal == 'Total':
frameSelTotal[keyi].tkraise()
else:
frameSel[keyi].tkraise()
def _buttonCommandNext(root,fig):
for keyi in keyList:
if type(fleetAll[1]['total'][keyi]) is np.ndarray:
fig[keyi].clf()
figTotal[keyi].clf()
plt.close(fig[keyi])
root.quit() # stops mainloop
            root.destroy() # this is necessary on Windows to prevent Fatal Python Error: PyEval_RestoreThread: NULL tstate
frameEach = frame[keyi]
frameEach.grid(row=1, column=1, pady=0, sticky="nsew")
# Canvas
canvas = FigureCanvasTkAgg(fig[keyi], master=frameEach)
canvas.draw()
canvas.get_tk_widget().place(relx=0.03, rely=0.1)
# List box
v1 = StringVar()
lb = ttk.Combobox(frameEach,textvariable=v1,values=keyList)
lb.set(keyi)
lb.place(relx=0.45, rely=0.9)
# Button
v2 = StringVar()
button1 = ttk.Combobox(frameEach,textvariable=v2,values=['Total','Each company'])
if ifSelTotal:
button1.set('Total')
else:
button1.set('Each company')
button1.place(relx=0.02, rely=0.9)
button2 = Button(master=frameEach, text="Show", command=lambda: _buttonCommandShow(v1.get(),v2.get()))
button2.place(relx=0.8, rely=0.9)
buttonNext = Button(master=root, text="Next Year", command=lambda: _buttonCommandNext(root,fig))
buttonNext.place(relx=0.9, rely=0.9)
# Tkinter Class
root = Tk()
root.title('Result in '+str(startYear+elapsedYear))
width = root.winfo_screenwidth()-400
height = root.winfo_screenheight()-80
placeX = 0
placeY = 0
widgetSize = str(int(width))+'x'+str(int(height))+'+'+str(int(placeX))+'+'+str(int(placeY))
root.geometry(widgetSize)
fig = {}
frameComp = {}
frameTotal = {}
frameSel = {}
frameSelTotal = {}
figTotal = {}
removeList = []
keyList = list(fleetAll[1]['total'].keys())
figWidth,figHeight = width/2-50, height/2
for keyi in keyList:
if type(fleetAll[1]['total'][keyi]) is np.ndarray:
fig[keyi] = outputAllCompany2Func(fleetAll,valueDict,startYear,elapsedYear,keyi,unitDict,figWidth/100-1,figHeight/100-1)
figTotal[keyi] = outputAllCompanyTotalFunc(fleetAll,valueDict,startYear,elapsedYear,keyi,unitDict,figWidth/100-1,figHeight/100-1)
frameComp[keyi] = ttk.Frame(root, height=figHeight, width=figWidth)
frameTotal[keyi] = ttk.Frame(root, height=figHeight, width=figWidth)
frameSel[keyi] = ttk.Frame(root, height=figHeight, width=figWidth)
frameSelTotal[keyi] = ttk.Frame(root, height=figHeight, width=figWidth)
else:
removeList.append(keyi)
for keyi in removeList:
keyList.remove(keyi)
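    # 2x2 dashboard layout: 'profit' (top-left), 'g' (top-right), 'Idx' (bottom-left), and a user-selectable metric (bottom-right)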
_eachFrameCO2(frameComp,fig,'g',False)
_eachFrameCO2(frameTotal,figTotal,'g',True)
_eachFrameProfit(frameComp,fig,'profit')
_eachFrameIndex(frameComp,fig,'Idx')
for keyi in keyList:
if type(fleetAll[1]['total'][keyi]) is np.ndarray:
_eachFrameSel(frameSel,fig,keyi,keyList,False)
_eachFrameSel(frameSelTotal,figTotal,keyi,keyList,True)
#frame[keyList[0]].tkraise()
# root
mainloop()
return fleetAll
def outputEachCompanyFunc(fleetAll,numCompany,startYear,elapsedYear,lastYear,tOpSch,decisionListName):
fig, ax = plt.subplots(2, 2, figsize=(10.0, 10.0))
plt.subplots_adjust(wspace=0.4, hspace=0.6)
SPlot = fleetAll[numCompany]['total']['S'][:elapsedYear+1]
ax[0,0].plot(fleetAll['year'][:elapsedYear+1],fleetAll[numCompany]['total']['S'][:elapsedYear+1])
ax[0,0].set_title(r"$ ( \Delta C_{shipping} - \alpha g) \ / \ cta$")
ax[0,0].set_xlabel('Year')
ax[0,0].yaxis.set_major_formatter(ScalarFormatter(useMathText=True))
ax[0,0].ticklabel_format(style="sci", axis="y",scilimits=(0,0))
#ax[0].set_ylabel('Year')
gTildePlot = fleetAll[numCompany]['total']['gTilde'][:elapsedYear+1]*1000000
ax[1,0].plot(fleetAll['year'][:elapsedYear+1],fleetAll[numCompany]['total']['gTilde'][:elapsedYear+1]*1000000)
ax[1,0].set_title("g / cta")
ax[1,0].set_xlabel('Year')
    ax[1,0].set_ylabel(r'g / (TEU $\cdot$ NM)')
#ax[1,0].yaxis.set_major_formatter(ScalarFormatter(useMathText=True))
ax[1,0].ticklabel_format(style="sci", axis="y",scilimits=(0,0))
gPlot = fleetAll[numCompany]['total']['g'][:elapsedYear+1]/1000000
ax[0,1].plot(fleetAll['year'][:elapsedYear+1],fleetAll[numCompany]['total']['g'][:elapsedYear+1]/1000000)
ax[0,1].set_title("g")
ax[0,1].set_xlabel('Year')
ax[0,1].set_ylabel('Millions ton')
ax[0,1].yaxis.set_major_formatter(ScalarFormatter(useMathText=True))
ax[0,1].ticklabel_format(style="sci", axis="y",scilimits=(0,0))
dcostShippingTildePlot = fleetAll[numCompany]['total']['dcostShippingTilde'][:elapsedYear+1]
ax[1,1].plot(fleetAll['year'][:elapsedYear+1],fleetAll[numCompany]['total']['dcostShippingTilde'][:elapsedYear+1])
ax[1,1].set_title("$\Delta C_{shipping} \ / \ cta$")
ax[1,1].set_xlabel('Year')
ax[1,1].set_ylabel('\$ / (TEU $\cdot$ NM)')
ax[1,1].yaxis.set_major_formatter(ScalarFormatter(useMathText=True))
ax[1,1].ticklabel_format(style="sci", axis="y",scilimits=(0,0))
#if i == 1:
# ax2.bar(fleetAll['year'][:elapsedYear+1], simu)
#else:
# ax2.bar(fleetAll['year'][:elapsedYear+1], simu, bottom=simuSum)
#fig.tight_layout()
if os.name == 'nt':
plt.show()
elif os.name == 'posix':
plt.savefig("Company"+str(numCompany)+decisionListName+".jpg")
np.savetxt("Company"+str(numCompany)+decisionListName+'_S.csv',SPlot)
np.savetxt("Company"+str(numCompany)+decisionListName+'_gTilde.csv',gTildePlot)
np.savetxt("Company"+str(numCompany)+decisionListName+'_g.csv',gPlot)
np.savetxt("Company"+str(numCompany)+decisionListName+'_dcostShippingTilde.csv',dcostShippingTildePlot)
def outputAllCompanyFunc(fleetAll,startYear,elapsedYear,lastYear,tOpSch,unitDict):
currentYear = startYear+elapsedYear
if elapsedYear > 0:
year = fleetAll['year'][:elapsedYear+1]
fig, axes = plt.subplots(3, 6, figsize=(16.0, 9.0))
plt.subplots_adjust(wspace=0.4, hspace=0.6)
for numCompany in range(1,4):
for ax, keyi in zip(fig.axes, fleetAll[numCompany]['total'].keys()):
ax.plot(year,fleetAll[numCompany]['total'][keyi][:elapsedYear+1],label="Company"+str(numCompany))
ax.set_title(keyi)
ax.set_xlabel('Year')
ax.legend()
#ax.ticklabel_format(style="sci", axis="y",scilimits=(0,0))
ax.set_ylabel(unitDict[keyi])
ax.title.set_size(10)
ax.xaxis.label.set_size(10)
#ax.get_xaxis().get_major_formatter().set_useOffset(False)
#ax.get_xaxis().set_major_locator(MaxNLocator(integer=True))
ax.set_xticks(year)
ax.yaxis.label.set_size(10)
else:
fig, axes = plt.subplots(3, 6, figsize=(16.0, 9.0))
plt.subplots_adjust(wspace=0.4, hspace=0.6)
for numCompany in range(1,4):
for ax, keyi in zip(fig.axes, fleetAll[numCompany]['total'].keys()):
ax.scatter(startYear,fleetAll[numCompany]['total'][keyi][0],label="Company"+str(numCompany))
ax.set_title(keyi)
ax.set_xlabel('Year')
ax.legend()
#ax.ticklabel_format(style="sci", axis="y",scilimits=(0,0))
ax.set_ylabel(unitDict[keyi])
ax.title.set_size(10)
ax.xaxis.label.set_size(10)
#ax.set_xticks(np.array([startYear-1,startYear,startYear+1]))
ax.set_xticks(np.array([startYear]))
ax.yaxis.label.set_size(10)
'''
if os.name == 'nt':
plt.show()
elif os.name == 'posix':
plt.savefig("TotalValues.jpg")
for j, listName in enumerate(decisionListNameList,1):
valueName = []
outputList = []
for i, keyi in enumerate(fleetAll[j]['total'].keys(),1):
valueName.append(keyi)
outputList.append(fleetAll[j]['total'][keyi][:elapsedYear+1])
outputData = np.stack(outputList,1)
outputDf = pd.DataFrame(data=outputData, index=year, columns=valueName, dtype='float')
outputDf.to_csv("Company"+str(j)+'_'+listName+'.csv')
'''
'''
figDict = {}
for j, listName in enumerate(decisionListNameList,1):
for keyFleet in fleetAll[j].keys():
valueName = []
outputList = []
if type(keyFleet) is int:
for keyValue in fleetAll[j][keyFleet].keys():
if type(fleetAll[j][keyFleet][keyValue]) is np.ndarray:
valueName.append(keyValue)
#if keyFleet == 1 and j == 1:
# fig, ax = plt.subplots(1, 1, figsize=(12.0, 8.0))
# figDict.setdefault(keyValue,ax)
plotArr = np.zeros(lastYear-startYear+1)
if fleetAll[j][keyFleet]['delivery'] >= startYear:
plotArr[fleetAll[j][keyFleet]['delivery']-startYear:fleetAll[j][keyFleet]['delivery']-startYear+fleetAll[j][keyFleet]['tOp']] = fleetAll[j][keyFleet][keyValue][:fleetAll[j][keyFleet]['tOp']]
else:
plotArr[:tOpSch-startYear+fleetAll[j][keyFleet]['delivery']] = fleetAll[j][keyFleet][keyValue][startYear-fleetAll[j][keyFleet]['delivery']:fleetAll[j][keyFleet]['tOp']]
outputList.append(plotArr)
#figDict[keyValue].plot(year,plotArr,label="Fleet"+str(keyFleet))
#figDict[keyValue].set_title(keyValue)
#figDict[keyValue].set_xlabel('Year')
#figDict[keyValue].legend()
#figDict[keyValue].ticklabel_format(style="sci", axis="y",scilimits=(0,0))
#figDict[keyValue].set_ylabel(unitDict[keyValue])
#if j == len(decisionListNameList) and keyFleet == len(list(fleetAll[j].keys()))-1 and os.name == 'nt':
# plt.show()
#elif j == len(decisionListNameList) and keyFleet == len(list(fleetAll[j].keys()))-1 and os.name == 'posix':
# plt.savefig(str(keyValue)+".jpg")
if os.name == 'posix':
outputData = np.stack(outputList,1)
outputDf = pd.DataFrame(data=outputData, index=year, columns=valueName, dtype='float')
outputDf.to_csv("Company"+str(j)+'_'+listName+'_'+'Fleet'+str(keyFleet)+'.csv')
'''
return fig
def outputAllCompany2Func(fleetAll,valueDict,startYear,elapsedYear,keyi,unitDict,figWidth,figHeight):
plt.rcParams.update({'figure.max_open_warning': 0})
currentYear = startYear+elapsedYear
#fig, ax = plt.subplots(1, 1, figsize=(figWidth, figHeight))
fig = Figure(figsize=(figWidth, figHeight))
ax = fig.add_subplot(1,1,1)
#plt.subplots_adjust(wspace=0.4, hspace=0.6)
ticArr = np.array([2020,2025,2030,2035,2040,2045,2050])
if elapsedYear > 0:
year = fleetAll['year'][:elapsedYear+1]
for numCompany in range(1,4):
if numCompany == 1:
color = 'tomato'
elif numCompany == 2:
color = 'gold'
elif numCompany == 3:
color = 'royalblue'
ax.plot(year,fleetAll[numCompany]['total'][keyi][:elapsedYear+1],color=color, marker=".",label="Company"+str(numCompany))
ax.set_title(keyi)
ax.set_xlabel('Year')
ax.ticklabel_format(style="sci", axis="y",scilimits=(0,0))
ax.set_ylabel(unitDict[keyi])
#ax.title.set_size(10)
#ax.xaxis.label.set_size(10)
#ax.get_xaxis().get_major_formatter().set_useOffset(False)
#ax.get_xaxis().set_major_locator(MaxNLocator(integer=True))
ax.set_xticks(ticArr)
#ax.yaxis.label.set_size(10)
else:
for numCompany in range(1,4):
if numCompany == 1:
color = 'tomato'
elif numCompany == 2:
color = 'gold'
elif numCompany == 3:
color = 'royalblue'
ax.scatter(startYear,fleetAll[numCompany]['total'][keyi][0],color=color,label="Company"+str(numCompany))
ax.set_title(keyi)
ax.set_xlabel('Year')
ax.ticklabel_format(style="sci", axis="y",scilimits=(0,0))
ax.set_ylabel(unitDict[keyi])
#ax.title.set_size(10)
#ax.xaxis.label.set_size(10)
#ax.set_xticks(np.array([startYear-1,startYear,startYear+1]))
ax.set_xticks(np.array([startYear]))
#ax.yaxis.label.set_size(10)
if keyi == 'g':
IMOgoal = np.full(ticArr.shape,valueDict['IMOgoal']/3)
color = 'olivedrab'
ax.plot(ticArr,IMOgoal,color=color, marker=".",label="IMO goal")
y_min, y_max = ax.get_ylim()
ax.set_ylim(0, y_max)
ax.legend()
return fig
def outputAllCompanyTotalFunc(fleetAll,valueDict,startYear,elapsedYear,keyi,unitDict,figWidth,figHeight):
plt.rcParams.update({'figure.max_open_warning': 0})
currentYear = startYear+elapsedYear
#fig, ax = plt.subplots(1, 1, figsize=(figWidth, figHeight))
fig = Figure(figsize=(figWidth, figHeight))
ax = fig.add_subplot(1,1,1)
#plt.subplots_adjust(wspace=0.4, hspace=0.6)
ticArr = np.array([2020,2025,2030,2035,2040,2045,2050])
if elapsedYear >= 0:
year = fleetAll['year'][:elapsedYear+1]
for numCompany in range(1,4):
if numCompany == 1:
color = 'tomato'
elif numCompany == 2:
color = 'gold'
elif numCompany == 3:
color = 'royalblue'
#ax.plot(year,fleetAll[numCompany]['total'][keyi][:elapsedYear+1],color=color, marker=".",label="Company"+str(numCompany))
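            # stacked bar chart: draw each company's series on top of the running total accumulated in barArray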
tempArr = copy.deepcopy(fleetAll[numCompany]['total'][keyi][:elapsedYear+1])
if numCompany == 1:
barArray = tempArr
ax.bar(year, barArray, color=color, label="Company"+str(numCompany))
else:
ax.bar(year, tempArr, bottom=barArray, color=color, label="Company"+str(numCompany))
barArray += tempArr
ax.set_title(keyi)
ax.set_xlabel('Year')
ax.ticklabel_format(style="sci", axis="y",scilimits=(0,0))
ax.set_ylabel(unitDict[keyi])
#ax.title.set_size(10)
#ax.xaxis.label.set_size(10)
#ax.get_xaxis().get_major_formatter().set_useOffset(False)
#ax.get_xaxis().set_major_locator(MaxNLocator(integer=True))
ax.set_xticks(ticArr)
#ax.yaxis.label.set_size(10)
'''else:
for numCompany in range(1,4):
ax.scatter(startYear,fleetAll[numCompany]['total'][keyi][0],label="Company"+str(numCompany))
ax.set_title(keyi)
ax.set_xlabel('Year')
ax.legend()
ax.ticklabel_format(style="sci", axis="y",scilimits=(0,0))
ax.set_ylabel(unitDict[keyi])
#ax.title.set_size(10)
#ax.xaxis.label.set_size(10)
#ax.set_xticks(np.array([startYear-1,startYear,startYear+1]))
ax.set_xticks(np.array([startYear]))
#ax.yaxis.label.set_size(10)'''
if keyi == 'g':
IMOgoal = np.full(ticArr.shape,valueDict['IMOgoal'])
color = 'olivedrab'
ax.plot(ticArr,IMOgoal,color=color, marker=".",label="IMO goal")
y_min, y_max = ax.get_ylim()
ax.set_ylim(0, y_max)
ax.legend()
return fig
def outputCsvFunc(fleetAll,startYear,elapsedYear,lastYear,tOpSch):
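    # rebuild the results directory and write each company's yearly total time series to CSV files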
resPath = Path(__file__).parent
resPath /= '../results'
    if os.path.exists(resPath):
        shutil.rmtree(resPath)
    os.mkdir(resPath)
year = fleetAll['year'][:elapsedYear+1]
tOps = np.arange(tOpSch)
for numCompany in range(1,4):
valueName = []
outputList = []
for keyi in fleetAll[numCompany]['total'].keys():
if type(fleetAll[numCompany]['total'][keyi]) is np.ndarray:
valueName.append(keyi)
outputList.append(fleetAll[numCompany]['total'][keyi][:elapsedYear+1])
outputData1 = np.stack(outputList,1)
outputDf1 =
|
pd.DataFrame(data=outputData1, index=year, columns=valueName, dtype='float')
|
pandas.DataFrame
|
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
# Load The Data Into DataFrame with Pandas
iris = load_iris()
X =
|
pd.DataFrame(iris.data)
|
pandas.DataFrame
|
from typing import Tuple
import tensorflow as tf
from pandas import DataFrame
import pandas as pd
from sklearn.model_selection import train_test_split
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Input, Concatenate, Dense, Embedding, Dropout, BatchNormalization, Dot
from tensorflow.keras.models import Model
from tensorflow.python.data.ops.dataset_ops import DatasetV2
from tensorflow.python.distribute.distribute_lib import Strategy
from src.models.RModel import RModel
class NeuMFModel(RModel):
def __init__(self):
super().__init__('NeuMFModel')
    def readData(self, path, rowLimit) -> Tuple[int, int, DataFrame]:
file = self.dataStore.openFile(path=path, mode='r')
df = pd.read_csv(file, nrows=rowLimit)
transactionDf = df.drop(['MATERIAL', 'QUANTITY'], axis=1)
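        # customer and product IDs are assumed to be zero-based integers, so max ID + 1 presumably gives the embedding vocabulary sizes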
numUser = transactionDf.CUSTOMER_ID.max() + 1
numItem = transactionDf.PRODUCT_ID.max() + 1
return numItem, numUser, transactionDf
def prepareToTrain(self, distributedConfig, path, rowLimit) -> Tuple[DatasetV2, DatasetV2, list]:
numItem, numUser, transactionDf = self.readData(path, rowLimit)
trainSplit, testSplit = train_test_split(transactionDf, test_size=self.testSize)
products = testSplit.PRODUCT_ID.unique().tolist()
users = testSplit.CUSTOMER_ID.unique().tolist()
|
pd.DataFrame({self.PRODUCT_ID: products})
|
pandas.DataFrame
|
# ----------------------------------------------------------------------------
# Copyright (c) 2022, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import pandas as pd
import numpy as np
from skbio.stats.distance import DistanceMatrix
from qiime2.plugin.testing import TestPluginBase
from qiime2.plugin import ValidationError
from qiime2 import Metadata
from q2_fmt._engraftment import group_timepoints
from q2_fmt._stats import wilcoxon_srt, mann_whitney_u
from q2_fmt._examples import (faithpd_timedist_factory,
faithpd_refdist_factory)
from q2_fmt._validator import (validate_all_dist_columns_present,
validate_unique_subjects_within_group)
class TestBase(TestPluginBase):
package = 'q2_fmt.tests'
def setUp(self):
super().setUp()
self.md_beta = Metadata.load(self.get_data_path(
'sample_metadata_donors.tsv'))
self.md_alpha = Metadata.load(self.get_data_path(
'sample_metadata_alpha_div.tsv'))
self.dm = DistanceMatrix.read(self.get_data_path(
'dist_matrix_donors.tsv')).to_series()
self.alpha = pd.read_csv(self.get_data_path('alpha_div.tsv'),
sep='\t', index_col=0, squeeze=True)
self.faithpd_timedist = faithpd_timedist_factory().view(pd.DataFrame)
self.faithpd_refdist = faithpd_refdist_factory().view(pd.DataFrame)
class ErrorMixins:
def test_with_time_column_input_not_in_metadata(self):
with self.assertRaisesRegex(ValueError,
'time_column.*foo.*metadata'):
group_timepoints(diversity_measure=self.div,
metadata=self.md,
time_column='foo',
reference_column='relevant_donor',
control_column='control')
def test_with_reference_column_input_not_in_metadata(self):
with self.assertRaisesRegex(ValueError,
'reference_column.*foo.*metadata'):
group_timepoints(diversity_measure=self.div,
metadata=self.md,
time_column='days_post_transplant',
reference_column='foo',
control_column='control')
def test_with_control_column_input_not_in_metadata(self):
with self.assertRaisesRegex(ValueError,
'control_column.*foo.*metadata'):
group_timepoints(diversity_measure=self.div,
metadata=self.md,
time_column='days_post_transplant',
reference_column='relevant_donor',
control_column='foo')
def test_with_non_numeric_time_column(self):
with self.assertRaisesRegex(ValueError,
'time_column.*categorical.*numeric'):
group_timepoints(diversity_measure=self.div,
metadata=self.md,
time_column='non_numeric_time_column',
reference_column='relevant_donor',
control_column='control')
class TestAlphaErrors(TestBase, ErrorMixins):
def setUp(self):
super().setUp()
self.div = self.alpha
self.md = self.md_alpha
class TestBetaErrors(TestBase, ErrorMixins):
def setUp(self):
super().setUp()
self.div = self.dm
self.md = self.md_beta
class TestGroupTimepoints(TestBase):
# Beta Diversity (Distance Matrix) Test Cases
def test_beta_dists_with_donors_and_controls(self):
exp_time_df = pd.DataFrame({
'id': ['sampleA', 'sampleB', 'sampleC', 'sampleD', 'sampleE'],
'measure': [0.45, 0.40, 0.28, 0.78, 0.66],
'group': [7.0, 7.0, 9.0, 11.0, 11.0]
})
exp_ref_df = pd.DataFrame({
'id': ['donor1..donor2', 'donor1..donor3', 'donor2..donor3',
'sampleB..sampleC', 'sampleB..sampleD', 'sampleC..sampleD'],
'measure': [0.24, 0.41, 0.74, 0.37, 0.44, 0.31],
'group': ['reference', 'reference', 'reference',
'control1', 'control1', 'control1'],
'A': ['donor1', 'donor1', 'donor2',
'sampleB', 'sampleB', 'sampleC'],
'B': ['donor2', 'donor3', 'donor3',
'sampleC', 'sampleD', 'sampleD']
})
time_df, ref_df = group_timepoints(diversity_measure=self.dm,
metadata=self.md_beta,
time_column='days_post_transplant',
reference_column='relevant_donor',
control_column='control')
pd.testing.assert_frame_equal(time_df, exp_time_df)
pd.testing.assert_frame_equal(ref_df, exp_ref_df)
def test_beta_dists_with_donors_controls_and_subjects(self):
exp_time_df = pd.DataFrame({
'id': ['sampleA', 'sampleB', 'sampleC', 'sampleD', 'sampleE'],
'measure': [0.45, 0.40, 0.28, 0.78, 0.66],
'group': [7.0, 7.0, 9.0, 11.0, 11.0],
'subject': ['subject1', 'subject2',
'subject1', 'subject1', 'subject2']
})
exp_ref_df = pd.DataFrame({
'id': ['donor1..donor2', 'donor1..donor3', 'donor2..donor3',
'sampleB..sampleC', 'sampleB..sampleD', 'sampleC..sampleD'],
'measure': [0.24, 0.41, 0.74, 0.37, 0.44, 0.31],
'group': ['reference', 'reference', 'reference',
'control1', 'control1', 'control1'],
'A': ['donor1', 'donor1', 'donor2',
'sampleB', 'sampleB', 'sampleC'],
'B': ['donor2', 'donor3', 'donor3',
'sampleC', 'sampleD', 'sampleD']
})
time_df, ref_df = group_timepoints(diversity_measure=self.dm,
metadata=self.md_beta,
time_column='days_post_transplant',
reference_column='relevant_donor',
control_column='control',
subject_column='subject')
|
pd.testing.assert_frame_equal(time_df, exp_time_df)
|
pandas.testing.assert_frame_equal
|
import os
import pandas as pd
import module_debug
import importlib as implib
from typing import List, Dict, Tuple, Union
import module_dataset_analysis; implib.reload(module_dataset_analysis)
import module_io; implib.reload(module_io)
p = print
#____________________________________________________________________________________________________________________________________
def rename_hrv(df: pd.DataFrame
) -> pd.DataFrame:
"""
:param df: df with standardized glyco features
    :return: dataframe with renamed columns
"""
# get column names
column_names = df.columns.to_list()
# rename columns
for i in range(len(column_names)):
column_names[i] = column_names[i].replace('(‰)', '')\
.replace(' Prima', '-1').replace(' Secunda', '-2').replace(' Tertia', '-3')
# assign column names
df.columns = column_names
return df
#____________________________________________________________________________________________________________________________________
def read_test_ecg_lt3c(source_dir: str,
intervals: List[str],
drop_frequency_domain: bool = True,
stop_after: Union[int, float] = float('inf'),
write_dir: str = None) -> Union[Tuple[dict, pd.DataFrame], None]:
"""
NOTE: to be refactored to work with all datasets
Parse files downloaded from test.ecg
:param source_dir: dir of test.ecg files
    :param intervals: the intervals for which to parse the files, with corresponding time unit. ex: ['1h', '2h', ..., '24h']
:param drop_frequency_domain: if True, don't consider frequency domain parameters
:param stop_after: stop after certain amount of patients if debugging or some other rechecking is needed
:param write_dir: write directory
:return: if no write directory is specified, the parsed dataframes are returned together with the samples and patients info
"""
# initialize paths and files
source = source_dir
patients_classification_path = 'C:\\Users\\ilija\\Pycharm Projects\\GLYCO\\DATA\\Classification_of_Patients\\NewClasses__ND_GD_BD.csv'
patients_classification = pd.read_csv(patients_classification_path)
patients_hba1c_path = 'C:\\Users\\ilija\\Pycharm Projects\\GLYCO\\DATA\\Clinical_Records\\PatientID_Hba1C_06102020.csv'
patients_hba1c = pd.read_csv(patients_hba1c_path)
# print classes info
module_debug.line()
p('Total number of patients in the classification file:')
p(patients_classification['Patient_ID'].nunique())
p('Number of non-diabetic patients:')
p(patients_classification[patients_classification['Class'] == 'ND']['Patient_ID'].nunique())
p('Number of diabetic patients:')
p(patients_classification[patients_classification['Class'] != 'ND']['Patient_ID'].nunique())
p('Number of diabetic patients with good regulation:')
p(patients_classification[patients_classification['Class'] == 'GD']['Patient_ID'].nunique())
p('Number of diabetic patients with bad regulation:')
p(patients_classification[patients_classification['Class'] == 'BD']['Patient_ID'].nunique())
module_debug.line()
# print patients info
module_debug.line()
p('Number of patients in source directory: ')
p(len(os.listdir(source)))
module_debug.line()
# initialize inspection dict, that makes sure everything works as it should
inspection_dict = dict(
total_samples = 0,
all_patients = [i[:-5] for i in os.listdir(source)]
)
for interval in intervals:
        inspection_dict[f'samples_{interval}'] = 0
        inspection_dict[f'patients_without_{interval}'] = list()
        inspection_dict[f'patients_with_{interval}'] = list()
# dict for final files
final_datasets_dict = dict()
# loop control
is_initial_patient = True
# iterate file patients
for file_name in os.listdir(source):
# parse patient's ID
patient_id = file_name[:-5]
p(patient_id)
# read individual xlsx file
patient_records = pd.read_excel(f'{source}\\{file_name}', skiprows = 4)
patient_records.drop(labels = ['External Link'], axis = 1, inplace = True)
# remove unwanted columns
if drop_frequency_domain:
patient_records.drop(labels = ['HF', 'LF', 'LF/HF', 'ULF', 'VLF'], axis = 1, inplace = True)
# assign Patient_ID, hba1c and regulation
patient_records['Patient_ID'] = patient_id
patient_records['Class'] = patients_classification[patients_classification['Patient_ID'] == patient_id]['Class'].iloc[0]
patient_records['HbA1C(%)'] = patients_hba1c[patients_hba1c['Patient_ID'] == patient_id]['HbA1C(%)'].iloc[0]
# split the dataset by hour
patient_split_datasets_dict = dict()
records_splits = list()
# filter samples for current interval
for interval in intervals:
patient_split_datasets_dict[f'{interval}'] = patient_records[patient_records["Dataset Name"].str.startswith(f"Period: {interval}")]
records_splits.append(patient_split_datasets_dict[f'{interval}'])
# update inspection for total samples
inspection_dict['total_samples'] += patient_records.shape[0]
for interval in intervals:
# update inspection for interval samples
inspection_dict[f'samples_{interval}'] += patient_split_datasets_dict[f'{interval}'].shape[0]
# if df is empty, add patient to the ones without
if patient_split_datasets_dict[f'{interval}'].shape[0] == 0: inspection_dict[f'patients_without_{interval}'].append(patient_id)
else: inspection_dict[f'patients_with_{interval}'].append(patient_id)
assert patient_records.shape[0] == sum([df_iter.shape[0] for df_iter in patient_split_datasets_dict.values()]), 'Incorrect split on intervals'
# if split is good, don't need patient_records
del patient_records
# rearrange dataframes
for records_split_iter in records_splits:
# drop dataset name
records_split_iter.drop(labels = ['Dataset Name'], axis = 1, inplace = True)
# rearange columns
for feature_to_move_end in ['Start Date', 'End Date']:
records_split_iter.insert(records_split_iter.shape[1] - 1, feature_to_move_end, records_split_iter.pop(feature_to_move_end))
for feature_to_move_start in ['HbA1C(%)', 'Class', 'Patient_ID']:
records_split_iter.insert(0, feature_to_move_start, records_split_iter.pop(feature_to_move_start))
# add to datasets that will be saved
if is_initial_patient:
for interval in intervals:
final_datasets_dict[f'{interval}'] = patient_split_datasets_dict[f'{interval}']
is_initial_patient = False
else:
for interval in intervals:
final_datasets_dict[f'{interval}'] = final_datasets_dict[f'{interval}'].append(patient_split_datasets_dict[f'{interval}'],
ignore_index= True)
# stopping after certain amount of patients
stop_after -= 1
if stop_after <= 0: break
samples_list = list()
for interval in intervals:
samples_dict = module_dataset_analysis.quantitative_analysis(df = final_datasets_dict[f'{interval}'],
dataset_name = interval,
class_feature = 'Class',
classes = ['ND', 'GD', 'BD'])
samples_list.append(samples_dict)
samples_df =
|
pd.DataFrame(samples_list)
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
from scipy import stats as sps
from scipy.interpolate import interp1d
from matplotlib import pyplot as plt
from matplotlib.dates import date2num, num2date
from matplotlib import dates as mdates
from matplotlib import ticker
from matplotlib.colors import ListedColormap
from matplotlib.patches import Patch
from PIL import Image
from datetime import datetime
def calculo_rt(st, casos_panama):
st.title('Covid19 Panama: Calculo de $R_t$')
# st.subheader('Datos')
# st.dataframe(casos_panama.style.highlight_max(axis=0))
# st.write(casos_panama)
st.subheader('Casos por dia')
cases = casos_panama.sort_values('fecha')[['fecha', 'casos_totales']]
cases['date'] = pd.to_datetime(cases.fecha, format='%Y-%m-%d')
cases['casos_acumulados'] = cases.casos_totales.astype(float)
cases = cases.set_index('date')
cases =
|
pd.Series(cases['casos_acumulados'])
|
pandas.Series
|
import pandas as pd
from datetime import datetime
from sapextractor.utils import constants
def apply(dataframe, dt_column, tm_column, target_column):
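    # build a single timestamp column by concatenating the date and time columns (formatting them to strings first if needed)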
try:
if str(dataframe[dt_column].dtype) != "object":
print("a")
dataframe[dt_column] = dataframe[dt_column].apply(lambda x: x.strftime(constants.DATE_FORMAT_INTERNAL))
if str(dataframe[tm_column].dtype) != "object":
print("b")
dataframe[tm_column] = dataframe[tm_column].apply(lambda x: x.strftime(constants.HOUR_FORMAT_INTERNAL))
dataframe[target_column] = dataframe[dt_column] + " " + dataframe[tm_column]
print("c")
dataframe[target_column] =
|
pd.to_datetime(dataframe[target_column], format=constants.TIMESTAMP_FORMAT)
|
pandas.to_datetime
|
from threading import Thread
import pandas as pd
import time
import os
from .util import load, dump, is_valid_filename
from .core import MiraiSeeker, Ensembler, MiraiModel
class HyperSearchSpace:
"""
This class represents the search space of hyperparameters for a base model.
:type model_class: type
:param model_class: Any class that represents a statistical model. It must
implement the methods ``fit`` as well as ``predict`` for regression or
``predict_proba`` for classification problems.
:type id: str
:param id: The id that will be associated with the models generated within
this search space.
:type parameters_values: dict, optional, default=None
:param parameters_values: A dictionary containing lists of values to be
tested as parameters when instantiating objects of ``model_class``.
:type parameters_rules: function, optional, default=lambda x: None
:param parameters_rules: A function that constrains certain parameters because
of the values assumed by others. It must receive a dictionary as input and
doesn't need to return anything. Not used if ``parameters_values`` has no
keys.
.. warning::
Make sure that the parameters accessed in ``parameters_rules`` exist
in the set of parameters defined on ``parameters_values``, otherwise
the engine will attempt to access an invalid key.
:raises: ``NotImplementedError``, ``TypeError``, ``ValueError``
:Example:
::
from sklearn.linear_model import LogisticRegression
from miraiml import HyperSearchSpace
def logistic_regression_parameters_rules(parameters):
if parameters['solver'] in ['newton-cg', 'sag', 'lbfgs']:
parameters['penalty'] = 'l2'
hyper_search_space = HyperSearchSpace(
model_class = LogisticRegression,
id = 'Logistic Regression',
parameters_values = {
'penalty': ['l1', 'l2'],
'C': np.arange(0.1, 2, 0.1),
'max_iter': np.arange(50, 300),
'solver': ['newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'],
'random_state': [0]
},
parameters_rules = logistic_regression_parameters_rules
)
.. warning::
**Do not** allow ``random_state`` assume multiple values. If ``model_class``
has a ``random_state`` parameter, force the engine to always choose the
same value by providing a list with a single element.
Allowing ``random_state`` to assume multiple values will confuse the engine
because the scores will be unstable even with the same choice of
hyperparameters and features.
"""
def __init__(self, model_class, id, parameters_values=None,
parameters_rules=lambda x: None):
self.__validate__(model_class, id, parameters_values, parameters_rules)
self.model_class = model_class
self.id = id
if parameters_values is None:
parameters_values = {}
self.parameters_values = parameters_values
self.parameters_rules = parameters_rules
@staticmethod
def __validate__(model_class, id, parameters_values, parameters_rules):
"""
Validates the constructor parameters.
"""
dir_model_class = dir(model_class)
if 'fit' not in dir_model_class:
raise NotImplementedError('model_class must implement fit')
if not isinstance(id, str):
raise TypeError('id must be a string')
if not is_valid_filename(id):
raise ValueError('Invalid id: {}'.format(id))
if parameters_values is not None and not isinstance(parameters_values, dict):
raise TypeError('parameters_values must be None or a dictionary')
if not callable(parameters_rules):
raise TypeError('parameters_rules must be a function')
class Config:
"""
This class defines the general behavior of the engine.
:type local_dir: str
:param local_dir: The name of the folder in which the engine will save its
internal files. If the directory doesn't exist, it will be created
automatically. ``..`` and ``/`` are not allowed to compose ``local_dir``.
:type problem_type: str
:param problem_type: ``'classification'`` or ``'regression'``. The problem
type. Multi-class classification problems are not supported.
:type hyper_search_spaces: list
:param hyper_search_spaces: The list of :class:`miraiml.HyperSearchSpace`
objects to optimize. If ``hyper_search_spaces`` has length 1, the engine
will not run ensemble cycles.
:type score_function: function
:param score_function: A function that receives the "truth" and the predictions
(in this order) and returns the score. Bigger scores must mean better models.
:type use_all_features: bool, optional, default=False
:param use_all_features: Whether to force MiraiML to always use all features
or not.
:type n_folds: int, optional, default=5
:param n_folds: The number of folds for the fitting/predicting process.
:type stratified: bool, optional, default=True
:param stratified: Whether to stratify folds on target or not. Only used if
``problem_type == 'classification'``.
:type ensemble_id: str, optional, default=None
:param ensemble_id: The id for the ensemble. If none is given, the engine will
not ensemble base models.
:raises: ``NotImplementedError``, ``TypeError``, ``ValueError``
:Example:
::
from sklearn.metrics import roc_auc_score
from miraiml import Config
config = Config(
local_dir = 'miraiml_local',
problem_type = 'classification',
hyper_search_spaces = hyper_search_spaces,
score_function = roc_auc_score,
use_all_features = False,
n_folds = 5,
stratified = True,
ensemble_id = 'Ensemble'
)
"""
def __init__(self, local_dir, problem_type, hyper_search_spaces, score_function,
use_all_features=False, n_folds=5, stratified=True, ensemble_id=None):
self.__validate__(local_dir, problem_type, hyper_search_spaces, score_function,
use_all_features, n_folds, stratified, ensemble_id)
self.local_dir = local_dir
if self.local_dir[-1] != '/':
self.local_dir += '/'
self.problem_type = problem_type
self.hyper_search_spaces = hyper_search_spaces
self.score_function = score_function
self.use_all_features = use_all_features
self.n_folds = n_folds
self.stratified = stratified
self.ensemble_id = ensemble_id
@staticmethod
def __validate__(local_dir, problem_type, hyper_search_spaces,
score_function, use_all_features, n_folds, stratified,
ensemble_id):
"""
Validates the constructor parameters.
"""
if not isinstance(local_dir, str):
raise TypeError('local_dir must be a string')
if not is_valid_filename(local_dir):
raise ValueError('Invalid directory name: {}'.format(local_dir))
if not isinstance(problem_type, str):
raise TypeError('problem_type must be a string')
if problem_type not in ('classification', 'regression'):
raise ValueError('Invalid problem type')
if not isinstance(hyper_search_spaces, list):
raise TypeError('hyper_search_spaces must be a list')
if len(hyper_search_spaces) == 0:
raise ValueError('No search spaces')
ids = []
for hyper_search_space in hyper_search_spaces:
if not isinstance(hyper_search_space, HyperSearchSpace):
raise TypeError('All hyper search spaces must be objects of ' +
'miraiml.HyperSearchSpace')
id = hyper_search_space.id
if id in ids:
raise ValueError('Duplicated search space id: {}'.format(id))
ids.append(id)
dir_model_class = dir(hyper_search_space.model_class)
if problem_type == 'classification' and 'predict_proba' not in dir_model_class:
raise NotImplementedError('Model class of id {} '.format(id) +
'must implement predict_proba for ' +
'classification problems')
if problem_type == 'regression' and 'predict' not in dir_model_class:
raise NotImplementedError('Model class of id {} '.format(id) +
'must implement predict for regression problems')
if not callable(score_function):
raise TypeError('score_function must be a function')
if not isinstance(use_all_features, bool):
raise TypeError('use_all_features must be a boolean')
if not isinstance(n_folds, int):
raise TypeError('n_folds must be an integer')
if n_folds < 2:
            raise ValueError('n_folds must be greater than 1')
if not isinstance(stratified, bool):
raise TypeError('stratified must be a boolean')
if ensemble_id is not None and not isinstance(ensemble_id, str):
raise TypeError('ensemble_id must be None or a string')
if isinstance(ensemble_id, str) and not is_valid_filename(ensemble_id):
raise ValueError('invalid ensemble_id')
if ensemble_id in ids:
raise ValueError('ensemble_id cannot have the same id of a hyper ' +
'search space')
class Engine:
"""
This class offers the controls for the engine.
:type config: miraiml.Config
:param config: The configurations for the behavior of the engine.
:type on_improvement: function, optional, default=None
:param on_improvement: A function that will be executed everytime the engine
finds an improvement for some id. It must receive a ``status`` parameter,
which is the return of the method :func:`request_status`.
:raises: ``TypeError``
:Example:
::
from miraiml import Engine
def on_improvement(status):
print('Scores:', status['scores'])
engine = Engine(config, on_improvement=on_improvement)
"""
def __init__(self, config, on_improvement=None):
self.__validate__(config, on_improvement)
self.config = config
self.on_improvement = on_improvement
self.__is_running__ = False
self.must_interrupt = False
self.mirai_seeker = None
self.models_dir = config.local_dir + 'models/'
self.train_data = None
self.ensembler = None
self.n_cycles = 0
@staticmethod
def __validate__(config, on_improvement):
"""
Validates the constructor parameters.
"""
if not isinstance(config, Config):
raise TypeError('miraiml.Engine\'s constructor requires an object ' +
'of miraiml.Config')
if on_improvement is not None and not callable(on_improvement):
raise TypeError('on_improvement must be None or a function')
def is_running(self):
"""
Tells whether the engine is running or not.
:rtype: bool
:returns: ``True`` if the engine is running and ``False`` otherwise.
"""
return self.__is_running__
def interrupt(self):
"""
Makes the engine stop on the first opportunity.
.. note::
This method is **not** asynchronous. It will wait for the engine to
stop.
"""
self.must_interrupt = True
if self.ensembler is not None:
self.ensembler.interrupt()
while self.__is_running__:
time.sleep(.1)
self.must_interrupt = False
def load_data(self, train_data, target_column, test_data=None, restart=False):
"""
Interrupts the engine and loads a new pair of train/test datasets. All of
their columns must be instances of str or int.
:type train_data: pandas.DataFrame
:param train_data: The training data.
:type target_column: str or int
:param target_column: The target column identifier.
:type test_data: pandas.DataFrame, optional, default=None
:param test_data: The testing data. Use the default value if you don't
need to make predictions for data with unknown labels.
:type restart: bool, optional, default=False
:param restart: Whether to restart the engine after updating data or not.
:raises: ``TypeError``, ``ValueError``
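        :Example:

            ::

                # a minimal usage sketch; train_df and test_df are assumed to be
                # pandas DataFrames already loaded by the caller
                engine.load_data(train_df, target_column='target',
                                 test_data=test_df, restart=True)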
"""
self.columns_renaming_map = {}
self.columns_renaming_unmap = {}
train_data, target_column, test_data, needs_columns_casting = self.__validate_data__(
train_data, target_column, test_data
)
if needs_columns_casting:
for column in train_data.columns:
column_renamed = str(column)
self.columns_renaming_map[column] = column_renamed
self.columns_renaming_unmap[column_renamed] = column
train_data = train_data.rename(columns=self.columns_renaming_map)
if test_data is not None:
test_data = test_data.rename(columns=self.columns_renaming_map)
self.interrupt()
self.train_data = train_data.drop(columns=target_column)
self.train_target = train_data[target_column]
self.all_features = list(self.train_data.columns)
self.test_data = test_data
if self.mirai_seeker is not None:
self.mirai_seeker.reset()
if restart:
self.restart()
@staticmethod
def __validate_data__(train_data, target_column, test_data):
"""
Validates the input data.
"""
if not isinstance(train_data, pd.DataFrame):
raise TypeError('Training data must be an object of pandas.DataFrame')
if test_data is not None and not isinstance(test_data, pd.DataFrame):
raise TypeError('Testing data must be None or an object of pandas.DataFrame')
if target_column not in train_data.columns:
raise ValueError('target_column must be a column of train_data')
train_columns = train_data.columns
if test_data is not None:
test_columns = test_data.columns
for column in train_columns:
if column != target_column and column not in test_columns:
                    raise ValueError('All input columns in train data must be in test data')
needs_columns_casting = False
for column in train_columns:
if not isinstance(column, str):
if not isinstance(column, int):
                    raise ValueError('All column names must be either str or int')
needs_columns_casting = True
return train_data, target_column, test_data, needs_columns_casting
def shuffle_train_data(self, restart=False):
"""
Interrupts the engine and shuffles the training data.
:type restart: bool, optional, default=False
:param restart: Whether to restart the engine after shuffling data or not.
:raises: ``RuntimeError``
.. note::
It's a good practice to shuffle the training data periodically to avoid
overfitting on a certain folding pattern.
"""
if self.train_data is None:
raise RuntimeError('No data to shuffle')
self.interrupt()
seed = int(time.time())
self.train_data = self.train_data.sample(frac=1, random_state=seed)
self.train_target = self.train_target.sample(frac=1, random_state=seed)
if restart:
self.restart()
def reconfigure(self, config, restart=False):
"""
Interrupts the engine and loads a new configuration.
:type config: miraiml.Config
:param config: The configurations for the behavior of the engine.
:type restart: bool, optional, default=False
:param restart: Whether to restart the engine after reconfiguring it or
not.
"""
self.interrupt()
self.config = config
if self.mirai_seeker is not None:
self.mirai_seeker.reset()
if restart:
self.restart()
def restart(self):
"""
Interrupts the engine and starts again from last checkpoint (if any).
:raises: ``RuntimeError``, ``KeyError``
"""
if self.train_data is None:
raise RuntimeError('No data to train')
self.interrupt()
def starter():
try:
self.__main_loop__()
except Exception:
self.__is_running__ = False
raise
Thread(target=starter).start()
def __improvement_trigger__(self):
"""
Called when an improvement happens.
"""
if self.on_improvement is not None:
self.on_improvement(self.request_status())
def __update_best__(self, score, id):
"""
Updates the best id of the engine.
"""
if self.best_score is None or score > self.best_score:
self.best_score = score
self.best_id = id
def __main_loop__(self):
"""
Main optimization loop.
"""
self.__is_running__ = True
if not os.path.exists(self.models_dir):
os.makedirs(self.models_dir)
self.base_models = {}
self.train_predictions_df =
|
pd.DataFrame()
|
pandas.DataFrame
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of the
# Apode Project (https://github.com/ngrion/apode).
# Copyright (c) 2020, <NAME> and <NAME>
# License: MIT
# Full Text: https://github.com/ngrion/apode/blob/master/LICENSE.txt
# =============================================================================
# DOCS
# =============================================================================
"""Data simulation tools for Apode."""
# =============================================================================
# IMPORTS
# =============================================================================
from apode.basic import ApodeData
import numpy as np
import pandas as pd
# =============================================================================
# FUNCTIONS
# =============================================================================
def make_pareto(seed=None, a=5, size=100, c=200, nbin=None):
"""Pareto Distribution.
Parameters
----------
seed: int, optional(default=None)
a: float, optional(default=5)
size: int, optional(default=100)
c: int, optional(default=200)
nbin: int, optional(default=None)
Return
------
out: float array
Array of random numbers.
"""
random = np.random.RandomState(seed=seed)
y = c * random.pareto(a=a, size=size)
df =
|
pd.DataFrame({"x": y})
|
pandas.DataFrame
|
import datetime as dt
import gc
import json
import logging
import os
import pickle
from glob import glob
from typing import Dict, List, Optional, Tuple, Union
import h5py
import matplotlib as mpl
import matplotlib.dates as mdates
import matplotlib.gridspec as gs
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pyproj
import rasterio as rio
import simplekml
from cataloging.vi import gliImage, ngrdiImage, osaviImage
from fluidml.common import Task
#from PIL import Image
from pycpd import RigidRegistration
from pykml import parser
from rasterio.enums import Resampling
from rasterio.transform import rowcol, xy
from rasterio.windows import Window
from scipy.ndimage import distance_transform_edt
from scipy.optimize import curve_fit
from scipy.signal import find_peaks
#from skimage.exposure import equalize_adapthist
from skimage.feature import peak_local_max
from skimage.filters import gaussian, threshold_otsu
from skimage.measure import label, regionprops
from skimage.segmentation import watershed
from skimage.transform import hough_line, hough_line_peaks, resize
from sklearn.neighbors import NearestNeighbors
logger = logging.getLogger(__name__)
# suppress pickle 'error' from rasterio
logging.Logger.manager.loggerDict['rasterio'].setLevel(logging.CRITICAL)
logging.Logger.manager.loggerDict['matplotlib'].setLevel(logging.CRITICAL)
import warnings
warnings.filterwarnings("ignore")
mpl.use('Agg')
def read_raster(
image_path: str,
all_channels: np.array,
channels: List[str]
):
ch = [np.argmax(all_channels == c)+1 for c in channels]
raster = rio.open(image_path)
if raster.dtypes[0] == "float32":
data = raster.read(ch, fill_value=np.nan)
data /= np.nanmax(data)
elif raster.dtypes[0] == "uint8":
if "alpha" in all_channels:
data = raster.read(ch).astype(np.float32)
alpha_ch = raster.read(int(np.argmax(all_channels == "alpha")+1))
for d in data[:,:]:
d[alpha_ch == 0] = np.nan
else:
data = raster.read(ch, fill_value=0).astype(np.float32)
else:
raise NotImplementedError()
return np.transpose(data, axes=(1,2,0))
def write_onechannel_raster(
image_path: str,
image: np.array,
meta: Dict, dtype: str
):
if dtype == 'float32':
meta.update({
'dtype': 'float32',
'height': image.shape[0],'count': 1,'nodata': -32767,
'width': image.shape[1]})
elif dtype == 'uint8':
meta.update({
'dtype': 'uint8',
'height': image.shape[0],'count': 1,'nodata': 0,
'width': image.shape[1]})
else:
raise NotImplementedError()
with rio.open(image_path, "w", **meta) as dest:
dest.write(image,1)
def calc_m_per_px(
raster_meta: Dict
) -> float:
# read CRS of rasterio data
proj_crs = pyproj.crs.CRS.from_user_input(raster_meta["crs"])
# GPS coordinates of anchor point
lon0, lat0 = xy(raster_meta["transform"],0,0)
# calculate UTM zone
utm_zone = int(np.floor((lon0/360)*60+31))
utm = pyproj.Proj(proj='utm', zone=utm_zone, ellps='WGS84')
UTM0_x, UTM0_y = utm(*xy(raster_meta["transform"],0,0))
UTM1_x, UTM1_y = utm(*xy(raster_meta["transform"],0,1))
UTM2_x, UTM2_y = utm(*xy(raster_meta["transform"],1,0))
# calculate unit pixel distances
pxx = abs(UTM1_x - UTM0_x)
pxy = abs(UTM2_y - UTM0_y)
# take mean (assume quadratic pixels)
m_per_px = np.mean([pxx, pxy])
return m_per_px
def px_to_utm(
point_cloud: np.ndarray,
raster_meta: Dict
) -> Tuple[np.ndarray, pyproj.proj.Proj]:
# read CRS of rasterio data
proj_crs = pyproj.crs.CRS.from_user_input(raster_meta["crs"])
# GPS coordinates of point cloud
lon, lat = np.asarray(xy(raster_meta["transform"],*point_cloud.T))
# calculate UTM zone
utm_zone = int(np.floor((lon.mean()/360)*60+31))
utm_transform = pyproj.Proj(proj='utm', zone=utm_zone, ellps='WGS84')
utm = np.asarray(utm_transform(lon, lat)).T
return utm, utm_transform
def readCoordsFromKml(
filename: str
) -> np.ndarray:
with open(filename, "r") as kmlfile:
root = parser.parse(kmlfile).getroot()
lonlat = []
for c in root.Document.iterchildren():
lonlat.append([float(x) for x in c.Point.coordinates.text.split(",")[:2]])
lonlat = np.asarray(lonlat)
return lonlat
def growFunction(
x: float,
g: float,
lg: float,
xg: float,
d: float,
ld: float,
xd: float
) -> float:
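    # double-logistic growth model: a rising sigmoid (g, lg, xg) optionally reduced by a second decay sigmoid (d, ld, xd)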
if d > 0:
return (g/(1+np.exp(-lg*(x-xg)))) - d/(1+np.exp(-ld*(x-xd)))
else:
return (g/(1+np.exp(-lg*(x-xg))))
def cumDays(
observation_dates: Union[List[float],np.array]
) -> np.array:
cum_days = np.cumsum([d.days for d in np.diff(np.sort(observation_dates))]).astype(float)
cum_days = np.hstack((0, cum_days))
return cum_days
def growScaling(
cum_days: np.array,
bounds: Tuple,
grow_func_params: np.array
) -> np.array:
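    # normalize the fitted growth curve so it runs from 0 (first day) to 1 (its maximum), then rescale into [earliest, latest]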
earliest, latest = bounds
grow_func = growFunction(cum_days, *grow_func_params)
maxgrow_val = np.max(grow_func)
grow_func = (grow_func - grow_func[0]) / (maxgrow_val - grow_func[0])
scaled = grow_func * (latest - earliest) + earliest
return scaled
def makeDirectory(
directory: str
) -> None:
if not os.path.exists(directory):
os.makedirs(directory)
def group_points(
points: np.array,
layers: np.array,
max_dist: float
) -> Tuple[np.array, np.array]:
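    # greedily track points across layers: attach each point to the nearest existing group centroid within max_dist, otherwise start a new group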
nn = NearestNeighbors(n_neighbors=1, n_jobs=-1)
# initialization
# -> all labels to -1
labels = -np.ones_like(layers)
# all given layers
uni_layers = np.unique(layers)
# -> give points of first layer individual group labels
labels[layers == uni_layers[0]] = np.arange(np.sum(layers == uni_layers[0]))
# -> first evaluation point cloud: first layer
centroids = points[layers == uni_layers[0]]
ind = np.arange(len(points))
for i in range(1, len(uni_layers)):
# fit nearest neighbor model
nn.fit(centroids)
# evaluate on next layer
dist, ass_group = nn.kneighbors(points[layers == uni_layers[i]])
dist = dist.flatten()
ass_group = ass_group.flatten()
# exclude points that have more than max_dist distance to a neighbor
# new_member array:
# 1 = valid member candidate for existing group
# 0 = valid member candidate for new group
# -1 = excluded due to multiple candidates for a single group
new_member = (dist <= max_dist).astype(int)
# if multiple (valid!) points are assigned to the same group, take the nearest
valid = np.copy(new_member).astype(bool)
valid_ind = np.arange(len(valid))[valid]
for j, counts in enumerate(np.bincount(ass_group[valid])):
if counts > 1:
ass_group_ind = valid_ind[ass_group[valid] == j]
best_ind = ass_group_ind[np.argsort(dist[ass_group_ind])]
new_member[best_ind[1:]] = -1
# assign the group labels to the new members
layer_ind = ind[layers == uni_layers[i]]
old_layer_ind = layer_ind[new_member == 1]
labels[old_layer_ind] = ass_group[new_member == 1]
# give new group labels to points not registered so far
new_layer_ind = layer_ind[new_member == 0]
labels[new_layer_ind] = np.arange(labels.max()+1, labels.max()+1+len(new_layer_ind))
# new reference cloud are the centroids of the so far accumulated clusters
centroids = np.stack([np.mean(points[labels == label], axis=0) for label in range(labels.max()+1)])
return labels, centroids
def inverse_transform(
xy_centered_aligned,
xy_center,
transform_coeffs
):
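    # invert a scale-rotate-translate alignment; transform_coeffs = (scale, rotation in degrees, translation x, translation y)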
s = transform_coeffs[0]
rot = np.deg2rad(transform_coeffs[1])
t = transform_coeffs[2:]
rot_inv = np.array([[np.cos(rot), np.sin(rot)], [-np.sin(rot), np.cos(rot)]])
return rot_inv@(xy_centered_aligned-t).T/s + xy_center
def add_non_detected(
df_less: pd.DataFrame,
df_meta: pd.DataFrame
) -> pd.DataFrame:
dates = np.unique(df_meta["date"])
xy_center = df_meta["xy_center"].iloc[0]
df_add = pd.DataFrame()
for g_id in np.unique(df_less["group_id"]):
df_group = df_less[df_less["group_id"] == g_id]
missing_dates = dates[np.isin(dates, df_group["date"], invert=True)]
for d in missing_dates:
xy_centered_aligned = df_group["xy_centered_aligned_cm"].mean(axis=0) # group centroid [cm (UTM)]
cropline_y = df_group["y_cropline_rotated_cm"].iloc[0]
align_transform = df_meta[df_meta["date"] == d]["align_transform"].iloc[0]
gps_transform = df_meta[df_meta["date"] == d]["gps_transform"].iloc[0]
utm_transform = df_meta[df_meta["date"] == d]["utm_transform"].iloc[0]
#cr = df_meta[df_meta["date"] == d]["cover_ratio"].values
#mc = df_meta[df_meta["date"] == d]["align_median_confidence"].values
xy_backtrans = inverse_transform(xy_centered_aligned, xy_center, align_transform)
lonlat_backtrans = utm_transform(*xy_backtrans/100., inverse=True)
df_add = df_add.append(
dict([("field_id" , df_group["field_id"].iloc[0]),
("date" , d),
("group_id" , g_id),
("group_size" , df_group["group_size"].iloc[0]),
("group_cropline_id" , df_group["group_cropline_id"].iloc[0]),
("xy_cm" , xy_backtrans),
("xy_px" , list(rowcol(gps_transform, *lonlat_backtrans))),
("lonlat" , lonlat_backtrans),
("xy_centered_aligned_cm" , xy_centered_aligned),
("xy_centroid_centered_aligned_cm" , xy_centered_aligned),
("y_cropline_rotated_cm" , cropline_y),
("centroid_dist_cm" , 0.),
("detected" , False)]), ignore_index=True)
return df_add
def filterGoodPlantsByPercDet(
plants_df: pd.DataFrame,
meta_df: pd.DataFrame,
filter_coverratio: float,
perc_min_det: float
) -> pd.DataFrame:
plants_meta_df = plants_df.merge(meta_df, on=["date", "field_id"], how="left")
n_dates = len(np.unique(meta_df["date"]))
# good plant group := at least perc_min_det direct detection ratio up to certain given cover ratio
good_idx = []
for f_id in np.unique(meta_df["field_id"]):
n_counts_below_cr_thres = np.sum(np.unique(plants_meta_df[plants_meta_df["field_id"]==f_id]["cover_ratio"]) <= filter_coverratio)
groups, counts = np.unique(plants_meta_df[(plants_meta_df["field_id"]==f_id) & (plants_meta_df["cover_ratio"] <= filter_coverratio) & (plants_meta_df["detected"] == True)]["group_id"], return_counts=True)
interest_groups = groups[counts/float(n_counts_below_cr_thres) >= perc_min_det]
candidates = plants_meta_df[(plants_meta_df["field_id"]==f_id) & (np.isin(plants_meta_df["group_id"], interest_groups))]
for g_id in interest_groups:
cand_group = candidates[candidates["group_id"]==g_id]
if len(cand_group)==n_dates:
good_idx.extend(cand_group.index)
good_df = plants_meta_df.loc[good_idx].sort_values(["field_id", "group_id", "date"])
return good_df
class SegmentSoilPlants(Task):
def __init__(
self,
image_path: str,
image_channels: List[str],
veg_index: str,
use_watershed: bool,
max_coverratio: float,
make_orthoimage: bool,
orthoimage_dir: str,
plot_result: bool,
plot_dir: str,
plot_format: str,
plot_dpi: int,
plot_cmap: str
):
super().__init__()
self.image_path = image_path
self.image_channels = np.asarray(image_channels)
self.veg_index = veg_index
self.use_watershed = use_watershed
self.max_coverratio = max_coverratio
self.make_orthoimage = make_orthoimage
self.orthoimage_dir = orthoimage_dir
self.plot_result = plot_result
self.plot_dir = plot_dir
self.plot_format = plot_format
self.plot_dpi = plot_dpi
self.plot_cmap = plot_cmap
def plot_raw(
self
):
logger.info(f"{self.name}-{self.date.date()} -> Plot raw image.")
if len(self.image_channels) < 4:
n_rows, n_cols = 1, len(self.image_channels)
else:
n_rows, n_cols = 2, len(self.image_channels)//2
fig, ax = plt.subplots(n_rows, n_cols, sharex=True, sharey=True, figsize=(self.width/500*n_cols, self.height/800*n_rows))
data = read_raster(self.image_path, self.image_channels, self.image_channels)
for (i, (a, c)) in enumerate(zip(ax.ravel(), self.image_channels)):
im = a.imshow(data[:,:,i], cmap=self.plot_cmap)
try:
fig.colorbar(im, ax=a)
except:
pass
a.set(xlabel='x', ylabel='y', title = c, aspect='equal')
fig.suptitle("raw image data")
fig.savefig(os.path.join(self.plot_dir, f"{self.field_id}_{self.date.date()}_01_channels"+self.plot_format), dpi=self.plot_dpi, bbox_inches='tight')
fig.clf()
del data, fig, ax, im
plt.close("all")
gc.collect()
def plot_segmentation(
self
):
logger.info(f"{self.name}-{self.date.date()} -> Plot segmentation image.")
fig = plt.figure(figsize=(3*self.width/500, self.height/500), tight_layout=True)
gridspec = gs.GridSpec(1,3,width_ratios=[2,1,2], figure=fig)
ax1 = fig.add_subplot(gridspec[0])
ax2 = fig.add_subplot(gridspec[1])
ax3 = fig.add_subplot(gridspec[2])
m = ax1.imshow(self.vi_image.astype(float), cmap=self.plot_cmap, vmin=-1, vmax=1)
cb = fig.colorbar(m, ax=ax1)
cb.set_label("VI")
ax1.set(title=f"{self.veg_index} image", xlabel="px", ylabel="px")
ax2.hist(self.vi_image[np.isfinite(self.vi_image)], bins=256, orientation="horizontal", color="C0")
ax2.set(title=f"{self.veg_index} value distribution", ylim=(-1,1), xlabel="counts", xscale="log")
if self.cover_ratio_est < 0.01:
ax2.axhline(self.thres, c='r', label=f"Threshold (99-percentile): {self.thres:.2f}")
else:
ax2.axhline(self.thres, c='r', label=f"Threshold (Otsu): {self.thres:.2f}")
ax2.legend()
ax3.imshow(self.seg_mask, cmap=self.plot_cmap)
ax3.set(title=f"Segmented plant area (cover ratio: {100.*self.cover_ratio:.2f} %)", xlabel="px", ylabel="px")
fig.savefig(os.path.join(self.plot_dir, f"{self.field_id}_{self.date.date()}_02_segmentation"+self.plot_format), dpi=self.plot_dpi, bbox_inches='tight')
fig.clf()
plt.close("all")
del fig, ax1, ax2, ax3
gc.collect()
def run(
self
):
try:
self.field_id, d = os.path.basename(self.image_path).replace(".tif", "").split("_")[:2]
year = int(d[:4])
month = int(d[4:6])
day = int(d[6:8])
self.date = dt.datetime(year, month, day)
except Exception:
logger.error(f"Wrong image path or no files found: {self.image_path}")
raise
logger.info(f"{self.name}-{self.date.date()} -> Load image.")
raster = rio.open(self.image_path)
raster_meta = raster.meta
self.height, self.width = raster.shape
px_res = calc_m_per_px(raster_meta)*100. # cm/px
logger.info(f"{self.name}-{self.date.date()} -> Calculated resolution: {px_res:.4f} cm/px.")
del raster
gc.collect()
# calculate Vegetation Index which has values in [-1,1]
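# (assumed standard definitions of the indices computed by the helpers below:
#  NGRDI = (G - R) / (G + R), GLI = (2G - R - B) / (2G + R + B),
#  OSAVI = (NIR - R) / (NIR + R + y_osavi); all stay roughly within [-1, 1])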
if self.veg_index == "NGRDI":
channels = read_raster(self.image_path, self.image_channels, ["R", "G"])
self.vi_image = ngrdiImage(R = channels[:,:,0], G = channels[:,:,1])
est_thres = 0
elif self.veg_index == "GLI":
channels = read_raster(self.image_path, self.image_channels, ["R", "G", "B"])
self.vi_image = gliImage(R = channels[:,:,0], G = channels[:,:,1], B = channels[:,:,2])
est_thres = 0.2
elif self.veg_index == "OSAVI":
channels = read_raster(self.image_path, self.image_channels, ["R", "NIR"])
self.vi_image = osaviImage(R = channels[:,:,0], NIR = channels[:,:,1], y_osavi = 0.6)
est_thres = 0.25
else:
raise ValueError(f"Unknown vegetation index: {self.veg_index}")
del channels
gc.collect()
# cover ratio estimation
self.cover_ratio_est = np.nansum(self.vi_image >= est_thres)/np.sum(np.isfinite(self.vi_image))
logger.info(f"{self.name}-{self.date.date()} -> Use {self.veg_index} Vegetation Index. Cover ratio estimation: {self.cover_ratio_est*100.:.2f} %")
if self.cover_ratio_est <= self.max_coverratio:
# calculate threshold with Otsu's method
if self.cover_ratio_est < 0.01:
self.thres = np.percentile(self.vi_image[np.isfinite(self.vi_image)], 99)
logger.warn(f"{self.name}-{self.date.date()} -> Estimated cover ratio below 1 % -> Take 99-percentile as threshold: {self.thres:.2f}")
else:
self.thres = threshold_otsu(self.vi_image[np.isfinite(self.vi_image)])
logger.info(f"{self.name}-{self.date.date()} -> Otsu threshold: {self.thres:.2f}")
# segmentation
if self.use_watershed:
logger.info(f"{self.name}-{self.date.date()} -> Segment soil and plants with watershed method.")
markers = np.zeros_like(self.vi_image, dtype=np.uint8)
markers[self.vi_image <= self.thres] = 1 # soil
markers[self.vi_image > self.thres] = 2 # plant
self.seg_mask = (watershed(self.vi_image, markers) - 1).astype(bool) # True -> plant, False -> soil
del markers
else:
logger.info(f"{self.name}-{self.date.date()} -> Segment soil and plants without watershed method.")
self.seg_mask = np.zeros_like(self.vi_image, dtype=bool) # True -> plant, False -> soil
self.seg_mask[self.vi_image > self.thres] = True # plant
self.cover_ratio = np.sum(self.seg_mask)/np.sum(np.isfinite(self.vi_image))
logger.info(f"{self.name}-{self.date.date()} -> Cover ratio recalculated: {self.cover_ratio*100.:.2f} %")
if self.plot_result:
makeDirectory(self.plot_dir)
self.plot_segmentation()
gc.collect()
else:
logger.warn(f"{self.name}-{self.date.date()} -> Estimated cover ratio ({self.cover_ratio_est*100.:.2f} %) is too high to extract plants -> Skip plot.")
self.seg_mask = []
self.cover_ratio = self.cover_ratio_est
self.save(obj=self.seg_mask, name="segmentation_mask", type_='pickle')
self.save(obj=self.cover_ratio, name="cover_ratio", type_='json')
self.save(obj=self.field_id, name="field_id", type_='json')
self.save(obj=self.date, name="date", type_='pickle')
self.save(obj=raster_meta, name="raster_meta", type_='pickle')
self.save(obj=px_res, name="px_resolution", type_='json')
if (self.make_orthoimage) and (len(self.seg_mask) > 0):
makeDirectory(self.orthoimage_dir)
logger.info(f"{self.name}-{self.date.date()} -> Save segmentation mask as orthoimage.")
write_onechannel_raster(os.path.join(self.orthoimage_dir, f"{self.field_id}_{self.date.date()}_segmentation.tif"),
np.uint8(self.seg_mask*255),
raster_meta,
"uint8")
# plot raw channel information
if self.plot_result:
makeDirectory(self.plot_dir)
self.plot_raw()
gc.collect()
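# Minimal sketch (toy data, not part of the pipeline) of the segmentation step in
# SegmentSoilPlants.run: Otsu's method picks the soil/plant threshold on a synthetic
# vegetation-index image and the optional watershed pass turns it into a binary mask.
# It relies only on numpy and the skimage functions already used by the task above;
# sizes and value ranges are illustrative assumptions.
def _example_vi_segmentation(use_watershed: bool = True) -> np.ndarray:
    rng = np.random.default_rng(0)
    vi = rng.uniform(-0.2, 0.1, size=(64, 64))            # soil background
    vi[20:30, 20:30] = rng.uniform(0.4, 0.8, (10, 10))     # one plant patch
    thres = threshold_otsu(vi[np.isfinite(vi)])
    if use_watershed:
        markers = np.zeros_like(vi, dtype=np.uint8)
        markers[vi <= thres] = 1   # soil
        markers[vi > thres] = 2    # plant
        return (watershed(vi, markers) - 1).astype(bool)   # True -> plant
    return vi > thres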
class FitGrowFunction(Task):
def __init__(
self,
plot_result: bool,
plot_dir: str,
plot_format: str,
plot_dpi: int
):
super().__init__()
self.plot_result = plot_result
self.plot_dir = plot_dir
self.plot_format = plot_format
self.plot_dpi = plot_dpi
def plot(
self
):
logger.info(f"{self.name} -> Plot Grow function.")
g, lg, xg, d, ld, xd = self.fit
cd = np.linspace(0, self.cum_days[-1], 1000)
cal_days = [self.observation_dates[0] + dt.timedelta(days=x) for x in self.cum_days]
fig, ax = plt.subplots()
ax.scatter(self.cum_days, self.cover_ratios, label="observations")
if d > 0:
label = r"grow function fit: $f(x)=\frac{g}{1+e^{-\lambda_g(x-x_g)}}-\frac{d}{1+e^{-\lambda_d(x-x_d)}}$"+f"\n$g$={g:.4g}, $\\lambda_g$={lg:.4g}, $x_g$={xg:.4g}\n$d$={d:.4g}, $\\lambda_d$={ld:.4g}, $x_d$={xd:.4g}"
else:
label = r"grow function fit: $f(x)=\frac{g}{1+e^{-\lambda_g(x-x_g)}}$"+f"\n$g$={g:.4g}, $\\lambda_g$={lg:.4g}, $x_g$={xg:.4g}"
ax.plot(cd, growFunction(cd, *self.fit), c="r", label=label)
ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
ax.set(xlabel="days", ylabel="cover ratio")
ax.legend()
ax.grid()
ax_dt = ax.twiny()
ax_dt.set_xlim(map(lambda cd: self.observation_dates[0] + dt.timedelta(days=cd), ax.get_xlim()))
ax_dt.set_xlabel("calendar date")
ax_dt.set_xticks(cal_days)
ax_dt.tick_params(axis='x', labelrotation=90)
ax.set(title=f"{self.field_id}: grow function fit")
savename = os.path.join(self.plot_dir, f"{self.field_id}_grow_function"+self.plot_format)
fig.savefig(savename, dpi=self.plot_dpi, bbox_inches='tight')
plt.close("all")
del fig, ax, ax_dt
def run(
self,
reduced_results: List[Dict[str, Dict]]
):
cover_ratios = []
observation_dates = []
for r in reduced_results:
cover_ratios.append(r["result"]["cover_ratio"])
observation_dates.append(r["result"]["date"])
observation_dates = np.asarray(observation_dates)
cover_ratios = np.asarray(cover_ratios)
sort = np.argsort(observation_dates)
self.observation_dates = observation_dates[sort]
self.cover_ratios = cover_ratios[sort]
self.cum_days = cumDays(self.observation_dates)
self.field_id = reduced_results[0]["result"]["field_id"]
try:
self.fit, self.cov = curve_fit(growFunction, self.cum_days, self.cover_ratios,
p0=[0.8, 0.1, self.cum_days[-1]/3, 0.3, 0.1, 2*self.cum_days[-1]/3],
maxfev=1000000)
# calculate corrected cover ratios with grow function
#gf_cover_ratio = growFunction(self.cum_days, *self.fit)
#self.save(obj=gf_cover_ratio, name="grow_function_cover_ratios", type_='pickle')
#self.save(obj=self.observation_dates, name="dates", type_='pickle')
logger.info(f"{self.name} -> Grow function fitted")
if self.plot_result:
makeDirectory(self.plot_dir)
self.plot()
gc.collect()
except Exception as e:
self.fit = np.nan
self.cov = np.nan
logger.warning(f"{self.name} -> Grow function could not be fitted. Error: {e}")
self.save(obj=self.fit, name="grow_function_fit_params", type_='pickle')
self.save(obj=self.cov, name="grow_function_cov_matrix", type_='pickle')
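# Hedged sketch of the double-logistic grow model fitted above. Assumption (taken from
# the label in FitGrowFunction.plot): growFunction(x) = g/(1+exp(-lg*(x-xg))) - d/(1+exp(-ld*(x-xd))),
# i.e. a logistic growth term minus a logistic decay term over cumulative days. The toy
# fit below reuses the module's numpy and scipy curve_fit imports on synthetic data.
def _example_grow_function(x, g, lg, xg, d, ld, xd):
    return g / (1.0 + np.exp(-lg * (x - xg))) - d / (1.0 + np.exp(-ld * (x - xd)))

def _example_grow_fit():
    days = np.arange(0.0, 91.0, 7.0)
    truth = _example_grow_function(days, 0.8, 0.15, 30.0, 0.3, 0.1, 70.0)
    noisy = truth + np.random.default_rng(1).normal(0.0, 0.01, size=days.shape)
    # same initial-guess pattern as FitGrowFunction.run
    popt, _ = curve_fit(_example_grow_function, days, noisy,
                        p0=[0.8, 0.1, days[-1] / 3, 0.3, 0.1, 2 * days[-1] / 3],
                        maxfev=100000)
    return popt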
class ExtractPlantPositions(Task):
def __init__(
self,
min_peak_distance: float,
peak_threshold: float,
gauss_sigma_bounds: Tuple[float, float],
use_growfunction: bool,
make_orthoimage: bool,
orthoimage_dir: str,
plot_result: bool,
plot_dir: str,
plot_format: str,
plot_dpi: int,
plot_cmap: str
):
super().__init__()
self.min_peak_distance = min_peak_distance
self.peak_threshold = peak_threshold
self.gauss_sigma_bounds = gauss_sigma_bounds
self.use_growfunction = use_growfunction
self.make_orthoimage = make_orthoimage
self.orthoimage_dir = orthoimage_dir
self.plot_result = plot_result
self.plot_dir = plot_dir
self.plot_format = plot_format
self.plot_dpi = plot_dpi
self.plot_cmap = plot_cmap
def plot_gauss_blur(
self
):
logger.info(f"{self.name}-{self.date.date()} -> Plot Gaussian blur image.")
fig, ax = plt.subplots(figsize=(self.width/500, self.height/500))
im = ax.imshow(self.blurred, cmap='gray')
ax.set(title=f"Gaussian blur ($\sigma$ = {self.sigma:.2f} px)", aspect='equal', xlabel='x [cm]', ylabel='y [cm]')
fig.savefig(os.path.join(self.plot_dir, f"{self.field_id}_{self.date.date()}_03_gauss_blur"+self.plot_format), dpi=self.plot_dpi, bbox_inches='tight')
fig.clf()
plt.close("all")
del fig, ax
def plot_peaks(
self
):
logger.info(f"{self.name}-{self.date.date()} -> Plot peak position image.")
fig, ax = plt.subplots(figsize=(self.width/500, self.height/500))
ax.scatter(*self.peaks.T[::-1], color='red', s=2, label=f"{len(self.peaks)} peaks")
ax.imshow(self.blurred, cmap=self.plot_cmap)
ax.set(title=f"Peaks (min. distance = {self.min_peak_distance} cm = {self.min_peak_distance/self.px_res:.2f} px)", aspect='equal', xlabel='x [px]', ylabel='y [px]')
ax.legend()
fig.savefig(os.path.join(self.plot_dir, f"{self.field_id}_{self.date.date()}_04_peaks"+self.plot_format), dpi=self.plot_dpi, bbox_inches='tight')
fig.clf()
plt.close("all")
del fig, ax
def run(
self,
segmentation_mask: np.ndarray,
#grow_function_cover_ratios: np.array,
#dates: np.array,
px_resolution: float,
cover_ratio: float,
date: dt.datetime,
field_id: str,
raster_meta: Dict
):
self.date = date
self.field_id = field_id
self.px_res = px_resolution
if len(segmentation_mask) > 0:
# apply gaussian filter with scaled sigma
if self.use_growfunction:
raise NotImplementedError()
#cover_ratio = grow_function_cover_ratios[dates == date]
#logger.info(f"{self.name}-{self.date.date()} -> Use cover ratio from grow function fit. ({100.*cover_ratio:.2f} %)")
else:
logger.info(f"{self.name}-{self.date.date()} -> Use standard cover ratio. ({100.*cover_ratio:.2f} %)")
self.sigma = (self.gauss_sigma_bounds[0] + cover_ratio*np.diff(self.gauss_sigma_bounds)[0]) / self.px_res
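# (the blur radius is interpolated in cm between gauss_sigma_bounds[0] at 0 % cover
#  and gauss_sigma_bounds[1] at 100 % cover, then converted to pixels via px_res)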
logger.info(f"{self.name}-{self.date.date()} -> Blurring with sigma = {self.sigma*px_resolution:.2f} cm = {self.sigma:.2f} px.")
self.blurred = gaussian(segmentation_mask.astype(np.float32), sigma=self.sigma)
# detect peaks
logger.info(f"{self.name}-{self.date.date()} -> Detect peaks with threshold {self.peak_threshold} and min. distance = {self.min_peak_distance} cm = {self.min_peak_distance/self.px_res:.2f} px.")
self.peaks = peak_local_max(self.blurred, min_distance=int(np.round(self.min_peak_distance/self.px_res)), threshold_abs=self.peak_threshold, exclude_border=False)
# convert peak position from pixel to cm coordinates with UTM coordinate transformation
utm_peaks, utm_transform = px_to_utm(point_cloud=self.peaks, raster_meta=raster_meta)
utm_peaks *= 100 # m * 100 = cm
n_peaks = len(self.peaks)
self.height, self.width = self.blurred.shape
logger.info(f"{self.name}-{self.date.date()} -> {n_peaks} peaks detected.")
if (self.make_orthoimage):
makeDirectory(self.orthoimage_dir)
logger.info(f"{self.name}-{self.date.date()} -> Save Gauss blurred orthoimage.")
write_onechannel_raster(os.path.join(self.orthoimage_dir, f"{self.field_id}_{self.date.date()}_blurred.tif"),
self.blurred,
raster_meta,
"float32")
logger.info(f"{self.name}-{self.date.date()} -> Export found peak positions as KML file.")
kml = simplekml.Kml()
for (lon, lat) in np.asarray(xy(raster_meta["transform"], *self.peaks.T)).T:
kml.newpoint(coords=[(lon, lat)])
kml.save(os.path.join(self.orthoimage_dir, f"{self.field_id}_{self.date.date()}_peaks.kml"))
else:
logger.warn(f"{self.name}-{self.date.date()} -> No segmentation mask due to large cover ratio -> Skip plot.")
utm_peaks = np.array([])
# calculate UTM zone
lon, lat = np.asarray(xy(raster_meta["transform"], raster_meta["height"]//2, raster_meta["width"]//2))
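# (standard UTM zone formula: zone = floor((lon + 180) / 6) + 1, written below as
#  floor(lon/6) + 31; valid outside the polar and Norway/Svalbard exception zones)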
utm_zone = int(np.floor((lon/360)*60+31))
utm_transform = pyproj.Proj(proj='utm', zone=utm_zone, ellps='WGS84')
self.save(obj=utm_peaks, name="plant_positions", type_="pickle")
self.save(obj=utm_transform, name="utm_transform", type_="pickle")
# plot blurred image and contrast image with peak positions
if (len(segmentation_mask) > 0) and self.plot_result:
makeDirectory(self.plot_dir)
self.plot_gauss_blur()
self.plot_peaks()
gc.collect()
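# Minimal sketch (toy data) of the blur-and-peak step in ExtractPlantPositions.run:
# a binary plant mask is smoothed with a Gaussian and local maxima above a fixed
# threshold become plant-position candidates. It uses the skimage functions already
# imported for the task above; sizes and thresholds are illustrative assumptions.
def _example_peak_detection(min_peak_distance_px: int = 5,
                            peak_threshold: float = 0.1) -> np.ndarray:
    mask = np.zeros((60, 60), dtype=bool)
    mask[10:14, 10:14] = True     # first plant
    mask[40:44, 30:34] = True     # second plant
    blurred = gaussian(mask.astype(np.float32), sigma=2.0)
    return peak_local_max(blurred, min_distance=min_peak_distance_px,
                          threshold_abs=peak_threshold, exclude_border=False)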
class LoadPeaks(Task):
def __init__(
self,
field_id: str,
plot_result: bool,
plot_dir: str,
plot_format: str,
plot_dpi: int,
plot_cmap: str
):
super().__init__()
self.field_id = field_id
self.plot_result = plot_result
self.plot_dir = plot_dir
self.plot_format = plot_format
self.plot_dpi = plot_dpi
self.plot_cmap = plot_cmap
def plot(
self
):
logger.info(f"{self.name} -> Plot raw peaks image.")
fig, ax = plt.subplots()
ax.scatter(*self.C.T, s=2, alpha=0.8, c=self.layers, cmap=self.plot_cmap)
ax.set(title=f"{self.field_id}\nraw points", xlabel='x [cm]', ylabel='y [cm]', aspect='equal')
fig.savefig(os.path.join(self.plot_dir, f"{self.field_id}_01_raw"+self.plot_format), dpi=self.plot_dpi, bbox_inches='tight')
fig.clf()
plt.close("all")
del fig, ax
def run(
self,
reduced_results: List[Dict[str, Dict]]
):
cover_ratios, dates, gps_transforms, px_resolutions, field_ids, peaks, utm_transforms, segmentation_masks = [], [], [], [], [], [], [], []
for r in reduced_results:
try:
if len(r["config"].keys()) == 1:
cover_ratios.append(r["result"]["cover_ratio"])
dates.append(r["result"]["date"])
gps_transforms.append(r["result"]["raster_meta"]["transform"])
px_resolutions.append(r["result"]["px_resolution"])
field_ids.append(r["result"]["field_id"])
segmentation_masks.append(r["result"]["segmentation_mask"])
else:
peaks.append(r["result"]["plant_positions"])
utm_transforms.append(r["result"]["utm_transform"])
except:
logger.error(r)
assert len(np.unique(field_ids)) == 1, logger.error(f"{self.name} -> Multiple field IDs!")
assert np.unique(field_ids)[0] == self.field_id, logger.error(f"{self.name} -> Wrong field ID!")
cover_ratios = np.asarray(cover_ratios)
px_resolutions = np.asarray(px_resolutions)
dates = pd.DatetimeIndex(dates)
P = np.asarray(peaks)
logger.info(f"{self.name} -> Load data for {len(dates)} dates.")
# sort dates and layers by cover ratio
cr_sort = np.argsort(cover_ratios)
P = P[cr_sort]
dates = dates[cr_sort]
segmentation_masks = [segmentation_masks[c] for c in cr_sort]
gps_transforms = [gps_transforms[c] for c in cr_sort]
px_resolutions = px_resolutions[cr_sort]
cover_ratios = np.sort(cover_ratios)
n_layers = len(dates)
logger.info(f"{self.name} -> Sorted dates and layers by cover ratio. Layers: {cr_sort}, dates: {dates}, cover ratios: {cover_ratios}")
# dates for printing (e.g. in plots)
printdates = dates.format(formatter=lambda x: x.strftime('%m-%d'))
emptymask = [len(p)>0 for p in P]
logger.info(f"{self.name} -> Peaks for {np.sum(emptymask)} dates available.")
# stack point clouds and save layers
self.C = np.vstack(P[emptymask])
self.layers = np.repeat(np.arange(len(P)), np.array([len(p) for p in P]))
self.save(obj=self.C, name="point_cloud", type_="pickle")
self.save(obj=self.layers, name="layers", type_="pickle")
self.save(obj=cover_ratios, name="cover_ratios", type_="pickle")
self.save(obj=self.field_id, name="field_id", type_="json")
self.save(obj=printdates, name="printdates", type_="pickle")
self.save(obj=dates, name="dates", type_="pickle")
self.save(obj=gps_transforms, name="gps_transforms", type_="pickle")
self.save(obj=px_resolutions, name="px_resolutions", type_="pickle")
self.save(obj=utm_transforms, name="utm_transforms", type_="pickle")
self.save(obj=segmentation_masks, name="segmentation_masks", type_="pickle")
# plot raw point information
if self.plot_result:
makeDirectory(self.plot_dir)
self.plot()
gc.collect()
class AlignPoints(Task):
def __init__(
self,
max_centroid_distance_cpd: float,
max_centroid_distance_group: float,
make_orthoimage: bool,
orthoimage_dir: str,
plot_result: bool,
plot_dir: str,
plot_format: str,
plot_dpi: int,
plot_cmap: str
):
super().__init__()
self.max_centroid_distance_cpd = max_centroid_distance_cpd
self.max_centroid_distance_group = max_centroid_distance_group
self.make_orthoimage = make_orthoimage
self.orthoimage_dir = orthoimage_dir
self.plot_result = plot_result
self.plot_dir = plot_dir
self.plot_format = plot_format
self.plot_dpi = plot_dpi
self.plot_cmap = plot_cmap
@staticmethod
def transform(
coords: np.array,
T: np.array
) -> np.array:
return T[0]*coords@T[1] + T[2]
def plot_aligned(
self
):
logger.info(f"{self.name} -> Plot aligned peak position image.")
fig, ax = plt.subplots()
ax.scatter(*self.P_aligned.T, s=2, alpha=0.8, c=self.layers, cmap=self.plot_cmap)
ax.set(title=f"{self.field_id}\naligned points\naligned dates: {self.aligned_dates}", xlabel='x - mean [cm]', ylabel='y - mean [cm]', aspect='equal')
fig.savefig(os.path.join(self.plot_dir, f"{self.field_id}_02_aligned"+self.plot_format), dpi=self.plot_dpi, bbox_inches='tight')
fig.clf()
plt.close("all")
del fig, ax
def plot_confidence(
self
):
logger.info(f"{self.name} -> Plot alignment mean confidence.")
fig, ax = plt.subplots()
ax.scatter(100*self.cover_ratios, 100*self.median_conf)
ax.set(xlim=(0,100), ylim=(0,100), title=f"{self.field_id}\n", xlabel='cover ratio [%]', ylabel='median alignment confidence [%]', aspect='equal')
ax.grid()
fig.savefig(os.path.join(self.plot_dir, f"{self.field_id}_03_cr_vs_conf"+self.plot_format), dpi=self.plot_dpi, bbox_inches='tight')
fig.clf()
plt.close("all")
del fig, ax
def run(
self,
point_cloud: np.ndarray,
layers: np.array,
cover_ratios: np.array,
printdates: np.array,
field_id: str,
utm_transforms: List
):
self.field_id = field_id
self.layers = layers
self.printdates = printdates
self.cover_ratios = cover_ratios
uni_layers = np.sort(np.unique(layers))
n_layers = len(self.cover_ratios)
# centralize point clouds
# calculate centroid of all points in UTM coordinates
P_mean = point_cloud.mean(axis=0)
# apply on point cloud
P_c = point_cloud - P_mean
scaF = np.ones(n_layers)
rotA = np.zeros(n_layers)
traV = np.zeros((n_layers, 2))
self.median_conf = np.nan*np.ones(n_layers)
self.P_aligned = P_c.copy()
P_centroid = P_c[layers == uni_layers[0]]
self.P_aligned[layers == uni_layers[0]] = P_centroid
aligned_layers = []
for l in uni_layers:
if l != 0:
X = P_centroid
Y = P_c[layers == l]
# filter points with no neighbours inside max_dist radius
nnX = NearestNeighbors(n_neighbors=1, n_jobs=-1)
nnY = NearestNeighbors(n_neighbors=1, n_jobs=-1)
nnX.fit(X)
nnY.fit(Y)
distXY, _ = nnY.kneighbors(X)
distYX, _ = nnX.kneighbors(Y)
X_filt = X[(distXY <= self.max_centroid_distance_cpd).flatten()]
Y_filt = Y[(distYX <= self.max_centroid_distance_cpd).flatten()]
# Rigid Transformation: T(X) = s*R@X + t
# s: scaling factor
# R: rotation matrix
# t: translation vector
# Myronenko, Song: "Point Set Registration: Coherent Point Drift"
# https://arxiv.org/pdf/0905.2635.pdf
# registration with filtered points
logger.info(f"{self.name} -> Layer {l} of {len(uni_layers)} -> Try to align {len(Y_filt)} of {len(Y)} points to {len(X_filt)} of {len(X)} centroids. Maximum centroid distance: {self.max_centroid_distance_cpd} cm.")
reg = RigidRegistration(X=X_filt, Y=Y_filt) # X = target, Y = source
_, T = reg.register()
self.median_conf[l] = np.median(np.max(reg.P, axis=1))
# if registration was confident (median confidence above 68%) accept, else discard
#if self.median_conf[l] > 0.68:
scaF[l] = T[0]
rotA[l] = np.rad2deg(np.arccos(T[1][0,0]))
traV[l] = T[2]
self.P_aligned[layers == l] = self.transform(Y, T)
aligned_layers.append(l)
logger.info(f"{self.name} -> Layer {l} of {len(uni_layers)} alignable layers aligned. Scaling factor: {scaF[l]}. Rotation angle: {rotA[l]} °. Translation vector: {traV[l]} cm. Median confidence: {100.*self.median_conf[l]:.2f} %")
#else:
# logger.warn(f"{self.name} -> Layer {l} of {len(uni_layers)} has too low median confidence ({100.*self.median_conf[l]:.2f} %). Layer will not be aligned.")
#if l <= self.max_reference_layer:
logger.info(f"{self.name} -> Layer {l} of {len(uni_layers)} -> Group with maximum centroid distance: {self.max_centroid_distance_group} cm.")
_, P_centroid = group_points(self.P_aligned[self.layers <= l],
self.layers[self.layers <= l],
max_dist=self.max_centroid_distance_group)
logger.info(f"{self.name} -> All points aligned.")
self.save(obj=self.P_aligned, name="point_cloud_aligned", type_="pickle")
self.save(obj=P_mean, name="point_cloud_mean", type_="pickle")
self.save(obj=(scaF, rotA, traV, self.median_conf), name="align_transform", type_="pickle")
self.aligned_dates = np.asarray(self.printdates)[aligned_layers].tolist()
if self.plot_result:
makeDirectory(self.plot_dir)
self.plot_aligned()
self.plot_confidence()
gc.collect()
if (self.make_orthoimage):
makeDirectory(self.orthoimage_dir)
logger.info(f"{self.name} -> Export aligned point cloud as KML file.")
kml = simplekml.Kml()
for l in uni_layers:
folder = kml.newfolder(name=self.printdates[l])
for (lon, lat) in np.asarray(utm_transforms[l](*((self.P_aligned[self.layers == l]+P_mean)/100.).T, inverse=True)).T:
folder.newpoint(coords=[(lon, lat)])
kml.save(os.path.join(self.orthoimage_dir, f"{self.field_id}_peaks_aligned.kml"))
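# Toy sketch of the rigid CPD registration used in AlignPoints.run (assumption: pycpd's
# RigidRegistration, as imported above). Y is a rotated and shifted copy of X; the
# recovered parameters (s, R, t) map Y back onto X via s * Y @ R + t, which is exactly
# what AlignPoints.transform applies.
def _example_rigid_cpd():
    rng = np.random.default_rng(2)
    X = rng.uniform(0.0, 100.0, size=(50, 2))              # target point set
    angle = np.deg2rad(3.0)
    R_true = np.array([[np.cos(angle), -np.sin(angle)],
                       [np.sin(angle),  np.cos(angle)]])
    Y = X @ R_true.T + np.array([5.0, -2.0])               # source = transformed target
    reg = RigidRegistration(X=X, Y=Y)
    _, (s, R_est, t_est) = reg.register()
    return s * Y @ R_est + t_est                           # should be close to X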
class AlignCroplines(Task):
def __init__(
self,
plot_result: bool,
plot_dir: str,
plot_format: str,
plot_dpi: int,
plot_cmap: str
):
super().__init__()
self.plot_result = plot_result
self.plot_dir = plot_dir
self.plot_format = plot_format
self.plot_dpi = plot_dpi
self.plot_cmap = plot_cmap
@staticmethod
def rotation2d(
deg: float
) -> np.array:
a = np.deg2rad(deg)
return np.array([[np.cos(a), -np.sin(a)],
[np.sin(a), np.cos(a)]])
def findHoughAnglesNested(
self,
image: np.ndarray,
i_max: int,
steps: int,
bin_tolerance: int
) -> Tuple[np.array, np.array, np.array, np.array, np.array]:
test_angles = np.linspace(-np.pi/2, np.pi/2, steps, endpoint=False)
mean, std = 0, np.pi/2
for i in range(i_max):
logger.info(f"{self.name} -> Iteration {i}/{i_max} -> Perform Hough transform for {steps} angles in [{np.rad2deg(test_angles.min())}, {np.rad2deg(test_angles.max())}]°.")
h, theta, d = hough_line(image, theta=test_angles)
_, angles, dists = hough_line_peaks(h, theta, d)
hist, bins = np.histogram(angles, bins=steps, range=(test_angles.min(), test_angles.max()))
mean = np.mean(angles)
std = np.std(angles, ddof=1)
a_min = bins[np.max((0, np.argmax(hist)-bin_tolerance))]
a_max = bins[np.min((steps, np.argmax(hist)+1+bin_tolerance))]
test_angles = np.linspace(a_min, a_max, steps)
if np.all(np.mean(angles) == angles):
logger.info(f"{self.name} -> Iteration {i}/{i_max} -> Terminate! Best alpha = {np.rad2deg(mean):.4f} °.")
return (angles, dists, h, theta, d)
else:
logger.info(f"{self.name} -> Iteration {i}/{i_max} -> alpha = ({np.rad2deg(mean):.4f} +/- {np.rad2deg(std):.4f}) °.")
logger.info(f"{self.name} -> Best alpha after {i_max} iterations = ({np.rad2deg(mean):.4f} +/- {np.rad2deg(std):.4f}) °.")
return (angles, dists, h, theta, d)
def plot(
self
):
logger.info(f"{self.name} -> Plot cropline rotation.")
fig, axes = plt.subplots(1, 2, figsize=(12, 6))
ax = axes.ravel()
ax[0].imshow(self.hough_img, cmap=self.plot_cmap)
ax[0].set_title('image')
ax[1].imshow(self.hough_img, cmap=self.plot_cmap)
ax[1].set_ylim((self.hough_img.shape[0], 0))
ax[1].set_title('detected lines')
for angle, dist in zip(self.angles, self.dists):
(x0, y0) = dist * np.array([np.cos(angle), np.sin(angle)])
ax[1].axline((x0, y0), slope=np.tan(angle + np.pi/2))
fig.savefig(os.path.join(self.plot_dir, f"{self.field_id}_04_rot_angle"+self.plot_format), dpi=self.plot_dpi, bbox_inches='tight')
fig.clf()
plt.close("all")
del fig, ax
def run(
self,
point_cloud_aligned: np.ndarray,
printdates: np.array,
field_id: str
):
self.field_id = field_id
point_cloud = point_cloud_aligned
self.printdates = printdates
# Hough transform with fixed resolution (cm/px)
res = 1 # cm/px
logger.info(f"{self.name} -> Bin point cloud into image with resolution {res} cm/px.")
self.hough_img, _, _ = np.histogram2d(*point_cloud.T,
bins=[
np.arange(point_cloud[:,0].min(), point_cloud[:,0].max(), res),
np.arange(point_cloud[:,1].min(), point_cloud[:,1].max(), res)
])
# perform iterative Hough line detection with nested intervals method
i_max = 50
steps = 180
bin_tolerance = 2
self.angles, self.dists, self.h, self.theta, self.d = self.findHoughAnglesNested(self.hough_img, i_max, steps, bin_tolerance)
self.alpha_best = np.rad2deg(np.mean(self.angles))
self.alpha_best_std = np.rad2deg(np.std(self.angles, ddof=1))
# median cropline distance
d_cl_median = np.median(np.diff(np.sort(self.dists))) * res # px * (cm/px) = cm
coords_rot = (self.rotation2d(self.alpha_best)@point_cloud.T).T
logger.info(f"{self.name} -> Croplines rotated with best angle: ({self.alpha_best:.4f} +/- {self.alpha_best_std:.4f}) °. Median cropline distance: {d_cl_median:.4f} cm.")
self.save(obj=coords_rot, name="point_cloud_rotated", type_="pickle")
self.save(obj=self.alpha_best, name="rotation_angle", type_="json")
self.save(obj=d_cl_median, name="median_cropline_distance", type_="json")
if self.plot_result:
makeDirectory(self.plot_dir)
self.plot()
gc.collect()
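# Toy sketch of the Hough-based angle estimate in AlignCroplines: points lying on three
# parallel lines are binned into an image and hough_line_peaks recovers one dominant
# angle (and one distance per line). Uses the skimage transform functions imported for
# the task above; the synthetic slope and offsets are illustrative only.
def _example_cropline_angle():
    img = np.zeros((200, 200), dtype=float)
    xs = np.arange(200)
    for b in (40, 100, 160):                               # three parallel "croplines"
        ys = np.clip((0.15 * xs + b).astype(int), 0, 199)
        img[ys, xs] = 1.0
    h, theta, d = hough_line(img, theta=np.linspace(-np.pi / 2, np.pi / 2, 360,
                                                    endpoint=False))
    _, angles, dists = hough_line_peaks(h, theta, d)
    return np.rad2deg(np.mean(angles)), np.sort(dists)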
class FindCroplines(Task):
def __init__(
self,
plot_result: bool,
plot_dir: str,
plot_format: str,
plot_dpi: int
):
super().__init__()
self.plot_result = plot_result
self.plot_dir = plot_dir
self.plot_format = plot_format
self.plot_dpi = plot_dpi
def plot_peaks(
self
):
logger.info(f"{self.name} -> Plot cropline peak positions.")
fig, ax = plt.subplots()
ax.plot(self.y_test, self.incl_points_sum)
ax.scatter(self.y_test[self.peak_pos], self.incl_points_sum[self.peak_pos], s=20, c='r', label=f"{len(self.peak_pos)} peaks")
ax.set(xlabel='position of window center (y-coords of rotated points)', ylabel='points inside window',
xlim=(self.Y.min()-self.scan_window, self.Y.max()+self.scan_window), ylim=(0,None))
ax.legend()
ax.set(title=f"{self.field_id}\ncropline peaks")
fig.savefig(os.path.join(self.plot_dir, f"{self.field_id}_05_cropline_peaks"+self.plot_format), dpi=self.plot_dpi, bbox_inches='tight')
fig.clf()
plt.close("all")
del fig, ax
def plot_croplines(self):
logger.info(f"{self.name} -> Plot rotated points with marked croplines.")
fig, ax = plt.subplots()
ax.scatter(*self.point_cloud.T, s=2, alpha=1, c="C0")
ax.hlines(self.croplines_ypos, xmin = self.point_cloud[:,0].min(), xmax = self.point_cloud[:,0].max(), color='r')
ax.set(title=f"{self.field_id}\nrotated points with croplines", xlabel='x - mean (rotated)', ylabel='y - mean (rotated)', aspect='equal')
fig.savefig(os.path.join(self.plot_dir, f"{self.field_id}_06_croplines"+self.plot_format), dpi=self.plot_dpi, bbox_inches='tight')
fig.clf()
plt.close("all")
del fig, ax
def run(
self,
point_cloud_rotated: np.ndarray,
field_id: str,
median_cropline_distance: float,
px_resolutions: np.ndarray
):
self.field_id = field_id
self.point_cloud = point_cloud_rotated
self.Y = self.point_cloud[:,1]
scan_resolution = 10000 # steps per cropline
self.scan_window = median_cropline_distance / 10
self.scan_precision = median_cropline_distance / scan_resolution
logger.info(f"{self.name} -> Given cropline distance estimate of {median_cropline_distance} cm results in a scan window of {self.scan_window} cm and precision of {self.scan_precision} cm.")
self.y_test = np.arange(self.Y.min()-self.scan_window, self.Y.max()+self.scan_window, self.scan_precision)
incl_points_sum = []
for y_center in self.y_test:
incl_points_sum.append(np.sum((self.Y >= y_center-(self.scan_window/2)) & (self.Y <= y_center+(self.scan_window/2))))
self.incl_points_sum = np.asarray(incl_points_sum)
self.peak_pos = find_peaks(self.incl_points_sum, distance=int(0.75*scan_resolution))[0]
self.croplines_ypos = self.y_test[self.peak_pos]
logger.info(f"{self.name} -> {len(self.croplines_ypos)} croplines found: {self.croplines_ypos}")
self.save(obj=self.croplines_ypos, name="croplines_ypos", type_="pickle")
if self.plot_result:
makeDirectory(self.plot_dir)
self.plot_peaks()
self.plot_croplines()
gc.collect()
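# Toy sketch of the scan in FindCroplines.run: a window slides along the rotated
# y-coordinates, the number of points inside is counted at each position, and the peaks
# of that count are taken as cropline positions. expected_spacing plays the role of the
# median cropline distance estimated above; the window/precision ratios are assumptions.
def _example_cropline_scan(y: np.ndarray, expected_spacing: float) -> np.ndarray:
    window = expected_spacing / 10.0
    precision = expected_spacing / 1000.0
    y_test = np.arange(y.min() - window, y.max() + window, precision)
    counts = np.array([np.sum((y >= c - window / 2) & (y <= c + window / 2))
                       for c in y_test])
    peak_pos = find_peaks(counts, distance=int(0.75 * expected_spacing / precision))[0]
    return y_test[peak_pos]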
class FilterWeed(Task):
def __init__(
self,
threshold_factor: float,
plot_result: bool,
plot_dir: str,
plot_format: str,
plot_dpi: int
):
super().__init__()
self.threshold_factor = threshold_factor
self.plot_result = plot_result
self.plot_dir = plot_dir
self.plot_format = plot_format
self.plot_dpi = plot_dpi
@staticmethod
def find_nearest(
array: np.array,
values: np.array
) -> np.array:
indices = np.abs(np.subtract.outer(array, values)).argmin(axis=0)
return array[indices]
def plot(
self
):
logger.info(f"{self.name} -> Plot point cloud with masked weed.")
fig, ax = plt.subplots()
ax.scatter(*self.point_cloud_aligned_filtered.T, s=5, alpha=1, label="valid")
ax.scatter(*self.point_cloud_aligned[~self.weedmask].T, s=5, alpha=1, color='r', label=f"Weed ({self.weed_percentage:.2f} %)")
ax.set(title=f"{self.field_id}\nmasked weed", xlabel='x - mean [cm]', ylabel='y - mean [cm]', aspect='equal')
ax.legend()
fig.savefig(os.path.join(self.plot_dir, f"{self.field_id}_07_weed_mask"+self.plot_format), dpi=self.plot_dpi, bbox_inches='tight')
fig.clf()
plt.close("all")
del fig, ax
def run(
self,
point_cloud_rotated: np.ndarray,
point_cloud_aligned: np.ndarray,
point_cloud: np.ndarray,
layers: np.array,
croplines_ypos: np.array,
field_id: str
):
self.field_id = field_id
self.point_cloud_aligned = point_cloud_aligned
median_line_distance = np.median(np.diff(croplines_ypos))
next_line_distance = np.abs(point_cloud_rotated[:,1] - self.find_nearest(croplines_ypos, point_cloud_rotated[:,1]))
logger.info(f"{self.name} -> Calculated median seeding line distance: {median_line_distance:.2f} cm. Masking weed with threshold factor {self.threshold_factor}.")
self.weedmask = next_line_distance <= self.threshold_factor*median_line_distance
self.weed_percentage = 100*np.sum(~self.weedmask)/len(point_cloud_aligned)
if self.weed_percentage < 30:
logger.info(f"{self.name} -> {np.sum(~self.weedmask)} points masked as weed ({self.weed_percentage:.2f} %).")
else:
logger.warn(f"{self.name} -> High percentage of points masked as weed ({self.weed_percentage:.2f} %). There might be an error in the analysis.")
self.point_cloud_aligned_filtered, point_cloud_rotated_filtered, point_cloud_filtered, layers_filtered = point_cloud_aligned[self.weedmask], point_cloud_rotated[self.weedmask], point_cloud[self.weedmask], layers[self.weedmask]
self.save(obj=self.weedmask, name="weedmask", type_="pickle")
self.save(obj=self.point_cloud_aligned_filtered, name="point_cloud_aligned_weedfiltered", type_="pickle")
self.save(obj=point_cloud_rotated_filtered, name="point_cloud_rotated_weedfiltered", type_="pickle")
self.save(obj=point_cloud_filtered, name="point_cloud_weedfiltered", type_="pickle")
self.save(obj=layers_filtered, name="layers_weedfiltered", type_="pickle")
if self.plot_result:
makeDirectory(self.plot_dir)
self.plot()
gc.collect()
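# Toy sketch of the weed mask in FilterWeed.run: each point is assigned to its nearest
# cropline, and points farther away than threshold_factor times the median line spacing
# are flagged as weed (mask value False). Inputs are assumed 1-D arrays of rotated
# y-coordinates and cropline positions in cm.
def _example_weed_mask(y_rot: np.ndarray, croplines_y: np.ndarray,
                       threshold_factor: float = 0.2) -> np.ndarray:
    nearest = croplines_y[np.abs(np.subtract.outer(croplines_y, y_rot)).argmin(axis=0)]
    median_spacing = np.median(np.diff(np.sort(croplines_y)))
    return np.abs(y_rot - nearest) <= threshold_factor * median_spacing   # True -> keep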
class GroupPoints(Task):
def __init__(
self,
max_centroid_distance: float
):
super().__init__()
self.max_centroid_distance = max_centroid_distance
def run(
self,
point_cloud_weedfiltered: np.array,
point_cloud_aligned_weedfiltered: np.array,
point_cloud_rotated_weedfiltered: np.array,
layers_weedfiltered: np.array
):
labels, centroids = group_points(point_cloud_aligned_weedfiltered, layers_weedfiltered, max_dist=self.max_centroid_distance)
labels_dist = np.bincount(np.bincount(labels[labels>=0]))[1:]
logger.info(f"{self.name} -> {labels.max()+1} groups found with distribution {labels_dist}, {np.sum(labels==-1)}/{len(labels)} points discarded.")
# filter discarded points out
point_cloud_aligned_weedfiltered = point_cloud_aligned_weedfiltered[labels>=0]
point_cloud_rotated_weedfiltered = point_cloud_rotated_weedfiltered[labels>=0]
point_cloud_weedfiltered = point_cloud_weedfiltered[labels>=0]
layers_weedfiltered = layers_weedfiltered[labels>=0]
labels = labels[labels>=0]
self.save(obj=point_cloud_weedfiltered, name="point_cloud_weedfiltered_grouped", type_="pickle")
self.save(obj=point_cloud_aligned_weedfiltered, name="point_cloud_aligned_weedfiltered_grouped", type_="pickle")
self.save(obj=point_cloud_rotated_weedfiltered, name="point_cloud_rotated_weedfiltered_grouped", type_="pickle")
self.save(obj=labels, name="group_labels", type_="pickle")
self.save(obj=layers_weedfiltered, name="layers_weedfiltered_grouped", type_="pickle")
class SortGroupLabels(Task):
def __init__(
self,
plot_result: bool,
plot_dir: str,
plot_format: str,
plot_dpi: int,
plot_cmap: str
):
super().__init__()
self.plot_result = plot_result
self.plot_dir = plot_dir
self.plot_format = plot_format
self.plot_dpi = plot_dpi
self.plot_cmap = plot_cmap
@staticmethod
def centroid(
points
):
return points.mean(axis=0)
@staticmethod
def find_nearest_index(
array,
values
):
indices = np.abs(np.subtract.outer(array, values)).argmin(axis=0)
return indices
def plot(
self
):
logger.info(f"{self.name} -> Plot sorted and unsorted group labels.")
fig, ax = plt.subplots(1, 2, sharey=True)
ax[0].scatter(self.point_cloud[:,0], self.point_cloud[:,1], s=1, c=self.group_labels, alpha=0.6, cmap=self.plot_cmap)
sc = ax[1].scatter(self.point_cloud[:,0], self.point_cloud[:,1], s=1, c=self.labels_sorted, alpha=0.6, cmap=self.plot_cmap)
cbar = fig.colorbar(sc, ax=ax)
cbar.set_label("group ID")
for a in ax:
a.set(xlabel='x - mean [cm]', aspect='equal')
ax[0].set(ylabel='y - mean [cm]')
fig.suptitle(f"{self.field_id}\nsort group IDs")
fig.savefig(os.path.join(self.plot_dir, f"{self.field_id}_08_sorted_labels"+self.plot_format), dpi=self.plot_dpi, bbox_inches='tight')
fig.clf()
plt.close("all")
del fig, ax
def run(
self,
field_id: str,
point_cloud_rotated_weedfiltered_grouped: np.ndarray,
group_labels: np.array,
croplines_ypos: np.array
):
self.point_cloud = point_cloud_rotated_weedfiltered_grouped
self.group_labels = group_labels
self.field_id = field_id
self.labels_sorted = -1*np.ones_like(group_labels)
group_centroids = np.array([self.centroid(self.point_cloud[group_labels == l]) for l in range(group_labels.max()+1)])
group_cropline_ids = self.find_nearest_index(croplines_ypos, group_centroids[:,1])
group_order = np.lexsort((group_centroids[:,0], group_cropline_ids))
for l_old, l_new in enumerate(group_order):
self.labels_sorted[group_labels == l_new] = l_old
group_cropline_ids_sorted = group_cropline_ids[group_labels]
_, group_sizes = np.unique(self.labels_sorted, return_counts=True)
self.save(obj=self.labels_sorted, name="group_labels_sorted", type_="pickle")
self.save(obj=group_cropline_ids_sorted, name="group_cropline_ids_sorted", type_="pickle")
self.save(obj=group_sizes, name="group_sizes_sorted", type_="pickle")
if self.plot_result:
makeDirectory(self.plot_dir)
self.plot()
gc.collect()
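# Toy sketch of the relabelling in SortGroupLabels.run: group IDs are re-numbered
# cropline by cropline and left to right by lexsorting the group centroids on
# (cropline id, x). The returned array maps an old group label to its sorted label.
def _example_sort_group_ids(centroids: np.ndarray, cropline_ids: np.ndarray) -> np.ndarray:
    order = np.lexsort((centroids[:, 0], cropline_ids))    # old labels in new order
    new_of_old = np.empty(len(order), dtype=int)
    new_of_old[order] = np.arange(len(order))
    return new_of_old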
class SavePlantsDataFrame(Task):
def __init__(
self,
save_dir: str
):
super().__init__()
self.save_dir = save_dir
def run(
self,
field_id: str,
dates: pd.DatetimeIndex,
cover_ratios: np.array,
gps_transforms: List,
px_resolutions: np.array,
utm_transforms: List,
point_cloud_mean: np.ndarray,
align_transform: Tuple[Union[np.array,np.ndarray]],
rotation_angle: float,
layers_weedfiltered_grouped: np.array,
group_sizes_sorted: np.array,
group_cropline_ids_sorted: np.array,
point_cloud_weedfiltered_grouped: np.ndarray,
point_cloud_aligned_weedfiltered_grouped: np.ndarray,
group_labels_sorted: np.array,
croplines_ypos: np.array
):
# back-transform peak position data from cm (UTM) into GPS coordinates
point_cloud_weedfiltered_grouped_gps = np.hstack([utm_transforms[l](*point_cloud_weedfiltered_grouped[layers_weedfiltered_grouped == l].T/100., inverse=True) for l in np.unique(layers_weedfiltered_grouped)]).T
(scaF, rotA, traV, median_conf) = align_transform
align_transform_ = np.vstack((scaF, rotA, traV[:,0], traV[:,1])).T # cm
group_centroids = np.array([point_cloud_aligned_weedfiltered_grouped[group_labels_sorted == l].mean(axis=0) for l in range(group_labels_sorted.max()+1)]) # cm
n_layers = len(dates)
df_meta = pd.DataFrame()
for i in range(len(dates)):
df_meta = df_meta.append(
dict([("field_id" , field_id),
("date" , dates.values[i]),
("cover_ratio" , cover_ratios[i]), # %
("xy_center" , point_cloud_mean), # cm (UTM)
("align_median_confidence" , median_conf[i]), # %
("align_transform" , align_transform_[i]), # cm (UTM)
("gps_transform" , gps_transforms[i]), # px <-> lonlat
("px_resolution" , px_resolutions[i]), # cm/px
("utm_transform" , utm_transforms[i]), # m (UTM) <-> lonlat
("rotation_angle" , rotation_angle)]), ignore_index=True) # degree
df_plants = pd.DataFrame()
for i in range(len(group_labels_sorted)):
df_plants = df_plants.append(
dict([("field_id" , field_id),
("date" , dates.values[layers_weedfiltered_grouped[i]]),
("group_id" , group_labels_sorted[i]),
("group_size" , group_sizes_sorted[group_labels_sorted[i]]),
("group_cropline_id" , group_cropline_ids_sorted[i]),
("xy_cm" , point_cloud_weedfiltered_grouped[i]), # cm (UTM)
("xy_px" , list(rowcol(gps_transforms[np.argmax(dates.values==dates.values[layers_weedfiltered_grouped[i]])], *point_cloud_weedfiltered_grouped_gps[i]))), # px
("lonlat" , point_cloud_weedfiltered_grouped_gps[i]), # lonlat
("xy_centered_aligned_cm" , point_cloud_aligned_weedfiltered_grouped[i]), # cm (UTM)
("xy_centroid_centered_aligned_cm" , group_centroids[group_labels_sorted[i]]), # cm (UTM)
("y_cropline_rotated_cm" , croplines_ypos[group_cropline_ids_sorted[i]]), # cm (UTM)
("centroid_dist_cm" , np.sqrt(np.sum((point_cloud_aligned_weedfiltered_grouped[i]-group_centroids[group_labels_sorted[i]])**2))), # cm (UTM)
("detected" , True)]), ignore_index=True)
logger.info(f"{self.name} -> Detected plants added to DataFrame.")
df_plants = df_plants.append(add_non_detected(df_plants[df_plants["group_size"] < n_layers], df_meta))
df_plants["field_id"] = df_plants["field_id"].astype(str)
df_plants["group_id"] = df_plants["group_id"].astype(int)
df_plants["group_size"] = df_plants["group_size"].astype(int)
df_plants["group_cropline_id"] = df_plants["group_cropline_id"].astype(int)
df_plants["detected"] = df_plants["detected"].astype(bool)
df_plants = df_plants.sort_values(by=["group_id", "date"], ignore_index=True)
ndates = len(df_plants["date"].value_counts())
logger.info(f"{self.name} -> Complemented DataFrame with non-detected plant positions. {ndates}/{len(dates.values)} dates available.")
makeDirectory(self.save_dir)
plants_save_path = os.path.join(self.save_dir, f"{field_id}_plants.pkl")
meta_save_path = os.path.join(self.save_dir, f"{field_id}_meta.pkl")
try:
df_plants.to_pickle(plants_save_path)
logger.info(f"{self.name} -> DataFrame with plants saved at {plants_save_path}.")
df_meta.to_pickle(meta_save_path)
logger.info(f"{self.name} -> DataFrame with metadata saved at {meta_save_path}.")
except:
logger.error(f"{self.name} -> Could not save DataFrames.")
self.save(obj="", name="_dummy", type_="json")
class EvaluateDetectionQuality(Task):
def __init__(
self,
df_dir: str,
image_dir: str,
ground_truth_dir: str,
image_channels: List[str],
max_distance: float,
save_dir: str,
plot_result: bool,
plot_dir: str,
plot_format: str,
plot_dpi: float
):
super().__init__()
self.df_dir = df_dir
self.image_dir = image_dir
self.ground_truth_dir = ground_truth_dir
self.image_channels = np.asarray(image_channels)
self.max_distance = max_distance
self.save_dir = save_dir
self.plot_result = plot_result
self.plot_dir = plot_dir
self.plot_format = plot_format
self.plot_dpi = plot_dpi
def plot(
self
):
logger.info(f"{self.name}-{self.date} -> Plot detections on image.")
fig, ax = plt.subplots(figsize=(self.width/1000, self.height/1000))
ax.imshow(self.img)
if self.kml_filepath != "":
ax.scatter(*self.gtxy.T[::-1], label=f"ground truth ({len(self.gtxy)})", s = 10, color="C0", alpha=0.5, marker="o")
if len(self.pxy_direct) > 0:
ax.scatter(*self.pxy_direct.T[::-1], label=f"direct detection ({len(self.pxy_direct)})", color="C2", s=1)
if len(self.pxy_indirect) > 0:
ax.scatter(*self.pxy_indirect.T[::-1], label=f"indirect detection ({len(self.pxy_indirect)})", color="C3", s=1)
ax.legend()
if self.kml_filepath != "":
ax.set(title = f"{self.field_id}@{self.date}\nRecall = {100 * self.TP/(self.TP+self.FN):.2f} %\nPrecision = {100 * self.TP/(self.TP+self.FP):.2f} %")
fig.savefig(os.path.join(self.plot_dir, f"{self.field_id}_{self.date}_detections_gt"+self.plot_format), dpi=self.plot_dpi, bbox_inches='tight')
else:
ax.set(title = f"{self.field_id}@{self.date}")
fig.savefig(os.path.join(self.plot_dir, f"{self.field_id}_{self.date}_detections"+self.plot_format), dpi=self.plot_dpi, bbox_inches='tight')
fig.clf()
plt.close("all")
del fig, ax
def run(
self,
field_id: str,
dates: pd.DatetimeIndex,
gps_transforms: List,
utm_transforms: List,
px_resolutions: np.array
):
self.field_id = field_id
px_res = np.mean(px_resolutions)
plants_all = pd.read_pickle(os.path.join(self.df_dir, f"{self.field_id}_plants.pkl"))
#meta = pd.read_pickle(os.path.join(self.df_dir, f"{self.field_id}_meta.pkl"))
# filter out all indirect detections before the first direct detection happened
drop_ind = []
sorted_dates = sorted(dates)
for g_id in np.unique(plants_all.group_id):
group = plants_all[plants_all.group_id == g_id]
first_detection_date = sorted_dates[group.detected.argmax()]
drop_ind.extend(group.index[group.date < first_detection_date])
plants = plants_all.drop(drop_ind)
logger.info(f"{self.name} -> Filtered out leading indirect detections: {len(plants)}/{len(plants_all)} ({100.*len(plants)/len(plants_all):.2f} %) remaining.")
del plants_all
results = dict()
# iterate over all dates
for date, utm_transform, gps_transform in zip(dates, utm_transforms, gps_transforms):
self.date = date._date_repr
filedate = self.date.replace("-","")
logger.info(f"{self.name}-{self.date} -> Calculate detection quality.")
# retrieve image and shape file, if available
kml_filepath = list(glob(f"{self.ground_truth_dir}/{field_id}_{filedate}*.kml"))
tif_filepath = list(glob(f"{self.image_dir}/{field_id}_{filedate}*.tif"))[0]
if len(kml_filepath) > 1:
logger.warn(f"{self.name}-{self.date} -> Multiple ground truth shape files found for image {os.path.basename(tif_filepath)}. ",
f"Take first one in list: {os.path.basename(kml_filepath[0])}.")
self.kml_filepath = kml_filepath[0]
elif len(kml_filepath) == 1:
logger.info(f"{self.name}-{self.date} -> Ground truth shape file found.")
self.kml_filepath = kml_filepath[0]
else:
logger.warn(f"{self.name}-{self.date} -> No ground truth shape file found.")
self.kml_filepath = ""
# if ground truth data available, load positions
if self.kml_filepath != "":
try:
gtlatlon = readCoordsFromKml(self.kml_filepath)
gtutm = np.asarray(utm_transform(*gtlatlon.T)).T
self.gtxy = np.asarray(rowcol(gps_transform, xs=gtlatlon[:,0], ys=gtlatlon[:,1], op=lambda x: x)).T
except Exception as e:
logger.warn(f"{self.name}-{self.date} -> Could not load shape file. Error: {e}. Continue without ground truth data.")
gtutm = []
self.gtxy = []
self.kml_filepath = ""
else:
gtutm = []
self.gtxy = []
# load indirect and (if available direct) detections
try:
self.pxy_indirect = np.vstack(plants[(plants["field_id"] == field_id) & (plants["date"] == date) & (plants["detected"]==False)]["xy_px"].values)
plonlat_indirect = np.vstack(plants[(plants["field_id"] == field_id) & (plants["date"] == date) & (plants["detected"]==False)]["lonlat"].values)
except:
self.pxy_indirect = []
plonlat_indirect = []
try:
self.pxy_direct = np.vstack(plants[(plants["field_id"] == field_id) & (plants["date"] == date) & (plants["detected"]==True)]["xy_px"].values)
plonlat_direct = np.vstack(plants[(plants["field_id"] == field_id) & (plants["date"] == date) & (plants["detected"]==True)]["lonlat"].values)
except:
self.pxy_direct = []
plonlat_direct = []
if (len(plonlat_indirect) > 0) and (len(plonlat_direct) > 0):
plonlat = np.vstack((plonlat_indirect, plonlat_direct))
elif len(plonlat_indirect) > 0:
plonlat = plonlat_indirect
else:
plonlat = plonlat_direct
pxy_utm = np.asarray(utm_transform(*plonlat.T)).T
# initalize results dictionary
results[self.date] = {
"true_positive": np.nan,
"false_positive": np.nan,
"false_negative": np.nan
}
# connect detection with ground truth and extract true/false positives and false negatives
if self.kml_filepath != "":
logger.info(f"{self.name}-{self.date} -> Compare detections with ground truth plant positions (max. tolerance radius: {self.max_distance} cm.")
nn = NearestNeighbors(n_neighbors=1).fit(gtutm)
dist, ind = map(lambda x: x.flatten(), nn.kneighbors(pxy_utm))
self.TP, self.FP, self.FN = 0, 0, 0
for i in range(len(gtutm)):
i_dist = dist[ind == i]
in_radius = i_dist <= self.max_distance/100.
if np.sum(in_radius) > 0:
self.TP += 1
self.FP += len(i_dist) - 1
else:
self.FN += 1
self.FP += len(i_dist)
results[self.date]["true_positive"] = self.TP
results[self.date]["false_positive"] = self.FP
results[self.date]["false_negative"] = self.FN
if self.plot_result:
self.img = read_raster(tif_filepath, self.image_channels, ["R", "G", "B"])
self.img /= np.nanmax(self.img)
self.height, self.width, n_channels = self.img.shape
makeDirectory(self.plot_dir)
self.plot()
del self.img
gc.collect()
# write results to a DataFrame
logger.info(f"{self.name} -> Write results to DataFrame.")
quality_df = pd.DataFrame()
for date, values in results.items():
quality_df = quality_df.append(
dict([("field_id" , self.field_id),
("date" , date),
("true_positive" , values["true_positive"]),
("false_positive" , values["false_positive"]),
("false_negative" , values["false_negative"])]), ignore_index=True)
quality_df["precision"] = 100 * quality_df["true_positive"]/(quality_df["true_positive"]+quality_df["false_positive"])
quality_df["recall"] = 100 * quality_df["true_positive"]/(quality_df["true_positive"]+quality_df["false_negative"])
quality_df["true_positive"] = quality_df["true_positive"].apply(lambda x: int(x) if
|
pd.notna(x)
|
pandas.notna
|
# -*- coding: utf-8 -*-
from collections import OrderedDict
from datetime import date, datetime, timedelta
import numpy as np
import pytest
from pandas.compat import product, range
import pandas as pd
from pandas import (
Categorical, DataFrame, Grouper, Index, MultiIndex, Series, concat,
date_range)
from pandas.api.types import CategoricalDtype as CDT
from pandas.core.reshape.pivot import crosstab, pivot_table
import pandas.util.testing as tm
@pytest.fixture(params=[True, False])
def dropna(request):
return request.param
class TestPivotTable(object):
def setup_method(self, method):
self.data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
def test_pivot_table(self):
index = ['A', 'B']
columns = 'C'
table = pivot_table(self.data, values='D',
index=index, columns=columns)
table2 = self.data.pivot_table(
values='D', index=index, columns=columns)
tm.assert_frame_equal(table, table2)
# this works
pivot_table(self.data, values='D', index=index)
if len(index) > 1:
assert table.index.names == tuple(index)
else:
assert table.index.name == index[0]
if len(columns) > 1:
assert table.columns.names == columns
else:
assert table.columns.name == columns[0]
expected = self.data.groupby(
index + [columns])['D'].agg(np.mean).unstack()
tm.assert_frame_equal(table, expected)
def test_pivot_table_nocols(self):
df = DataFrame({'rows': ['a', 'b', 'c'],
'cols': ['x', 'y', 'z'],
'values': [1, 2, 3]})
rs = df.pivot_table(columns='cols', aggfunc=np.sum)
xp = df.pivot_table(index='cols', aggfunc=np.sum).T
tm.assert_frame_equal(rs, xp)
rs = df.pivot_table(columns='cols', aggfunc={'values': 'mean'})
xp = df.pivot_table(index='cols', aggfunc={'values': 'mean'}).T
tm.assert_frame_equal(rs, xp)
def test_pivot_table_dropna(self):
df = DataFrame({'amount': {0: 60000, 1: 100000, 2: 50000, 3: 30000},
'customer': {0: 'A', 1: 'A', 2: 'B', 3: 'C'},
'month': {0: 201307, 1: 201309, 2: 201308, 3: 201310},
'product': {0: 'a', 1: 'b', 2: 'c', 3: 'd'},
'quantity': {0: 2000000, 1: 500000,
2: 1000000, 3: 1000000}})
pv_col = df.pivot_table('quantity', 'month', [
'customer', 'product'], dropna=False)
pv_ind = df.pivot_table(
'quantity', ['customer', 'product'], 'month', dropna=False)
m = MultiIndex.from_tuples([('A', 'a'), ('A', 'b'), ('A', 'c'),
('A', 'd'), ('B', 'a'), ('B', 'b'),
('B', 'c'), ('B', 'd'), ('C', 'a'),
('C', 'b'), ('C', 'c'), ('C', 'd')],
names=['customer', 'product'])
tm.assert_index_equal(pv_col.columns, m)
tm.assert_index_equal(pv_ind.index, m)
def test_pivot_table_categorical(self):
cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
result = pd.pivot_table(df, values='values', index=['A', 'B'],
dropna=True)
exp_index = pd.MultiIndex.from_arrays(
[cat1, cat2],
names=['A', 'B'])
expected = DataFrame(
{'values': [1, 2, 3, 4]},
index=exp_index)
tm.assert_frame_equal(result, expected)
def test_pivot_table_dropna_categoricals(self, dropna):
# GH 15193
categories = ['a', 'b', 'c', 'd']
df = DataFrame({'A': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c'],
'B': [1, 2, 3, 1, 2, 3, 1, 2, 3],
'C': range(0, 9)})
df['A'] = df['A'].astype(CDT(categories, ordered=False))
result = df.pivot_table(index='B', columns='A', values='C',
dropna=dropna)
expected_columns = Series(['a', 'b', 'c'], name='A')
expected_columns = expected_columns.astype(
CDT(categories, ordered=False))
expected_index = Series([1, 2, 3], name='B')
expected = DataFrame([[0, 3, 6],
[1, 4, 7],
[2, 5, 8]],
index=expected_index,
columns=expected_columns,)
if not dropna:
# add back the non observed to compare
expected = expected.reindex(
columns=Categorical(categories)).astype('float')
tm.assert_frame_equal(result, expected)
def test_pivot_with_non_observable_dropna(self, dropna):
# gh-21133
df = pd.DataFrame(
{'A': pd.Categorical([np.nan, 'low', 'high', 'low', 'high'],
categories=['low', 'high'],
ordered=True),
'B': range(5)})
result = df.pivot_table(index='A', values='B', dropna=dropna)
expected = pd.DataFrame(
{'B': [2, 3]},
index=pd.Index(
pd.Categorical.from_codes([0, 1],
categories=['low', 'high'],
ordered=True),
name='A'))
tm.assert_frame_equal(result, expected)
# gh-21378
df = pd.DataFrame(
{'A': pd.Categorical(['left', 'low', 'high', 'low', 'high'],
categories=['low', 'high', 'left'],
ordered=True),
'B': range(5)})
result = df.pivot_table(index='A', values='B', dropna=dropna)
expected = pd.DataFrame(
{'B': [2, 3, 0]},
index=pd.Index(
pd.Categorical.from_codes([0, 1, 2],
categories=['low', 'high', 'left'],
ordered=True),
name='A'))
tm.assert_frame_equal(result, expected)
def test_pass_array(self):
result = self.data.pivot_table(
'D', index=self.data.A, columns=self.data.C)
expected = self.data.pivot_table('D', index='A', columns='C')
tm.assert_frame_equal(result, expected)
def test_pass_function(self):
result = self.data.pivot_table('D', index=lambda x: x // 5,
columns=self.data.C)
expected = self.data.pivot_table('D', index=self.data.index // 5,
columns='C')
tm.assert_frame_equal(result, expected)
def test_pivot_table_multiple(self):
index = ['A', 'B']
columns = 'C'
table = pivot_table(self.data, index=index, columns=columns)
expected = self.data.groupby(index + [columns]).agg(np.mean).unstack()
tm.assert_frame_equal(table, expected)
def test_pivot_dtypes(self):
# can convert dtypes
f = DataFrame({'a': ['cat', 'bat', 'cat', 'bat'], 'v': [
1, 2, 3, 4], 'i': ['a', 'b', 'a', 'b']})
assert f.dtypes['v'] == 'int64'
z = pivot_table(f, values='v', index=['a'], columns=[
'i'], fill_value=0, aggfunc=np.sum)
result = z.get_dtype_counts()
expected = Series(dict(int64=2))
tm.assert_series_equal(result, expected)
# cannot convert dtypes
f = DataFrame({'a': ['cat', 'bat', 'cat', 'bat'], 'v': [
1.5, 2.5, 3.5, 4.5], 'i': ['a', 'b', 'a', 'b']})
assert f.dtypes['v'] == 'float64'
z = pivot_table(f, values='v', index=['a'], columns=[
'i'], fill_value=0, aggfunc=np.mean)
result = z.get_dtype_counts()
expected = Series(dict(float64=2))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('columns,values',
[('bool1', ['float1', 'float2']),
('bool1', ['float1', 'float2', 'bool1']),
('bool2', ['float1', 'float2', 'bool1'])])
def test_pivot_preserve_dtypes(self, columns, values):
# GH 7142 regression test
v = np.arange(5, dtype=np.float64)
df = DataFrame({'float1': v, 'float2': v + 2.0,
'bool1': v <= 2, 'bool2': v <= 3})
df_res = df.reset_index().pivot_table(
index='index', columns=columns, values=values)
result = dict(df_res.dtypes)
expected = {col: np.dtype('O') if col[0].startswith('b')
else np.dtype('float64') for col in df_res}
assert result == expected
def test_pivot_no_values(self):
# GH 14380
idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-01-02',
'2011-01-01', '2011-01-02'])
df = pd.DataFrame({'A': [1, 2, 3, 4, 5]},
index=idx)
res = df.pivot_table(index=df.index.month, columns=df.index.day)
exp_columns = pd.MultiIndex.from_tuples([('A', 1), ('A', 2)])
exp = pd.DataFrame([[2.5, 4.0], [2.0, np.nan]],
index=[1, 2], columns=exp_columns)
tm.assert_frame_equal(res, exp)
df = pd.DataFrame({'A': [1, 2, 3, 4, 5],
'dt': pd.date_range('2011-01-01', freq='D',
periods=5)},
index=idx)
res = df.pivot_table(index=df.index.month,
columns=pd.Grouper(key='dt', freq='M'))
exp_columns = pd.MultiIndex.from_tuples([('A',
pd.Timestamp('2011-01-31'))])
exp_columns.names = [None, 'dt']
exp = pd.DataFrame([3.25, 2.0],
index=[1, 2], columns=exp_columns)
tm.assert_frame_equal(res, exp)
res = df.pivot_table(index=pd.Grouper(freq='A'),
columns=pd.Grouper(key='dt', freq='M'))
exp = pd.DataFrame([3],
index=pd.DatetimeIndex(['2011-12-31']),
columns=exp_columns)
tm.assert_frame_equal(res, exp)
def test_pivot_multi_values(self):
result = pivot_table(self.data, values=['D', 'E'],
index='A', columns=['B', 'C'], fill_value=0)
expected = pivot_table(self.data.drop(['F'], axis=1),
index='A', columns=['B', 'C'], fill_value=0)
tm.assert_frame_equal(result, expected)
def test_pivot_multi_functions(self):
f = lambda func: pivot_table(self.data, values=['D', 'E'],
index=['A', 'B'], columns='C',
aggfunc=func)
result = f([np.mean, np.std])
means = f(np.mean)
stds = f(np.std)
expected = concat([means, stds], keys=['mean', 'std'], axis=1)
tm.assert_frame_equal(result, expected)
# margins not supported??
f = lambda func: pivot_table(self.data, values=['D', 'E'],
index=['A', 'B'], columns='C',
aggfunc=func, margins=True)
result = f([np.mean, np.std])
means = f(np.mean)
stds = f(np.std)
expected = concat([means, stds], keys=['mean', 'std'], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_index_with_nan(self, method):
# GH 3588
nan = np.nan
df = DataFrame({'a': ['R1', 'R2', nan, 'R4'],
'b': ['C1', 'C2', 'C3', 'C4'],
'c': [10, 15, 17, 20]})
if method:
result = df.pivot('a', 'b', 'c')
else:
result = pd.pivot(df, 'a', 'b', 'c')
expected = DataFrame([[nan, nan, 17, nan], [10, nan, nan, nan],
[nan, 15, nan, nan], [nan, nan, nan, 20]],
index=Index([nan, 'R1', 'R2', 'R4'], name='a'),
columns=Index(['C1', 'C2', 'C3', 'C4'], name='b'))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(df.pivot('b', 'a', 'c'), expected.T)
# GH9491
df = DataFrame({'a': pd.date_range('2014-02-01', periods=6, freq='D'),
'c': 100 + np.arange(6)})
df['b'] = df['a'] - pd.Timestamp('2014-02-02')
df.loc[1, 'a'] = df.loc[3, 'a'] = nan
df.loc[1, 'b'] = df.loc[4, 'b'] = nan
if method:
pv = df.pivot('a', 'b', 'c')
else:
pv = pd.pivot(df, 'a', 'b', 'c')
assert pv.notna().values.sum() == len(df)
for _, row in df.iterrows():
assert pv.loc[row['a'], row['b']] == row['c']
if method:
result = df.pivot('b', 'a', 'c')
else:
result = pd.pivot(df, 'b', 'a', 'c')
tm.assert_frame_equal(result, pv.T)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_tz(self, method):
# GH 5878
df = DataFrame({'dt1': [datetime(2013, 1, 1, 9, 0),
datetime(2013, 1, 2, 9, 0),
datetime(2013, 1, 1, 9, 0),
datetime(2013, 1, 2, 9, 0)],
'dt2': [datetime(2014, 1, 1, 9, 0),
datetime(2014, 1, 1, 9, 0),
datetime(2014, 1, 2, 9, 0),
datetime(2014, 1, 2, 9, 0)],
'data1': np.arange(4, dtype='int64'),
'data2': np.arange(4, dtype='int64')})
df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d, tz='US/Pacific'))
df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d, tz='Asia/Tokyo'))
exp_col1 = Index(['data1', 'data1', 'data2', 'data2'])
exp_col2 = pd.DatetimeIndex(['2014/01/01 09:00',
'2014/01/02 09:00'] * 2,
name='dt2', tz='Asia/Tokyo')
exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 2, 0, 2], [1, 3, 1, 3]],
index=pd.DatetimeIndex(['2013/01/01 09:00',
'2013/01/02 09:00'],
name='dt1',
tz='US/Pacific'),
columns=exp_col)
if method:
pv = df.pivot(index='dt1', columns='dt2')
else:
pv = pd.pivot(df, index='dt1', columns='dt2')
tm.assert_frame_equal(pv, expected)
expected = DataFrame([[0, 2], [1, 3]],
index=pd.DatetimeIndex(['2013/01/01 09:00',
'2013/01/02 09:00'],
name='dt1',
tz='US/Pacific'),
columns=pd.DatetimeIndex(['2014/01/01 09:00',
'2014/01/02 09:00'],
name='dt2',
tz='Asia/Tokyo'))
if method:
pv = df.pivot(index='dt1', columns='dt2', values='data1')
else:
pv = pd.pivot(df, index='dt1', columns='dt2', values='data1')
tm.assert_frame_equal(pv, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_periods(self, method):
df = DataFrame({'p1': [pd.Period('2013-01-01', 'D'),
pd.Period('2013-01-02', 'D'),
pd.Period('2013-01-01', 'D'),
pd.Period('2013-01-02', 'D')],
'p2': [pd.Period('2013-01', 'M'),
pd.Period('2013-01', 'M'),
pd.Period('2013-02', 'M'),
pd.Period('2013-02', 'M')],
'data1': np.arange(4, dtype='int64'),
'data2': np.arange(4, dtype='int64')})
exp_col1 = Index(['data1', 'data1', 'data2', 'data2'])
exp_col2 = pd.PeriodIndex(['2013-01', '2013-02'] * 2,
name='p2', freq='M')
exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 2, 0, 2], [1, 3, 1, 3]],
index=pd.PeriodIndex(['2013-01-01', '2013-01-02'],
name='p1', freq='D'),
columns=exp_col)
if method:
pv = df.pivot(index='p1', columns='p2')
else:
pv = pd.pivot(df, index='p1', columns='p2')
tm.assert_frame_equal(pv, expected)
expected = DataFrame([[0, 2], [1, 3]],
index=pd.PeriodIndex(['2013-01-01', '2013-01-02'],
name='p1', freq='D'),
columns=pd.PeriodIndex(['2013-01', '2013-02'],
name='p2', freq='M'))
if method:
pv = df.pivot(index='p1', columns='p2', values='data1')
else:
pv = pd.pivot(df, index='p1', columns='p2', values='data1')
tm.assert_frame_equal(pv, expected)
@pytest.mark.parametrize('values', [
['baz', 'zoo'], np.array(['baz', 'zoo']),
pd.Series(['baz', 'zoo']), pd.Index(['baz', 'zoo'])
])
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_list_like_values(self, values, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
if method:
result = df.pivot(index='foo', columns='bar', values=values)
else:
result = pd.pivot(df, index='foo', columns='bar', values=values)
data = [[1, 2, 3, 'x', 'y', 'z'],
[4, 5, 6, 'q', 'w', 't']]
index = Index(data=['one', 'two'], name='foo')
columns = MultiIndex(levels=[['baz', 'zoo'], ['A', 'B', 'C']],
codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],
names=[None, 'bar'])
expected = DataFrame(data=data, index=index,
columns=columns, dtype='object')
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('values', [
['bar', 'baz'], np.array(['bar', 'baz']),
pd.Series(['bar', 'baz']), pd.Index(['bar', 'baz'])
])
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_list_like_values_nans(self, values, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
if method:
result = df.pivot(index='zoo', columns='foo', values=values)
else:
result = pd.pivot(df, index='zoo', columns='foo', values=values)
data = [[np.nan, 'A', np.nan, 4],
[np.nan, 'C', np.nan, 6],
[np.nan, 'B', np.nan, 5],
['A', np.nan, 1, np.nan],
['B', np.nan, 2, np.nan],
['C', np.nan, 3, np.nan]]
index = Index(data=['q', 't', 'w', 'x', 'y', 'z'], name='zoo')
columns = MultiIndex(levels=[['bar', 'baz'], ['one', 'two']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[None, 'foo'])
expected = DataFrame(data=data, index=index,
columns=columns, dtype='object')
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(reason='MultiIndexed unstack with tuple names fails '
'with KeyError GH#19966')
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_multiindex(self, method):
# issue #17160
index = Index(data=[0, 1, 2, 3, 4, 5])
data = [['one', 'A', 1, 'x'],
['one', 'B', 2, 'y'],
['one', 'C', 3, 'z'],
['two', 'A', 4, 'q'],
['two', 'B', 5, 'w'],
['two', 'C', 6, 't']]
columns = MultiIndex(levels=[['bar', 'baz'], ['first', 'second']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
df = DataFrame(data=data, index=index, columns=columns, dtype='object')
if method:
result = df.pivot(index=('bar', 'first'),
columns=('bar', 'second'),
values=('baz', 'first'))
else:
result = pd.pivot(df,
index=('bar', 'first'),
columns=('bar', 'second'),
values=('baz', 'first'))
data = {'A': Series([1, 4], index=['one', 'two']),
'B': Series([2, 5], index=['one', 'two']),
'C': Series([3, 6], index=['one', 'two'])}
expected = DataFrame(data)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_tuple_of_values(self, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
with pytest.raises(KeyError):
# tuple is seen as a single column name
if method:
df.pivot(index='zoo', columns='foo', values=('bar', 'baz'))
else:
pd.pivot(df, index='zoo', columns='foo', values=('bar', 'baz'))
def test_margins(self):
def _check_output(result, values_col, index=['A', 'B'],
columns=['C'],
margins_col='All'):
col_margins = result.loc[result.index[:-1], margins_col]
expected_col_margins = self.data.groupby(index)[values_col].mean()
tm.assert_series_equal(col_margins, expected_col_margins,
check_names=False)
assert col_margins.name == margins_col
result = result.sort_index()
index_margins = result.loc[(margins_col, '')].iloc[:-1]
expected_ix_margins = self.data.groupby(columns)[values_col].mean()
tm.assert_series_equal(index_margins, expected_ix_margins,
check_names=False)
assert index_margins.name == (margins_col, '')
grand_total_margins = result.loc[(margins_col, ''), margins_col]
expected_total_margins = self.data[values_col].mean()
assert grand_total_margins == expected_total_margins
# column specified
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C',
margins=True, aggfunc=np.mean)
_check_output(result, 'D')
# Set a different margins_name (not 'All')
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C',
margins=True, aggfunc=np.mean,
margins_name='Totals')
_check_output(result, 'D', margins_col='Totals')
# no column specified
table = self.data.pivot_table(index=['A', 'B'], columns='C',
margins=True, aggfunc=np.mean)
for value_col in table.columns.levels[0]:
_check_output(table[value_col], value_col)
# no col
# to help with a buglet
self.data.columns = [k * 2 for k in self.data.columns]
table = self.data.pivot_table(index=['AA', 'BB'], margins=True,
aggfunc=np.mean)
for value_col in table.columns:
totals = table.loc[('All', ''), value_col]
assert totals == self.data[value_col].mean()
# no rows
rtable = self.data.pivot_table(columns=['AA', 'BB'], margins=True,
aggfunc=np.mean)
assert isinstance(rtable, Series)
table = self.data.pivot_table(index=['AA', 'BB'], margins=True,
aggfunc='mean')
for item in ['DD', 'EE', 'FF']:
totals = table.loc[('All', ''), item]
assert totals == self.data[item].mean()
def test_margins_dtype(self):
# GH 17013
df = self.data.copy()
df[['D', 'E', 'F']] = np.arange(len(df) * 3).reshape(len(df), 3)
mi_val = list(product(['bar', 'foo'], ['one', 'two'])) + [('All', '')]
mi = MultiIndex.from_tuples(mi_val, names=('A', 'B'))
expected = DataFrame({'dull': [12, 21, 3, 9, 45],
'shiny': [33, 0, 36, 51, 120]},
index=mi).rename_axis('C', axis=1)
expected['All'] = expected['dull'] + expected['shiny']
result = df.pivot_table(values='D', index=['A', 'B'],
columns='C', margins=True,
aggfunc=np.sum, fill_value=0)
tm.assert_frame_equal(expected, result)
@pytest.mark.xfail(reason='GH#17035 (len of floats is cast back to '
'floats)')
def test_margins_dtype_len(self):
mi_val = list(product(['bar', 'foo'], ['one', 'two'])) + [('All', '')]
mi = MultiIndex.from_tuples(mi_val, names=('A', 'B'))
expected = DataFrame({'dull': [1, 1, 2, 1, 5],
'shiny': [2, 0, 2, 2, 6]},
index=mi).rename_axis('C', axis=1)
expected['All'] = expected['dull'] + expected['shiny']
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C', margins=True,
aggfunc=len, fill_value=0)
tm.assert_frame_equal(expected, result)
def test_pivot_integer_columns(self):
# caused by upstream bug in unstack
d = date.min
data = list(product(['foo', 'bar'], ['A', 'B', 'C'], ['x1', 'x2'],
[d + timedelta(i)
for i in range(20)], [1.0]))
df = DataFrame(data)
table = df.pivot_table(values=4, index=[0, 1, 3], columns=[2])
df2 = df.rename(columns=str)
table2 = df2.pivot_table(
values='4', index=['0', '1', '3'], columns=['2'])
tm.assert_frame_equal(table, table2, check_names=False)
def test_pivot_no_level_overlap(self):
# GH #1181
data = DataFrame({'a': ['a', 'a', 'a', 'a', 'b', 'b', 'b', 'b'] * 2,
'b': [0, 0, 0, 0, 1, 1, 1, 1] * 2,
'c': (['foo'] * 4 + ['bar'] * 4) * 2,
'value': np.random.randn(16)})
table = data.pivot_table('value', index='a', columns=['b', 'c'])
grouped = data.groupby(['a', 'b', 'c'])['value'].mean()
expected = grouped.unstack('b').unstack('c').dropna(axis=1, how='all')
tm.assert_frame_equal(table, expected)
def test_pivot_columns_lexsorted(self):
n = 10000
dtype = np.dtype([
("Index", object),
("Symbol", object),
("Year", int),
("Month", int),
("Day", int),
("Quantity", int),
("Price", float),
])
products = np.array([
('SP500', 'ADBE'),
('SP500', 'NVDA'),
('SP500', 'ORCL'),
('NDQ100', 'AAPL'),
('NDQ100', 'MSFT'),
('NDQ100', 'GOOG'),
('FTSE', 'DGE.L'),
('FTSE', 'TSCO.L'),
('FTSE', 'GSK.L'),
], dtype=[('Index', object), ('Symbol', object)])
items = np.empty(n, dtype=dtype)
iproduct = np.random.randint(0, len(products), n)
items['Index'] = products['Index'][iproduct]
items['Symbol'] = products['Symbol'][iproduct]
dr = pd.date_range(date(2000, 1, 1),
date(2010, 12, 31))
dates = dr[np.random.randint(0, len(dr), n)]
items['Year'] = dates.year
items['Month'] = dates.month
items['Day'] = dates.day
items['Price'] = np.random.lognormal(4.0, 2.0, n)
df = DataFrame(items)
pivoted = df.pivot_table('Price', index=['Month', 'Day'],
columns=['Index', 'Symbol', 'Year'],
aggfunc='mean')
assert pivoted.columns.is_monotonic
def test_pivot_complex_aggfunc(self):
f = OrderedDict([('D', ['std']), ('E', ['sum'])])
expected = self.data.groupby(['A', 'B']).agg(f).unstack('B')
result = self.data.pivot_table(index='A', columns='B', aggfunc=f)
tm.assert_frame_equal(result, expected)
def test_margins_no_values_no_cols(self):
# Regression test on pivot table: no values or cols passed.
result = self.data[['A', 'B']].pivot_table(
index=['A', 'B'], aggfunc=len, margins=True)
result_list = result.tolist()
assert sum(result_list[:-1]) == result_list[-1]
def test_margins_no_values_two_rows(self):
# Regression test on pivot table: no values passed but rows are a
# multi-index
result = self.data[['A', 'B', 'C']].pivot_table(
index=['A', 'B'], columns='C', aggfunc=len, margins=True)
assert result.All.tolist() == [3.0, 1.0, 4.0, 3.0, 11.0]
def test_margins_no_values_one_row_one_col(self):
# Regression test on pivot table: no values passed but row and col
# defined
result = self.data[['A', 'B']].pivot_table(
index='A', columns='B', aggfunc=len, margins=True)
assert result.All.tolist() == [4.0, 7.0, 11.0]
def test_margins_no_values_two_row_two_cols(self):
# Regression test on pivot table: no values passed but rows and cols
# are multi-indexed
self.data['D'] = ['a', 'b', 'c', 'd',
'e', 'f', 'g', 'h', 'i', 'j', 'k']
result = self.data[['A', 'B', 'C', 'D']].pivot_table(
index=['A', 'B'], columns=['C', 'D'], aggfunc=len, margins=True)
assert result.All.tolist() == [3.0, 1.0, 4.0, 3.0, 11.0]
def test_pivot_table_with_margins_set_margin_name(self):
# see gh-3335
for margin_name in ['foo', 'one', 666, None, ['a', 'b']]:
with pytest.raises(ValueError):
# multi-index index
pivot_table(self.data, values='D', index=['A', 'B'],
columns=['C'], margins=True,
margins_name=margin_name)
with pytest.raises(ValueError):
# multi-index column
pivot_table(self.data, values='D', index=['C'],
columns=['A', 'B'], margins=True,
margins_name=margin_name)
with pytest.raises(ValueError):
# non-multi-index index/column
pivot_table(self.data, values='D', index=['A'],
columns=['B'], margins=True,
margins_name=margin_name)
def test_pivot_timegrouper(self):
df = DataFrame({
'Branch': 'A A A A A A A B'.split(),
'Buyer': '<NAME> <NAME>'.split(),
'Quantity': [1, 3, 5, 1, 8, 1, 9, 3],
'Date': [datetime(2013, 1, 1),
datetime(2013, 1, 1),
datetime(2013, 10, 1),
datetime(2013, 10, 2),
datetime(2013, 10, 1),
datetime(2013, 10, 2),
datetime(2013, 12, 2),
datetime(2013, 12, 2), ]}).set_index('Date')
expected = DataFrame(np.array([10, 18, 3], dtype='int64')
.reshape(1, 3),
index=[datetime(2013, 12, 31)],
columns='<NAME>'.split())
expected.index.name = 'Date'
expected.columns.name = 'Buyer'
result = pivot_table(df, index=Grouper(freq='A'), columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer', columns=Grouper(freq='A'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
expected = DataFrame(np.array([1, np.nan, 3, 9, 18, np.nan])
.reshape(2, 3),
index=[datetime(2013, 1, 1),
datetime(2013, 7, 1)],
columns='<NAME>'.split())
expected.index.name = 'Date'
expected.columns.name = 'Buyer'
result = pivot_table(df, index=Grouper(freq='6MS'), columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer', columns=Grouper(freq='6MS'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
# passing the name
df = df.reset_index()
result = pivot_table(df, index=Grouper(freq='6MS', key='Date'),
columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer',
columns=Grouper(freq='6MS', key='Date'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
pytest.raises(KeyError, lambda: pivot_table(
df, index=Grouper(freq='6MS', key='foo'),
columns='Buyer', values='Quantity', aggfunc=np.sum))
pytest.raises(KeyError, lambda: pivot_table(
df, index='Buyer',
columns=Grouper(freq='6MS', key='foo'),
values='Quantity', aggfunc=np.sum))
# passing the level
df = df.set_index('Date')
result = pivot_table(df, index=Grouper(freq='6MS', level='Date'),
columns='Buyer', values='Quantity',
aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer',
columns=Grouper(freq='6MS', level='Date'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
pytest.raises(ValueError, lambda: pivot_table(
df, index=Grouper(freq='6MS', level='foo'),
columns='Buyer', values='Quantity', aggfunc=np.sum))
pytest.raises(ValueError, lambda: pivot_table(
df, index='Buyer',
columns=Grouper(freq='6MS', level='foo'),
values='Quantity', aggfunc=np.sum))
# double grouper
df = DataFrame({
'Branch': 'A A A A A A A B'.split(),
'Buyer': '<NAME>'.split(),
'Quantity': [1, 3, 5, 1, 8, 1, 9, 3],
'Date': [datetime(2013, 11, 1, 13, 0), datetime(2013, 9, 1, 13, 5),
datetime(2013, 10, 1, 20, 0),
datetime(2013, 10, 2, 10, 0),
datetime(2013, 11, 1, 20, 0),
datetime(2013, 10, 2, 10, 0),
datetime(2013, 10, 2, 12, 0),
datetime(2013, 12, 5, 14, 0)],
'PayDay': [datetime(2013, 10, 4, 0, 0),
datetime(2013, 10, 15, 13, 5),
datetime(2013, 9, 5, 20, 0),
datetime(2013, 11, 2, 10, 0),
datetime(2013, 10, 7, 20, 0),
datetime(2013, 9, 5, 10, 0),
datetime(2013, 12, 30, 12, 0),
datetime(2013, 11, 20, 14, 0), ]})
result = pivot_table(df, index=Grouper(freq='M', key='Date'),
columns=Grouper(freq='M', key='PayDay'),
values='Quantity', aggfunc=np.sum)
expected = DataFrame(np.array([np.nan, 3, np.nan, np.nan,
6, np.nan, 1, 9,
np.nan, 9, np.nan, np.nan, np.nan,
np.nan, 3, np.nan]).reshape(4, 4),
index=[datetime(2013, 9, 30),
datetime(2013, 10, 31),
datetime(2013, 11, 30),
datetime(2013, 12, 31)],
columns=[datetime(2013, 9, 30),
datetime(2013, 10, 31),
datetime(2013, 11, 30),
datetime(2013, 12, 31)])
expected.index.name = 'Date'
expected.columns.name = 'PayDay'
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=Grouper(freq='M', key='PayDay'),
columns=Grouper(freq='M', key='Date'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
tuples = [(datetime(2013, 9, 30), datetime(2013, 10, 31)),
(datetime(2013, 10, 31),
datetime(2013, 9, 30)),
(datetime(2013, 10, 31),
datetime(2013, 11, 30)),
(datetime(2013, 10, 31),
datetime(2013, 12, 31)),
(datetime(2013, 11, 30),
datetime(2013, 10, 31)),
(datetime(2013, 12, 31), datetime(2013, 11, 30)), ]
idx = MultiIndex.from_tuples(tuples, names=['Date', 'PayDay'])
expected = DataFrame(np.array([3, np.nan, 6, np.nan, 1, np.nan,
9, np.nan, 9, np.nan,
np.nan, 3]).reshape(6, 2),
index=idx, columns=['A', 'B'])
expected.columns.name = 'Branch'
result = pivot_table(
df, index=[Grouper(freq='M', key='Date'),
Grouper(freq='M', key='PayDay')], columns=['Branch'],
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=['Branch'],
columns=[Grouper(freq='M', key='Date'),
Grouper(freq='M', key='PayDay')],
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
def test_pivot_datetime_tz(self):
dates1 = ['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00',
'2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00']
dates2 = ['2013-01-01 15:00:00', '2013-01-01 15:00:00',
'2013-01-01 15:00:00',
'2013-02-01 15:00:00', '2013-02-01 15:00:00',
'2013-02-01 15:00:00']
df = DataFrame({'label': ['a', 'a', 'a', 'b', 'b', 'b'],
'dt1': dates1, 'dt2': dates2,
'value1': np.arange(6, dtype='int64'),
'value2': [1, 2] * 3})
df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d, tz='US/Pacific'))
df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d, tz='Asia/Tokyo'))
exp_idx = pd.DatetimeIndex(['2011-07-19 07:00:00',
'2011-07-19 08:00:00',
'2011-07-19 09:00:00'],
tz='US/Pacific', name='dt1')
exp_col1 = Index(['value1', 'value1'])
exp_col2 = Index(['a', 'b'], name='label')
exp_col = MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 3], [1, 4], [2, 5]],
index=exp_idx, columns=exp_col)
result = pivot_table(df, index=['dt1'], columns=[
'label'], values=['value1'])
tm.assert_frame_equal(result, expected)
exp_col1 = Index(['sum', 'sum', 'sum', 'sum',
'mean', 'mean', 'mean', 'mean'])
exp_col2 = Index(['value1', 'value1', 'value2', 'value2'] * 2)
exp_col3 = pd.DatetimeIndex(['2013-01-01 15:00:00',
'2013-02-01 15:00:00'] * 4,
tz='Asia/Tokyo', name='dt2')
exp_col = MultiIndex.from_arrays([exp_col1, exp_col2, exp_col3])
expected = DataFrame(np.array([[0, 3, 1, 2, 0, 3, 1, 2],
[1, 4, 2, 1, 1, 4, 2, 1],
[2, 5, 1, 2, 2, 5, 1, 2]],
dtype='int64'),
index=exp_idx,
columns=exp_col)
result = pivot_table(df, index=['dt1'], columns=['dt2'],
values=['value1', 'value2'],
aggfunc=[np.sum, np.mean])
tm.assert_frame_equal(result, expected)
def test_pivot_dtaccessor(self):
# GH 8103
dates1 = ['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00',
'2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00']
dates2 = ['2013-01-01 15:00:00', '2013-01-01 15:00:00',
'2013-01-01 15:00:00',
'2013-02-01 15:00:00', '2013-02-01 15:00:00',
'2013-02-01 15:00:00']
df = DataFrame({'label': ['a', 'a', 'a', 'b', 'b', 'b'],
'dt1': dates1, 'dt2': dates2,
'value1': np.arange(6, dtype='int64'),
'value2': [1, 2] * 3})
df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d))
df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d))
result = pivot_table(df, index='label', columns=df['dt1'].dt.hour,
values='value1')
exp_idx = Index(['a', 'b'], name='label')
expected = DataFrame({7: [0, 3], 8: [1, 4], 9: [2, 5]},
index=exp_idx,
columns=Index([7, 8, 9], name='dt1'))
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=df['dt2'].dt.month,
columns=df['dt1'].dt.hour,
values='value1')
expected = DataFrame({7: [0, 3], 8: [1, 4], 9: [2, 5]},
index=Index([1, 2], name='dt2'),
columns=Index([7, 8, 9], name='dt1'))
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=df['dt2'].dt.year.values,
columns=[df['dt1'].dt.hour, df['dt2'].dt.month],
values='value1')
exp_col = MultiIndex.from_arrays(
[[7, 7, 8, 8, 9, 9], [1, 2] * 3], names=['dt1', 'dt2'])
expected = DataFrame(np.array([[0, 3, 1, 4, 2, 5]], dtype='int64'),
index=[2013], columns=exp_col)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=np.array(['X', 'X', 'X',
'X', 'Y', 'Y']),
columns=[df['dt1'].dt.hour, df['dt2'].dt.month],
values='value1')
expected = DataFrame(np.array([[0, 3, 1, np.nan, 2, np.nan],
[np.nan, np.nan, np.nan,
4, np.nan, 5]]),
index=['X', 'Y'], columns=exp_col)
tm.assert_frame_equal(result, expected)
def test_daily(self):
rng = date_range('1/1/2000', '12/31/2004', freq='D')
ts = Series(np.random.randn(len(rng)), index=rng)
annual = pivot_table(DataFrame(ts), index=ts.index.year,
columns=ts.index.dayofyear)
annual.columns = annual.columns.droplevel(0)
doy = np.asarray(ts.index.dayofyear)
for i in range(1, 367):
subset = ts[doy == i]
subset.index = subset.index.year
result = annual[i].dropna()
tm.assert_series_equal(result, subset, check_names=False)
assert result.name == i
def test_monthly(self):
rng = date_range('1/1/2000', '12/31/2004', freq='M')
ts = Series(np.random.randn(len(rng)), index=rng)
annual = pivot_table(pd.DataFrame(ts), index=ts.index.year,
columns=ts.index.month)
annual.columns = annual.columns.droplevel(0)
month = ts.index.month
for i in range(1, 13):
subset = ts[month == i]
subset.index = subset.index.year
result = annual[i].dropna()
tm.assert_series_equal(result, subset, check_names=False)
assert result.name == i
def test_pivot_table_with_iterator_values(self):
# GH 12017
aggs = {'D': 'sum', 'E': 'mean'}
pivot_values_list = pd.pivot_table(
self.data, index=['A'], values=list(aggs.keys()), aggfunc=aggs,
)
pivot_values_keys = pd.pivot_table(
self.data, index=['A'], values=aggs.keys(), aggfunc=aggs,
)
tm.assert_frame_equal(pivot_values_keys, pivot_values_list)
agg_values_gen = (value for value in aggs.keys())
pivot_values_gen = pd.pivot_table(
self.data, index=['A'], values=agg_values_gen, aggfunc=aggs,
)
tm.assert_frame_equal(pivot_values_gen, pivot_values_list)
def test_pivot_table_margins_name_with_aggfunc_list(self):
# GH 13354
margins_name = 'Weekly'
costs = pd.DataFrame(
{'item': ['bacon', 'cheese', 'bacon', 'cheese'],
'cost': [2.5, 4.5, 3.2, 3.3],
'day': ['M', 'M', 'T', 'T']}
)
table = costs.pivot_table(
index="item", columns="day", margins=True,
margins_name=margins_name, aggfunc=[np.mean, max]
)
ix = pd.Index(
['bacon', 'cheese', margins_name], dtype='object', name='item'
)
tups = [('mean', 'cost', 'M'), ('mean', 'cost', 'T'),
('mean', 'cost', margins_name), ('max', 'cost', 'M'),
('max', 'cost', 'T'), ('max', 'cost', margins_name)]
cols = pd.MultiIndex.from_tuples(tups, names=[None, None, 'day'])
expected = pd.DataFrame(table.values, index=ix, columns=cols)
tm.assert_frame_equal(table, expected)
@pytest.mark.xfail(reason='GH#17035 (np.mean of ints is cast back to '
'ints)')
def test_categorical_margins(self, observed):
# GH 10989
df = pd.DataFrame({'x': np.arange(8),
'y': np.arange(8) // 4,
'z': np.arange(8) % 2})
expected = pd.DataFrame([[1.0, 2.0, 1.5], [5, 6, 5.5], [3, 4, 3.5]])
expected.index = Index([0, 1, 'All'], name='y')
expected.columns = Index([0, 1, 'All'], name='z')
table = df.pivot_table('x', 'y', 'z', dropna=observed, margins=True)
tm.assert_frame_equal(table, expected)
@pytest.mark.xfail(reason='GH#17035 (np.mean of ints is cast back to '
'ints)')
def test_categorical_margins_category(self, observed):
df = pd.DataFrame({'x': np.arange(8),
'y': np.arange(8) // 4,
'z': np.arange(8) % 2})
expected = pd.DataFrame([[1.0, 2.0, 1.5], [5, 6, 5.5], [3, 4, 3.5]])
expected.index = Index([0, 1, 'All'], name='y')
expected.columns = Index([0, 1, 'All'], name='z')
df.y = df.y.astype('category')
df.z = df.z.astype('category')
table = df.pivot_table('x', 'y', 'z', dropna=observed, margins=True)
tm.assert_frame_equal(table, expected)
def test_categorical_aggfunc(self, observed):
# GH 9534
df = pd.DataFrame({"C1": ["A", "B", "C", "C"],
"C2": ["a", "a", "b", "b"],
"V": [1, 2, 3, 4]})
df["C1"] = df["C1"].astype("category")
result = df.pivot_table("V", index="C1", columns="C2",
dropna=observed, aggfunc="count")
expected_index = pd.CategoricalIndex(['A', 'B', 'C'],
categories=['A', 'B', 'C'],
ordered=False,
name='C1')
expected_columns = pd.Index(['a', 'b'], name='C2')
expected_data = np.array([[1., np.nan],
[1., np.nan],
[np.nan, 2.]])
expected = pd.DataFrame(expected_data,
index=expected_index,
columns=expected_columns)
tm.assert_frame_equal(result, expected)
def test_categorical_pivot_index_ordering(self, observed):
# GH 8731
df = pd.DataFrame({'Sales': [100, 120, 220],
'Month': ['January', 'January', 'January'],
'Year': [2013, 2014, 2013]})
months = ['January', 'February', 'March', 'April', 'May', 'June',
'July', 'August', 'September', 'October', 'November',
'December']
df['Month'] = df['Month'].astype('category').cat.set_categories(months)
result = df.pivot_table(values='Sales',
index='Month',
columns='Year',
dropna=observed,
aggfunc='sum')
expected_columns = pd.Int64Index([2013, 2014], name='Year')
expected_index = pd.CategoricalIndex(['January'],
categories=months,
ordered=False,
name='Month')
expected = pd.DataFrame([[320, 120]],
index=expected_index,
columns=expected_columns)
if not observed:
result = result.dropna().astype(np.int64)
tm.assert_frame_equal(result, expected)
def test_pivot_table_not_series(self):
# GH 4386
# pivot_table always returns a DataFrame
# when values is not list like and columns is None
# and aggfunc is not instance of list
df = DataFrame({'col1': [3, 4, 5],
'col2': ['C', 'D', 'E'],
'col3': [1, 3, 9]})
result = df.pivot_table('col1', index=['col3', 'col2'], aggfunc=np.sum)
m = MultiIndex.from_arrays([[1, 3, 9],
['C', 'D', 'E']],
names=['col3', 'col2'])
expected = DataFrame([3, 4, 5],
index=m, columns=['col1'])
tm.assert_frame_equal(result, expected)
result = df.pivot_table(
'col1', index='col3', columns='col2', aggfunc=np.sum
)
expected = DataFrame([[3, np.NaN, np.NaN],
[np.NaN, 4, np.NaN],
[np.NaN, np.NaN, 5]],
index=Index([1, 3, 9], name='col3'),
columns=Index(['C', 'D', 'E'], name='col2'))
tm.assert_frame_equal(result, expected)
result = df.pivot_table('col1', index='col3', aggfunc=[np.sum])
m = MultiIndex.from_arrays([['sum'],
['col1']])
expected = DataFrame([3, 4, 5],
index=Index([1, 3, 9], name='col3'),
columns=m)
tm.assert_frame_equal(result, expected)
def test_pivot_margins_name_unicode(self):
# issue #13292
greek = u'\u0394\u03bf\u03ba\u03b9\u03bc\u03ae'
frame = pd.DataFrame({'foo': [1, 2, 3]})
table = pd.pivot_table(frame, index=['foo'], aggfunc=len, margins=True,
margins_name=greek)
index = pd.Index([1, 2, 3, greek], dtype='object', name='foo')
expected = pd.DataFrame(index=index)
tm.assert_frame_equal(table, expected)
def test_pivot_string_as_func(self):
# GH #18713
# for correctness purposes
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar',
'bar', 'bar', 'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two', 'one', 'one',
'one', 'two', 'two', 'two', 'one'],
'C': range(11)})
result = pivot_table(data, index='A', columns='B', aggfunc='sum')
mi = MultiIndex(levels=[['C'], ['one', 'two']],
codes=[[0, 0], [0, 1]], names=[None, 'B'])
expected = DataFrame({('C', 'one'): {'bar': 15, 'foo': 13},
('C', 'two'): {'bar': 7, 'foo': 20}},
columns=mi).rename_axis('A')
tm.assert_frame_equal(result, expected)
result = pivot_table(data, index='A', columns='B',
aggfunc=['sum', 'mean'])
mi = MultiIndex(levels=[['sum', 'mean'], ['C'], ['one', 'two']],
codes=[[0, 0, 1, 1], [0, 0, 0, 0], [0, 1, 0, 1]],
names=[None, None, 'B'])
expected = DataFrame({('mean', 'C', 'one'): {'bar': 5.0, 'foo': 3.25},
('mean', 'C', 'two'): {'bar': 7.0,
'foo': 6.666666666666667},
('sum', 'C', 'one'): {'bar': 15, 'foo': 13},
('sum', 'C', 'two'): {'bar': 7, 'foo': 20}},
columns=mi).rename_axis('A')
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('f, f_numpy',
[('sum', np.sum),
('mean', np.mean),
('std', np.std),
(['sum', 'mean'], [np.sum, np.mean]),
(['sum', 'std'], [np.sum, np.std]),
(['std', 'mean'], [np.std, np.mean])])
def test_pivot_string_func_vs_func(self, f, f_numpy):
# GH #18713
# for consistency purposes
result = pivot_table(self.data, index='A', columns='B', aggfunc=f)
expected = pivot_table(self.data, index='A', columns='B',
aggfunc=f_numpy)
tm.assert_frame_equal(result, expected)
@pytest.mark.slow
def test_pivot_number_of_levels_larger_than_int32(self):
# GH 20601
df = DataFrame({'ind1': np.arange(2 ** 16),
'ind2': np.arange(2 ** 16),
'count': 0})
with pytest.raises(ValueError, match='int32 overflow'):
df.pivot_table(index='ind1', columns='ind2',
values='count', aggfunc='count')
class TestCrosstab(object):
def setup_method(self, method):
df = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
self.df = df.append(df, ignore_index=True)
def test_crosstab_single(self):
df = self.df
result = crosstab(df['A'], df['C'])
expected = df.groupby(['A', 'C']).size().unstack()
tm.assert_frame_equal(result, expected.fillna(0).astype(np.int64))
def test_crosstab_multiple(self):
df = self.df
result = crosstab(df['A'], [df['B'], df['C']])
expected = df.groupby(['A', 'B', 'C']).size()
expected = expected.unstack(
'B').unstack('C').fillna(0).astype(np.int64)
tm.assert_frame_equal(result, expected)
result = crosstab([df['B'], df['C']], df['A'])
expected = df.groupby(['B', 'C', 'A']).size()
expected = expected.unstack('A').fillna(0).astype(np.int64)
tm.assert_frame_equal(result, expected)
def test_crosstab_ndarray(self):
a = np.random.randint(0, 5, size=100)
b = np.random.randint(0, 3, size=100)
c = np.random.randint(0, 10, size=100)
df = DataFrame({'a': a, 'b': b, 'c': c})
result = crosstab(a, [b, c], rownames=['a'], colnames=('b', 'c'))
expected = crosstab(df['a'], [df['b'], df['c']])
tm.assert_frame_equal(result, expected)
result = crosstab([b, c], a, colnames=['a'], rownames=('b', 'c'))
expected = crosstab([df['b'], df['c']], df['a'])
tm.assert_frame_equal(result, expected)
# assign arbitrary names
result = crosstab(self.df['A'].values, self.df['C'].values)
assert result.index.name == 'row_0'
assert result.columns.name == 'col_0'
def test_crosstab_non_aligned(self):
# GH 17005
a = pd.Series([0, 1, 1], index=['a', 'b', 'c'])
b = pd.Series([3, 4, 3, 4, 3], index=['a', 'b', 'c', 'd', 'f'])
c = np.array([3, 4, 3])
expected = pd.DataFrame([[1, 0], [1, 1]],
index=Index([0, 1], name='row_0'),
columns=Index([3, 4], name='col_0'))
result = crosstab(a, b)
tm.assert_frame_equal(result, expected)
result = crosstab(a, c)
tm.assert_frame_equal(result, expected)
def test_crosstab_margins(self):
a = np.random.randint(0, 7, size=100)
b = np.random.randint(0, 3, size=100)
c = np.random.randint(0, 5, size=100)
df = DataFrame({'a': a, 'b': b, 'c': c})
result = crosstab(a, [b, c], rownames=['a'], colnames=('b', 'c'),
margins=True)
assert result.index.names == ('a',)
assert result.columns.names == ['b', 'c']
all_cols = result['All', '']
exp_cols = df.groupby(['a']).size().astype('i8')
# to keep index.name
exp_margin = Series([len(df)], index=Index(['All'], name='a'))
exp_cols = exp_cols.append(exp_margin)
exp_cols.name = ('All', '')
tm.assert_series_equal(all_cols, exp_cols)
all_rows = result.loc['All']
exp_rows = df.groupby(['b', 'c']).size().astype('i8')
exp_rows = exp_rows.append(Series([len(df)], index=[('All', '')]))
exp_rows.name = 'All'
exp_rows = exp_rows.reindex(all_rows.index)
exp_rows = exp_rows.fillna(0).astype(np.int64)
tm.assert_series_equal(all_rows, exp_rows)
def test_crosstab_margins_set_margin_name(self):
# GH 15972
a = np.random.randint(0, 7, size=100)
b = np.random.randint(0, 3, size=100)
c = np.random.randint(0, 5, size=100)
df = DataFrame({'a': a, 'b': b, 'c': c})
result = crosstab(a, [b, c], rownames=['a'], colnames=('b', 'c'),
margins=True, margins_name='TOTAL')
assert result.index.names == ('a',)
assert result.columns.names == ['b', 'c']
all_cols = result['TOTAL', '']
exp_cols = df.groupby(['a']).size().astype('i8')
# to keep index.name
exp_margin = Series([len(df)], index=Index(['TOTAL'], name='a'))
exp_cols = exp_cols.append(exp_margin)
exp_cols.name = ('TOTAL', '')
tm.assert_series_equal(all_cols, exp_cols)
all_rows = result.loc['TOTAL']
exp_rows = df.groupby(['b', 'c']).size().astype('i8')
exp_rows = exp_rows.append(Series([len(df)], index=[('TOTAL', '')]))
exp_rows.name = 'TOTAL'
exp_rows = exp_rows.reindex(all_rows.index)
exp_rows = exp_rows.fillna(0).astype(np.int64)
tm.assert_series_equal(all_rows, exp_rows)
for margins_name in [666, None, ['a', 'b']]:
with pytest.raises(ValueError):
crosstab(a, [b, c], rownames=['a'], colnames=('b', 'c'),
margins=True, margins_name=margins_name)
def test_crosstab_pass_values(self):
a = np.random.randint(0, 7, size=100)
b = np.random.randint(0, 3, size=100)
c = np.random.randint(0, 5, size=100)
values = np.random.randn(100)
table = crosstab([a, b], c, values, aggfunc=np.sum,
rownames=['foo', 'bar'], colnames=['baz'])
df = DataFrame({'foo': a, 'bar': b, 'baz': c, 'values': values})
expected = df.pivot_table('values', index=['foo', 'bar'],
columns='baz', aggfunc=np.sum)
tm.assert_frame_equal(table, expected)
def test_crosstab_dropna(self):
# GH 3820
a = np.array(['foo', 'foo', 'foo', 'bar',
'bar', 'foo', 'foo'], dtype=object)
b = np.array(['one', 'one', 'two', 'one',
'two', 'two', 'two'], dtype=object)
c = np.array(['dull', 'dull', 'dull', 'dull',
'dull', 'shiny', 'shiny'], dtype=object)
res = pd.crosstab(a, [b, c], rownames=['a'],
colnames=['b', 'c'], dropna=False)
m = MultiIndex.from_tuples([('one', 'dull'), ('one', 'shiny'),
('two', 'dull'), ('two', 'shiny')],
names=['b', 'c'])
tm.assert_index_equal(res.columns, m)
def test_crosstab_no_overlap(self):
# GH 10291
s1 = pd.Series([1, 2, 3], index=[1, 2, 3])
s2 = pd.Series([4, 5, 6], index=[4, 5, 6])
actual = crosstab(s1, s2)
expected = pd.DataFrame()
tm.assert_frame_equal(actual, expected)
def test_margin_dropna(self):
# GH 12577
# pivot_table counts null into margin ('All')
# when margins=true and dropna=true
df = pd.DataFrame({'a': [1, 2, 2, 2, 2, np.nan],
'b': [3, 3, 4, 4, 4, 4]})
actual = pd.crosstab(df.a, df.b, margins=True, dropna=True)
expected = pd.DataFrame([[1, 0, 1], [1, 3, 4], [2, 3, 5]])
expected.index = Index([1.0, 2.0, 'All'], name='a')
expected.columns = Index([3, 4, 'All'], name='b')
tm.assert_frame_equal(actual, expected)
df = DataFrame({'a': [1, np.nan, np.nan, np.nan, 2, np.nan],
'b': [3, np.nan, 4, 4, 4, 4]})
actual = pd.crosstab(df.a, df.b, margins=True, dropna=True)
expected = pd.DataFrame([[1, 0, 1], [0, 1, 1], [1, 1, 2]])
expected.index =
|
Index([1.0, 2.0, 'All'], name='a')
|
pandas.Index
|
import requests
import numpy as np
import pandas as pd
from tqdm import tqdm
import streamlit as st
# pdf libraries
import fitz
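# fitz is the import name of the PyMuPDF library (used for PDF parsing)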
@st.cache(allow_output_mutation=False, show_spinner=False)
def unfold_column(
df,
colum_to_unfold="paragraph",
columns_to_keep=[],
include_info=True
):
"""
Expand a list-valued column so that each list element becomes its own row.
"""
paragraph_l = []
for i, p_l in enumerate(df[colum_to_unfold]):
for j, p in enumerate(p_l):
# carry over the requested columns plus one paragraph per output row
data_dict = {}
if len(columns_to_keep) > 0:
for c in columns_to_keep:
data_dict[c] = df[c][i]
data_dict[colum_to_unfold] = p
if include_info:
data_dict["tot_p"] = len(p_l)
data_dict["p_nr"] = j+1
# construct data frame
df_p_to_row = pd.DataFrame.from_dict(data_dict, orient='index').T
paragraph_l.append(df_p_to_row)
return
|
pd.concat(paragraph_l)
|
pandas.concat
|
#coding=utf-8
import pandas as pd
import numpy as np
import sys
import os
from sklearn import preprocessing
import datetime
import scipy as sc
from sklearn.preprocessing import MinMaxScaler,StandardScaler
from sklearn.externals import joblib
#import joblib
class FEbase(object):
"""description of class"""
def __init__(self, **kwargs):
pass
def create(self,*DataSetName):
#print (self.__class__.__name__)
(filepath, tempfilename) = os.path.split(DataSetName[0])
(filename, extension) = os.path.splitext(tempfilename)
#bufferstring='savetest2017.csv'
bufferstringoutput=filepath+'/'+filename+'_'+self.__class__.__name__+extension
if(os.path.exists(bufferstringoutput)==False):
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
df_all=self.core(DataSetName)
df_all.to_csv(bufferstringoutput)
return bufferstringoutput
def core(self,df_all,Data_adj_name=''):
return df_all
def real_FE():
return 0
class FEg30eom0110network(FEbase):
#this version switches to a 3-day-ahead prediction
def __init__(self):
pass
def core(self,DataSetName):
intflag=True
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#flag ST or otherwise abnormal stocks
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
##exclude STAR Market (688) listings
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
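#market-board class from the ts_code prefix: 30* = ChiNext, 60* = Shanghai main board, 00* = Shenzhen main board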
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#adjusted ("real") price: close * adjustment factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
if(intflag):
df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
if(intflag):
df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
if(intflag):
df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min',True)
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min',True)
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max',True)
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max',True)
df_all,_=FEsingle.HighLowRange(df_all,8,True)
df_all,_=FEsingle.HighLowRange(df_all,25,True)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#flag days that closed at (or near) the daily up-limit
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###true price range (to tell actually high-priced from low-priced shares)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day change features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
if(intflag):
df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
if(intflag):
df_all['pct_chg_abs_rank']=df_all['pct_chg_abs_rank']*10//2
df_all=FEsingle.PctChgAbsSumRank(df_all,6,True)
df_all=FEsingle.PctChgSumRank(df_all,3,True)
df_all=FEsingle.PctChgSumRank(df_all,6,True)
df_all=FEsingle.PctChgSumRank(df_all,12,True)
df_all=FEsingle.AmountChgRank(df_all,12,True)
#df_all=FEsingle.AmountChgRank(df_all,30)
#compute cross-sectional ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
if(intflag):
df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#drop stocks whose share price is too low
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
class FEg30eom0110onlinew6d(FEbase):
#this version switches to a 3-day-ahead prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
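#net money inflow per order-size bucket: buy amount minus sell amount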
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
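#20-day rolling rank of each money-flow column per stock (rollingRankSciPyB is a helper defined elsewhere), shifted one day below to avoid look-ahead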
df_all['sm_amount_pos']=df_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['lg_amount_pos']=df_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['net_mf_amount_pos']=df_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['sm_amount_pos']=df_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_all['lg_amount_pos']=df_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_all['net_mf_amount_pos']=df_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_all['sm_amount']=df_all.groupby('ts_code')['sm_amount'].shift(1)
df_all['lg_amount']=df_all.groupby('ts_code')['lg_amount'].shift(1)
df_all['net_mf_amount']=df_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#flag ST or otherwise abnormal stocks
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
##exclude STAR Market (688) listings
#print(df_all)
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#adjusted ("real") price: close * adjustment factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#flag days that closed at (or near) the daily up-limit
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###true price range (to tell actually high-priced from low-priced shares)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day change features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
#df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
#compute cross-sectional ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#drop stocks whose share price is too low
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
class FE_a23(FEbase):
#this version switches to a 3-day-ahead prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
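#aggregate the money-flow columns over 5/12/25-day windows (FEsingle.InputChgSum is assumed to be a rolling-sum helper defined elsewhere)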
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#flag ST or otherwise abnormal stocks
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
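#day of week of the trade date (0 = Monday) as a calendar feature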
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
##exclude STAR Market (688) listings
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#adjusted ("real") price: close * adjustment factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#flag limit-up days
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
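# pct_chg > 9.4 approximates a limit-up close for +/-10%-band stocks (with margin for rounding);
# such rows are dropped further down, presumably because a limit-up name cannot be bought at the close.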
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###real price range (to distinguish actually high- vs. low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day change rank
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
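# Assumption about FEsingle: PctChgSum sums pct_chg over the trailing 3/6/12/24-day window per stock
# and PctChgSumRank ranks that sum cross-sectionally per trade_date, giving momentum features at
# several horizons; PctChgAbsSumRank(…,6) above plays the role of a short-horizon volatility proxy.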
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
#compute cross-sectional ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
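# The loop below rewrites open/high/low as percentage gaps versus pre_close,
# i.e. (x - pre_close) * 100 / pre_close, then replaces each with its cross-sectional percentile
# rank for the day; pct_chg_r preserves a copy of pct_chg before that column is dropped later.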
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
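# Assumption: FEsingle.PredictDaysTrend(df_all, 5) builds the training label from the following
# 5 trading days' trend; see FEsingle for the exact target definition.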
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#planned version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
#question mark here: unclear whether this filter should be applied at this point
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #pad to 6 digits; string methods need the .str accessor
print(df_all)
##exclude STAR Market (688) stocks
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#adjusted price (close * adj_factor)
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
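# Assumption about the realtime feed: the latest adj_factor can be missing, so it is filled with 0
# above and today's adjusted price is rebuilt from yesterday's adjusted close times (1 + pct_chg/100),
# keeping real_price usable for the rolling-rank feature below.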
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
#flag limit-up days
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
#1-day change rank
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#compute cross-sectional ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#drop low-priced / thinly traded stocks (original comment said market cap; the filters below use close and amount)
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
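# Keep only the most recent trading day so today_train.csv contains just the rows to predict on.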
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FE_a29(FEbase):
#originally noted as a 3-day prediction version (PredictDaysTrend below is still called with a 5-day horizon)
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#ST or otherwise abnormal flag (via the price-limit ratio)
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
##exclude STAR Market (688) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#adjusted price (close * adj_factor)
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
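# The *_diff features compare the same rank over nested windows (25 vs 12, 12 vs 5); the intent is
# presumably to capture whether the stock's position in its recent range improves or deteriorates as
# the lookback shortens (interpretation, not taken from the original comments).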
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#flag limit-up days
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###real price range (to distinguish actually high- vs. low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day change rank
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
#compute cross-sectional ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>15]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
#df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#planned version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
#question mark here: unclear whether this filter should be applied at this point
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #pad to 6 digits; string methods need the .str accessor
print(df_all)
##exclude STAR Market (688) stocks
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#adjusted price (close * adj_factor)
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
#flag limit-up days
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
#1-day change rank
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#compute cross-sectional ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#drop low-priced / thinly traded stocks (original comment said market cap; the filters below use close and amount)
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FE_a29_Volatility(FEbase):
#originally noted as a 3-day prediction version (PredictDaysTrend below is still called with a 5-day horizon)
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#ST or otherwise abnormal flag (via the price-limit ratio)
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
##exclude STAR Market (688) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#adjusted price (close * adj_factor)
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#flag limit-up days
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###real price range (to distinguish actually high- vs. low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day change rank
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
#compute cross-sectional ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>15]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
#df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#planned version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
#question mark here: unclear whether this filter should be applied at this point
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #pad to 6 digits; string methods need the .str accessor
print(df_all)
##exclude STAR Market (688) stocks
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#adjusted price (close * adj_factor)
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
#flag limit-up days
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
#1-day change rank
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#compute cross-sectional ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#drop low-priced / thinly traded stocks (original comment said market cap; the filters below use close and amount)
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FE_a31(FEbase):
#originally noted as a 3-day prediction version (PredictDaysTrend below is still called with a 5-day horizon)
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#ST or otherwise abnormal flag (via the price-limit ratio)
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
##exclude STAR Market (688) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#adjusted price (close * adj_factor)
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#flag limit-up days
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###real price range (to distinguish actually high- vs. low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day change rank
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
#compute cross-sectional ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
df_all=df_all[df_all['total_mv_rank']<6]
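# Distinguishing filter of FE_a31: total_mv_rank is bucketed 0..19, so keeping rank < 6 restricts
# the training set to roughly the smallest ~30% of stocks by market cap.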
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
#df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#planned version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
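# shift(1) within each ts_code so that only the previous session's money-flow figures are
# visible on a given trade date (no look-ahead when predicting live)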
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
#TODO: revisit this step
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #the .str accessor is required here
print(df_all)
##exclude the STAR Market (688 tickers)
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#price adjusted by adj_factor
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
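# in live mode today's adj_factor may not be reliable yet, so the adjusted price is rebuilt
# from yesterday's adjusted close scaled by today's pct_chg (assumed intent of the three lines above)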
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
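# *19.9//1 buckets the market-cap percentile rank into 20 integer bins (0-19)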
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
#price-limit (limit-up) flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
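# pct_chg > 9.4 approximates a 10% limit-up; the 4.8-5.2 band presumably catches the 5% limit
# that applies to ST shares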
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#rank the three price ratios (open/high/low relative to pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#drop low-priced and low-turnover stocks
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
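# --- illustrative helper (sketch) -------------------------------------------------
# rollingRankSciPyB() is defined elsewhere in this module; the rolling(20).apply(...)
# calls above appear to turn each 20-day window into the percentile position of the
# window's most recent value. A minimal sketch of that assumed behaviour follows;
# `_rolling_rank_sketch` is an illustrative name and is not called by the pipeline.
def _rolling_rank_sketch(window):
    # rank of the newest value within the window, scaled to (0, 1]
    from scipy.stats import rankdata  # local import keeps the sketch self-contained
    ranks = rankdata(window)          # average ranks, 1..len(window)
    return ranks[-1] / len(window)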
class FE_a31_full(FEbase):
#this version switches to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#ST or other abnormal-status flag
#df_all['st_or_otherwrong']=0
#df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
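# weekday of the trading day (0 = Monday) as a simple calendar feature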
##exclude the STAR Market (688 tickers)
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#price adjusted by adj_factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#price-limit (limit-up) flag
#df_all['high_stop']=0
#df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###raw price range (distinguishes actually high- vs. low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
#rank the three price ratios (open/high/low relative to pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
df_all=df_all[df_all['total_mv_rank']<6]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#columns not used for now
#df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['amount','close','real_price'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#planned version of the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
#TODO: revisit this step
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #the .str accessor is required here
print(df_all)
##exclude the STAR Market (688 tickers)
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#price adjusted by adj_factor
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
#price-limit (limit-up) flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#rank the three price ratios (open/high/low relative to pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#drop low-priced and low-turnover stocks
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
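# --- illustrative helpers (sketch) ------------------------------------------------
# PctChgSum / PctChgSumRank (FEsingle) feed the pct_chg_N and chg_rank_N columns used
# above. Assumed behaviour, sketched for a frame with ts_code / trade_date / pct_chg
# columns: a trailing N-day sum of pct_chg per stock, and its cross-sectional percentile
# rank per trading day. The `_sketch` names are illustrative and unused by the pipeline.
def _pct_chg_sum_sketch(df, days):
    df['pct_chg_%d' % days] = (df.groupby('ts_code')['pct_chg']
                                 .rolling(days).sum()
                                 .reset_index(0, drop=True))
    return df

def _pct_chg_sum_rank_sketch(df, days):
    df = _pct_chg_sum_sketch(df, days)
    df['chg_rank_%d' % days] = df.groupby('trade_date')['pct_chg_%d' % days].rank(pct=True)
    return df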
class FE_a29_full(FEbase):
#this version switches to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#ST or other abnormal-status flag
#df_all['st_or_otherwrong']=0
#df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
##exclude the STAR Market (688 tickers)
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#price adjusted by adj_factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#price-limit (limit-up) flag
#df_all['high_stop']=0
#df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###raw price range (distinguishes actually high- vs. low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
#rank the three price ratios (open/high/low relative to pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']<6]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#columns not used for now
#df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['amount','close','real_price'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#planned version of the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
#TODO: revisit this step
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #the .str accessor is required here
print(df_all)
##exclude the STAR Market (688 tickers)
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#price adjusted by adj_factor
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
#price-limit (limit-up) flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#rank the three price ratios (open/high/low relative to pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#drop low-priced and low-turnover stocks
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
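# --- illustrative helper (sketch) -------------------------------------------------
# PredictDaysTrend(df_all, 5) produces the labels (tomorrow_chg / tomorrow_chg_rank)
# that core() returns for training. Assumed behaviour, sketched for a frame sorted by
# trade_date within each ts_code: the cumulative pct_chg over the next `days` sessions,
# plus its per-day percentile rank (the real helper may additionally bucket the rank
# into discrete classes). `_predict_days_trend_sketch` is illustrative and unused.
def _predict_days_trend_sketch(df, days):
    trailing = (df.groupby('ts_code')['pct_chg']
                  .rolling(days).sum()
                  .reset_index(0, drop=True))
    # shifting the trailing sum back by `days` turns it into a forward-looking sum
    df['tomorrow_chg'] = trailing.groupby(df['ts_code']).shift(-days)
    df['tomorrow_chg_rank'] = df.groupby('trade_date')['tomorrow_chg'].rank(pct=True)
    return df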
class FE_qliba2(FEbase):
#this version switches to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
#===================================================================================================================================#
#price adjusted by adj_factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
df_all=FEsingle.PredictDaysTrend(df_all,5)
print(df_all)
df_all=df_all.loc[:,['ts_code','trade_date','tomorrow_chg','tomorrow_chg_rank']]
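# only the keys and the forward-return labels are kept here; every predictive feature
# comes from the pre-computed qlib factor file merged further below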
print(df_all.dtypes)
print(df_all)
#===================================================================================================================================#
#load qlib features
###df_qlib_1=pd.read_csv('zzztest.csv',header=0)
###df_qlib_2=pd.read_csv('zzztest2.csv',header=0)
##df_qlib_1=pd.read_csv('2013.csv',header=0)
###df_qlib_1=df_qlib_1.iloc[:,0:70]
##df_qlib_all_l=df_qlib_1.iloc[:,0:2]
##df_qlib_all_r=df_qlib_1.iloc[:,70:]
##df_qlib_1 = pd.concat([df_qlib_all_l,df_qlib_all_r],axis=1)
##print(df_qlib_1.head(10))
##df_qlib_2=pd.read_csv('2015.csv',header=0)
##df_qlib_all_l=df_qlib_2.iloc[:,0:2]
##df_qlib_all_r=df_qlib_2.iloc[:,70:]
##df_qlib_2 = pd.concat([df_qlib_all_l,df_qlib_all_r],axis=1)
##df_qlib_3=pd.read_csv('2017.csv',header=0)
##df_qlib_all_l=df_qlib_3.iloc[:,0:2]
##df_qlib_all_r=df_qlib_3.iloc[:,70:]
##df_qlib_3 = pd.concat([df_qlib_all_l,df_qlib_all_r],axis=1)
##df_qlib_4=pd.read_csv('2019.csv',header=0)
##df_qlib_all_l=df_qlib_4.iloc[:,0:2]
##df_qlib_all_r=df_qlib_4.iloc[:,70:]
##df_qlib_4 = pd.concat([df_qlib_all_l,df_qlib_all_r],axis=1)
##df_qlib_all=pd.concat([df_qlib_2,df_qlib_1])
##df_qlib_all=pd.concat([df_qlib_3,df_qlib_all])
##df_qlib_all=pd.concat([df_qlib_4,df_qlib_all])
##df_qlib_all.drop_duplicates()
##print(df_qlib_all.head(10))
##df_qlib_all.drop(['LABEL0'],axis=1,inplace=True)
##df_qlib_all.to_csv("13to21_first70plus.csv")
df_qlib_all=pd.read_csv('13to21_first70plus.csv',header=0)
#df_qlib_all.drop(['LABEL0'],axis=1,inplace=True)
print(df_qlib_all)
df_qlib_all.rename(columns={'datetime':'trade_date','instrument':'ts_code','score':'mix'}, inplace = True)
print(df_qlib_all.dtypes)
print(df_qlib_all)
df_qlib_all['trade_date'] = pd.to_datetime(df_qlib_all['trade_date'], format='%Y-%m-%d')
df_qlib_all['trade_date']=df_qlib_all['trade_date'].apply(lambda x: x.strftime('%Y%m%d'))
df_qlib_all['trade_date'] = df_qlib_all['trade_date'].astype(int)
df_qlib_all['ts_codeL'] = df_qlib_all['ts_code'].str[:2]
df_qlib_all['ts_codeR'] = df_qlib_all['ts_code'].str[2:]
df_qlib_all['ts_codeR'] = df_qlib_all['ts_codeR'].apply(lambda s: s+'.')
df_qlib_all['ts_code']=df_qlib_all['ts_codeR'].str.cat(df_qlib_all['ts_codeL'])
df_qlib_all.drop(['ts_codeL','ts_codeR'],axis=1,inplace=True)
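# the block above rebuilds qlib-style instrument codes such as 'SH600000' into the
# tushare-style '600000.SH' so the merge keys line up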
print(df_qlib_all.dtypes)
print(df_qlib_all)
df_qlib_all=df_qlib_all.fillna(value=0)
df_all=pd.merge(df_all, df_qlib_all, how='left', on=['ts_code','trade_date'])
print(df_all)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#planned version of the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
#TODO: revisit this step
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #the .str accessor is required here
print(df_all)
##exclude the STAR Market (688 tickers)
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#price adjusted by adj_factor
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
#price-limit (limit-up) flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#rank the three price ratios (open/high/low relative to pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#drop low-priced and low-turnover stocks
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
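# --- illustrative helper (sketch) -------------------------------------------------
# InputChgSum(df, n, col) is used in the core() methods to build sm_amount_5 /
# sm_amount_12 / sm_amount_25 and the lg_amount / net_mf_amount equivalents (the
# column naming is visible in the commented-out *_diff lines). Assumed behaviour:
# a trailing n-day sum of an arbitrary column per stock. `_input_chg_sum_sketch`
# is illustrative and unused by the pipeline.
def _input_chg_sum_sketch(df, days, col):
    df['%s_%d' % (col, days)] = (df.groupby('ts_code')[col]
                                   .rolling(days).sum()
                                   .reset_index(0, drop=True))
    return df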
class FEonlinew_a31(FEbase):
#this version switches to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#ST or other abnormal-status flag
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
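# down_limit/up_limit is roughly 0.82 for ordinary 10% boards and 0.67 for 20% boards,
# but about 0.90 for ST shares (5% limit); the 0.58-0.85 band therefore marks non-ST
# stocks, and only those are kept further below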
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
##exclude the STAR Market (688 tickers)
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#price adjusted by adj_factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#at the price limit?
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
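# rows with high_stop==1 (pct_chg above 9.4%, i.e. at or near the 10% limit-up) are filtered out further below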
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###real price range (distinguish actually high- vs. low-priced shares)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
#compute cross-sectional ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
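# FEsingle.PredictDaysTrend presumably appends the 5-day forward trend label used as the training target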
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#planned release version of the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all['sm_amount']=df_all.groupby('ts_code')['sm_amount'].shift(1)
df_all['lg_amount']=df_all.groupby('ts_code')['lg_amount'].shift(1)
df_all['net_mf_amount']=df_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=FEsingle.InputChgSum(df_all,5,'sm_amount')
df_all=FEsingle.InputChgSum(df_all,5,'lg_amount')
df_all=FEsingle.InputChgSum(df_all,5,'net_mf_amount')
df_all=FEsingle.InputChgSum(df_all,12,'sm_amount')
df_all=FEsingle.InputChgSum(df_all,12,'lg_amount')
df_all=FEsingle.InputChgSum(df_all,12,'net_mf_amount')
df_all=FEsingle.InputChgSum(df_all,25,'sm_amount')
df_all=FEsingle.InputChgSum(df_all,25,'lg_amount')
df_all=FEsingle.InputChgSum(df_all,25,'net_mf_amount')
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
#question mark here: to be revisited
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #the .str accessor must be used here
print(df_all)
##exclude STAR Market (688) stocks
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#price after adjustment (adjusted close)
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
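# live mode: today's adj_factor may still be missing (filled with 0), so real_price is rebuilt
# from the previous day's adjusted close scaled by today's pct_chg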
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
#at the price limit?
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
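# the *_diff features compare long- vs. short-window momentum (e.g. 24-day minus 12-day cumulative change/rank)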
#compute cross-sectional ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#drop stocks whose market value is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
df_all=df_all[df_all['total_mv_rank']<6]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
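# keep only the most recent trade date: these rows form today's live prediction set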
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FEfast_a23(FEbase):
#this version switches to a 3-day prediction horizon
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
#print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#is the stock ST or otherwise abnormal?
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
##exclude STAR Market (688) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#df_all['ts_code_try']=df_all['ts_code'].map(lambda x : x[:-3])
#===================================================================================================================================#
#price after adjustment (adjusted close)
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
#df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
#df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
#df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#at the price limit?
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###real price range (distinguish actually high- vs. low-priced shares)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
#df_all=FEsingle.PctChgAbsSumRank(df_all,6)
#df_all=FEsingle.PctChgSumRank(df_all,3)
#df_all=FEsingle.PctChgSumRank(df_all,6)
#df_all=FEsingle.PctChgSumRank(df_all,12)
#df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
#df_all=FEsingle.PctChgSum(df_all,6)
#df_all=FEsingle.PctChgSum(df_all,12)
#df_all=FEsingle.PctChgSum(df_all,24)
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,24)
#compute cross-sectional ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*19.9//2
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#planned release version of the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
#question mark here: to be revisited
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #the .str accessor must be used here
print(df_all)
##exclude STAR Market (688) stocks
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#price after adjustment (adjusted close)
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
#at the price limit?
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
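# the 4.8-5.2% band likely catches shares trading under a 5% daily limit (e.g. ST stocks)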
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
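# AmountChgRank(df_all,12) presumably adds the 'pst_amount_rank_12' column consumed by OldFeaturesRank below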
#compute cross-sectional ranks for the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#drop stocks whose market value is too low
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FEfast_a23_pos(FEbase):
#this version switches to a 3-day prediction horizon
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
#print(df_money_all)
df_all=
|
pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
|
pandas.merge
|
from .config import error_analysis, sample_data, CORRELATION, CORRELATION_THRESHOLD, VERBOSE
from .context import pandas_ta
from unittest import TestCase, skip
import pandas.testing as pdt
from pandas import DataFrame, Series
import talib as tal
class TestMomentum(TestCase):
@classmethod
def setUpClass(cls):
cls.data = sample_data
cls.data.columns = cls.data.columns.str.lower()
cls.open = cls.data['open']
cls.high = cls.data['high']
cls.low = cls.data['low']
cls.close = cls.data['close']
if 'volume' in cls.data.columns: cls.volume = cls.data['volume']
@classmethod
def tearDownClass(cls):
del cls.open
del cls.high
del cls.low
del cls.close
if hasattr(cls, 'volume'): del cls.volume
del cls.data
def setUp(self): pass
def tearDown(self): pass
def test_datetime_ordered(self):
# Test if datetime64 index and ordered
result = self.data.ta.datetime_ordered
self.assertTrue(result)
# Test if not ordered
original = self.data.copy()
reversal = original.ta.reverse
result = reversal.ta.datetime_ordered
self.assertFalse(result)
# Test a non-datetime64 index
original = self.data.copy()
original.reset_index(inplace=True)
result = original.ta.datetime_ordered
self.assertFalse(result)
def test_reverse(self):
original = self.data.copy()
result = original.ta.reverse
# Check if first and last time are reversed
self.assertEqual(result.index[-1], original.index[0])
self.assertEqual(result.index[0], original.index[-1])
def test_ao(self):
result = pandas_ta.ao(self.high, self.low)
self.assertIsInstance(result, Series)
self.assertEqual(result.name, 'AO_5_34')
def test_apo(self):
result = pandas_ta.apo(self.close)
self.assertIsInstance(result, Series)
self.assertEqual(result.name, 'APO_12_26')
try:
expected = tal.APO(self.close)
pdt.assert_series_equal(result, expected, check_names=False)
except AssertionError as ae:
try:
corr = pandas_ta.utils.df_error_analysis(result, expected, col=CORRELATION)
self.assertGreater(corr, CORRELATION_THRESHOLD)
except Exception as ex:
error_analysis(result, CORRELATION, ex)
def test_bias(self):
result = pandas_ta.bias(self.close)
self.assertIsInstance(result, Series)
self.assertEqual(result.name, 'BIAS_SMA_26')
def test_bop(self):
result = pandas_ta.bop(self.open, self.high, self.low, self.close)
self.assertIsInstance(result, Series)
self.assertEqual(result.name, 'BOP')
try:
expected = tal.BOP(self.open, self.high, self.low, self.close)
|
pdt.assert_series_equal(result, expected, check_names=False)
|
pandas.testing.assert_series_equal
|
import uuid
from datetime import datetime
from typing import List
import onnxmltools
import pandas as pd
from pandas import DataFrame
from pandas.api.types import is_datetime64_any_dtype as is_datetime
import os
from refit.enums.model_format import ModelFormat
from refit.flink import submit
from refit.flink.refit_feature_extractor import RefitFeatureExtractor
from refit.repository.file_repository import FileRepository
from refit.repository.notebook_repository import NotebookRepository
from refit.util import model_factory
from refit.util.dataframe_helpers import extract_flag
from refit.util.refit_config import RefitConfig
def submit_job(feature_extractor=None):
submit.clear_jobs()
submit.submit_python(feature_extractor)
class Refit:
def __init__(self,
project_guid: str,
config: RefitConfig = None,
notebook_repository: NotebookRepository = None,
file_repository: FileRepository = None):
config = RefitConfig() if config is None else config
self._import_bucket = config.minio_bucket_import
self._model_bucket = config.minio_bucket_models
self._schema_bucket = config.minio_bucket_schema
self.project_guid = project_guid
self.notebook_repository = NotebookRepository(
config.integrations_host) if notebook_repository is None else notebook_repository
self.file_repository = FileRepository(config) if file_repository is None else file_repository
self.schema = self.notebook_repository.get_schema(project_guid)
@staticmethod
def of(project_guid: str):
return Refit(project_guid)
def sensor_data(self,
start: datetime,
end: datetime,
sensors: list = None,
feature_extractor: RefitFeatureExtractor = None,
include_flag: bool = False,
flag_name: str = 'operable') -> DataFrame:
sensor_data_dicts = self.notebook_repository.sensor_data(
project_guid=self.project_guid,
start=start,
end=end,
sensors=sensors
)
df = pd.DataFrame(sensor_data_dicts)
if include_flag:
training_window_dicts = self.notebook_repository.training_window(
project_guid=self.project_guid,
start=start,
end=end,
sensors=sensors
)
if len(training_window_dicts):
training_window_df =
|
pd.DataFrame(training_window_dicts)
|
pandas.DataFrame
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, Optional, Union
import numpy as np
from flash.core.data.io.input import DataKeys
from flash.core.data.utilities.data_frame import read_csv
from flash.core.utilities.imports import _PANDAS_AVAILABLE
from flash.tabular.input import TabularDataFrameInput
if _PANDAS_AVAILABLE:
from pandas.core.frame import DataFrame
else:
DataFrame = object
class TabularRegressionDataFrameInput(TabularDataFrameInput):
def load_data(
self,
data_frame: DataFrame,
categorical_fields: Optional[Union[str, List[str]]] = None,
numerical_fields: Optional[Union[str, List[str]]] = None,
target_field: Optional[str] = None,
parameters: Dict[str, Any] = None,
):
cat_vars, num_vars = self.preprocess(data_frame, categorical_fields, numerical_fields, parameters)
if not self.predicting:
targets = data_frame[target_field].to_numpy().astype(np.float32)
return [{DataKeys.INPUT: (c, n), DataKeys.TARGET: t} for c, n, t in zip(cat_vars, num_vars, targets)]
else:
return [{DataKeys.INPUT: (c, n)} for c, n in zip(cat_vars, num_vars)]
class TabularRegressionCSVInput(TabularRegressionDataFrameInput):
def load_data(
self,
file: Optional[str],
categorical_fields: Optional[Union[str, List[str]]] = None,
numerical_fields: Optional[Union[str, List[str]]] = None,
target_field: Optional[str] = None,
parameters: Dict[str, Any] = None,
):
if file is not None:
return super().load_data(read_csv(file), categorical_fields, numerical_fields, target_field, parameters)
class TabularRegressionDictInput(TabularRegressionDataFrameInput):
def load_data(
self,
data: Dict[str, List[Any]],
categorical_fields: Optional[Union[str, List[str]]] = None,
numerical_fields: Optional[Union[str, List[str]]] = None,
target_field: Optional[str] = None,
parameters: Dict[str, Any] = None,
):
data_frame = DataFrame.from_dict(data)
return super().load_data(data_frame, categorical_fields, numerical_fields, target_field, parameters)
class TabularRegressionListInput(TabularRegressionDataFrameInput):
def load_data(
self,
data: List[Union[tuple, dict]],
categorical_fields: Optional[Union[str, List[str]]] = None,
numerical_fields: Optional[Union[str, List[str]]] = None,
target_field: Optional[str] = None,
parameters: Dict[str, Any] = None,
):
data_frame =
|
DataFrame.from_records(data)
|
pandas.core.frame.DataFrame.from_records
|
import numpy as np
import argparse
import copy
import glob
import json
import logging
import os
import pickle
import random
import shutil
import sys
import tempfile
from os.path import isdir, join
from pathlib import Path
from tqdm.std import tqdm
import subprocess
import pandas as pd
# sys.path.append(r"/media/medical/gasperp/projects")
# import utilities
# sys.path.append(r"/media/medical/gasperp/projects/surface-distance")
from surface_distance import compute_metrics_deepmind
def main():
# Set parser
parser = argparse.ArgumentParser(
prog="nnU-Net prediction generating script",
description="Generate & evaluate predictions",
)
parser.add_argument(
"-t",
"--task_number",
type=int,
required=True,
help="three digit number XXX that comes after TaskXXX_YYYYYY",
)
parser.add_argument(
"-f",
"--fold",
type=str,
default=None,
choices=["0", "1", "2", "3", "4", "all"],
help="default is None (which means that script automatically determines fold if the is only one fold subfolder, otherwise raises error)",
)
parser.add_argument(
"-o",
"--out_dir",
type=str,
default=None,
help="directory to store output csv file and predictions (if --save_seg_masks is enabled)",
)
parser.add_argument(
"--save_seg_masks",
default=False,
action="store_true",
help="if this option is used, output segmentations are stored to out_dir",
)
parser.add_argument(
"-conf",
"--configuration",
type=str,
default="3d_fullres",
choices=["2d", "3d_fullres", "3d_lowres", "3d_cascade_fullres"],
help="nnU-Net configuration",
)
parser.add_argument(
"-tr",
"--trainer_class_name",
type=str,
default=None,
help="nnU-Net trainer: default is None (which means that script automatically determines trainer class if the is only one trainer subfolder, otherwise raises error), common options are nnUNetTrainerV2, nnUNetTrainerV2_noMirroringAxis2",
)
parser.add_argument(
"--checkpoint_name",
type=str,
default="model_final_checkpoint", # this means that 'model_final_checkpoint.model.pkl' is used for inference
help="nnU-Net model to use: default is final model, but in case inference is done before model training is complete you may want to specifify 'model_best.model.pkl' or sth else",
)
parser.add_argument(
"--just_test",
default=False,
action="store_true",
help="just test",
)
parser.add_argument(
"--gpus",
nargs="+",
type=str,
default=None,
help="if specified, GPU is utilized to speed up inference",
)
parser.add_argument(
"--num_threads_preprocessing",
type=int,
default=1,
help="nnUNet_predict parameter",
)
parser.add_argument(
"--num_threads_nifti_save",
type=int,
default=4,
help="nnUNet_predict parameter",
)
parser.add_argument(
"--mode", type=str, default="normal", help="nnUNet_predict parameter",
)
parser.add_argument(
"--disable_tta",
default=False,
action="store_true",
help="nnUNet_predict parameter",
)
parser.add_argument(
"--direct_method",
default=False,
action="store_true",
help="method for getting predictions",
)
parser.add_argument(
"--step_size", type=float, default=0.5, required=False, help="don't touch"
)
parser.add_argument(
"--all_in_gpu",
type=str,
default="None",
required=False,
help="can be None, False or True",
)
# running in terminal
args = vars(parser.parse_args())
all_in_gpu = args["all_in_gpu"]
assert all_in_gpu in ["None", "False", "True"]
if all_in_gpu == "None":
all_in_gpu = None
elif all_in_gpu == "True":
all_in_gpu = True
elif all_in_gpu == "False":
all_in_gpu = False
all_in_gpu = args["all_in_gpu"]
assert all_in_gpu in ["None", "False", "True"]
if all_in_gpu == "None":
all_in_gpu = None
elif all_in_gpu == "True":
all_in_gpu = True
elif all_in_gpu == "False":
all_in_gpu = False
# paths definition
nnUNet_raw_data_base_dir = os.environ["nnUNet_raw_data_base"]
nnUNet_preprocessed_dir = os.environ["nnUNet_preprocessed"]
nnUNet_trained_models_dir = os.environ["RESULTS_FOLDER"]
nnUNet_configuration_dir = join(
nnUNet_trained_models_dir, "nnUNet", args["configuration"]
)
base_nnunet_dir_on_medical = "/media/medical/projects/head_and_neck/nnUnet"
csv_name = "results"
## checkers for input parameters
# input task
existing_tasks = {
int(i.split("_")[0][-3:]): join(nnUNet_configuration_dir, i)
for i in os.listdir(nnUNet_configuration_dir)
if i.startswith("Task") and os.path.isdir(join(nnUNet_configuration_dir, i))
}
assert (
args["task_number"] in existing_tasks.keys()
), f"Could not find task num.: {args['task_number']}. Found the following task/directories: {existing_tasks}"
task_dir = existing_tasks[args["task_number"]]
# e.g.: '/storage/nnUnet/nnUNet_trained_models/nnUNet/3d_fullres/Task152_onkoi-2019-batch-1-and-2-both-modalities-biggest-20-organs-new'
task_name = Path(task_dir).name
# e.g.: task_name = 'Task152_onkoi-2019-batch-1-and-2-both-modalities-biggest-20-organs-new'
## checkers for input parameters
# nnunet trainer class
trainer_classes_list = [i.split("__")[0] for i in os.listdir(task_dir)]
assert len(trainer_classes_list) > 0, f"no trainer subfolders found in {task_dir}"
if args["trainer_class_name"] is None:
if len(trainer_classes_list) > 1:
raise ValueError(
f"Cannot automatically determine trainer class name, since multiple trainer class folders were found in {task_dir}. \nPlease specfiy exact '--trainer_class_name'"
)
else:
args["trainer_class_name"] = trainer_classes_list[0]
## checkers for input parameters
# nnunet plans list
# determine which plans version was used, raise error if multiple plans exist
plans_list = [
i.split("__")[-1]
for i in os.listdir(task_dir)
if i.startswith(f"{args['trainer_class_name']}__")
]
assert (
len(plans_list) == 1
), f"multiple trainer_classes_and_plans dirs found {plans_list}, please specify which to use"
args["plans_name"] = plans_list[0]
args[
"trainer_classes_and_plans_dir_name"
] = f"{args['trainer_class_name']}__{args['plans_name']}"
trainer_classes_and_plans_dir = join(
task_dir, args["trainer_classes_and_plans_dir_name"]
)
## checkers for input parameters
# fold
available_folds = [
i
for i in os.listdir(trainer_classes_and_plans_dir)
if os.path.isdir(join(trainer_classes_and_plans_dir, i))
]
if "gt_niftis" in available_folds:
available_folds.remove("gt_niftis")
assert (
len(available_folds) > 0
), f"no fold subfolders found in {trainer_classes_and_plans_dir}"
if args["fold"] is None:
if len(available_folds) > 1:
raise ValueError(
f"Cannot automatically determine fold, since multiple folds were found in {trainer_classes_and_plans_dir}. \nPlease specfiy exact '--fold'"
)
else:
get_fold_num = lambda s: int(s.split("_")[-1]) if s != "all" else s
args["fold"] = get_fold_num(available_folds[0])
if args["fold"] != "all":
# if args['fold'] is 0/1/2/3/4, convert it to 'fold_X' else keep 'all'
args["fold_str"] = f"fold_{args['fold']}"
else:
args["fold_str"] = args["fold"]
assert (
args["fold_str"] in available_folds
), f"--fold {args['fold']} is not a valid options, available_folds are: {available_folds}"
## checkers for input parameters
# nnunet model checkpoint to be used for inference
models_checkpoints_dir = join(trainer_classes_and_plans_dir, args["fold_str"])
models_checkpoints_dir_files = os.listdir(models_checkpoints_dir)
assert any(
[args["checkpoint_name"] in i for i in models_checkpoints_dir_files]
), f"--checkpoint_name {args['checkpoint_name']} is not a valid options, checkpoint_name should be a file in {models_checkpoints_dir}. Files in this directory are: {models_checkpoints_dir_files}"
## data paths retrieval
# get dict, dict of dict of filepaths: {'train': {img_name: {'images': {modality0: fpath, ...}, 'label': fpath} ...}, ...}
# load dataset json
with open(join(nnUNet_preprocessed_dir, task_name, "dataset.json"), "r") as fp:
dataset_json_dict = json.load(fp)
# create modalities dict
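# map each modality name to its zero-padded 4-digit index, matching nnU-Net's image naming convention (e.g. case_0000.nii.gz)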
four_digit_ids = {
m: str.zfill(str(int(i)), 4) for i, m in dataset_json_dict["modality"].items()
}
# get image paths with modality four digit id
raw_data_dir = join(nnUNet_raw_data_base_dir, "nnUNet_raw_data", task_name)
img_modality_fpaths_dict = lambda fname, dir_name: {
modality: join(raw_data_dir, f"images{dir_name}", fname + f"_{m_id}.nii.gz")
for modality, m_id in four_digit_ids.items()
}
# generate label path
labels_fpath = lambda fname, dir_name: join(
raw_data_dir, f"labels{dir_name}", fname + ".nii.gz"
)
# generate images dict {modality: img_path}
splits_iterator = lambda fname_list, dir_name="Tr": {
img_name: {
"images": img_modality_fpaths_dict(img_name, dir_name=dir_name),
"label": labels_fpath(img_name, dir_name=dir_name),
}
for img_name in fname_list
}
# create dict with paths split on train, val (if fold not 'all') and test
splits_final_dict = {}
splits_final_dict["test"] = splits_iterator(
[Path(i).name[: -len(".nii.gz")] for i in dataset_json_dict["test"]],
dir_name="Ts",
)
if not args['just_test']:
if args["fold"] != "all":
with open(
join(nnUNet_preprocessed_dir, task_name, "splits_final.pkl"), "rb"
) as f:
_dict = pickle.load(f)
splits_final_dict["train"] = splits_iterator(_dict[int(args["fold"])]["train"])
splits_final_dict["val"] = splits_iterator(_dict[int(args["fold"])]["val"])
else:
splits_final_dict["train"] = splits_iterator(
[
Path(_dict["image"]).name[: -len(".nii.gz")]
for _dict in dataset_json_dict["training"]
]
)
images_source_dirs = [
join(raw_data_dir, "imagesTs"),
]
if not args['just_test']:
images_source_dirs.append(join(raw_data_dir, "imagesTr"))
config_str = f"FOLD-{args['fold']}_TRAINER-{args['trainer_class_name']}_PLANS-{args['plans_name']}_CHK-{args['checkpoint_name']}"
logging.info(f"settings info: {config_str}")
if args["out_dir"] is None:
args["out_dir"] = join(base_nnunet_dir_on_medical, task_name, "results")
os.makedirs(args["out_dir"], exist_ok=True)
# prepare temporary dir for predicted segmentations
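# predictions are first written to a temporary directory and are copied into out_dir later only if --save_seg_masks is set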
base_tmp_dir = f"/tmp/nnunet/predict/{task_name}/{config_str}"
output_seg_dir = f"{base_tmp_dir}/out"
if os.path.exists(base_tmp_dir):
shutil.rmtree(base_tmp_dir)
os.makedirs(output_seg_dir)
# prepare directories in case predicted segmentations are to be saved
if args["save_seg_masks"]:
pred_seg_out_dir = join(args["out_dir"], config_str)
out_dirs = {
"test": join(pred_seg_out_dir, "test"),
}
if not args['just_test']:
out_dirs["train"] = join(pred_seg_out_dir, "train")
out_dirs["val"] = join(pred_seg_out_dir, "val")
os.makedirs(out_dirs["train"], exist_ok=True)
if "val" in splits_final_dict.keys():
os.makedirs(out_dirs["val"], exist_ok=True)
os.makedirs(out_dirs["test"], exist_ok=True)
try:
if args['direct_method']:
from nnunet.inference.predict import predict_from_folder
model_folder_name = join(
nnUNet_configuration_dir,
trainer_classes_and_plans_dir
)
print("using model stored in ", model_folder_name)
assert isdir(model_folder_name), (
"model output folder not found. Expected: %s" % model_folder_name
)
for in_dir in images_source_dirs:
print(f'\n\nSTARTING predition for {in_dir}\n\n')
predict_from_folder(
model=model_folder_name,
input_folder=in_dir,
output_folder=output_seg_dir,
folds=[args["fold"]],
save_npz=False,
num_threads_preprocessing=args["num_threads_preprocessing"],
num_threads_nifti_save=args["num_threads_nifti_save"],
lowres_segmentations=None,
part_id=0,
num_parts=1,
tta=not args["disable_tta"],
overwrite_existing=False,
mode=args["mode"],
overwrite_all_in_gpu=all_in_gpu,
mixed_precision=not False,
step_size=args["step_size"],
checkpoint_name=args["checkpoint_name"],
)
else:
for in_dir in images_source_dirs:
cmd_list = [
"nnUNet_predict",
"-i",
in_dir,
"-o",
output_seg_dir,
"-t",
args["task_number"],
"-m",
args["configuration"],
"-f",
args["fold"],
"-tr",
args["trainer_class_name"],
"-chk",
args["checkpoint_name"],
"--num_threads_preprocessing",
args["num_threads_preprocessing"],
"--num_threads_nifti_save",
args["num_threads_nifti_save"],
"--mode",
args["mode"],
"--disable_tta" if args["disable_tta"] else None,
]
cmd_list = [str(i) for i in cmd_list if i]
logging.info(f"Final command for nnU-Net prediction: {cmd_list}")
# set env variables
if args["gpus"]:
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(args["gpus"])
logging.info(
f"Set env variables CUDA_VISIBLE_DEVICES to: {os.environ['CUDA_VISIBLE_DEVICES']}"
)
os.environ["MKL_THREADING_LAYER"] = "GNU"
# RUN command in terminal
subprocess_out = subprocess.run(cmd_list, check=True)
logging.info(f"Subprocess exit code was: {subprocess_out.returncode}")
logging.info(f"Successfully predicted seg masks from input dir: {in_dir}")
except Exception as e:
logging.error(f"Failed due to the following error: {e}")
shutil.rmtree(output_seg_dir)
sys.exit()
try:
organs_labels_dict = {
organ: int(lbl) for lbl, organ in dataset_json_dict["labels"].items()
}
logging.info(f"Found the following organs and labels: {organs_labels_dict}")
compute_metrics = compute_metrics_deepmind(
organs_labels_dict=organs_labels_dict
)
settings_info = {
"model_task_number": args["task_number"],
"model_task_name": task_name,
"fold": args["fold"],
"trainer_class": args["trainer_class_name"],
"plans_name": args["plans_name"],
"checkpoint": args["checkpoint_name"],
"prediction_mode": args["mode"],
}
dfs = []
for phase, phase_dict in tqdm(
splits_final_dict.items(), total=len(splits_final_dict)
):
settings_info["phase"] = phase
for fname, fname_dict in tqdm(phase_dict.items(), total=len(phase_dict)):
settings_info["fname"] = fname
gt_fpath = fname_dict["label"]
pred_fpath = join(output_seg_dir, fname + ".nii.gz")
out_dict_tmp = compute_metrics.execute(
fpath_gt=gt_fpath, fpath_pred=pred_fpath
)
df = pd.DataFrame.from_dict(out_dict_tmp)
for k, val in settings_info.items():
df[k] = val
dfs.append(df)
if args["save_seg_masks"]:
shutil.copy2(pred_fpath, join(out_dirs[phase], fname + ".nii.gz"))
csv_path = join(args["out_dir"], f"{csv_name}.csv")
if os.path.exists(csv_path):
logging.info(
f"Found existing .csv file on location {csv_path}, merging existing and new dataframe"
)
existing_df = [pd.read_csv(csv_path)] + dfs
|
pd.concat(existing_df, ignore_index=True)
|
pandas.concat
|
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
import sys
import numpy as np
import os
import struct
import argparse
from array import array as pyarray
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import precision_score, recall_score, roc_auc_score, f1_score, matthews_corrcoef
import pandas as pd
parser = argparse.ArgumentParser(description='RF on data')
parser.add_argument("--data", help="raw or latent")
args = parser.parse_args()
if __name__ == '__main__':
if args.data is None:
print("Please specify raw or latent for data flag")
else:
dataset=args.data
svm_accuracy = []
svm_roc_auc = []
svm_precision = []
svm_recall = []
svm_f_score = []
svm_pred = []
svm_prob = []
svm_mcc = []
fp = pd.read_csv("diabimmune_metadata_allcountries_allergy_noQuotes.csv", index_col=3)
allergy = fp["allergy"]
allergy = pd.factorize(allergy)
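# pd.factorize returns (integer codes, unique values); the codes are used as class labels below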
subject = fp["subjectID"]
labels = allergy[1]
allergy = allergy[0]
subject_data = {'ID': subject, 'label': allergy}
split_df =
|
pd.DataFrame(data=subject_data)
|
pandas.DataFrame
|
import time
import random
import numpy as np
import pandas as pd
import hdbscan
import sklearn.datasets
from sklearn import metrics
from classix import CLASSIX
from sklearn.cluster import KMeans
from sklearn.cluster import DBSCAN
from sklearn import preprocessing
from tqdm import tqdm
from sklearn.cluster import MeanShift
from quickshift.QuickshiftPP import *
import matplotlib.pyplot as plt
from tqdm import tqdm
import seaborn as sns
plt.style.use('bmh')
seed = 0
np.random.seed(seed)
random.seed(seed)
def test_kmeanspp_labels(X=None, y=None, _range=np.arange(2, 21, 1)):
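# sweep the number of clusters for k-means++ and record ARI/AMI against the ground-truth labels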
ar = list()
am = list()
for i in _range:
kmeans = KMeans(n_clusters=i, init='k-means++', random_state=1)
kmeans.fit(X)
ri = metrics.adjusted_rand_score(y, kmeans.labels_)
mi = metrics.adjusted_mutual_info_score(y, kmeans.labels_)
ar.append(ri)
am.append(mi)
return ar, am
def test_meanshift_labels(X=None, y=None, _range=np.arange(2, 21, 1)):
ar = list()
am = list()
for i in _range:
meanshift = MeanShift(bandwidth=i)
meanshift.fit(X)
ri = metrics.adjusted_rand_score(y, meanshift.labels_)
mi = metrics.adjusted_mutual_info_score(y, meanshift.labels_)
ar.append(ri)
am.append(mi)
return ar, am
def test_dbscan_labels(X=None, y=None, _range=np.arange(0.05, 0.505, 0.005), minPts=5):
ar = list()
am = list()
for i in _range:
dbscan = DBSCAN(eps=i, n_jobs=1, min_samples=minPts)
dbscan.fit(X)
ri = metrics.adjusted_rand_score(y, dbscan.labels_)
mi = metrics.adjusted_mutual_info_score(y, dbscan.labels_)
ar.append(ri)
am.append(mi)
return ar, am
def test_hdbscan_labels(X=None, y=None, _range=np.arange(2, 21, 1)):
ar = list()
am = list()
for i in _range:
_hdbscan = hdbscan.HDBSCAN(min_cluster_size=int(i), algorithm='best')
_hdbscan.fit(X)
ri = metrics.adjusted_rand_score(y, _hdbscan.labels_)
mi = metrics.adjusted_mutual_info_score(y, _hdbscan.labels_)
ar.append(ri)
am.append(mi)
return ar, am
def test_quickshiftpp_labels(X=None, y=None, _range=np.arange(2, 17, 1), beta=0.3):
ar = list()
am = list()
for i in _range:
quicks = QuickshiftPP(k=i, beta=beta)
quicks.fit(X.copy(order='C'))
ri = metrics.adjusted_rand_score(y, quicks.memberships)
mi = metrics.adjusted_mutual_info_score(y, quicks.memberships)
ar.append(ri)
am.append(mi)
return ar, am
def test_classix_radius_labels(X=None, y=None, method=None, minPts=1, sorting='pca', _range=np.arange(0.05, 0.3, 0.005)):
ar = list()
am = list()
for i in _range:
classix = CLASSIX(radius=i, minPts=minPts, post_alloc=True, sorting=sorting,
group_merging=method, verbose=0)
classix.fit(X)
ri = metrics.adjusted_rand_score(y, classix.labels_)
mi = metrics.adjusted_mutual_info_score(y, classix.labels_)
ar.append(ri)
am.append(mi)
return ar, am
def run_sensitivity_test(datasets, _range, clustering='CLASSIX (Density)', fix_k=1, sorting='pca', label_files=None):
np.random.seed(1)
X, y = datasets[0], datasets[1]
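# drop rows containing NaNs, then z-score every feature before running the parameter sweeps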
nonans = np.isnan(X).sum(1) == 0
X = X[nonans,:]
y = y[nonans]
X = (X - X.mean(axis=0)) / X.std(axis=0)
if clustering == 'CLASSIX (density)':
ari, ami = test_classix_radius_labels(X=X, y=y, method='density', minPts=fix_k, sorting=sorting, _range=_range)
elif clustering == 'CLASSIX (distance)':
ari, ami = test_classix_radius_labels(X=X, y=y, method='distance', minPts=fix_k, sorting=sorting, _range=_range)
elif clustering == 'HDBSCAN':
ari, ami = test_hdbscan_labels(X=X, y=y, _range=_range)
elif clustering == 'DBSCAN':
ari, ami = test_dbscan_labels(X=X, y=y, _range=_range, minPts=fix_k)
elif clustering == 'Quickshift++':
ari, ami = test_quickshiftpp_labels(X=X, y=y, _range=_range, beta=fix_k)
elif clustering == 'k-means++':
ari, ami = test_kmeanspp_labels(X=X, y=y, _range=_range)
elif clustering == 'Meanshift':
ari, ami = test_meanshift_labels(X=X, y=y, _range=_range)
else:
raise ValueError('Specify a concrete clustering algorithms.')
store_df = pd.DataFrame()
store_df['Range'] = _range
store_df['ARI'] = ari
store_df['AMI'] = ami
store_df.to_csv('results/exp5/{}'.format(label_files)+clustering+'.csv', index=False)
def visualize_params_global():
plt.style.use('default')
datasets = ['Banknote', 'Dermatology', 'Ecoli', 'Glass', 'Iris', 'Phoneme', 'WheatSeeds', 'Wine']
algorithms = ['Meanshift', 'DBSCAN', 'HDBSCAN', 'Quickshift++', 'CLASSIX (distance)', 'CLASSIX (density)']
plot_num = 1
fontsize = 60
band = [0.5, 0.01, 0.5, 0.5, 0.015, 0.015]
plt.figure(figsize=(8.5*len(datasets), 9*len(algorithms)))
for data in datasets:
i = 0
for algorithm in algorithms:
store_df = pd.read_csv('results/exp5/{}'.format(data)+algorithm+'.csv')
_range = store_df['Range'].values
ars = store_df['ARI'].values
ami = store_df['AMI'].values
plt.rcParams['axes.facecolor'] = 'white'
plt.subplot(len(datasets), len(algorithms), plot_num)
plt.plot(_range, ars, label='ARI', marker='o', markersize=20, c='red')
plt.plot(_range, ami, label='AMI', marker='*', markersize=18, c='darkorange')
plt.ylim(-.05, 1.05)
plt.xticks([min(_range), max(_range)])
plt.yticks([0.5, 1])
plt.xlim(-band[i]+min(_range), band[i]+max(_range))
if plot_num == len(algorithms):
plt.legend(fontsize=fontsize, ncol=2, bbox_to_anchor=(1, 1.5))
plt.tick_params(axis='both', labelsize=fontsize)
plt.grid(True)
plt.subplots_adjust(bottom=0.01, left=0.01, right=0.99, top=0.99, wspace=0.15, hspace=0.15)
plot_num = plot_num + 1
i = i + 1
plt.tight_layout()
plt.savefig('results/exp5/ARI_AMI_PARAMS.pdf', bbox_inches='tight')
def visualize_params(_range, clustering='CLASSIX (density)', label_files=None, band=0.01, fig_interval=1):
# sns.set(font_scale=5)
store_df = pd.read_csv('results/exp5/{}'.format(label_files)+clustering+'.csv')
_range = store_df['Range'].values
ami = store_df['AMI'].values
ari = store_df['ARI'].values
plt.figure(figsize=(6, 3.6))
plt.rcParams['axes.facecolor'] = 'white'
# plt.rc('font', family='serif')
plt.plot(_range, ari, label='ARI',
marker='o', markersize=10, c='red')
plt.plot(_range, ami, label='AMI',
marker='*', markersize=8, c='darkorange')
plt.legend(fontsize=32, fancybox=True, loc='best')
plt.ylim(-.05, 1.05)
# plt.xticks(np.arange(min(_range), max(_range)+1, fig_interval))
plt.xticks([min(_range), max(_range)])
plt.yticks([0, 0.5, 1])
plt.xlim(-band+min(_range), band+max(_range))
plt.tick_params(axis='both', labelsize=32)
plt.savefig('results/exp5/{}'.format(label_files)+clustering+'.pdf', bbox_inches='tight')
# plt.show()
def params_search():
datasets = []
data = pd.read_csv('data/Real_data/Banknote_authentication.csv')
X_banknote = data.drop(['4'],axis=1).values
y_banknote = data['4'].values
# print("Shape of banknote data: ", data.shape, ", labels: ", len(set(y_banknote)))
datasets.append((X_banknote, y_banknote))
data = pd.read_csv("data/Real_data/Dermatology.csv").values
X_dermatology = data[:, :data.shape[1]-1]
y_dermatology = data[:, data.shape[1]-1]
# print("Shape of Dermatology data: ", data.shape, ", labels: ", len(set(y_dermatology)))
datasets.append((X_dermatology, y_dermatology))
data = pd.read_csv("data/Real_data/Ecoli.csv").values
X_ecoli = data[:, range(data.shape[1] - 1)]
y_ecoli = data[:, data.shape[1] - 1]
# print("Shape of Ecoli data: ", data.shape, ", labels: ", len(set(y_ecoli)))
datasets.append((X_ecoli,y_ecoli))
data = pd.read_csv("data/Real_data/Glass.csv")
le = preprocessing.LabelEncoder()
data['Glass'] = le.fit_transform(data['Glass'])
X_glass = data.drop(['Glass', 'Id'],axis=1).values
y_glass = data['Glass'].values
# print("Shape of Glass data: ", data.shape, ", labels: ", len(set(y_glass)))
datasets.append((X_glass, y_glass))
data = pd.read_csv("data/Real_data/Iris.csv")
le = preprocessing.LabelEncoder()
data['Species'] = le.fit_transform(data['Species'])
X_iris = data.drop(['Species','Id'],axis=1).values
y_iris = data['Species'].values
# print("Shape of Iris data: ", data.shape, ", labels: ", len(set(y_iris)))
datasets.append((X_iris, y_iris))
data = pd.read_csv("data/Real_data/Phoneme.csv")
le = preprocessing.LabelEncoder()
data['g'] = le.fit_transform(data['g'])
X_phoneme = data.drop(['speaker', 'g'],axis=1).values
y_phoneme = data['g'].values
# print("Shape of Phoneme data: ", data.shape, ", labels: ", len(set(y_phoneme)))
datasets.append((X_phoneme, y_phoneme))
data = pd.read_csv('data/Real_data/Seeds.csv')
X_seeds = data.drop(['7'],axis=1).values
y_seeds = data['7'].values
# print("Shape of seeds data: ", data.shape, ", labels: ", len(set(y_seeds)))
datasets.append((X_seeds, y_seeds))
data = pd.read_csv("data/Real_data/Wine.csv")
X_wine = data.drop(['14'],axis=1).values
y_wine = data['14'].values
# print("Shape of Wine data: ", data.shape, ", labels: ", len(set(y_wine)))
datasets.append((X_wine, y_wine))
# ==========================================================================
# ****************************************************************Mean shift
fig_interval = 2
band = 0.5
_range = np.arange(1, 16, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[0], _range=_range, clustering='Meanshift',
label_files='Banknote')
_range = np.arange(1, 16, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[1], _range=_range, clustering='Meanshift',
label_files='Dermatology')
_range = np.arange(1, 16, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[2], _range=_range, clustering='Meanshift',
label_files='Ecoli')
_range = np.arange(1, 16, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[3], _range=_range, clustering='Meanshift',
label_files='Glass')
_range = np.arange(1, 16, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[4], _range=_range, clustering='Meanshift',
label_files='Iris')
_range = np.arange(5, 20, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[5], _range=_range, clustering='Meanshift',
label_files='Phoneme')
_range = np.arange(1, 16, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[6], _range=_range, clustering='Meanshift',
label_files='WheatSeeds')
_range = np.arange(1, 16, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[7], _range=_range, clustering='Meanshift',
label_files='Wine')
# ==========================================================================
# ****************************************************************DBSCAN
fig_interval = 0.1
band = 0.01
_range = np.arange(0.15, 0.525, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[0], _range=_range, clustering='DBSCAN', fix_k=5,
label_files='Banknote')
_range = np.arange(5.15, 5.525, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[1], _range=_range, clustering='DBSCAN', fix_k=5,
label_files='Dermatology')
_range = np.arange(0.55, 0.925, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[2], _range=_range, clustering='DBSCAN', fix_k=5,
label_files='Ecoli')
_range = np.arange(1.55, 1.925, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[3], _range=_range, clustering='DBSCAN', fix_k=5,
label_files='Glass')
_range = np.arange(0.55, 0.925, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[4], _range=_range, clustering='DBSCAN', fix_k=5,
label_files='Iris')
_range = np.arange(9, 9.375, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[5], _range=_range, clustering='DBSCAN', fix_k=10,
label_files='Phoneme')
_range = np.arange(0.55, 0.925, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[6], _range=_range, clustering='DBSCAN', fix_k=5,
label_files='WheatSeeds')
_range = np.arange(2.2, 2.575, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[7], _range=_range, clustering='DBSCAN', fix_k=5,
label_files='Wine')
# ==========================================================================
# ****************************************************************HDBSCAN
fig_interval = 2
band = 0.5
_range = np.arange(5, 20, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[0], _range=_range, clustering='HDBSCAN',
label_files='Banknote')
_range = np.arange(2, 17, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[1], _range=_range, clustering='HDBSCAN',
label_files='Dermatology')
_range = np.arange(2, 17, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[2], _range=_range, clustering='HDBSCAN',
label_files='Ecoli')
_range = np.arange(2, 17, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[3], _range=_range, clustering='HDBSCAN',
label_files='Glass')
_range = np.arange(2, 17, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[4], _range=_range, clustering='HDBSCAN',
label_files='Iris')
_range = np.arange(2, 17, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[5], _range=_range, clustering='HDBSCAN',
label_files='Phoneme')
_range = np.arange(2, 17, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[6], _range=_range, clustering='HDBSCAN',
label_files='WheatSeeds')
_range = np.arange(2, 17, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[7], _range=_range, clustering='HDBSCAN',
label_files='Wine')
# ==========================================================================
# ****************************************************************Quickshift++
fig_interval = 2
band = 0.5
_range = np.arange(10, 25, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[0], _range=_range, clustering='Quickshift++', fix_k=0.7,
label_files='Banknote')
_range = np.arange(7, 22, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[1], _range=_range, clustering='Quickshift++', fix_k=0.3,
label_files='Dermatology')
_range = np.arange(5, 20, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[2], _range=_range, clustering='Quickshift++', fix_k=0.3,
label_files='Ecoli')
_range = np.arange(5, 20, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[3], _range=_range, clustering='Quickshift++', fix_k=0.3,
label_files='Glass')
_range = np.arange(10, 25, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[4], _range=_range, clustering='Quickshift++', fix_k=0.3,
label_files='Iris')
_range = np.arange(235, 250, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[5], _range=_range, clustering='Quickshift++', fix_k=0.3,
label_files='Phoneme')
_range = np.arange(10, 25, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[6], _range=_range, clustering='Quickshift++', fix_k=0.3,
label_files='WheatSeeds')
_range = np.arange(5, 20, 1)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[7], _range=_range, clustering='Quickshift++', fix_k=0.3,
label_files='Wine')
# ==========================================================================
# ****************************************************************CLASSIX distance
fig_interval = 0.1
band = 0.015
_range = np.arange(0.01, 0.375, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[0], _range=_range, clustering='CLASSIX (distance)', fix_k=6, sorting='pca',
label_files='Banknote')
_range = np.arange(0.325, 0.676, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[1], _range=_range, clustering='CLASSIX (distance)', fix_k=4, sorting='pca',
label_files='Dermatology')
_range = np.arange(0.1, 0.475, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[2], _range=_range, clustering='CLASSIX (distance)', fix_k=3, sorting='pca',
label_files='Ecoli')
_range = np.arange(0.375, 0.75, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[3], _range=_range, clustering='CLASSIX (distance)', fix_k=0, sorting='pca',
label_files='Glass')
_range = np.arange(0.15, 0.525, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[4], _range=_range, clustering='CLASSIX (distance)', fix_k=6, sorting='pca',
label_files='Iris')
_range = np.arange(0.27, 0.625, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[5], _range=_range, clustering='CLASSIX (distance)', fix_k=9, sorting='pca',
label_files='Phoneme')
_range = np.arange(0.05, 0.425, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[6], _range=_range, clustering='CLASSIX (distance)', fix_k=7, sorting='pca',
label_files='WheatSeeds')
_range = np.arange(0.2, 0.575, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[7], _range=_range, clustering='CLASSIX (distance)', fix_k=7, sorting='pca',
label_files='Wine')
# ==========================================================================
# ****************************************************************CLASSIX density
fig_interval = 0.1
band = 0.015
_range = np.arange(0.05, 0.425, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[0], _range=_range, clustering='CLASSIX (density)', fix_k=6, sorting='pca',
label_files='Banknote')
_range = np.arange(0.5, 0.875, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[1], _range=_range, clustering='CLASSIX (density)', fix_k=4, sorting='pca',
label_files='Dermatology')
_range = np.arange(0.1, 0.475, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[2], _range=_range, clustering='CLASSIX (density)', fix_k=3, sorting='pca',
label_files='Ecoli')
_range = np.arange(0.475, 0.85, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[3], _range=_range, clustering='CLASSIX (density)', fix_k=0, sorting='pca',
label_files='Glass')
_range = np.arange(0.15, 0.525, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[4], _range=_range, clustering='CLASSIX (density)', fix_k=6, sorting='pca',
label_files='Iris')
_range = np.arange(1.3, 1.675, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[5], _range=_range, clustering='CLASSIX (density)', fix_k=9, sorting='pca',
label_files='Phoneme')
_range = np.arange(0.1, 0.475, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[6], _range=_range, clustering='CLASSIX (density)', fix_k=7, sorting='pca',
label_files='WheatSeeds')
_range = np.arange(0.4, 0.775, 0.025)
# print("Parameter range: ", len(_range))
run_sensitivity_test(datasets=datasets[7], _range=_range, clustering='CLASSIX (density)', fix_k=7, sorting='pca',
label_files='Wine')
def visualize_params_search():
# ==========================================================================
# ****************************************************************Mean shift
fig_interval = 2
band = 0.5
_range = np.arange(1, 16, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='Meanshift',
label_files='Banknote', band=band, fig_interval=fig_interval)
_range = np.arange(1, 16, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='Meanshift',
label_files='Dermatology', band=band, fig_interval=fig_interval)
_range = np.arange(1, 16, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='Meanshift',
label_files='Ecoli', band=band, fig_interval=fig_interval)
_range = np.arange(1, 16, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='Meanshift',
label_files='Glass', band=band, fig_interval=fig_interval)
_range = np.arange(1, 16, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='Meanshift',
label_files='Iris', band=band, fig_interval=fig_interval)
_range = np.arange(5, 20, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='Meanshift',
label_files='Phoneme', band=band, fig_interval=fig_interval)
_range = np.arange(1, 16, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='Meanshift',
label_files='WheatSeeds', band=band, fig_interval=fig_interval)
_range = np.arange(1, 16, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='Meanshift',
label_files='Wine', band=band, fig_interval=fig_interval)
# ==========================================================================
# ****************************************************************DBSCAN
fig_interval = 0.1
band = 0.01
_range = np.arange(0.15, 0.525, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='DBSCAN',
label_files='Banknote', band=band, fig_interval=fig_interval)
_range = np.arange(5.15, 5.525, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='DBSCAN',
label_files='Dermatology', band=band, fig_interval=fig_interval)
_range = np.arange(0.55, 0.925, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='DBSCAN',
label_files='Ecoli', band=band, fig_interval=fig_interval)
_range = np.arange(1.55, 1.925, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='DBSCAN',
label_files='Glass', band=band, fig_interval=fig_interval)
_range = np.arange(0.55, 0.925, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='DBSCAN',
label_files='Iris', band=band, fig_interval=fig_interval)
_range = np.arange(9, 9.375, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='DBSCAN',
label_files='Phoneme', band=band, fig_interval=fig_interval)
_range = np.arange(0.55, 0.925, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='DBSCAN',
label_files='WheatSeeds', band=band, fig_interval=fig_interval)
_range = np.arange(2.2, 2.575, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='DBSCAN',
label_files='Wine', band=band, fig_interval=fig_interval)
# ==========================================================================
# ****************************************************************HDBSCAN
fig_interval = 2
band = 0.5
_range = np.arange(5, 20, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='HDBSCAN',
label_files='Banknote', band=band, fig_interval=fig_interval)
_range = np.arange(2, 17, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='HDBSCAN',
label_files='Dermatology', band=band, fig_interval=fig_interval)
_range = np.arange(2, 17, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='HDBSCAN',
label_files='Ecoli', band=band, fig_interval=fig_interval)
_range = np.arange(2, 17, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='HDBSCAN',
label_files='Glass', band=band, fig_interval=fig_interval)
_range = np.arange(2, 17, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='HDBSCAN',
label_files='Iris', band=band, fig_interval=fig_interval)
_range = np.arange(2, 17, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='HDBSCAN',
label_files='Phoneme', band=band, fig_interval=fig_interval)
_range = np.arange(2, 17, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='HDBSCAN',
label_files='WheatSeeds', band=band, fig_interval=fig_interval)
_range = np.arange(2, 17, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='HDBSCAN',
label_files='Wine', band=band, fig_interval=fig_interval)
# ==========================================================================
# ****************************************************************Quickshift++
fig_interval = 2
band = 0.5
_range = np.arange(10, 25, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='Quickshift++',
label_files='Banknote', band=band, fig_interval=fig_interval)
_range = np.arange(7, 22, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='Quickshift++',
label_files='Dermatology', band=band, fig_interval=fig_interval)
_range = np.arange(5, 20, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='Quickshift++',
label_files='Ecoli', band=band, fig_interval=fig_interval)
_range = np.arange(5, 20, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='Quickshift++',
label_files='Glass', band=band, fig_interval=fig_interval)
_range = np.arange(10, 25, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='Quickshift++',
label_files='Iris', band=band, fig_interval=fig_interval)
_range = np.arange(235, 250, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='Quickshift++',
label_files='Phoneme', band=band, fig_interval=fig_interval+5)
_range = np.arange(10, 25, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='Quickshift++',
label_files='WheatSeeds', band=band, fig_interval=fig_interval)
_range = np.arange(5, 20, 1)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='Quickshift++',
label_files='Wine', band=band, fig_interval=fig_interval)
# ==========================================================================
# ****************************************************************CLASSIX distance
fig_interval = 0.1
band = 0.015
_range = np.arange(0.01, 0.375, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='CLASSIX (distance)',
label_files='Banknote', band=band, fig_interval=fig_interval)
_range = np.arange(0.325, 0.676, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='CLASSIX (distance)',
label_files='Dermatology', band=band, fig_interval=fig_interval)
_range = np.arange(0.1, 0.475, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='CLASSIX (distance)',
label_files='Ecoli', band=band, fig_interval=fig_interval)
_range = np.arange(0.375, 0.75, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='CLASSIX (distance)',
label_files='Glass', band=band, fig_interval=fig_interval)
_range = np.arange(0.15, 0.525, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='CLASSIX (distance)',
label_files='Iris', band=band, fig_interval=fig_interval)
_range = np.arange(0.27, 0.625, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='CLASSIX (distance)',
label_files='Phoneme', band=band, fig_interval=fig_interval)
_range = np.arange(0.05, 0.425, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='CLASSIX (distance)',
label_files='WheatSeeds', band=band, fig_interval=fig_interval)
_range = np.arange(0.2, 0.575, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='CLASSIX (distance)',
label_files='Wine', band=band, fig_interval=fig_interval)
# ==========================================================================
# ****************************************************************CLASSIX density
fig_interval = 0.1
band = 0.015
_range = np.arange(0.05, 0.425, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='CLASSIX (density)',
label_files='Banknote', band=band, fig_interval=fig_interval)
_range = np.arange(0.5, 0.875, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='CLASSIX (density)',
label_files='Dermatology', band=band, fig_interval=fig_interval)
_range = np.arange(0.1, 0.475, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='CLASSIX (density)',
label_files='Ecoli', band=band, fig_interval=fig_interval)
_range = np.arange(0.475, 0.85, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='CLASSIX (density)',
label_files='Glass', band=band, fig_interval=fig_interval)
_range = np.arange(0.15, 0.525, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='CLASSIX (density)',
label_files='Iris', band=band, fig_interval=fig_interval)
_range = np.arange(1.3, 1.675, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='CLASSIX (density)',
label_files='Phoneme', band=band, fig_interval=fig_interval)
_range = np.arange(0.1, 0.475, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='CLASSIX (density)',
label_files='WheatSeeds', band=band, fig_interval=fig_interval)
_range = np.arange(0.4, 0.775, 0.025)
# print("Parameter range: ", len(_range))
visualize_params(_range=_range, clustering='CLASSIX (density)',
label_files='Wine', band=band, fig_interval=fig_interval)
def compare_best_params():
datasets = []
data = pd.read_csv("data/Real_data/Iris.csv")
le = preprocessing.LabelEncoder()
data['Species'] = le.fit_transform(data['Species'])
X_iris = data.drop(['Species','Id'],axis=1).values
y_iris = data['Species'].values
datasets.append((X_iris, y_iris))
data = pd.read_csv("data/Real_data/Dermatology.csv").values
X_dermatology = data[:, :data.shape[1]-1]
y_dermatology = data[:, data.shape[1]-1]
datasets.append((X_dermatology, y_dermatology))
data =
|
pd.read_csv("data/Real_data/Ecoli.csv")
|
pandas.read_csv
|
import numpy as np
import pandas as pd
from pathlib import Path
from datetime import datetime
import random
import sys
from sklearn.model_selection import ParameterSampler
from scipy.stats import randint as sp_randint
from scipy.stats import uniform
from functions import (
under_over_sampler,
classifier_train,
classifier_train_manual,
make_generic_df,
get_xy_from_df,
plot_precision_recall_vs_threshold,
plot_precision_vs_recall,
)
from classification_methods import (
random_forest_classifier,
# knn_classifier,
# logistic_regression,
# sgd_classifier,
# ridge_classifier,
# svm_classifier,
# gaussian_nb_classifier,
xgboost_classifier,
)
# stop warnings from sklearn
# https://stackoverflow.com/questions/32612180/eliminating-warnings-from-scikit-learn
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
# to profile script for memory usage, use:
# /usr/bin/time -f "mem=%K RSS=%M elapsed=%E cpu.sys=%S .user=%U" python random_search_run.py
# from https://unix.stackexchange.com/questions/375889/unix-command-to-tell-how-much-ram-was-used-during-program-runtime
#############################################################################
# RANDOM SEARCH PARAMETERS
# fill these out to set parameters for the random search
# set a seed for the parameter sampler
sampler_seed = random.randint(0, 2 ** 16)
no_iterations = 30000
# create list of tools that we want to look over
# these are only the tools that we know we have wear-failures [57, 54, 32, 36, 22, 8, 2]
# tool_list_all = [57, 54, 32, 36, 22, 8, 2]
tool_list_all = [54]
# tool_list_some = [57, 32, 22, 8, 2, 36]
tool_list_some = []
# other parameters
scaler_methods = ["standard", "min_max"]
imbalance_ratios = [0.1,0.5,0.8,1]
average_across_indices = [True,False]
# list of classifiers to test
classifier_list_all = [
random_forest_classifier,
# knn_classifier,
# logistic_regression,
# sgd_classifier,
# ridge_classifier,
# svm_classifier,
# gaussian_nb_classifier,
xgboost_classifier,
]
over_under_sampling_methods = [
"random_over",
"random_under",
"random_under_bootstrap",
"smote",
"adasyn",
None,
]
# no cut indices past 9 are valid
index_list = [
list(range(0, 10)),
list(range(1, 10)),
list(range(1, 9)),
list(range(1, 8)),
list(range(2, 8)),
list(range(3, 7)),
list(range(2, 9)),
list(range(2, 10)),
]
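# Minimal sketch (an assumption, not the original search loop, which presumably follows
# elsewhere in this script): how the lists above could be turned into random parameter
# draws with sklearn's ParameterSampler. The dictionary keys are illustrative names only.
def _example_parameter_draws(n_iter=5):
    param_grid = {
        "scaler_method": scaler_methods,
        "imbalance_ratio": imbalance_ratios,
        "average_across_index": average_across_indices,
        "classifier": classifier_list_all,
        "over_under_sampling": over_under_sampling_methods,
        "indices": index_list,
    }
    # each element is a dict with one randomly chosen value per key
    return list(ParameterSampler(param_grid, n_iter=n_iter, random_state=sampler_seed))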
#############################################################################
# test and train folds
# failures for tool 54 on following dates:
# 2018-11-15
# 2019-01-28
# 2019-01-29
# 2019-01-30
# 2019-02-04
# 2019-02-07
# 2019-02-08
# 2019-09-11 - These are resampled into pickle files (in case that matters)
# 2019-11-27
# 2019-01-23 - These are from January data without speed
# update 8/6/2020: it does not look like we use the 'test_fold',
# so I have divided its dates among the other three folds
test_fold = [
"2018-10-23",
"2018-11-15", # failures
"2018-11-16",
"2018-11-19",
"2019-09-11", # failures
"2019-09-13",
]
train_fold_1 = [
"2018-11-21",
"2019-01-25",
"2019-01-28", # failures
"2019-11-27", # failures
"2019-01-23", # failures, from Jan without speed
"2019-05-03",
"2019-09-11", # failures
"2019-09-13",
]
train_fold_2 = [
"2019-01-29", # failures
"2019-01-30", # failures
"2019-02-01",
"2019-02-08", # failures
"2019-09-10",
"2019-09-12",
"2018-11-20",
"2019-02-11",
"2019-01-24", # i forgot this one earlier
"2019-05-04",
"2018-11-16",
"2018-11-19",
]
train_fold_3 = [
"2019-02-04", # failures
"2019-02-05",
"2019-02-07", # failures
"2019-05-06",
"2019-01-22", # from Jan without speed
"2018-10-23",
"2018-11-15", # failures
]
train_folds = [train_fold_1, train_fold_2, train_fold_3]
train_dates_all = [date for sublist in train_folds for date in sublist]
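# Sketch only, under assumptions: one way the three date folds above could be cycled in a
# leave-one-fold-out loop. `select_rows_for_dates` is a hypothetical helper (not a function
# from this project) that filters a dataframe down to a list of dates; the actual training
# loop presumably lives elsewhere in this script.
def _leave_one_fold_out_example(df, select_rows_for_dates):
    for i, validation_fold in enumerate(train_folds):
        # train on the two remaining folds, validate on the held-out fold
        training_dates = [d for j, fold in enumerate(train_folds) if j != i for d in fold]
        train_df = select_rows_for_dates(df, training_dates)
        validation_df = select_rows_for_dates(df, validation_fold)
        yield train_df, validation_df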
#############################################################################
# start by loading the csv with the features
# file_folder = Path(
# "/home/tim/Documents/Checkfluid-Project/data/processed/"
# "_tables/low_levels_labels_created_2020-03-11"
# )
# for HPC
file_folder = Path(
"/home/tvhahn/projects/def-mechefsk/tvhahn/_tables/low_levels_labels_created_2020-03-11/"
)
file = file_folder / "low_level_labels_created_2020.03.11_v3_updated_2020.08.06.csv"
df = pd.read_csv(file)
# sort the values by date and index so that it is reproducible
df = df.sort_values(by=["unix_date", "tool", "index"])
# replace NaN's in failed columns with 0
df["failed"].fillna(
0, inplace=True, downcast="int"
) # replace NaN in 'failed' col with 0
# function to convert pandas column to datetime format
def convert_to_datetime(cols):
unix_date = cols[0]
value = datetime.fromtimestamp(unix_date)
return value
# apply 'date_ymd' column to dataframe
df["date"] = df[["unix_date"]].apply(convert_to_datetime, axis=1)
# convert to a period, and then string
df["date_ymd"] =
|
pd.to_datetime(df["date"], unit="s")
|
pandas.to_datetime
|
#!/usr/bin/env python
# coding: utf-8
"""
This file is part of MADIP: Molecular Atlas Data Integration Pipeline
This module loads the data for the step_1_collect_protein_data.ipynb Jupyter notebook.
Age in days is only approximate and not involved in the downstream analysis. Qualitative age category is defined based on the original data sources.
Copyright 2021 Blue Brain Project / EPFL
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pandas as pd
import numpy as np
def get_hamezah_2019_dataframe():
"""
Return pandas dataframe for Hamezah 2019
:return:
pandas.core.frame.DataFrame: dataframe containing Hamezah 2019 data.
"""
print("Importing Hamezah 2019 pandas dataframe.")
# NEWDATA 1. Hamezah 2019 (Mice Hippocampus, Medial Prefrontal Cortex, and Striatum)
hamezah_2019_f = pd.ExcelFile('../data/source_data/Hameezah_2019_formatted.xlsx')
hamezah_2019_hippocampus = hamezah_2019_f.parse("Hippocampus", index_col=None)
hamezah_2019_pfc = hamezah_2019_f.parse("MedialPrefrontalCortex", index_col=None)
hamezah_2019_striatum = hamezah_2019_f.parse("Striatum", index_col=None)
hamezah_2019_hippocampus['location'] = 'hippocampus'
hamezah_2019_pfc['location'] = 'cortex' # prefrontal cortex
hamezah_2019_striatum['location'] = 'striatum'
hamezah_2019 = pd.concat([hamezah_2019_hippocampus, hamezah_2019_pfc, hamezah_2019_striatum])
hamezah_2019['Gene names'] = hamezah_2019['Gene names'].str.upper()
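# back-transform the log2-averaged LFQ intensities to the linear scale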
hamezah_2019['Calc: LFQ intensity WT'] = 2 ** hamezah_2019['Average LFQ intensity (Log2) WT-Ctrl']
hamezah_2019['Calc: LFQ intensity Alzheimer Tg'] = 2 ** hamezah_2019['Average LFQ intensity (Log2) Tg-ctrl'] #
hamezah_2019 = hamezah_2019.reset_index(drop=True)
hamezah_2019['Gene names'] = hamezah_2019['Gene names'].str.upper()
hamezah_2019 = hamezah_2019.drop(columns=['Protein names', 'Average LFQ intensity (Log2) WT-Ctrl',
'Average LFQ intensity (Log2) Tg-ctrl',
'Number of\npeptides',
'Maxquant\nscore', 'MS/MS\ncount', 'p-value', 'q-value'
])
hamezah_2019 = hamezah_2019.rename(columns={'Protein\naccession': 'Uniprot',
'Gene names': 'gene_names',
'Molecular\nweight (kDa)': 'molecular_weight_kDa',
'Calc: LFQ intensity WT': 'LFQintensity_WT',
'Calc: LFQ intensity Alzheimer Tg': 'LFQintensity_Alzheimer'
})
hamezah_2019_df = pd.wide_to_long(hamezah_2019, stubnames='LFQintensity',
i=['Uniprot', 'gene_names', 'molecular_weight_kDa', 'location'],
j='condition', sep='_', suffix=r'\w+')
hamezah_2019_df = hamezah_2019_df.reset_index()
hamezah_2019_df['Study'] = 'Hamezah 2019'
hamezah_2019_df['Organism'] = 'mouse'
hamezah_2019_df['Age_days'] = 365 + 3 * 30 + 21
# Five-month-old mice ... for a duration of 10 months -> 15 months
hamezah_2019_df['raw_data_units'] = 'LFQintensity'
hamezah_2019_df = hamezah_2019_df.rename(columns={'LFQintensity': 'raw_data'})
return hamezah_2019_df
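# Toy illustration (not used by the pipeline) of the pd.wide_to_long reshape applied above:
# the LFQintensity_<condition> columns become a single 'LFQintensity' value column indexed
# by a new 'condition' column. The toy values are made up purely for demonstration.
def _wide_to_long_toy_example():
    toy = pd.DataFrame({'Uniprot': ['P1', 'P2'],
                        'LFQintensity_WT': [1.0, 2.0],
                        'LFQintensity_Alzheimer': [3.0, 4.0]})
    long_df = pd.wide_to_long(toy, stubnames='LFQintensity', i='Uniprot',
                              j='condition', sep='_', suffix=r'\w+').reset_index()
    # long_df now has 4 rows with columns: Uniprot, condition, LFQintensity
    return long_df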
def get_hamezah_2018_dataframe():
"""
Return pandas dataframe for Hamezah 2018
:return:
pandas.core.frame.DataFrame: dataframe containing Hamezah 2018 data
"""
# ### Hamezah_2018
print("Importing pandas Hamezah 2018 dataframe.")
hamezah_2018f = pd.ExcelFile('../data/source_data/1-s2.0-S0531556518303097-mmc2.xlsx')
hamezah_2018_hippocampus = hamezah_2018f.parse('Sheet1')
hamezah_2018_pfc = hamezah_2018f.parse('Sheet2') # medial prefrontal cortex
hamezah_2018_striatum = hamezah_2018f.parse('Sheet3')
hamezah_2018_hippocampus['location'] = 'hippocampus'
hamezah_2018_pfc['location'] = 'cortex' # prefrontal cortex
hamezah_2018_striatum['location'] = 'striatum'
hamezah_2018 = pd.concat([hamezah_2018_hippocampus, hamezah_2018_pfc, hamezah_2018_striatum])
hamezah_2018['Gene names'] = hamezah_2018['Gene names'].str.upper()
hamezah_2018['Calc: LFQ intensity 14 months'] = 2 ** hamezah_2018['Average LFQ intensity (Log2) 14 months']
hamezah_2018['Calc: LFQ intensity 18 months'] = 2 ** hamezah_2018['Average LFQ intensity (Log2) 18 months']
hamezah_2018['Calc: LFQ intensity 23 months'] = 2 ** hamezah_2018['Average LFQ intensity (Log2) 23 months']
hamezah_2018['Calc: LFQ intensity 27 months'] = 2 ** hamezah_2018['Average LFQ intensity (Log2) 27 months']
hamezah_2018 = hamezah_2018.reset_index(drop=True)
hamezah_2018['Gene names'] = hamezah_2018['Gene names'].str.upper()
hamezah_2018 = hamezah_2018.drop(columns=['Protein names', 'Average LFQ intensity (Log2) 14 months',
'Average LFQ intensity (Log2) 18 months',
'Average LFQ intensity (Log2) 23 months',
'Average LFQ intensity (Log2) 27 months',
'Number of\npeptides',
'Maxquant\nscore', 'MS/MS\ncount', 'ANOVA significant'
])
hamezah_2018 = hamezah_2018.rename(columns={'Protein\naccession': 'Uniprot',
'Gene names': 'gene_names',
'Molecular\nweight (kDa)': 'molecular_weight_kDa',
'Calc: LFQ intensity 14 months': 'LFQintensity_14months',
'Calc: LFQ intensity 18 months': 'LFQintensity_18months',
'Calc: LFQ intensity 23 months': 'LFQintensity_23months',
'Calc: LFQ intensity 27 months': 'LFQintensity_27months'
})
hamezah_2018_df = pd.wide_to_long(hamezah_2018, stubnames='LFQintensity',
i=['Uniprot', 'gene_names', 'molecular_weight_kDa', 'location'],
j='sample_id', sep='_', suffix=r'\w+')
hamezah_2018_df = hamezah_2018_df.reset_index()
hamezah_2018_df['Study'] = 'Hamezah 2018'
hamezah_2018_df['Organism'] = 'rat'
hamezah_2018_df.loc[hamezah_2018_df['sample_id'] == '14months', 'Age_days'] = 365 + 2 * 30 + 21 # 446 # 365 + 2*30 + 21 # '14 months'
hamezah_2018_df.loc[hamezah_2018_df['sample_id'] == '18months', 'Age_days'] = 365 + 6 * 30 + 21 # 365 + 6*30 +21 # '18 months'
hamezah_2018_df.loc[hamezah_2018_df['sample_id'] == '23months', 'Age_days'] = 721 # 365*2 -30 +21 # '23 months'
hamezah_2018_df.loc[hamezah_2018_df['sample_id'] == '27months', 'Age_days'] = 841 # 365*2 + 30*3 +21 # '27 months'
hamezah_2018_df['raw_data_units'] = 'LFQintensity'
hamezah_2018_df = hamezah_2018_df.rename(columns={'LFQintensity': 'raw_data'})
return hamezah_2018_df
def get_chuang_2018_dataframe():
"""
Return pandas dataframe for Chuang 2018
:return:
pandas.core.frame.DataFrame: dataframe containing Chuang 2018 data.
"""
print("Importing Chuang 2018 pandas dataframe.")
chuang2018f = pd.ExcelFile('../data/source_data/Supporting File S-3_The lists of the proteins identified in the axon and whole-cell samples.xlsx')
chuang2018_axon = chuang2018f.parse('Axon samples, 2548', skiprows=3, index_col=None)
chuang2018_wholecell = chuang2018f.parse('Whole-cell samples, 2752', skiprows=3, index_col=None)
chuang2018_wholecell = chuang2018_wholecell.drop(
['Fasta headers', 'Number of proteins', 'Peptides axon', 'Peptides whole-cell',
'Razor + unique peptides axon', 'Razor + unique peptides whole-cell',
'Score', 'Sequence coverage axon [%]',
'Sequence coverage whole-cell [%]',
'Fasta headers.1', 'Number of proteins.1',
'Peptides axon.1', 'Peptides whole-cell.1',
'Razor + unique peptides axon.1',
'Razor + unique peptides whole-cell.1', 'Score.1',
'Sequence coverage axon [%].1', 'Sequence coverage whole-cell [%].1'], axis=1)
chuang2018_axon = chuang2018_axon.drop(['Fasta headers', 'Number of proteins', 'Peptides axon',
'Peptides whole-cell',
'Razor + unique peptides axon', 'Razor + unique peptides whole-cell',
'Score', 'Sequence coverage axon [%]',
'Sequence coverage whole-cell [%]',
'Fasta headers.1', 'Number of proteins.1',
'Peptides axon.1', 'Peptides whole-cell.1',
'Razor + unique peptides axon.1',
'Razor + unique peptides whole-cell.1', 'Score.1',
'Sequence coverage axon [%].1', 'Sequence coverage whole-cell [%].1'],
axis=1)
chuang2018_axon = chuang2018_axon.rename(columns={'GN': 'gene_names',
'Accession': 'Uniprot',
'Protein IDs': 'Experiment1:Protein IDs',
'Protein IDs.1': 'Experiment2:Protein IDs.1',
'iBAQ axon': 'iBAQ_Experiment1',
'iBAQ axon.1': 'iBAQ_Experiment2'})
chuang2018_wholecell = chuang2018_wholecell.rename(columns={'GN': 'gene_names',
'Accession': 'Uniprot',
'Protein IDs': 'Experiment1:Protein IDs',
'Protein IDs.1': 'Experiment2:Protein IDs.1',
'iBAQ whole-cell': 'iBAQ_Experiment1',
'iBAQ whole-cell.1': 'iBAQ_Experiment2'})
chuang2018_axon['gene_names'] = chuang2018_axon['gene_names'].str.upper()
chuang2018_wholecell['gene_names'] = chuang2018_wholecell['gene_names'].str.upper()
chuang2018_axon['location'] = 'axon'
chuang2018_wholecell['location'] = 'neurons' # 'neuron_whole_cell'
chuang2018 = pd.concat([chuang2018_axon, chuang2018_wholecell], sort=False)
chuang2018 = chuang2018.reset_index(drop=True)
chuang2018 = chuang2018.drop(['Description', 'Experiment1:Protein IDs', 'Experiment2:Protein IDs.1'], axis=1)
chuang2018 = chuang2018.rename(columns={'Mol. weight [kDa]': 'molecular_weight_kDa'})
chuang2018_df = pd.wide_to_long(chuang2018, stubnames='iBAQ',
i=['Uniprot', 'gene_names', 'molecular_weight_kDa', 'location'],
j='sample_id', sep='_', suffix=r'\w+')
chuang2018_df = chuang2018_df.reset_index()
chuang2018_df['Study'] = 'Chuang 2018'
chuang2018_df['Organism'] = 'rat'
chuang2018_df['Age_days'] = 18 # E18
chuang2018_df['Age_cat'] = 'embr'
chuang2018_df['raw_data_units'] = 'iBAQ'
chuang2018_df = chuang2018_df.rename(columns={'iBAQ': 'raw_data'})
return chuang2018_df
def get_duda_2018_dataframe():
"""
Return pandas dataframe for Duda 2018
:return:
pandas.core.frame.DataFrame: dataframe containing Duda 2018 data.
"""
print("Importing Duda 2018 pandas dataframe.")
dudaf = pd.ExcelFile('../data/source_data/dataProt.xlsx')
duda_hippocampus = dudaf.parse('hippocampus')
duda_cerebellum = dudaf.parse('cerebellum')
duda_cortex = dudaf.parse('cortex')
# fill merged cells with the same values
# merged cells processed manually to avoid artefacts
# duda_hippocampus = duda_hippocampus.fillna(method='ffill')
# duda_cerebellum = duda_cerebellum.fillna(method='ffill')
# duda_cortex = duda_cortex.fillna(method='ffill')
duda_hippocampus['location'] = 'hippocampus'
duda_cerebellum['location'] = 'cerebellum'
duda_cortex['location'] = 'cortex'
duda = pd.concat([duda_hippocampus, duda_cerebellum, duda_cortex], sort=False)
duda = duda.reset_index(drop=True)
duda['gene_names'] = duda['gene_names'].str.upper()
# young = 1 month; old = 12 months
duda['duplicated'] = duda.duplicated(subset=['Young Mean concentration',
'Adult Mean concentration', 'location'], keep=False)
duda = duda.drop(columns='duplicated')
duda = duda.rename(columns={'Young Mean concentration': 'MeanConcentration_young',
'Adult Mean concentration': 'MeanConcentration_adult'})
duda_2018_df = pd.wide_to_long(duda, stubnames='MeanConcentration',
i=['gene_names', 'location'],
j='condition', sep='_', suffix=r'\w+')
duda_2018_df = duda_2018_df.reset_index()
duda_2018_df['Study'] = 'Duda 2018'
duda_2018_df['Organism'] = 'mouse'
duda_2018_df.loc[duda_2018_df['condition'] == 'young', 'Age_days'] = 51 # P30 = 21 embryonic days + 30 postnatal
duda_2018_df.loc[duda_2018_df['condition'] == 'adult', 'Age_days'] = 386 # 365 + 21 embryonic days #'12 months'
duda_2018_df['raw_data_units'] = 'Mean concentration [mol/(g total protein)]'
duda_2018_df = duda_2018_df.rename(columns={'MeanConcentration': 'raw_data'})
return duda_2018_df
def get_krogager_2018_dataframe():
"""
Return pandas dataframe for Krogager 2018
:return:
pandas.core.frame.DataFrame: dataframe containing Krogager 2018 data.
"""
print("Importing Krogager 2018 pandas dataframe.")
krogagerf = pd.ExcelFile('../data/source_data/MouseBrainProteomeKrogager2018_supp.xlsx')
krogager = krogagerf.parse('Sheet1')
krogager = krogager.drop(columns=['Significant (S0:1, FDR:0.05)', '-LOG(P-value)',
'Log2(SORT Output / Control Output)', 'Protein names',
'Intensity', 'MS/MS Count'])
# in this case we combine samples due to many NaN in individual samples
col1 = krogager.loc[:, ['Log2(LFQ) Control Output 1', 'Log2(LFQ) Control Output 2', 'Log2(LFQ) Control Output 3',
'Log2(LFQ) Control Output 4', 'Log2(LFQ) Control Output 5', 'Log2(LFQ) Control Output 6']]
krogager['Log2(LFQ) Control median'] = col1.median(axis=1)
col2 = krogager.loc[:, ['Log2(LFQ) SORT Output 1', 'Log2(LFQ) SORT Output 2', 'Log2(LFQ) SORT Output 3']]
krogager['Log2(LFQ) SORT median'] = col2.median(axis=1)
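# note: DataFrame.median(axis=1) skips NaN by default, so proteins quantified in only
# some replicates still receive a median value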
krogager['LFQintensity_control'] = 2 ** krogager['Log2(LFQ) Control median']
krogager['LFQintensity_SORT'] = 2 ** krogager['Log2(LFQ) SORT median']
krogager['Gene names'] = krogager['Gene names'].str.upper()
krogager = krogager.rename(columns={'Gene names': 'gene_names',
'Majority protein IDs': 'Uniprot'
})
krogager_drop = krogager.drop(['Log2(LFQ) Control Output 1',
'Log2(LFQ) Control Output 2', 'Log2(LFQ) Control Output 3',
'Log2(LFQ) Control Output 4', 'Log2(LFQ) Control Output 5',
'Log2(LFQ) Control Output 6', 'Log2(LFQ) SORT Output 1',
'Log2(LFQ) SORT Output 2', 'Log2(LFQ) SORT Output 3',
'Log2(LFQ) Control median', 'Log2(LFQ) SORT median'], axis=1)
krogager_df = pd.wide_to_long(krogager_drop, stubnames='LFQintensity',
i=['Uniprot', 'gene_names'],
j='condition', sep='_', suffix=r'\w+')
krogager_df = krogager_df.reset_index()
krogager_df['Study'] = 'Krogager 2018'
krogager_df['Organism'] = 'mouse'
krogager_df['Age_days'] = 13 * 7 + 21 # 13*7 +21 # 10 weeks + 2weeks after surgery + 1 week treatment
krogager_df.loc[krogager_df['condition'] == 'SORT', 'location'] = 'neurons' # striatum neurons
krogager_df.loc[krogager_df['condition'] == 'control', 'location'] = 'striatum' # whole striatum (control)
krogager_df['raw_data_units'] = 'LFQintensity'
krogager_df = krogager_df.rename(columns={'LFQintensity': 'raw_data'})
return krogager_df
def get_hosp_2017_dataframe():
"""
Return pandas dataframe for Hosp 2017
:return:
pandas.core.frame.DataFrame: dataframe containing Hosp 2017 data.
"""
print("Importing Hosp 2017 pandas dataframe. This can last a while.")
hosp_solf = pd.ExcelFile('../data/source_data/1-s2.0-S2211124717315772-mmc2.xlsx')
hosp_sol = hosp_solf.parse('S1A_soluble proteome')
hosp_sol2f = pd.ExcelFile('../data/source_data/1-s2.0-S2211124717315772-mmc3.xlsx')
hosp_sol2 = hosp_sol2f.parse('S2A_CSF_proteome')
hosp_insolf = pd.ExcelFile('../data/source_data/1-s2.0-S2211124717315772-mmc4.xlsx')
hosp_insol = hosp_insolf.parse('S3A_insolube_proteome_data')
hosp_sol = hosp_sol.drop(
['GOBP name', 'GOMF name', 'GOCC name', 'KEGG name', 'Pfam', 'GSEA', 'Keywords', 'Corum', 'Peptides',
'Razor + unique peptides', 'Razor + unique peptides', 'Sequence coverage [%]',
'Unique + razor sequence coverage [%]', 'Unique sequence coverage [%]', 'Q-value'], axis=1)
hosp_sol = hosp_sol[hosp_sol.columns.drop(list(hosp_sol.filter(regex=r'LFQ')))]
hosp_sol = hosp_sol[hosp_sol.columns.drop(list(hosp_sol.filter(regex=r'_R6\/2_')))]
hosp_sol2 = hosp_sol2.drop(
['GOBP name', 'GOMF name', 'GOCC name', 'KEGG name', 'Pfam', 'GSEA', 'Fasta headers', 'Corum', 'Peptides',
'Razor + unique peptides', 'Razor + unique peptides', 'Sequence coverage [%]',
'Unique + razor sequence coverage [%]', 'Unique sequence coverage [%]', 'Q-value'], axis=1)
hosp_sol2 = hosp_sol2[hosp_sol2.columns.drop(list(hosp_sol2.filter(regex=r'LFQ')))]
hosp_sol2 = hosp_sol2[hosp_sol2.columns.drop(list(hosp_sol2.filter(regex=r'_R6\/2_')))]
hosp_insol = hosp_insol.drop(
['GOBP name', 'GOMF name', 'GOCC name', 'KEGG name', 'Pfam', 'GSEA', 'Fasta headers', 'Corum',
'Coiled-coil domain',
'LCR motif', 'polyQ domain', 'coiled-coil length', 'LCR length', 'polyQ length', 'Peptides',
'Razor + unique peptides', 'Razor + unique peptides', 'Sequence coverage [%]',
'Unique + razor sequence coverage [%]', 'Unique sequence coverage [%]', 'Q-value'], axis=1)
hosp_insol = hosp_insol[hosp_insol.columns.drop(list(hosp_insol.filter(regex=r'LFQ')))]
hosp_insol = hosp_insol[hosp_insol.columns.drop(list(hosp_insol.filter(regex=r'_R6\/2_')))]
hosp_sol['Gene names'] = hosp_sol['Gene names'].str.upper()
hosp_sol2['Gene names'] = hosp_sol2['Gene names'].str.upper()
hosp_insol['Gene names'] = hosp_insol['Gene names'].str.upper()
###
hosp_sol = hosp_sol.rename(columns={'Gene names': 'gene_names',
'Majority protein IDs': 'Uniprot',
'Mol. weight [kDa]': 'molecular_weight_kDa'})
hosp_sol = hosp_sol.drop([
'Protein IDs', 'Protein names', 'Unique peptides', 'Intensity', 'MS/MS Count',
'iBAQ', 'iBAQ library'], axis=1)
hosp_sol2 = hosp_sol2.rename(columns={'Gene names': 'gene_names',
'Majority protein IDs': 'Uniprot',
'Mol. weight [kDa]': 'molecular_weight_kDa'})
hosp_sol2 = hosp_sol2.drop([
'Protein IDs', 'Protein names', 'Unique peptides', 'Score', 'Intensity', 'MS/MS Count',
'iBAQ_total'], axis=1)
hosp_insol = hosp_insol.rename(columns={'Gene names': 'gene_names',
'Majority protein IDs': 'Uniprot',
'Mol. weight [kDa]': 'molecular_weight_kDa'})
hosp_insol = hosp_insol.drop([
'Protein IDs', 'Protein names', 'Unique peptides', 'Score', 'Intensity', 'MS/MS Count',
'iBAQ'], axis=1)
hosp_sol.columns = ['Uniprot', 'gene_names', 'molecular_weight_kDa', 'iBAQ_5wWTce1', 'iBAQ_5wWTce2', 'iBAQ_5wWTce3',
'iBAQ_5wWTce4',
'iBAQ_5wWTco1', 'iBAQ_5wWTco2', 'iBAQ_5wWTco3',
'iBAQ_5wWTco4', 'iBAQ_5wWThc1', 'iBAQ_5wWThc2',
'iBAQ_5wWThc3', 'iBAQ_5wWThc4', 'iBAQ_5wWTst1',
'iBAQ_5wWTst2', 'iBAQ_5wWTst3', 'iBAQ_5wWTst4',
'iBAQ_8wWTce1', 'iBAQ_8wWTce2', 'iBAQ_8wWTce3',
'iBAQ_8wWTco1', 'iBAQ_8wWTco2', 'iBAQ_8wWTco3',
'iBAQ_8wWThc1', 'iBAQ_8wWThc2', 'iBAQ_8wWThc3',
'iBAQ_8wWTst1', 'iBAQ_8wWTst2', 'iBAQ_8wWTst3',
'iBAQ_12wWTce1', 'iBAQ_12wWTce2', 'iBAQ_12wWTce3',
'iBAQ_12wWTco1', 'iBAQ_12wWTco2', 'iBAQ_12wWTco3',
'iBAQ_12wWThc1', 'iBAQ_12wWThc2', 'iBAQ_12wWThc3',
'iBAQ_12wWTst1', 'iBAQ_12wWTst2', 'iBAQ_12wWTst3']
hosp_sol2.columns = ['Uniprot', 'gene_names', 'molecular_weight_kDa', 'iBAQ_5wWT1', 'iBAQ_5wWT2', 'iBAQ_5wWT3',
'iBAQ_8wWT1', 'iBAQ_8wWT2',
'iBAQ_8wWT3', 'iBAQ_12wWT1', 'iBAQ_12wWT2', 'iBAQ_12wWT3'
]
hosp_insol.columns = ['Uniprot', 'gene_names', 'molecular_weight_kDa',
'iBAQ_5wWTce1', 'iBAQ_5wWTce2', 'iBAQ_5wWTce3', 'iBAQ_5wWTce4',
'iBAQ_5wWTco1', 'iBAQ_5wWTco2', 'iBAQ_5wWTco3',
'iBAQ_5wWTco4', 'iBAQ_5wWThc1', 'iBAQ_5wWThc2',
'iBAQ_5wWThc3', 'iBAQ_5wWThc4', 'iBAQ_5wWTst1',
'iBAQ_5wWTst2', 'iBAQ_5wWTst3', 'iBAQ_5wWTst4',
'iBAQ_8wWTce1', 'iBAQ_8wWTce2', 'iBAQ_8wWTce3',
'iBAQ_8wWTco1', 'iBAQ_8wWTco2', 'iBAQ_8wWTco3',
'iBAQ_8wWThc1', 'iBAQ_8wWThc2', 'iBAQ_8wWThc3',
'iBAQ_8wWTst1', 'iBAQ_8wWTst2', 'iBAQ_8wWTst3',
'iBAQ_12wWTce1', 'iBAQ_12wWTce2', 'iBAQ_12wWTce3',
'iBAQ_12wWTco1', 'iBAQ_12wWTco2', 'iBAQ_12wWTco3',
'iBAQ_12wWThc1', 'iBAQ_12wWThc2', 'iBAQ_12wWThc3',
'iBAQ_12wWTst1', 'iBAQ_12wWTst2', 'iBAQ_12wWTst3',
'iBAQ_5wWTcex/yiBAQ_inc5wWTce',
'iBAQ_5wWTcox/yiBAQ_inc5wWTco',
'iBAQ_5wWThcx/yiBAQ_inc5wWThc',
'iBAQ_5wWTstx/yiBAQ_inc5wWTst',
'iBAQ_8wWTcex/yiBAQ_inc8wWTce',
'iBAQ_8wWTcox/yiBAQ_inc8wWTco',
'iBAQ_8wWThcx/yiBAQ_inc8wWThc',
'iBAQ_8wWTstx/yiBAQ_inc8wWTst',
'iBAQ_12wWTcex/yiBAQ_inc12wWTce',
'iBAQ_12wWTcox/yiBAQ_inc12wWTco',
'iBAQ_12wWThcx/yiBAQ_inc12wWThc',
'iBAQ_12wWTstx/yiBAQ_inc12wWTst']
hosp_insol = hosp_insol.drop(['iBAQ_5wWTcex/yiBAQ_inc5wWTce',
'iBAQ_5wWTcox/yiBAQ_inc5wWTco',
'iBAQ_5wWThcx/yiBAQ_inc5wWThc',
'iBAQ_5wWTstx/yiBAQ_inc5wWTst',
'iBAQ_8wWTcex/yiBAQ_inc8wWTce',
'iBAQ_8wWTcox/yiBAQ_inc8wWTco',
'iBAQ_8wWThcx/yiBAQ_inc8wWThc',
'iBAQ_8wWTstx/yiBAQ_inc8wWTst',
'iBAQ_12wWTcex/yiBAQ_inc12wWTce',
'iBAQ_12wWTcox/yiBAQ_inc12wWTco',
'iBAQ_12wWThcx/yiBAQ_inc12wWThc',
'iBAQ_12wWTstx/yiBAQ_inc12wWTst'], axis=1)
hosp_sol_df = pd.wide_to_long(hosp_sol, stubnames='iBAQ',
i=['Uniprot', 'gene_names', 'molecular_weight_kDa'],
j='sample_id', sep='_', suffix=r'\w+')
hosp_sol_df = hosp_sol_df.reset_index()
hosp_sol_df['Study'] = 'Hosp 2017, soluble'
hosp_sol_df['Organism'] = 'mouse'
hosp_sol_df['raw_data_units'] = 'iBAQ'
hosp_sol_df = hosp_sol_df.rename(columns={'iBAQ': 'raw_data'})
hosp_sol2_df = pd.wide_to_long(hosp_sol2, stubnames='iBAQ',
i=['Uniprot', 'gene_names', 'molecular_weight_kDa'],
j='sample_id', sep='_', suffix=r'\w+')
hosp_sol2_df = hosp_sol2_df.reset_index()
hosp_sol2_df['Study'] = 'Hosp 2017, CSF'
hosp_sol2_df['Organism'] = 'mouse'
hosp_sol2_df['raw_data_units'] = 'iBAQ'
hosp_sol2_df = hosp_sol2_df.rename(columns={'iBAQ': 'raw_data'})
hosp_insol_df = pd.wide_to_long(hosp_insol, stubnames='iBAQ',
i=['Uniprot', 'gene_names', 'molecular_weight_kDa'],
j='sample_id', sep='_', suffix=r'\w+')
hosp_insol_df = hosp_insol_df.reset_index()
hosp_insol_df['Study'] = 'Hosp 2017, insoluble'
hosp_insol_df['Organism'] = 'mouse'
hosp_insol_df['raw_data_units'] = 'iBAQ'
hosp_insol_df = hosp_insol_df.rename(columns={'iBAQ': 'raw_data'})
hosp_3 = pd.concat([hosp_sol_df, hosp_sol2_df, hosp_insol_df
], ignore_index=True, sort=False)
hosp_3.loc[hosp_3['sample_id'].isin(['5wWTce1', '5wWTce2', '5wWTce3', '5wWTce4', '5wWTco1', '5wWTco2',
'5wWTco3', '5wWTco4', '5wWThc1', '5wWThc2', '5wWThc3', '5wWThc4',
'5wWTst1', '5wWTst2', '5wWTst3', '5wWTst4', '5wWT1', '5wWT2',
'5wWT3']), 'Age_days'] = 35 + 21 # 35 +21 #'5 weeks'
hosp_3.loc[hosp_3['sample_id'].isin(['8wWTce1', '8wWTce2',
'8wWTce3', '8wWTco1', '8wWTco2', '8wWTco3', '8wWThc1', '8wWThc2',
'8wWThc3', '8wWTst1', '8wWTst2', '8wWTst3', '8wWT1', '8wWT2',
'8wWT3']), 'Age_days'] = 56 + 21 # 56 +21 #'8 weeks'
hosp_3.loc[
hosp_3['sample_id'].isin(['12wWTce1', '12wWTce2', '12wWTce3', '12wWTco1', '12wWTco2', '12wWTco3', '12wWThc1',
'12wWThc2', '12wWThc3', '12wWTst1', '12wWTst2', '12wWTst3', '12wWT1', '12wWT2',
'12wWT3']), 'Age_days'] = 12 * 7 + 21 # 12*7 +21 #'12 weeks'
hosp_3.loc[hosp_3['sample_id'].isin(['5wWTce1', '5wWTce2', '5wWTce3', '5wWTce4', '8wWTce1', '8wWTce2',
'8wWTce3', '12wWTce1', '12wWTce2',
'12wWTce3']), 'location'] = 'cerebellum'
hosp_3.loc[hosp_3['sample_id'].isin(['5wWTco1', '5wWTco2', '5wWTco3', '5wWTco4', '8wWTco1', '8wWTco2', '8wWTco3',
'12wWTco1', '12wWTco2', '12wWTco3']), 'location'] = 'cortex'
hosp_3.loc[hosp_3['sample_id'].isin(['5wWThc1', '5wWThc2', '5wWThc3', '5wWThc4',
'8wWThc1', '8wWThc2', '8wWThc3', '12wWThc1', '12wWThc2',
'12wWThc3']), 'location'] = 'hippocampus'
hosp_3.loc[hosp_3['sample_id'].isin(
['5wWTst1', '5wWTst2', '5wWTst3', '5wWTst4', '8wWTst1', '8wWTst2', '8wWTst3', '12wWTst1', '12wWTst2',
'12wWTst3']), 'location'] = 'striatum'
hosp_3.loc[hosp_3['sample_id'].isin(
['5wWT1', '5wWT2', '5wWT3', '8wWT1', '8wWT2', '8wWT3', '12wWT1', '12wWT2', '12wWT3']), 'location'] = 'csf'
return hosp_3
def get_itzhak_2017_dataframe():
"""
Return pandas dataframe for Itzhak 2017
:return:
pandas.core.frame.DataFrame: dataframe containing Itzhak 2017 data.
"""
print("Importing itzhak 2017 pandas dataframe. This can last a while.")
itzhak_concf = pd.ExcelFile('../data/source_data/1-s2.0-S2211124717311889-mmc4.xlsx')
itzhak_conc = itzhak_concf.parse('Mouse Neuron Spatial Proteome')
itzhak_conc = itzhak_conc.rename(columns={'Lead gene name': 'gene_names',
'Majority protein IDs': 'Uniprot',
'Prediction': 'location',
'MW (kD)': 'molecular_weight_kDa',
'Median cellular concentration [nM]': 'raw_data'})
itzhak_conc = itzhak_conc.replace([np.inf, -np.inf], np.nan)
itzhak_conc = itzhak_conc.drop(['Canonical lead protein ID', 'Protein names', 'Marker protein?',
'SVM score', 'Subcellular distribution',
'Prediction confidence class', 'Median copy number', 'Max copy number',
'Min copy number',
'Abundance percentile (copy numbers)', 'Max cellular concentration [nM]',
'Min cellular concentration [nM]', 'ppm of total cell mass',
'Average Nuclear Fraction', 'Average Membrane',
'Average Cytosol Fraction', 'MSMS count'], axis=1)
itzhak_conc['raw_data_units'] = 'Median cellular concentration [nM]'
itzhak_conc['Study'] = 'Itzhak 2017'
itzhak_conc['Organism'] = 'mouse'
itzhak_conc['Age_days'] = 15
itzhak_conc['Age_cat'] = 'embr'
itzhak_conc.loc[itzhak_conc['location'].isna(),'location'] = 'subcellular not specified'
return itzhak_conc
def get_beltran_2016_dataframe():
"""
Return pandas dataframe for Beltran 2016
:return:
pandas.core.frame.DataFrame: dataframe containing Beltran 2016 data.
"""
print("Importing Beltran 2016 pandas dataframe")
beltranf = pd.ExcelFile('../data/source_data/1-s2.0-S2405471216302897-mmc2.xlsx')
beltran = beltranf.parse('TableS1A', skiprows=2, index_col=None)
beltran = beltran.rename(columns={'Gene': 'gene_names',
'Uniprot Accession': 'Uniprot',
'24hpi': 'infected 24hpi',
'48hpi': 'infected 48hpi',
'72hpi': 'infected 72hpi', '96hpi': 'infected 96hpi',
'120hpi': 'infected 120hpi', '24hpi.1': 'Uninfected (Mock) 24hpi'})
beltran = beltran.drop(['infected 24hpi', 'infected 48hpi', 'infected 72hpi', 'infected 96hpi', 'infected 120hpi'],
axis=1)
beltran = beltran.rename(columns={'Uninfected (Mock) 24hpi': 'raw_data'})
beltran = beltran[beltran['Organism'] == 'Homo sapiens (Human)']
beltran['raw_data_units'] = 'iBAQ'
beltran['Study'] = 'Beltran 2016'
beltran['Age_days'] = 0
beltran['Age_cat'] = 'embr'
# Protein abundance (iBAQ values)
beltran['Organism'] = 'human'
beltran['gene_names'] = beltran['gene_names'].str.upper()
beltranLocf = pd.ExcelFile('../data/source_data/1-s2.0-S2405471216302897-mmc5.xlsx')
beltranLoc = beltranLocf.parse('Label-Free Localization', skiprows=2, index_col=None)
beltranLoc = beltranLoc[beltranLoc['Organism'] == 'Homo sapiens (Human)']
beltranLoc = beltranLoc[['Uniprot Accession', 'Gene', '24hpi.1']]
beltranLoc = beltranLoc.rename(
columns={'Uniprot Accession': 'Uniprot', 'Gene': 'gene_names', '24hpi.1': 'location'})
beltranLoc['gene_names'] = beltranLoc['gene_names'].str.upper()
beltranfin = pd.merge(beltran, beltranLoc, on=['Uniprot', 'gene_names'], how='inner')
beltranfin.loc[beltranfin['location'].isna(), 'location'] = 'subcellular not specified'
return beltranfin
def get_sharma_2015_dataframe():
"""
Return pandas dataframe for Sharma 2015
:return:
pandas.core.frame.DataFrame: dataframe containing Sharma 2015 data.
"""
print("Importing Sharma 2015 pandas dataframe. This can last a while.")
sharma4fo = pd.ExcelFile('../data/source_data/nn.4160-S4.xlsx')
sharma4o = sharma4fo.parse('For Suppl table 2 proteinGroups', skiprows=2)
sharmaf = pd.ExcelFile('../data/source_data/nn.4160-S7.xlsx')
sharma = sharmaf.parse('Sheet1')
sharma4o.columns = ['gene_names', 'Protein names',
'Log2LFQintensity_IsolatedAstrocytes',
'Log2LFQintensity_IsolatedMicroglia',
'Log2LFQintensity_IsolatedNeurons',
'Log2LFQintensity_IsolatedOligodendrocytes',
'Log2LFQintensity_Brain',
'Log2LFQintensity_Brainstem',
'Log2LFQintensity_Cerebellum',
'Log2LFQintensity_CorpusCallosum',
'Log2LFQintensity_MotorCortex',
'Log2LFQintensity_OlfactoryBulb',
'Log2LFQintensity_OpticNerve',
'Log2LFQintensity_PrefrontalCortex',
'Log2LFQintensity_Striatum',
'Log2LFQintensity_Thalamus',
'Log2LFQintensity_VentralHippocampus',
'Log2LFQintensity_CerebellumP05',
'Log2LFQintensity_CerebellumP14',
'Log2LFQintensity_CerebellumP24',
'Standard Deviation Isolated Astrocytes', 'Standard Deviation Isolated Microglia',
'Standard Deviation Isolated Neurons', 'Standard Deviation Isolated Oligodendrocytes',
'Standard Deviation Brain',
'Standard Deviation Brainstem', 'Standard Deviation Cerebellum',
'Standard Deviation Corpus Callosum', 'Standard Deviation MotorCortex',
'Standard Deviation Olfactory Bulb', 'Standard Deviation Optic Nerve',
'Standard Deviation Prefrontal Cortex', 'Standard Deviation Striatum',
'Standard Deviation Thalamus', 'Standard Deviation Ventral Hippocampus',
'Standard Deviation Cerebellum P05',
'Standard Deviation Cerebellum P14', 'Standard Deviation Cerebellum P24', 'Peptides',
'Sequence coverage [%]', 'molecular_weight_kDa', 'Score',
'Uniprot']
sharma4o = sharma4o.drop(columns=['Protein names',
'Standard Deviation Isolated Astrocytes',
'Standard Deviation Isolated Microglia',
'Standard Deviation Isolated Neurons',
'Standard Deviation Isolated Oligodendrocytes',
'Standard Deviation Brain', 'Standard Deviation Brainstem',
'Standard Deviation Cerebellum', 'Standard Deviation Corpus Callosum',
'Standard Deviation MotorCortex', 'Standard Deviation Olfactory Bulb',
'Standard Deviation Optic Nerve',
'Standard Deviation Prefrontal Cortex', 'Standard Deviation Striatum',
'Standard Deviation Thalamus', 'Standard Deviation Ventral Hippocampus',
'Standard Deviation Cerebellum P05',
'Standard Deviation Cerebellum P14',
'Standard Deviation Cerebellum P24', 'Peptides',
'Sequence coverage [%]', 'Score'])
sharma.columns = ['gene_names', 'Protein names', 'LFQintensity_adultMicroglia1',
'LFQintensity_adultMicroglia2', 'LFQintensity_adultMicroglia3',
'LFQintensity_youngMicroglia1', 'LFQintensity_youngMicroglia2',
'LFQintensity_youngMicroglia3', 'LFQintensity_Astrocytes1',
'LFQintensity_Astrocytes2', 'LFQintensity_Astrocytes3',
'LFQintensity_Neuronsdiv051',
'LFQintensity_Neuronsdiv052',
'LFQintensity_Neuronsdiv053',
'LFQintensity_Neuronsdiv101',
'LFQintensity_Neuronsdiv102',
'LFQintensity_Neuronsdiv103',
'LFQintensity_Neuronsdiv151',
'LFQintensity_Neuronsdiv152',
'LFQintensity_Neuronsdiv153',
'LFQintensity_Oligodendrocytesdiv11',
'LFQintensity_Oligodendrocytesdiv12',
'LFQintensity_Oligodendrocytesdiv13',
'LFQintensity_Oligodendrocytesdiv251',
'LFQintensity_Oligodendrocytesdiv252',
'LFQintensity_Oligodendrocytesdiv253',
'LFQintensity_Oligodendrocytesdiv41',
'LFQintensity_Oligodendrocytesdiv42',
'LFQintensity_Oligodendrocytesdiv43', 'PEP',
'molecular_weight_kDa', 'Sequence coverage [%]', 'Protein IDs',
'Uniprot']
sharma = sharma.drop(['Protein names', 'PEP', 'Sequence coverage [%]', 'Protein IDs'], axis=1)
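    # pd.wide_to_long gathers every 'Log2LFQintensity_<sample>' column into rows keyed by
    # sample_id, so each (protein, sample) pair becomes one record. A minimal sketch with
    # toy column names (not from the source data):
    #   wide = pd.DataFrame({'Uniprot': ['P1'],
    #                        'Log2LFQintensity_Brain': [20.0],
    #                        'Log2LFQintensity_Thalamus': [18.0]})
    #   pd.wide_to_long(wide, stubnames='Log2LFQintensity', i='Uniprot',
    #                   j='sample_id', sep='_', suffix=r'\w+')
    #   # -> two rows ('Brain', 'Thalamus') with a single 'Log2LFQintensity' value column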
sharma4o_df = pd.wide_to_long(sharma4o, stubnames='Log2LFQintensity',
i=['Uniprot', 'gene_names', 'molecular_weight_kDa'],
j='sample_id', sep='_', suffix=r'\w+')
sharma4o_df = sharma4o_df.reset_index()
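    # undo the log2 transform so raw_data holds linear-scale LFQ intensities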
sharma4o_df['raw_data'] = 2 ** sharma4o_df['Log2LFQintensity']
sharma4o_df['Study'] = 'Sharma 2015, isolated'
sharma4o_df['Organism'] = 'mouse'
sharma4o_df['raw_data_units'] = 'LFQintensity'
sharma4o_df = sharma4o_df.drop('Log2LFQintensity', axis=1)
sharma_df = pd.wide_to_long(sharma, stubnames='LFQintensity',
i=['Uniprot', 'gene_names', 'molecular_weight_kDa'],
j='sample_id', sep='_', suffix=r'\w+')
sharma_df = sharma_df.reset_index()
sharma_df['Study'] = 'Sharma 2015, cultured'
sharma_df['Organism'] = 'mouse'
sharma_df['raw_data_units'] = 'LFQintensity'
sharma_df['Age_days'] = 0 # 'cultured cells'
sharma_df = sharma_df.rename(columns={'LFQintensity': 'raw_data'})
sharma_df.loc[sharma_df['sample_id'].isin(['Neuronsdiv051',
'Neuronsdiv052', 'Neuronsdiv053', 'Neuronsdiv101', 'Neuronsdiv102',
'Neuronsdiv103', 'Neuronsdiv151', 'Neuronsdiv152',
'Neuronsdiv153']), 'location'] = 'neurons'
sharma_df.loc[sharma_df['sample_id'].isin(['Astrocytes1', 'Astrocytes2', 'Astrocytes3']), 'location'] = 'astrocytes'
sharma_df.loc[sharma_df['sample_id'].isin(['adultMicroglia1', 'adultMicroglia2', 'adultMicroglia3',
'youngMicroglia1', 'youngMicroglia2',
'youngMicroglia3']), 'location'] = 'microglia'
sharma_df.loc[sharma_df['sample_id'].isin(['Oligodendrocytesdiv11', 'Oligodendrocytesdiv12',
'Oligodendrocytesdiv13', 'Oligodendrocytesdiv251',
'Oligodendrocytesdiv252', 'Oligodendrocytesdiv253',
'Oligodendrocytesdiv41', 'Oligodendrocytesdiv42',
'Oligodendrocytesdiv43']), 'location'] = 'oligodendrocytes'
sharma4o_df.loc[sharma4o_df['sample_id'].isin(['IsolatedAstrocytes', 'IsolatedMicroglia', 'IsolatedNeurons',
'IsolatedOligodendrocytes']), 'Age_days'] = 29 # 8 + 21 # 'P8'
sharma4o_df.loc[sharma4o_df['sample_id'].isin(['Brain', 'Brainstem', 'Cerebellum',
'CorpusCallosum', 'MotorCortex', 'OlfactoryBulb', 'OpticNerve',
'PrefrontalCortex', 'Striatum', 'Thalamus',
'VentralHippocampus', ]), 'Age_days'] = 81 # 60 + 21 'P60'
sharma4o_df.loc[sharma4o_df['sample_id'].isin(['CerebellumP05']), 'Age_days'] = 26 # 5 + 21 # 'P5'
sharma4o_df.loc[sharma4o_df['sample_id'].isin(['CerebellumP14']), 'Age_days'] = 35 # 14 + 21 # 'P14'
sharma4o_df.loc[sharma4o_df['sample_id'].isin(['CerebellumP24']), 'Age_days'] = 45 # 24 + 21 # 'P24'
###
sharma4o_df.loc[sharma4o_df['sample_id'].isin(['IsolatedAstrocytes']), 'location'] = 'astrocytes'
sharma4o_df.loc[sharma4o_df['sample_id'].isin(['IsolatedMicroglia']), 'location'] = 'microglia'
sharma4o_df.loc[sharma4o_df['sample_id'].isin(['IsolatedNeurons']), 'location'] = 'neurons'
sharma4o_df.loc[sharma4o_df['sample_id'].isin(['IsolatedOligodendrocytes']), 'location'] = 'oligodendrocytes'
sharma4o_df.loc[sharma4o_df['sample_id'].isin(['Brain']), 'location'] = 'brain'
sharma4o_df.loc[sharma4o_df['sample_id'].isin(['Brainstem']), 'location'] = 'brainstem'
sharma4o_df.loc[sharma4o_df['sample_id'].isin(
['Cerebellum', 'CerebellumP05', 'CerebellumP14', 'CerebellumP24']), 'location'] = 'cerebellum'
sharma4o_df.loc[sharma4o_df['sample_id'].isin(['CorpusCallosum']), 'location'] = 'corpus callosum'
sharma4o_df.loc[sharma4o_df['sample_id'].isin(['MotorCortex', 'PrefrontalCortex']), 'location'] = 'cortex'
sharma4o_df.loc[sharma4o_df['sample_id'].isin(['VentralHippocampus']), 'location'] = 'hippocampus'
sharma4o_df.loc[sharma4o_df['sample_id'].isin(['OlfactoryBulb']), 'location'] = 'olfactory bulb'
sharma4o_df.loc[sharma4o_df['sample_id'].isin(['Striatum']), 'location'] = 'striatum'
sharma4o_df.loc[sharma4o_df['sample_id'].isin(['OpticNerve']), 'location'] = 'optic nerve'
sharma4o_df.loc[sharma4o_df['sample_id'].isin(['Thalamus']), 'location'] = 'thalamus'
return sharma4o_df, sharma_df
def get_wisniewski_2015_dataframe():
"""
    Return pandas dataframe for Wisniewski 2015
:return:
pandas.core.frame.DataFrame: dataframe containing Wisniewski 2015 data.
"""
print("Importing Wisńiewski 2015 pandas dataframe")
wisniewskif = pd.ExcelFile('../data/source_data/pr5b00276_si_001.xlsx')
wisniewski = wisniewskif.parse('Sheet1')
wisniewski = wisniewski[~((wisniewski['Protein concentration (mol/g protein) Brain1'] == 0) &
(wisniewski['Protein concentration (mol/g protein) Brain2'] == 0) &
(wisniewski['Protein concentration (mol/g protein) Brain3'] == 0))]
wisniewski = wisniewski.drop(
['Protein IDs', 'Protein names', 'Total protein Brain1', 'Total protein Brain2', 'Total protein Brain3'],
axis=1)
wisniewski = wisniewski.rename(columns={'Majority protein IDs': 'Uniprot',
'Gene names': 'gene_names',
'Protein concentration (mol/g protein) Brain1': 'Conc_1',
'Protein concentration (mol/g protein) Brain2': 'Conc_2',
'Protein concentration (mol/g protein) Brain3': 'Conc_3'})
wisniewski_df = pd.wide_to_long(wisniewski, stubnames='Conc',
i=['Uniprot', 'gene_names'],
j='sample_id', sep='_', suffix=r'\w+')
wisniewski_df = wisniewski_df.reset_index()
wisniewski_df['Study'] = 'Wisniewski 2015'
wisniewski_df['Organism'] = 'mouse'
wisniewski_df['location'] = 'brain'
wisniewski_df['Age_days'] = 10 * 7 + 21 # adult
wisniewski_df['raw_data_units'] = 'Protein concentration (mol/g protein)'
wisniewski_df = wisniewski_df.rename(columns={'Conc': 'raw_data'})
return wisniewski_df
def get_han_2014_dataframe():
"""
Return pandas dataframe for Han 2014
:return:
pandas.core.frame.DataFrame: dataframe containing Han 2014 data.
"""
print("Importing Han 2014 pandas dataframe")
hanf = pd.ExcelFile('../data/source_data/pmic7746-sup-0001-tables1.xlsx')
han = hanf.parse('Sheet1')
# six technical replicates of two samples (conditioned media (CM) and whole-cell lysates (WCL))
han = han.rename(columns={'Gene symbol': 'gene_names',
'LFQ intensity WCL_set1_tech1': 'LFQintensity_WCLset1tech1',
'LFQ intensity WCL_set1_tech2': 'LFQintensity_WCLset1tech2',
'LFQ intensity WCL_set1_tech3': 'LFQintensity_WCLset1tech3',
'LFQ intensity WCL_set2_tech1': 'LFQintensity_WCLset2tech1',
'LFQ intensity WCL_set2_tech2': 'LFQintensity_WCLset2tech2',
'LFQ intensity WCL_set2_tech3': 'LFQintensity_WCLset2tech3',
'LFQ intensity WCL_set3_tech1': 'LFQintensity_WCLset3tech1',
'LFQ intensity WCL_set3_tech2': 'LFQintensity_WCLset3tech2',
'LFQ intensity WCL_set3_tech3': 'LFQintensity_WCLset3tech3'
})
han = han.drop(['Protein IDs', 'Majority protein IDs', 'Leading protein', 'Intensity', 'Intensity CM_set1_tech1',
'Intensity CM_set1_tech2', 'Intensity CM_set1_tech3',
'Intensity CM_set2_tech1', 'Intensity CM_set2_tech2',
'Intensity CM_set2_tech3', 'Intensity CM_set3_tech1',
'Intensity CM_set3_tech2', 'Intensity CM_set3_tech3', 'LFQ intensity CM_set1_tech1',
'LFQ intensity CM_set1_tech2', 'LFQ intensity CM_set1_tech3',
'LFQ intensity CM_set2_tech1', 'LFQ intensity CM_set2_tech2',
'LFQ intensity CM_set2_tech3', 'LFQ intensity CM_set3_tech1',
'LFQ intensity CM_set3_tech2', 'LFQ intensity CM_set3_tech3', 'MS/MS Count CM_set1_tech1',
'MS/MS Count CM_set1_tech2', 'MS/MS Count CM_set1_tech3',
'MS/MS Count CM_set2_tech1', 'MS/MS Count CM_set2_tech2',
'MS/MS Count CM_set2_tech3', 'MS/MS Count CM_set3_tech1',
'MS/MS Count CM_set3_tech2', 'MS/MS Count CM_set3_tech3',
'MS/MS Count WCL_set1_tech1', 'MS/MS Count WCL_set1_tech2',
'MS/MS Count WCL_set1_tech3', 'MS/MS Count WCL_set2_tech1',
'MS/MS Count WCL_set2_tech2', 'MS/MS Count WCL_set2_tech3',
'MS/MS Count WCL_set3_tech1', 'MS/MS Count WCL_set3_tech2',
'MS/MS Count WCL_set3_tech3', 'Intensity WCL_set1_tech1',
'Intensity WCL_set1_tech2', 'Intensity WCL_set1_tech3', 'Intensity WCL_set2_tech1',
'Intensity WCL_set2_tech2', 'Intensity WCL_set2_tech3', 'Intensity WCL_set3_tech1',
'Intensity WCL_set3_tech2', 'Intensity WCL_set3_tech3'], axis=1)
han = han[han['gene_names'] != '-']
han_df = pd.wide_to_long(han, stubnames='LFQintensity',
i=['Uniprot', 'gene_names'],
j='sample_id', sep='_', suffix=r'\w+')
han_df = han_df.reset_index()
han_df['Study'] = 'Han 2014'
han_df['Organism'] = 'mouse'
han_df['raw_data_units'] = 'LFQintensity'
han_df['Age_days'] = 0 # 'cultured cells'
han_df['location'] = 'astrocytes'
han_df = han_df.rename(columns={'LFQintensity': 'raw_data'})
return han_df
def get_geiger_2013_dataframe():
"""
Return pandas dataframe for Geiger 2013
:return:
pandas.core.frame.DataFrame: dataframe containing Geiger 2013 data.
"""
print("Importing Geiger 2013 pandas dataframe. This operation can last a while.")
geigerf = pd.ExcelFile('../data/source_data/mcp.M112.024919-2.xlsx')
geiger = geigerf.parse('Suppl Table S1', skiprows=1, index_col=None)
geiger = geiger.drop(['Protein IDs', 'Protein names', 'Peptides', 'Razor + unique peptides', 'Unique peptides',
'Sequence coverage [%]', 'PEP',
'Ratio H/L normalized', 'Ratio H/L normalized Adrenal gland',
'Ratio H/L normalized Brain cortex',
'Ratio H/L normalized Brain medulla', 'Ratio H/L normalized Brown fat',
'Ratio H/L normalized Cerebellum', 'Ratio H/L normalized Colon',
'Ratio H/L normalized Diaphragm', 'Ratio H/L normalized Duodenum',
'Ratio H/L normalized Embryonic tissue', 'Ratio H/L normalized Eye',
'Ratio H/L normalized Heart', 'Ratio H/L normalized Ileum',
'Ratio H/L normalized Jejunum', 'Ratio H/L normalized Kidney cortex',
'Ratio H/L normalized Kidney medulla', 'Ratio H/L normalized Liver',
'Ratio H/L normalized Lung', 'Ratio H/L normalized Midbrain',
'Ratio H/L normalized Muscle', 'Ratio H/L normalized Olfactory bulb',
'Ratio H/L normalized Ovary', 'Ratio H/L normalized Pancreas',
'Ratio H/L normalized Salivary gland', 'Ratio H/L normalized Spleeen',
'Ratio H/L normalized Stomach', 'Ratio H/L normalized Thymus',
'Ratio H/L normalized Uterus', 'Ratio H/L normalized White fat',
'Intensity L Adrenal gland', 'Intensity L Brown fat', 'Intensity L Colon',
'Intensity L Diaphragm',
'Intensity L Duodenum', 'Intensity L Embryonic tissue',
'Intensity L Eye', 'Intensity L Heart', 'Intensity L Ileum',
'Intensity L Jejunum', 'Intensity L Kidney cortex',
'Intensity L Kidney medulla', 'Intensity L Liver', 'Intensity L Lung', 'Intensity L Muscle',
'Intensity L Ovary',
'Intensity L Pancreas', 'Intensity L Salivary gland',
'Intensity L Spleeen', 'Intensity L Stomach', 'Intensity L Thymus',
'Intensity L Uterus', 'Intensity L White fat'], axis=1)
geiger = geiger.rename(columns={'Majority protein IDs': 'Uniprot',
'Gene names': 'gene_names',
'Mol. weight [kDa]': 'molecular_weight_kDa',
'Intensity L Brain cortex': 'IntensityL_cortex',
'Intensity L Brain medulla': 'IntensityL_medulla',
'Intensity L Cerebellum': 'IntensityL_cerebellum',
'Intensity L Midbrain': 'IntensityL_midbrain',
'Intensity L Olfactory bulb': 'IntensityL_olfactorybulb'
})
# Lys-C
geiger = geiger[~geiger['Uniprot'].isna()]
geiger_df = pd.wide_to_long(geiger, stubnames='IntensityL',
i=['Uniprot', 'gene_names', 'molecular_weight_kDa'],
j='location', sep='_', suffix=r'\w+')
geiger_df = geiger_df.reset_index()
geiger_df['Study'] = 'Geiger 2013'
geiger_df['Organism'] = 'mouse'
geiger_df['raw_data_units'] = 'IntensityL'
geiger_df['Age_days'] = 10 * 7 + 21 # adult
geiger_df.loc[geiger_df['location'] == 'olfactorybulb', 'location'] = 'olfactory bulb'
geiger_df = geiger_df.rename(columns={'IntensityL': 'raw_data'})
return geiger_df
def get_bai_2020_dataframe():
"""
Return pandas dataframe for Bai 2020
:return:
pandas.core.frame.DataFrame: dataframe containing Bai 2020 data.
"""
print("Importing Bai 2020 pandas dataframe.")
# mouse samples:
bai2020_f = pd.ExcelFile('../data/source_data/1-s2.0-S089662731931058X-mmc7.xlsx')
bai2020 = bai2020_f.parse('Sheet1', skiprows=4)
bai2020 = bai2020.drop(['LPC1', 'LPC2', 'HPC1', 'HPC2', 'MCI1', 'MCI2', 'AD1', 'AD2', 'PSP1', 'PSP2'],
axis=1) # these are human samples
bai2020 = bai2020.rename(columns={'Human gene name': 'gene_names',
'Human protein accession': 'Uniprot_human',
'Mouse protein accession': 'Uniprot_mouse'
})
# bai2020[bai2020['Uniprot_mouse'].str.contains('CON_ENSBTAP00000024146')] #Q61838
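    # the accession fields appear to follow the UniProt FASTA header style 'db|ACCESSION|NAME',
    # so splitting on '|' and keeping the middle element extracts the bare accession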
bai2020['Uniprot_human'] = bai2020['Uniprot_human'].str.split("|").str[1]
bai2020['Uniprot_mouse'] = bai2020['Uniprot_mouse'].str.split("|").str[1]
# [i for i in bai2020['Uniprot_mouse'].unique() if len(i)>6 ]
bai2020.loc[bai2020['Uniprot_mouse']=='CON_ENSBTAP00000024146','Uniprot_mouse'] = 'Q61838' #CON_ENSBTAP00000024146 fix by human uniprot
# [i for i in bai2020['Uniprot_human'].unique() if len(i)>6 ]
bai2020['Uniprot'] = bai2020['Uniprot_mouse'] + ";" + bai2020['Uniprot_human']
bai2020 = bai2020.drop(['Uniprot_mouse','Uniprot_human'],axis=1)
bai2020 = bai2020.drop_duplicates(keep='first')
    # bai2020[bai2020[['Uniprot','gene_names']].duplicated(keep=False)]  # leftover duplicate check, result not used
    bai2020_df = pd.wide_to_long(bai2020, stubnames='tmt',
                                 i=['Uniprot', 'gene_names'],
                                 j='sample_id', sep='_', suffix=r'\w+')
bai2020_df = bai2020_df.reset_index()
    bai2020_df['Organism'] = 'mouse'  # the study contains both human and mouse data; only the mouse samples are used here, the human samples are imported by get_human_samples_bai_2020_dataframe()
bai2020_df['location'] = 'cortex'
    bai2020_df['raw_data_units'] = 'Protein Abundance (Summarized TMT Reporter Ion Intensities)'
bai2020_df['Study'] = 'Bai 2020'
bai2020_df = bai2020_df.rename(columns = {'tmt':'raw_data'})
bai2020_df.loc[bai2020_df['sample_id'].isin(['WT3M1', 'WT3M2','AD3M1', 'AD3M2']),'Age_days'] = 111 # 3*30+21 # 3 months
bai2020_df.loc[bai2020_df['sample_id'].isin(['WT6M1', 'WT6M2', 'WT6M3', 'WT6M4', 'AD6M1', 'AD6M2', 'AD6M3', 'AD6M4']),'Age_days'] = 201 # 6*30+21 # 6 months
bai2020_df.loc[bai2020_df['sample_id'].isin(['WT12M1', 'WT12M2', 'AD12M1', 'AD12M2']),'Age_days'] = 386 # 365+21 # 12 months
bai2020_df.loc[bai2020_df['sample_id'].isin(['WT3M1', 'WT3M2', 'WT6M1', 'WT6M2', 'WT6M3', 'WT6M4', 'WT12M1', 'WT12M2']),'condition'] = 'control'
bai2020_df.loc[bai2020_df['sample_id'].isin(['AD3M1', 'AD3M2', 'AD6M1', 'AD6M2', 'AD6M3', 'AD6M4','AD12M1', 'AD12M2']),'condition'] = 'AD' # 5xFAD mice Alzheimer model
return bai2020_df
def get_human_samples_bai_2020_dataframe():
"""
Return pandas dataframe for human samples Bai 2020
:return:
pandas.core.frame.DataFrame: dataframe containing human samples 2020 data.
"""
print("Importing human samples Bai 2020 pandas dataframe.")
bai2020human_f = pd.ExcelFile('../data/source_data/1-s2.0-S089662731931058X-mmc7.xlsx')
bai2020human = bai2020human_f.parse('Sheet1', skiprows=4)
bai2020human = bai2020human.drop(['tmt_WT3M1', 'tmt_WT3M2', 'tmt_WT6M1', 'tmt_WT6M2', 'tmt_WT6M3',
'tmt_WT6M4', 'tmt_WT12M1', 'tmt_WT12M2', 'tmt_AD3M1', 'tmt_AD3M2',
'tmt_AD6M1', 'tmt_AD6M2', 'tmt_AD6M3', 'tmt_AD6M4', 'tmt_AD12M1',
'tmt_AD12M2'], axis=1) # these are mouse samples
bai2020human = bai2020human.rename(columns={'Human gene name': 'gene_names',
'Human protein accession': 'Uniprot_human',
'Mouse protein accession': 'Uniprot_mouse'
})
bai2020human['Uniprot_human'] = bai2020human['Uniprot_human'].str.split("|").str[1]
bai2020human['Uniprot_mouse'] = bai2020human['Uniprot_mouse'].str.split("|").str[1]
# [i for i in bai2020human['Uniprot_mouse'].unique() if len(i)>6 ]
# bai2020human[bai2020human['Uniprot_mouse'] == 'CON_ENSBTAP00000024146']
bai2020human.loc[bai2020human[
'Uniprot_mouse'] == 'CON_ENSBTAP00000024146', 'Uniprot_mouse'] = 'Q61838'
# CON_ENSBTAP00000024146 fix by human uniprot
# [i for i in bai2020human['Uniprot_human'].unique() if len(i)>6 ]
bai2020human['Uniprot'] = bai2020human['Uniprot_mouse'] + ";" + bai2020human['Uniprot_human']
bai2020human = bai2020human.drop(['Uniprot_mouse', 'Uniprot_human'], axis=1)
bai2020human = bai2020human.drop_duplicates(keep='first')
bai2020human = bai2020human.rename(columns = { 'LPC1':'humandat_LPC1',
'LPC2':'humandat_LPC2',
'HPC1':'humandat_HPC1',
'HPC2':'humandat_HPC2',
'MCI1':'humandat_MCI1',
'MCI2':'humandat_MCI2',
'AD1':'humandat_AD1',
'AD2':'humandat_AD2',
'PSP1':'humandat_PSP1',
'PSP2':'humandat_PSP2'
})
    bai2020human_df = pd.wide_to_long(bai2020human, stubnames='humandat',
                                      i=['Uniprot', 'gene_names'],
                                      j='sample_id', sep='_', suffix=r'\w+')
bai2020human_df = bai2020human_df.reset_index()
    bai2020human_df['Organism'] = 'human'  # the study contains both human and mouse data; this function imports only the human samples
bai2020human_df['location'] = 'cortex' # frontal cortical samples of 100 human cases
    bai2020human_df['raw_data_units'] = 'Protein Abundance (Summarized TMT Reporter Ion Intensities)'
bai2020human_df['Study'] = 'Bai 2020'
bai2020human_df = bai2020human_df.rename(columns = {'humandat':'raw_data'})
bai2020human_df.loc[bai2020human_df['sample_id'].isin(['LPC1', 'LPC2']),'condition'] = 'LPC: low pathology of plaques and tangles. AD'
bai2020human_df.loc[bai2020human_df['sample_id'].isin(['HPC1', 'HPC2']),'condition'] = 'HPC: high Ab pathology but no detectable cognitive defects. AD'
bai2020human_df.loc[bai2020human_df['sample_id'].isin(['MCI1', 'MCI2']),'condition'] = 'MCI: mild cognitive impairment with Ab pathology and a slight but measurable defect in cognition. AD'
bai2020human_df.loc[bai2020human_df['sample_id'].isin(['AD1', 'AD2']),'condition'] = 'AD: late-stage AD with high pathology scores of plaques and tangles'
bai2020human_df.loc[bai2020human_df['sample_id'].isin(['PSP1', 'PSP2']),'condition'] = 'PSP: progressive supranuclear palsy, another neurodegenerative disorder of tauopathy'
bai2020human_df.loc[:,'Age_days'] = 'post-mortem'
bai2020human_df = bai2020human_df.reset_index(drop=True)
return bai2020human_df
def get_carlyle_2017_dataframe():
"""
Return pandas dataframe for Carlyle 2017
:return:
pandas.core.frame.DataFrame: dataframe containing Carlyle 2017 data.
"""
print("Importing Carlyle 2017 pandas dataframe.")
# from 41593_2017_11_MOESM12_ESM data in 41593_2017_11_MOESM6_ESM is log10LFQ
carlyle2017 = pd.read_csv('../data/source_data/41593_2017_11_MOESM6_ESM.txt', sep='\t')
carlyle2017 = carlyle2017.drop(['EnsemblID'], axis=1)
carlyle2017 = carlyle2017.rename(columns={'GeneSymbol': 'gene_names',
'HSB118_AMY': 'Log10LFQ_118AMY',
'HSB118_CBC': 'Log10LFQ_118CBC',
'HSB118_DFC': 'Log10LFQ_118DFC',
'HSB118_HIP': 'Log10LFQ_118HIP',
'HSB118_MD': 'Log10LFQ_118MD',
'HSB118_STR': 'Log10LFQ_118STR',
'HSB118_V1C': 'Log10LFQ_118V1C',
'HSB119_AMY': 'Log10LFQ_119AMY',
'HSB121_AMY': 'Log10LFQ_121AMY',
'HSB122_AMY': 'Log10LFQ_122AMY',
'HSB122_CBC': 'Log10LFQ_122CBC',
'HSB122_DFC': 'Log10LFQ_122DFC',
'HSB122_HIP': 'Log10LFQ_122HIP',
'HSB122_MD': 'Log10LFQ_122MD',
'HSB122_STR': 'Log10LFQ_122STR',
'HSB122_V1C': 'Log10LFQ_122V1C',
'HSB123_AMY': 'Log10LFQ_123AMY',
'HSB123_CBC': 'Log10LFQ_123CBC',
'HSB123_DFC': 'Log10LFQ_123DFC',
'HSB123_HIP': 'Log10LFQ_123HIP',
'HSB123_STR': 'Log10LFQ_123STR',
'HSB123_V1C': 'Log10LFQ_123V1C',
'HSB126_AMY': 'Log10LFQ_126AMY',
'HSB126_CBC': 'Log10LFQ_126CBC',
'HSB126_DFC': 'Log10LFQ_126DFC',
'HSB126_HIP': 'Log10LFQ_126HIP',
'HSB126_MD': 'Log10LFQ_126MD',
'HSB126_STR': 'Log10LFQ_126STR',
'HSB126_V1C': 'Log10LFQ_126V1C',
'HSB127_CBC': 'Log10LFQ_127CBC',
'HSB127_DFC': 'Log10LFQ_127DFC',
'HSB127_HIP': 'Log10LFQ_127HIP',
'HSB127_MD': 'Log10LFQ_127MD',
'HSB127_STR': 'Log10LFQ_127STR',
'HSB127_V1C': 'Log10LFQ_127V1C',
'HSB135_AMY': 'Log10LFQ_135AMY',
'HSB135_CBC': 'Log10LFQ_135CBC',
'HSB135_DFC': 'Log10LFQ_135DFC',
'HSB135_HIP': 'Log10LFQ_135HIP',
'HSB135_MD': 'Log10LFQ_135MD',
'HSB135_STR': 'Log10LFQ_135STR',
'HSB135_V1C': 'Log10LFQ_135V1C',
'HSB136_AMY': 'Log10LFQ_136AMY',
'HSB136_CBC': 'Log10LFQ_136CBC',
'HSB136_DFC': 'Log10LFQ_136DFC',
'HSB136_HIP': 'Log10LFQ_136HIP',
'HSB136_MD': 'Log10LFQ_136MD',
'HSB136_STR': 'Log10LFQ_136STR',
'HSB136_V1C': 'Log10LFQ_136V1C',
'HSB139_CBC': 'Log10LFQ_139CBC',
'HSB139_DFC': 'Log10LFQ_139DFC',
'HSB139_HIP': 'Log10LFQ_139HIP',
'HSB139_MD': 'Log10LFQ_139MD',
'HSB139_STR': 'Log10LFQ_139STR',
'HSB139_V1C': 'Log10LFQ_139V1C',
'HSB141_CBC': 'Log10LFQ_141CBC',
'HSB141_DFC': 'Log10LFQ_141DFC',
'HSB141_HIP': 'Log10LFQ_141HIP',
'HSB141_MD': 'Log10LFQ_141MD',
'HSB141_STR': 'Log10LFQ_141STR',
'HSB141_V1C': 'Log10LFQ_141V1C',
'HSB143_CBC': 'Log10LFQ_143CBC',
'HSB143_DFC': 'Log10LFQ_143DFC',
'HSB143_HIP': 'Log10LFQ_143HIP',
'HSB143_MD': 'Log10LFQ_143MD',
'HSB143_STR': 'Log10LFQ_143STR',
'HSB143_V1C': 'Log10LFQ_143V1C',
'HSB145_AMY': 'Log10LFQ_145AMY',
'HSB145_CBC': 'Log10LFQ_145CBC',
'HSB145_DFC': 'Log10LFQ_145DFC',
'HSB145_HIP': 'Log10LFQ_145HIP',
'HSB145_MD': 'Log10LFQ_145MD',
'HSB145_STR': 'Log10LFQ_145STR',
'HSB145_V1C': 'Log10LFQ_145V1C',
'HSB173_AMY': 'Log10LFQ_173AMY',
'HSB174_AMY': 'Log10LFQ_174AMY',
'HSB175_AMY': 'Log10LFQ_175AMY'})
carlyle2017_df = pd.wide_to_long(carlyle2017, stubnames='Log10LFQ',
i=['gene_names'],
j='sample_id', sep='_', suffix=r'\w+')
carlyle2017_df = carlyle2017_df.reset_index()
carlyle2017_df['Organism'] = 'human'
carlyle2017_df['Study'] = 'Carlyle 2017'
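    # undo the log10 transform reported in the source table to recover linear LFQ intensities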
carlyle2017_df['raw_data'] = 10 ** carlyle2017_df['Log10LFQ']
carlyle2017_df['raw_data_units'] = 'LFQintensity'
carlyle2017_df = carlyle2017_df.drop(['Log10LFQ'], axis=1)
carlyle_samples = pd.read_csv('../data/source_data/41593_2017_11_MOESM3_ESM.txt',sep='\t')
# cerebellar cortex (CBC), striatum (STR), mediodorsal thalamic
# nucleus (MD), amygdala (AMY), hippocampus (HIP),
# primary visual cortex (V1C), dorsolateral prefrontal cortex (DFC)
#from early infancy (1 year after conception) to adulthood (42 years)
#LFQs from 7 brain regions of 16 individuals
carlyle2017_df.loc[carlyle2017_df['sample_id'].str.contains('AMY'),'location'] = 'amygdala'
carlyle2017_df.loc[carlyle2017_df['sample_id'].str.contains('CBC'),'location'] = 'cerebellum'
carlyle2017_df.loc[carlyle2017_df['sample_id'].str.contains('DFC'),'location'] = 'cortex'
carlyle2017_df.loc[carlyle2017_df['sample_id'].str.contains('HIP'),'location'] = 'hippocampus'
carlyle2017_df.loc[carlyle2017_df['sample_id'].str.contains('MD'),'location'] = 'thalamus'
carlyle2017_df.loc[carlyle2017_df['sample_id'].str.contains('STR'),'location'] = 'striatum'
carlyle2017_df.loc[carlyle2017_df['sample_id'].str.contains('V1C'),'location'] = 'cortex'
carlyle2017_df.loc[carlyle2017_df['sample_id'].str.contains('118'),'Age_days'] = 1726
carlyle2017_df.loc[carlyle2017_df['sample_id'].str.contains('119'),'Age_days'] = 5741
carlyle2017_df.loc[carlyle2017_df['sample_id'].str.contains('121'),'Age_days'] = 386
carlyle2017_df.loc[carlyle2017_df['sample_id'].str.contains('122'),'Age_days'] = 631
carlyle2017_df.loc[carlyle2017_df['sample_id'].str.contains('123'),'Age_days'] = 13771
carlyle2017_df.loc[carlyle2017_df['sample_id'].str.contains('126'),'Age_days'] = 11216
carlyle2017_df.loc[carlyle2017_df['sample_id'].str.contains('127'),'Age_days'] = 7201
carlyle2017_df.loc[carlyle2017_df['sample_id'].str.contains('135'),'Age_days'] = 14866
carlyle2017_df.loc[carlyle2017_df['sample_id'].str.contains('136'),'Age_days'] = 8661
carlyle2017_df.loc[carlyle2017_df['sample_id'].str.contains('139'),'Age_days'] = 386
carlyle2017_df.loc[carlyle2017_df['sample_id'].str.contains('141'),'Age_days'] = 3186
carlyle2017_df.loc[carlyle2017_df['sample_id'].str.contains('143'),'Age_days'] = 996
carlyle2017_df.loc[carlyle2017_df['sample_id'].str.contains('145'),'Age_days'] = 13406
carlyle2017_df.loc[carlyle2017_df['sample_id'].str.contains('173'),'Age_days'] = 1361
carlyle2017_df.loc[carlyle2017_df['sample_id'].str.contains('174'),'Age_days'] = 3186
carlyle2017_df.loc[carlyle2017_df['sample_id'].str.contains('175'),'Age_days'] = 4281
    # due to mismatches in some pairs of gene names (e.g. KRT86;GFAP) and the absence of Uniprot ids in the data, the 8.2 % of gene names containing more than one gene id per entry are removed:
carlyle2017_df_filt = carlyle2017_df.loc[~carlyle2017_df['gene_names'].str.contains(';')]
return carlyle2017_df_filt
def get_davis_2019_dataframe():
"""
Return pandas dataframe for Davis 2019
:return:
pandas.core.frame.DataFrame: dataframe containing Davis 2019 data.
"""
print("Importing Davis 2019 pandas dataframe.")
# Davis 2019 (human brain, LFQ and iBAQ)
# primary motor cortex (Betz cells) and cerebellar cortex (Purkinje cells) - individual neurons
davis2019f = pd.ExcelFile('../data/source_data/pr8b00981_si_003.xlsx')
davis2019 = davis2019f.parse('proteinGroups', skiprows=2)
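    # drop whole families of QC/metadata columns by regex so that, after the explicit drop and
    # rename below, only the protein identifiers and the per-sample iBAQ columns remain for the
    # wide-to-long reshape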
davis2019 = davis2019[davis2019.columns.drop(list(davis2019.filter(regex=r'Peptide')))]
davis2019 = davis2019[davis2019.columns.drop(list(davis2019.filter(regex=r'Oxidation')))]
davis2019 = davis2019[davis2019.columns.drop(list(davis2019.filter(regex=r'Unique peptides')))]
davis2019 = davis2019[davis2019.columns.drop(list(davis2019.filter(regex=r'Razor')))]
davis2019 = davis2019[davis2019.columns.drop(list(davis2019.filter(regex=r'Identification type')))]
davis2019 = davis2019[davis2019.columns.drop(list(davis2019.filter(regex=r'Sequence coverage')))]
davis2019 = davis2019[davis2019.columns.drop(list(davis2019.filter(regex=r'Intensity')))]
davis2019 = davis2019[davis2019.columns.drop(list(davis2019.filter(regex=r'LFQ')))]
davis2019 = davis2019[davis2019.columns.drop(list(davis2019.filter(regex=r'Library')))]
davis2019 = davis2019[davis2019.columns.drop(list(davis2019.filter(regex=r'MS/MS')))]
davis2019 = davis2019[davis2019.columns.drop(list(davis2019.filter(regex=r'Molecular Buffer')))]
davis2019 = davis2019[davis2019.columns.drop(list(davis2019.filter(regex=r'Molecular Cap')))]
davis2019 = davis2019.drop(
['Protein IDs', 'Protein names', 'Fasta headers', 'Only identified by site', 'Reverse', 'Potential contaminant',
'id', 'Mod. peptide IDs', 'Evidence IDs',
'Number of proteins', 'Unique + razor sequence coverage [%]', 'Unique sequence coverage [%]',
'Sequence length', 'Sequence lengths',
'Fraction average', 'Fraction 1', 'Fraction 2', 'Fraction 3',
'Fraction 10', 'Q-value', 'Score', 'iBAQ'], axis=1)
davis2019 = davis2019.rename(columns = {
'Majority protein IDs':'Uniprot', 'Gene names':'gene_names', 'Mol. weight [kDa]':'molecular_weight_kDa',
'iBAQ Betz Cap SP3 1':'iBAQ_BetzCapSP31',
'iBAQ Betz Cap SP3 2':'iBAQ_BetzCapSP32',
'iBAQ Betz Cap SP3 3':'iBAQ_BetzCapSP33',
'iBAQ Purkinje Cap SP3 1':'iBAQ_PurkinjeCapSP31',
'iBAQ Purkinje Cap SP3 2':'iBAQ_PurkinjeCapSP32',
'iBAQ Purkinje Cap SP3 3':'iBAQ_PurkinjeCapSP33',
'iBAQ Purkinje InCap 100':'iBAQ_PurkinjeInCap100',
'iBAQ Purkinje InCap 200':'iBAQ_PurkinjeInCap200',
'iBAQ Purkinje InCap 400':'iBAQ_PurkinjeInCap400',
'iBAQ Purkinje InCap 800':'iBAQ_PurkinjeInCap800',
'iBAQ Purkinje Spin 10':'iBAQ_PurkinjeSpin10',
'iBAQ Purkinje Spin 100':'iBAQ_PurkinjeSpin100',
'iBAQ Purkinje Spin 200':'iBAQ_PurkinjeSpin200',
'iBAQ Purkinje Spin 400':'iBAQ_PurkinjeSpin400',
'iBAQ Purkinje Spin 50':'iBAQ_PurkinjeSpin50',
'iBAQ Purkinje Spin 800':'iBAQ_PurkinjeSpin800'
})
    davis2019_df = pd.wide_to_long(davis2019, stubnames='iBAQ',
                                   i=['Uniprot', 'gene_names', 'molecular_weight_kDa'],
                                   j='sample_id', sep='_', suffix=r'\w+')
davis2019_df = davis2019_df.reset_index()
davis2019_df['Organism'] = 'human'
davis2019_df['Study'] = 'Davis 2019'
davis2019_df['raw_data_units'] = 'iBAQ'
davis2019_df = davis2019_df.rename(columns = {'iBAQ':'raw_data'})
davis2019_df['location'] = 'neurons'
davis2019_df['Age_days'] = 'post-mortem'
davis2019_df['Age_cat'] = 'post-mortem'
return davis2019_df
def get_fecher_2019_dataframe():
"""
Return pandas dataframe for Fecher 2019
:return:
pandas.core.frame.DataFrame: dataframe containing Fecher 2019 data.
"""
print("Importing Fecher 2019 pandas dataframe.")
fecher2019f = pd.ExcelFile('../data/source_data/41593_2019_479_MOESM3_ESM.xlsx')
fecher2019 = fecher2019f.parse('Proteomics from PC, GC & A', skiprows=3)
fecher2019 = fecher2019.drop(
['ENSG', 'Protein name', 'MitoCarta', 'Candidate', 'N (IC GFP / IC Tom) Purkinje cell mito', 'Unnamed: 18',
'Peptides Purkinje cell mito',
'Sequence coverage [%] Purkinje cell mito', 'N (IC GFP / IC Tom) Granule cell mito', 'Unnamed: 33',
'Peptides Granule cell mito', 'Sequence coverage [%] Granule cell mito', 'N (IC GFP / IC Tom)Astrocytic mito',
'Unnamed: 50',
'Peptides Astrocytic mito', 'Sequence coverage [%] Astrocytic mito',
'Shared', 'Localization_LocTree3',
'Core mitochondrial function', 'mitochondria related', 'MIM morbid #',
'MolweightkDa_Purkinjecellmito',
'MolweightkDa_Granulecellmito',
'MolweightkDa_Astrocyticmito'], axis=1)
fecher2019 = fecher2019.rename(columns={
'Gene name': 'gene_names'
})
    fecher2019_df = pd.wide_to_long(fecher2019, stubnames='log2LFQ',
                                    i=['gene_names'],
                                    j='sample_id', sep='_', suffix=r'\w+')
fecher2019_df = fecher2019_df.reset_index()
fecher2019_df['Organism'] = 'mouse' #
fecher2019_df['Study'] = 'Fecher 2019'
fecher2019_df['raw_data_units'] = 'LFQintensity'
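    # convert from log2 LFQ back to the linear scale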
fecher2019_df['raw_data'] = 2 ** fecher2019_df['log2LFQ']
fecher2019_df['location'] = 'mitochondria'
fecher2019_df['Age_days'] = 8 * 7 + 21 # 8- to 9-week-old male mice
fecher2019_df = fecher2019_df.drop(['log2LFQ'],axis=1)
fecher2019_df = fecher2019_df[~fecher2019_df['raw_data'].isna()]
return fecher2019_df
def get_fornasiero_2018_dataframe():
"""
Return pandas dataframe for Fornasiero 2018
:return:
pandas.core.frame.DataFrame: dataframe containing Fornasiero 2018 data.
"""
print("Importing Fornasiero 2018 pandas dataframe.")
fornasierof = pd.ExcelFile('../data/source_data/41467_2018_6519_MOESM3_ESM.xlsx')
fornasiero2018 = fornasierof.parse('Data')
fornasiero2018 = fornasiero2018[
['Uniprot identifier', 'gene_names', 'Protein abundance in brain cortex (average iBAQ expressed as log10+10)']]
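    # the source column is 'average iBAQ expressed as log10+10'; the line below inverts it as
    # 10**x - 10, i.e. treating the transform as log10(iBAQ + 10)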
fornasiero2018['raw_data'] = 10 ** fornasiero2018[
'Protein abundance in brain cortex (average iBAQ expressed as log10+10)'] - 10
fornasiero2018['raw_data_units'] = 'iBAQ'
fornasiero2018 = fornasiero2018.drop(['Protein abundance in brain cortex (average iBAQ expressed as log10+10)'],
axis=1)
fornasiero2018 = fornasiero2018.rename(columns={'Uniprot identifier': 'Uniprot'})
fornasiero2018['Study'] = 'Fornasiero 2018'
fornasiero2018['Organism'] = 'mouse'
fornasiero2018['location'] = 'cortex'
fornasiero2018['Age_cat'] = 'adult' # adult by source
    fornasiero2018['Age_days'] = 3.5 * 30 + 21 + 6 * 7  # approx. 3.5 months; the exact age in days is not used in the analysis, only the age category is used later
return fornasiero2018
def get_guergues_2019_dataframe():
"""
Return pandas dataframe for Guergues 2019
:return:
pandas.core.frame.DataFrame: dataframe containing Guergues 2019 data.
"""
print("Importing Guergues 2019 pandas dataframe.")
guergues2019f = pd.ExcelFile('../data/source_data/pmic13102-sup-0003-tables1.xlsx')
guergues2019 = guergues2019f.parse('proteinGroups')
guergues2019 = guergues2019.drop(
['Protein IDs', 'Protein IDs 1', 'Peptide counts (all)', 'Peptide counts (razor+unique)',
'Peptide counts (unique)', 'Protein names', 'Fasta headers', 'Number of proteins', 'Peptides',
'Razor + unique peptides', 'Unique peptides', 'Peptides STrap_300K_1',
'Peptides STrap_300K_2', 'Peptides STrap_300K_3',
'Razor + unique peptides STrap_300K_1',
'Razor + unique peptides STrap_300K_2',
'Razor + unique peptides STrap_300K_3', 'Unique peptides STrap_300K_1',
'Unique peptides STrap_300K_2', 'Unique peptides STrap_300K_3',
'Sequence coverage [%]', 'Unique + razor sequence coverage [%]',
'Unique sequence coverage [%]', 'Sequence length',
'Sequence lengths', 'Q-value', 'Score',
'Identification type STrap_300K_1', 'Identification type STrap_300K_2',
'Identification type STrap_300K_3',
'Sequence coverage STrap_300K_1 [%]',
'Sequence coverage STrap_300K_2 [%]',
'Sequence coverage STrap_300K_3 [%]', 'Intensity',
'Intensity STrap_300K_1', 'Intensity STrap_300K_2',
'Intensity STrap_300K_3',
'no imp LFQ intensity STrap_300K_1',
'no imp LFQ intensity STrap_300K_2',
'no imp LFQ intensity STrap_300K_3', 'MS/MS count STrap_300K_1',
'MS/MS count STrap_300K_2', 'MS/MS count STrap_300K_3', 'MS/MS count',
'id', 'Peptide IDs', 'Peptide is razor', 'Mod. peptide IDs',
'Evidence IDs', 'MS/MS IDs', 'Best MS/MS', 'Oxidation (M) site IDs',
'Oxidation (M) site positions', 'Gene names sort check'], axis=1)
guergues2019 = guergues2019.rename(columns={'Majority protein IDs': 'Uniprot',
'Gene names': 'gene_names',
'Mol. weight [kDa]': 'molecular_weight_kDa',
'LFQ intensity STrap_300K_1': 'LFQintensity_STrap300K1',
'LFQ intensity STrap_300K_2': 'LFQintensity_STrap300K2',
'LFQ intensity STrap_300K_3': 'LFQintensity_STrap300K3'
})
guergues2019_df = pd.wide_to_long(guergues2019, stubnames='LFQintensity',
i=['Uniprot', 'gene_names', 'molecular_weight_kDa'],
j='sample_id', sep='_', suffix=r'\w+')
guergues2019_df = guergues2019_df.reset_index()
guergues2019_df['Organism'] = 'mouse'
guergues2019_df['Study'] = 'Guergues 2019'
guergues2019_df['raw_data_units'] = 'LFQintensity'
guergues2019_df = guergues2019_df.rename(columns={'LFQintensity': 'raw_data'})
guergues2019_df['location'] = 'microglia'
guergues2019_df['Age_cat'] = 'adult'
    guergues2019_df['Age_days'] = 8 * 7 + 21  # cultured cells from 8-week-old C57BL/6J mice
# by ref [4] <NAME> et al. J. Neuroinflamm. 2016
return guergues2019_df
def get_mcketney_2019_dataframe():
"""
Return pandas dataframe for McKetney 2019
:return:
pandas.core.frame.DataFrame: dataframe containing McKetney 2019 data.
"""
print("Importing McKetney 2019 pandas dataframe.")
mc_ketney2019f = pd.ExcelFile('../data/source_data/pr9b00004_si_002.xlsx')
mc_ketney2019 = mc_ketney2019f.parse('LFQ intensities', skiprows=1)
mc_ketney2019 = mc_ketney2019.drop(['Protein.IDs', 'Majority.protein.IDs', 'entrez_ids', 'geneNames',
'Peptides',
'Sequence coverage [%]', 'Peptide counts (all)',
'Peptide counts (razor+unique)', 'Peptide counts (unique)'], axis=1)
mc_ketney2019 = mc_ketney2019.rename(columns={'Reference Uniprot ID': 'Uniprot',
'geneSym': 'gene_names',
'LFQ.intensity.146_ AMY': 'LFQintensity_s146AMY',
'LFQ.intensity.146_ CNC': 'LFQintensity_s146CNC',
'LFQ.intensity.146_CBM': 'LFQintensity_s146CBM',
'LFQ.intensity.146_ECX': 'LFQintensity_s146ECX',
'LFQ.intensity.146_MFG': 'LFQintensity_s146MFG',
'LFQ.intensity.146_IPL': 'LFQintensity_s146IPL',
'LFQ.intensity.146_STG': 'LFQintensity_s146STG',
'LFQ.intensity.146_THA': 'LFQintensity_s146THA',
'LFQ.intensity.146_VCX': 'LFQintensity_s146VCX',
'LFQ.intensity.383_ AMY': 'LFQintensity_s383AMY',
'LFQ.intensity.383_ CNC': 'LFQintensity_s383CNC',
'LFQ.intensity.383_CBM': 'LFQintensity_s383CBM',
'LFQ.intensity.383_ECX': 'LFQintensity_s383ECX',
'LFQ.intensity.383_MFG': 'LFQintensity_s383MFG',
'LFQ.intensity.383_IPL': 'LFQintensity_s383IPL',
'LFQ.intensity.383_STG': 'LFQintensity_s383STG',
'LFQ.intensity.383_THA': 'LFQintensity_s383THA',
'LFQ.intensity.383_VCX': 'LFQintensity_s383VCX',
'LFQ.intensity.405_ AMY': 'LFQintensity_s405AMY',
'LFQ.intensity.405_ CNC': 'LFQintensity_s405CNC',
'LFQ.intensity.405_ECX': 'LFQintensity_s405ECX',
'LFQ.intensity.405_MFG': 'LFQintensity_s405MFG',
'LFQ.intensity.405_IPL': 'LFQintensity_s405IPL',
'LFQ.intensity.405_STG': 'LFQintensity_s405STG',
'LFQ.intensity.405_THA': 'LFQintensity_s405THA',
'LFQ.intensity.405_VCX': 'LFQintensity_s405VCX'
})
mc_ketney2019_df = pd.wide_to_long(mc_ketney2019, stubnames='LFQintensity',
i=['Uniprot', 'gene_names'],
j='sample_id', sep='_', suffix=r'\w+')
mc_ketney2019_df = mc_ketney2019_df.reset_index()
mc_ketney2019_df['Organism'] = 'human'
mc_ketney2019_df['Study'] = 'McKetney 2019'
mc_ketney2019_df['raw_data_units'] = 'LFQintensity'
mc_ketney2019_df = mc_ketney2019_df.rename(columns={'LFQintensity': 'raw_data'})
mc_ketney2019_df['Age_cat'] = 'adult' # “adult” samples (23−40 years old)
mc_ketney2019_df['Age_days'] = 31 * 365 # “adult” samples (23−40 years old)
mc_ketney2019_df.loc[mc_ketney2019_df['sample_id'].str.contains('AMY'), 'location'] = 'amygdala'
mc_ketney2019_df.loc[mc_ketney2019_df['sample_id'].str.contains('CNC'), 'location'] = 'striatum' # Caudate Nucleus
mc_ketney2019_df.loc[mc_ketney2019_df['sample_id'].str.contains('CBM'), 'location'] = 'cerebellum'
mc_ketney2019_df.loc[mc_ketney2019_df['sample_id'].str.contains('ECX'), 'location'] = 'cortex'
mc_ketney2019_df.loc[
mc_ketney2019_df['sample_id'].str.contains('IPL'), 'location'] = 'cortex' # Inferior Parietal Lobule;
mc_ketney2019_df.loc[
mc_ketney2019_df['sample_id'].str.contains('MFG'), 'location'] = 'cortex' # Middle Frontal Gyrus
mc_ketney2019_df.loc[
mc_ketney2019_df['sample_id'].str.contains('STG'), 'location'] = 'cortex' # Superior Temporal Gyrus
mc_ketney2019_df.loc[mc_ketney2019_df['sample_id'].str.contains('THA'), 'location'] = 'thalamus'
mc_ketney2019_df.loc[mc_ketney2019_df['sample_id'].str.contains('VCX'), 'location'] = 'cortex'
mc_ketney2019_df.loc[mc_ketney2019_df['sample_id'].str.contains('146'), 'condition'] = 'control'
mc_ketney2019_df.loc[mc_ketney2019_df['sample_id'].str.contains('383'), 'condition'] = 'AD_severe'
mc_ketney2019_df.loc[mc_ketney2019_df['sample_id'].str.contains('405'), 'condition'] = 'AD_intermediate'
# LFQ Intensities for all proteins identified in at least one sample.
# Case: 146 = No Tau Tangle; 383 = Severe Tangles; 405 = Intermediate Tangles.
# Sections: AMY=Amygdala; CNC=Caudate Nucleus CBM=Cerebellum; ECX=Entorhinal Cortex;
# IPL=Inferior Parietal Lobule; MFG=Middle Frontal Gyrus; STG=Superior Temporal Gyrus;
# THA=Thalamus; VCX=Visual Cortex;
return mc_ketney2019_df
def get_hasan_2020_dataframe():
"""
Return pandas dataframe for Hasan 2020
:return:
pandas.core.frame.DataFrame: dataframe containing Hasan 2020 data.
"""
print("Importing Hasan 2020 pandas dataframe.")
# Hasan 2020 (TMT 6 brain regions mouse)
hasan2020f =
|
pd.ExcelFile('../data/source_data/pmic13060-sup-0002-tables1.xlsx')
|
pandas.ExcelFile
|
import sys
import os
import socket
import shutil
import argparse
import json
import time
import math
import donkeycar as dk
from donkeycar.parts.datastore import Tub, TubHandler # rbx TubHandler added
from donkeycar.utils import *
from donkeycar.management.tub import TubManager
from donkeycar.management.joystick_creator import CreateJoystick
import numpy as np
from donkeycar.utils import img_to_binary, binary_to_img, arr_to_img, img_to_arr
import cv2
PACKAGE_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
TEMPLATES_PATH = os.path.join(PACKAGE_PATH, 'templates')
def make_dir(path):
real_path = os.path.expanduser(path)
print('making dir ', real_path)
if not os.path.exists(real_path):
os.makedirs(real_path)
return real_path
def load_config(config_path):
'''
load a config from the given path
'''
conf = os.path.expanduser(config_path)
if not os.path.exists(conf):
print("No config file at location: %s. Add --config to specify\
location or run from dir containing config.py." % conf)
return None
try:
cfg = dk.load_config(conf)
except:
print("Exception while loading config from", conf)
return None
return cfg
class BaseCommand(object):
pass
class CreateCar(BaseCommand):
def parse_args(self, args):
parser = argparse.ArgumentParser(prog='createcar', usage='%(prog)s [options]')
parser.add_argument('--path', default=None, help='path where to create car folder')
parser.add_argument('--template', default=None, help='name of car template to use')
parser.add_argument('--overwrite', action='store_true', help='should replace existing files')
parsed_args = parser.parse_args(args)
return parsed_args
def run(self, args):
args = self.parse_args(args)
self.create_car(path=args.path, template=args.template, overwrite=args.overwrite)
def create_car(self, path, template='complete', overwrite=False):
"""
This script sets up the folder structure for donkey to work.
It must run without donkey installed so that people installing with
docker can build the folder structure for docker to mount to.
"""
        # these are needed in case None is passed as path
path = path or '~/mycar'
template = template or 'complete'
print("Creating car folder: {}".format(path))
path = make_dir(path)
print("Creating data & model folders.")
folders = ['models', 'data', 'logs']
folder_paths = [os.path.join(path, f) for f in folders]
for fp in folder_paths:
make_dir(fp)
#add car application and config files if they don't exist
app_template_path = os.path.join(TEMPLATES_PATH, template+'.py')
config_template_path = os.path.join(TEMPLATES_PATH, 'cfg_' + template + '.py')
myconfig_template_path = os.path.join(TEMPLATES_PATH, 'myconfig.py')
train_template_path = os.path.join(TEMPLATES_PATH, 'train.py')
car_app_path = os.path.join(path, 'manage.py')
car_config_path = os.path.join(path, 'config.py')
mycar_config_path = os.path.join(path, 'myconfig.py')
train_app_path = os.path.join(path, 'train.py')
if os.path.exists(car_app_path) and not overwrite:
print('Car app already exists. Delete it and rerun createcar to replace.')
else:
print("Copying car application template: {}".format(template))
shutil.copyfile(app_template_path, car_app_path)
if os.path.exists(car_config_path) and not overwrite:
print('Car config already exists. Delete it and rerun createcar to replace.')
else:
print("Copying car config defaults. Adjust these before starting your car.")
shutil.copyfile(config_template_path, car_config_path)
if os.path.exists(train_app_path) and not overwrite:
print('Train already exists. Delete it and rerun createcar to replace.')
else:
print("Copying train script. Adjust these before starting your car.")
shutil.copyfile(train_template_path, train_app_path)
if not os.path.exists(mycar_config_path):
print("Copying my car config overrides")
shutil.copyfile(myconfig_template_path, mycar_config_path)
#now copy file contents from config to myconfig, with all lines commented out.
cfg = open(car_config_path, "rt")
mcfg = open(mycar_config_path, "at")
copy = False
for line in cfg:
if "import os" in line:
copy = True
if copy:
mcfg.write("# " + line)
cfg.close()
mcfg.close()
print("Donkey setup complete.")
class UpdateCar(BaseCommand):
'''
always run in the base ~/mycar dir to get latest
'''
def parse_args(self, args):
parser = argparse.ArgumentParser(prog='update', usage='%(prog)s [options]')
parsed_args = parser.parse_args(args)
return parsed_args
def run(self, args):
cc = CreateCar()
cc.create_car(path=".", overwrite=True)
class FindCar(BaseCommand):
def parse_args(self, args):
pass
def run(self, args):
print('Looking up your computer IP address...')
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8",80))
ip = s.getsockname()[0]
        print('Your IP address: %s ' % ip)
s.close()
print("Finding your car's IP address...")
cmd = "sudo nmap -sP " + ip + "/24 | awk '/^Nmap/{ip=$NF}/B8:27:EB/{print ip}'"
print("Your car's ip address is:" )
os.system(cmd)
class CalibrateCar(BaseCommand):
def parse_args(self, args):
parser = argparse.ArgumentParser(prog='calibrate', usage='%(prog)s [options]')
parser.add_argument('--channel', help="The channel you'd like to calibrate [0-15]")
parser.add_argument('--address', default='0x40', help="The i2c address you'd like to calibrate [default 0x40]")
parser.add_argument('--bus', default=None, help="The i2c bus you'd like to calibrate [default autodetect]")
parser.add_argument('--pwmFreq', default=60, help="The frequency to use for the PWM")
parser.add_argument('--arduino', dest='arduino', action='store_true', help='Use arduino pin for PWM (calibrate pin=<channel>)')
parser.set_defaults(arduino=False)
parsed_args = parser.parse_args(args)
return parsed_args
def run(self, args):
args = self.parse_args(args)
channel = int(args.channel)
if args.arduino == True:
from donkeycar.parts.actuator import ArduinoFirmata
arduino_controller = ArduinoFirmata(servo_pin=channel)
print('init Arduino PWM on pin %d' %(channel))
input_prompt = "Enter a PWM setting to test ('q' for quit) (0-180): "
else:
from donkeycar.parts.actuator import PCA9685
from donkeycar.parts.sombrero import Sombrero
s = Sombrero()
busnum = None
if args.bus:
busnum = int(args.bus)
address = int(args.address, 16)
print('init PCA9685 on channel %d address %s bus %s' %(channel, str(hex(address)), str(busnum)))
freq = int(args.pwmFreq)
print("Using PWM freq: {}".format(freq))
c = PCA9685(channel, address=address, busnum=busnum, frequency=freq)
input_prompt = "Enter a PWM setting to test ('q' for quit) (0-1500): "
print()
while True:
try:
val = input(input_prompt)
if val == 'q' or val == 'Q':
break
                pwm = int(val)
                if args.arduino == True:
                    arduino_controller.set_pulse(channel, pwm)
                else:
                    c.run(pwm)
except KeyboardInterrupt:
print("\nKeyboardInterrupt received, exit.")
break
except Exception as ex:
print("Oops, {}".format(ex))
class MakeMovieShell(BaseCommand):
'''
take the make movie args and then call make movie command
with lazy imports
'''
def __init__(self):
self.deg_to_rad = math.pi / 180.0
def parse_args(self, args):
parser = argparse.ArgumentParser(prog='makemovie')
parser.add_argument('--tub', help='The tub to make movie from')
parser.add_argument('--out', default='tub_movie.mp4', help='The movie filename to create. default: tub_movie.mp4')
parser.add_argument('--config', default='./config.py', help='location of config file to use. default: ./config.py')
parser.add_argument('--model', default=None, help='the model to use to show control outputs')
parser.add_argument('--type', default=None, help='the model type to load')
parser.add_argument('--salient', action="store_true", help='should we overlay salient map showing activations')
parser.add_argument('--start', type=int, default=0, help='first frame to process')
parser.add_argument('--end', type=int, default=-1, help='last frame to process')
parser.add_argument('--scale', type=int, default=2, help='make image frame output larger by X mult')
parsed_args = parser.parse_args(args)
return parsed_args, parser
def run(self, args):
'''
        Load the images from a tub and create a movie from them.
'''
args, parser = self.parse_args(args)
from donkeycar.management.makemovie import MakeMovie
mm = MakeMovie()
mm.run(args, parser)
class TubCheck(BaseCommand):
def parse_args(self, args):
parser = argparse.ArgumentParser(prog='tubcheck', usage='%(prog)s [options]')
parser.add_argument('tubs', nargs='+', help='paths to tubs')
parser.add_argument('--fix', action='store_true', help='remove problem records')
parser.add_argument('--delete_empty', action='store_true', help='delete tub dir with no records')
parsed_args = parser.parse_args(args)
return parsed_args
def check(self, tub_paths, fix=False, delete_empty=False):
'''
Check for any problems. Looks at tubs and find problems in any records or images that won't open.
If fix is True, then delete images and records that cause problems.
'''
cfg = load_config('config.py')
tubs = gather_tubs(cfg, tub_paths)
for tub in tubs:
tub.check(fix=fix)
if delete_empty and tub.get_num_records() == 0:
import shutil
print("removing empty tub", tub.path)
shutil.rmtree(tub.path)
def run(self, args):
args = self.parse_args(args)
self.check(args.tubs, args.fix, args.delete_empty)
class ShowHistogram(BaseCommand):
def parse_args(self, args):
parser = argparse.ArgumentParser(prog='tubhist', usage='%(prog)s [options]')
parser.add_argument('--tub', nargs='+', help='paths to tubs')
parser.add_argument('--record', default=None, help='name of record to create histogram')
parsed_args = parser.parse_args(args)
return parsed_args
def show_histogram(self, tub_paths, record_name):
'''
Produce a histogram of record type frequency in the given tub
'''
from matplotlib import pyplot as plt
from donkeycar.parts.datastore import TubGroup
tg = TubGroup(tub_paths=tub_paths)
if record_name is not None:
tg.df[record_name].hist(bins=50)
else:
tg.df.hist(bins=50)
try:
filename = os.path.basename(tub_paths) + '_hist_%s.png' % record_name.replace('/', '_')
plt.savefig(filename)
print('saving image to:', filename)
except:
pass
plt.show()
def run(self, args):
args = self.parse_args(args)
args.tub = ','.join(args.tub)
self.show_histogram(args.tub, args.record)
class ShowTrack0(BaseCommand):
def parse_args(self, args):
parser = argparse.ArgumentParser(prog='tubhist', usage='%(prog)s [options]')
parser.add_argument('--tub', nargs='+', help='paths to tubs')
parser.add_argument('--record', default=None, help='name of record to create histogram')
parsed_args = parser.parse_args(args)
return parsed_args
def show_histogram(self, tub_paths, record_name):
'''
Produce a histogram of record type frequency in the given tub
'''
from matplotlib import pyplot as plt
from donkeycar.parts.datastore import TubGroup
print("****************************************")
print("tub_paths: ", tub_paths)
print("record_name: ", record_name)
print("****************************************")
tg = TubGroup(tub_paths=tub_paths)
if record_name is not None:
tg.df[record_name].hist(bins=50)
else:
tg.df.hist(bins=50)
print("tg: ",tg)
try:
filename = os.path.basename(tub_paths) + '_hist_%s.png' % record_name.replace('/', '_')
plt.savefig(filename)
print('saving image to:', filename)
except:
pass
plt.show()
def run(self, args):
args = self.parse_args(args)
args.tub = ','.join(args.tub)
self.show_histogram(args.tub, args.record)
class ShowTrack(BaseCommand):
def plot_tracks(self, cfg, tub_paths, limit):
'''
Plot track data from tubs.
usage
donkey tubtrack --tub data/tub_33_20-04-16 --config ./myconfig.py
'''
import matplotlib.pyplot as plt
import pandas as pd
records = gather_records(cfg, tub_paths)
user_angles = []
user_throttles = []
pilot_angles = []
pilot_throttles = []
pos_speeds = []
pos_pos_xs = []
pos_pos_ys = []
pos_pos_zs = []
pos_ctes = []
records = records[:limit]
num_records = len(records)
print('processing %d records:' % num_records)
for record_path in records:
with open(record_path, 'r') as fp:
record = json.load(fp)
user_angle = float(record["user/angle"])
user_throttle = float(record["user/throttle"])
user_angles.append(user_angle)
user_throttles.append(user_throttle)
pos_speed = float(record["pos/speed"])
pos_pos_x = float(record["pos/pos_x"])
pos_pos_y = float(record["pos/pos_y"])
pos_pos_z = float(record["pos/pos_z"])
pos_cte = float(record["pos/cte"])
pos_pos_xs.append(pos_pos_x)
pos_pos_ys.append(pos_pos_y)
pos_pos_zs.append(pos_pos_z)
pos_speeds.append(pos_speed)
pos_ctes.append(pos_cte)
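        # note: the pilot_* lists declared above are never filled from the records,
        # so the pilot columns below simply mirror the user values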
angles_df = pd.DataFrame({'user_angle': user_angles, 'pilot_angle': user_angles})
throttles_df = pd.DataFrame({'user_throttle': user_throttles, 'pilot_throttle': user_throttles})
tracks_df = pd.DataFrame({'pos_x': pos_pos_xs, 'pos_z': pos_pos_zs})
speeds_df =
|
pd.DataFrame({'pos_speeds': pos_speeds, 'pos_ctes': pos_ctes})
|
pandas.DataFrame
|
import pandas
import random
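# build 4500 synthetic student ids: 3 leading cohort prefixes x 3 middle codes x 500 serial numbers
# (assumption: the prefixes encode enrollment year and department)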
stu_id = [i+j+k for k in [18000000000, 19000000000, 20000000000] for j in [307110000, 307130000, 300180000] for i in range(500)]
Roads = ["Avenue", "Street", "Road", "Lane"]
Cities = ["Tokyo", "Delhi", "Manila", "Sao Paulo", "Guangzhou", "Shanghai", "Beijing", "Los Angeles", "Bangkok",
"Seoul", "Buenos Aires", "Paris", "London", "Madrid", "Hong Kong"]
names = pandas.read_csv(r"..\playground\names.csv")
names = list(names['names'])
user = []
for stu in stu_id:
surname = random.choice(names)
lastname = random.choice(names)
user.append(
[surname + ' ' + lastname,
random.choices(["Male", "Female", "None"], [10, 10, 1], k=1)[0],
random.choice(['189', '186', '137', '191', '158']) + str(random.randint(10000000, 100000000)),
str(stu) + '@fudan.edu.cn',
str(random.randint(1, 999)) + ' ' + random.choice(names)[:6] + ' ' + random.choice(Roads) + ', ' + random.choice(
Cities),
random.choice([0, 1])])
user =
|
pandas.DataFrame(user, columns=['name', 'sex', 'phone', 'email', 'address', 'vip'])
|
pandas.DataFrame
|
import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
'''
Load in the messages and categories datasets and merge them into one dataframe.
Args:
messages_filepath (str): path to the messages csv file
categories_filepath (str): path to the categories csv file
Returns:
(Pandas dataframe) merged data
'''
messages =
|
pd.read_csv(messages_filepath)
|
pandas.read_csv
|
import os
root_path = os.path.dirname(os.path.abspath('__file__'))
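# note: '__file__' is quoted, so abspath resolves a literal file name in the current working
# directory and root_path effectively becomes the directory the script is run from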
import sys
import glob
import pandas as pd
import numpy as np
from sklearn import decomposition
import deprecated
import logging
sys.path.append(root_path)
from config.globalLog import logger
def generate_monoscale_samples(source_file, save_path, lags_dict, column, test_len, lead_time=1,regen=False):
"""Generate learning samples for autoregression problem using original time series.
Args:
'source_file' -- ['String'] The source data file path.
'save_path' --['String'] The path to restore the training, development and testing samples.
'lags_dict' -- ['int dict'] The lagged time for original time series.
    'column' -- ['String'] The column name used to read the source data with pandas.
'test_len' --['int'] The length of development and testing set.
'lead_time' --['int'] The lead time.
"""
    logger.info('Generating multi-step decomposition-ensemble hindcasting samples')
save_path = save_path+'/'+str(lead_time)+'_ahead_pacf/'
logger.info('Source file:{}'.format(source_file))
logger.info('Save path:{}'.format(save_path))
if not os.path.exists(save_path):
os.makedirs(save_path)
if len(os.listdir(save_path))>0 and not regen:
logger.info('Learning samples have been generated!')
else:
        # Load data from local disk
if '.xlsx' in source_file:
dataframe = pd.read_excel(source_file)[column]
elif '.csv' in source_file:
dataframe = pd.read_csv(source_file)[column]
# convert pandas dataframe to numpy array
nparr = np.array(dataframe)
# Create an empty pandas Dataframe
full_samples = pd.DataFrame()
# Generate input series based on lag and add these series to full dataset
lag = lags_dict['ORIG']
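        # sliding-window construction: with lag=3 and lead_time=1, row t holds
        # X1..X3 = series[t], series[t+1], series[t+2] and Y = series[t+3]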
for i in range(lag):
x = pd.DataFrame(nparr[i:dataframe.shape[0] -
(lag - i)], columns=['X' + str(i + 1)])
x = x.reset_index(drop=True)
full_samples = pd.concat([full_samples, x], axis=1, sort=False)
# Generate label data
label = pd.DataFrame(nparr[lag+lead_time-1:], columns=['Y'])
label = label.reset_index(drop=True)
full_samples = full_samples[:full_samples.shape[0]-(lead_time-1)]
full_samples = full_samples.reset_index(drop=True)
        # Add labeled data to the full sample set
full_samples = pd.concat([full_samples, label], axis=1, sort=False)
# Get the length of this series
series_len = full_samples.shape[0]
# Get the training and developing set
train_dev_samples = full_samples[0:(series_len - test_len)]
# Get the testing set.
test_samples = full_samples[(series_len - test_len):series_len]
# train_dev_len = train_dev_samples.shape[0]
train_samples = full_samples[0:(series_len - test_len - test_len)]
dev_samples = full_samples[(
series_len - test_len - test_len):(series_len - test_len)]
assert (train_samples.shape[0] + dev_samples.shape[0] +
test_samples.shape[0]) == series_len
# Get the max and min value of each series
series_max = train_samples.max(axis=0)
series_min = train_samples.min(axis=0)
# Normalize each series to the range between -1 and 1
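        # i.e. x_scaled = 2 * (x - min_train) / (max_train - min_train) - 1, using
        # training-split statistics only so no information from the dev/test sets leaks in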
train_samples = 2 * (train_samples - series_min) / \
(series_max - series_min) - 1
dev_samples = 2 * (dev_samples - series_min) / \
(series_max - series_min) - 1
test_samples = 2 * (test_samples - series_min) / \
(series_max - series_min) - 1
logger.info('Series length:{}'.format(series_len))
logger.info(
'Training-development sample size:{}'.format(train_dev_samples.shape[0]))
logger.info('Training sample size:{}'.format(train_samples.shape[0]))
logger.info('Development sample size:{}'.format(dev_samples.shape[0]))
logger.info('Testing sample size:{}'.format(test_samples.shape[0]))
series_max = pd.DataFrame(series_max, columns=['series_max'])
series_min = pd.DataFrame(series_min, columns=['series_min'])
normalize_indicators = pd.concat([series_max, series_min], axis=1)
normalize_indicators.to_csv(save_path+'norm_unsample_id.csv')
train_samples.to_csv(save_path+'minmax_unsample_train.csv', index=None)
dev_samples.to_csv(save_path+'minmax_unsample_dev.csv', index=None)
test_samples.to_csv(save_path+'minmax_unsample_test.csv', index=None)
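# --- Illustrative usage sketch (not part of the original module; the paths, lag and column below are hypothetical) ---
# generate_monoscale_samples(
#     source_file='data/runoff.csv',   # hypothetical CSV containing a 'runoff' column
#     save_path='data/samples',        # hypothetical output directory
#     lags_dict={'ORIG': 12},          # use the 12 most recent observations as predictors
#     column='runoff',
#     test_len=120,                    # 120 records each for the development and testing sets
#     lead_time=1)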
def gen_one_step_hindcast_samples(station, decomposer, lags_dict, input_columns, output_column, test_len,
wavelet_level="db10-2", lead_time=1,regen=False):
"""
Generate one step hindcast decomposition-ensemble learning samples.
Args:
'station'-- ['string'] The station where the original time series come from.
'decomposer'-- ['string'] The decomposition algorithm used for decomposing the original time series.
'lags_dict'-- ['int dict'] The lagged time for each subsignal.
'input_columns'-- ['string list'] The input columns' names used for generating the learning samples.
'output_column'-- ['string'] The output column's name used for generating the learning samples.
'test_len'-- ['int'] The size of the development and testing samples.
"""
logger.info('Generating one-step decomposition ensemble hindcasting samples')
logger.info('Station:{}'.format(station))
logger.info('Decomposer:{}'.format(decomposer))
logger.info('Lags_dict:{}'.format(lags_dict))
logger.info('Input columns:{}'.format(input_columns))
logger.info('Output column:{}'.format(output_column))
logger.info('Testing sample length:{}'.format(test_len))
logger.info(
'Mother wavelet and decomposition level:{}'.format(wavelet_level))
logger.info('Lead time:{}'.format(lead_time))
# Load data from local disk
if decomposer == "dwt" or decomposer == 'modwt':
data_path = root_path+"/"+station+"_"+decomposer+"/data/"+wavelet_level+"/"
else:
data_path = root_path+"/"+station+"_"+decomposer+"/data/"
save_path = data_path+"one_step_"+str(lead_time)+"_ahead_hindcast_pacf/"
if not os.path.exists(save_path):
os.makedirs(save_path)
if len(os.listdir(save_path))>0 and not regen:
logger.info('Learning samples have been generated!')
else:
decompose_file = data_path+decomposer.upper()+"_FULL.csv"
decompositions = pd.read_csv(decompose_file)
# Drop NaN
decompositions.dropna()
# Get the input data (the decompositions)
input_data = decompositions[input_columns]
# Get the output data (the original time series)
output_data = decompositions[output_column]
# Get the number of input features
subsignals_num = input_data.shape[1]
# Get the data size
data_size = input_data.shape[0]
# Compute the samples size
max_lag = max(lags_dict.values())
samples_size = data_size-max_lag
# Generate feature columns
samples_cols = []
for i in range(sum(lags_dict.values())):
samples_cols.append('X'+str(i+1))
samples_cols.append('Y')
# Generate input columns for each subsignal
full_samples = pd.DataFrame()
for i in range(subsignals_num):
# Get one subsignal
one_in = (input_data[input_columns[i]]).values
oness = pd.DataFrame()
lag = lags_dict[input_columns[i]]
for j in range(lag):
x = pd.DataFrame(one_in[j:data_size-(lag-j)],
columns=['X' + str(j + 1)])
x = x.reset_index(drop=True)
oness = pd.concat([oness, x], axis=1, sort=False)
# make the sample size of each subsignal identical
oness = oness.iloc[oness.shape[0]-samples_size:]
oness = oness.reset_index(drop=True)
full_samples = pd.concat([full_samples, oness], axis=1, sort=False)
# Get the target
target = (output_data.values)[max_lag+lead_time-1:]
target = pd.DataFrame(target, columns=['Y'])
full_samples = full_samples[:full_samples.shape[0]-(lead_time-1)]
full_samples = full_samples.reset_index(drop=True)
# Concat the features and target
full_samples = pd.concat([full_samples, target], axis=1, sort=False)
full_samples = pd.DataFrame(full_samples.values, columns=samples_cols)
full_samples.to_csv(save_path+'full_samples.csv')
assert samples_size == full_samples.shape[0]
# Get the training and developing set
train_dev_samples = full_samples[0:(samples_size - test_len)]
# Get the testing set.
test_samples = full_samples[(samples_size - test_len):samples_size]
# train_dev_len = train_dev_samples.shape[0]
train_samples = full_samples[0:(samples_size - test_len - test_len)]
dev_samples = full_samples[(
samples_size - test_len - test_len):(samples_size - test_len)]
assert (train_samples['X1'].size + dev_samples['X1'].size +
test_samples['X1'].size) == samples_size
# Get the max and min value of training set
series_max = train_samples.max(axis=0)
series_min = train_samples.min(axis=0)
# Normalize each series to the range between -1 and 1
train_samples = 2 * (train_samples - series_min) / \
(series_max - series_min) - 1
dev_samples = 2 * (dev_samples - series_min) / \
(series_max - series_min) - 1
test_samples = 2 * (test_samples - series_min) / \
(series_max - series_min) - 1
logger.info('Save path:{}'.format(save_path))
logger.info('Series length:{}'.format(samples_size))
logger.info('Training and development sample size:{}'.format(
train_dev_samples.shape[0]))
logger.info('Training sample size:{}'.format(train_samples.shape[0]))
logger.info('Development sample size:{}'.format(dev_samples.shape[0]))
logger.info('Testing sample size:{}'.format(test_samples.shape[0]))
series_max = pd.DataFrame(series_max, columns=['series_max'])
series_min = pd.DataFrame(series_min, columns=['series_min'])
normalize_indicators = pd.concat([series_max, series_min], axis=1)
normalize_indicators.to_csv(save_path+'norm_unsample_id.csv')
train_samples.to_csv(save_path + 'minmax_unsample_train.csv', index=None)
dev_samples.to_csv(save_path + 'minmax_unsample_dev.csv', index=None)
test_samples.to_csv(save_path+'minmax_unsample_test.csv', index=None)
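# --- Illustrative helper (an assumption for clarity, not part of the original module) ---
# The nested loops above build a lagged design matrix column by column. The helper below shows
# the same idea in isolation: for series [1, 2, 3, 4, 5] and lag=2 it returns X1=[1, 2, 3],
# X2=[2, 3, 4] and the target Y=[3, 4, 5].
def _demo_lag_matrix(series, lag):
    """Build a lagged sample matrix from a 1-D sequence (illustration only)."""
    arr = np.asarray(series)
    samples = pd.DataFrame()
    for i in range(lag):
        x = pd.DataFrame(arr[i:arr.shape[0] - (lag - i)], columns=['X' + str(i + 1)])
        samples = pd.concat([samples, x.reset_index(drop=True)], axis=1, sort=False)
    label = pd.DataFrame(arr[lag:], columns=['Y']).reset_index(drop=True)
    return pd.concat([samples, label], axis=1, sort=False)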
def gen_one_step_forecast_samples_triandev_test(station, decomposer, lags_dict, input_columns, output_column, start, stop, test_len,
wavelet_level="db10-2", lead_time=1,regen=False):
"""
Generate one step forecast decomposition-ensemble samples.
Args:
'station'-- ['string'] The station where the original time series come from.
'decomposer'-- ['string'] The decomposition algorithm used for decomposing the original time series.
'lags_dict'-- ['int dict'] The lagged time for subsignals.
'input_columns'-- ['string list'] The input column names used to read the source data with pandas.
'output_column'-- ['string'] The output column name used to read the source data with pandas.
'start'-- ['int'] The start index of the appended decomposition file.
'stop'-- ['int'] The stop index of the appended decomposition file.
'test_len'-- ['int'] The size of the development and testing samples.
"""
logger.info(
'Generating one-step decomposition ensemble forecasting samples (traindev-test pattern)')
logger.info('Station:{}'.format(station))
logger.info('Decomposer:{}'.format(decomposer))
logger.info('Lags_dict:{}'.format(lags_dict))
logger.info('Input columns:{}'.format(input_columns))
logger.info('Output column:{}'.format(output_column))
logger.info('Validation start index:{}'.format(start))
logger.info('Validation stop index:{}'.format(stop))
logger.info('Testing sample length:{}'.format(test_len))
logger.info(
'Mother wavelet and decomposition level:{}'.format(wavelet_level))
logger.info('Lead time:{}'.format(lead_time))
# Load data from local disk
if decomposer == "dwt" or decomposer == 'modwt':
data_path = root_path+"/"+station+"_"+decomposer+"/data/"+wavelet_level+"/"
else:
data_path = root_path+"/"+station+"_"+decomposer+"/data/"
save_path = data_path+"one_step_" + \
str(lead_time)+"_ahead_forecast_pacf_traindev_test/"
if not os.path.exists(save_path):
os.makedirs(save_path)
if len(os.listdir(save_path))>0 and not regen:
logger.info('Learning samples have been generated!')
else:
# !!!!!!Generate training samples
traindev_decompose_file = data_path+decomposer.upper()+"_TRAINDEV.csv"
traindev_decompositions = pd.read_csv(traindev_decompose_file)
# Drop NaN
traindev_decompositions.dropna()
# Get the input data (the decompositions)
traindev_input_data = traindev_decompositions[input_columns]
# Get the output data (the original time series)
traindev_output_data = traindev_decompositions[output_column]
# Get the number of input features
subsignals_num = traindev_input_data.shape[1]
# Get the data size
traindev_data_size = traindev_input_data.shape[0]
# Compute the samples size
max_lag = max(lags_dict.values())
traindev_samples_size = traindev_data_size-max_lag
# Generate feature columns
samples_cols = []
for i in range(sum(lags_dict.values())):
samples_cols.append('X'+str(i+1))
samples_cols.append('Y')
# Generate input columns for each input feature
train_dev_samples = pd.DataFrame()
for i in range(subsignals_num):
# Get one input feature
one_in = (traindev_input_data[input_columns[i]]).values # subsignal
lag = lags_dict[input_columns[i]]
oness = pd.DataFrame() # restore input features
for j in range(lag):
x = pd.DataFrame(one_in[j:traindev_data_size-(lag-j)],
columns=['X' + str(j + 1)])['X' + str(j + 1)]
x = x.reset_index(drop=True)
oness = pd.DataFrame(pd.concat([oness, x], axis=1))
oness = oness.iloc[oness.shape[0]-traindev_samples_size:]
oness = oness.reset_index(drop=True)
train_dev_samples = pd.DataFrame(
pd.concat([train_dev_samples, oness], axis=1))
# Get the target
target = (traindev_output_data.values)[max_lag+lead_time-1:]
target = pd.DataFrame(target, columns=['Y'])
train_dev_samples = train_dev_samples[:traindev_samples_size-(lead_time-1)]
train_dev_samples = train_dev_samples.reset_index(drop=True)
# Concat the features and target
train_dev_samples = pd.concat([train_dev_samples, target], axis=1)
train_dev_samples = pd.DataFrame(
train_dev_samples.values, columns=samples_cols)
train_dev_samples.to_csv(save_path+'train_dev_samples.csv')
train_samples = train_dev_samples[:train_dev_samples.shape[0]-120]
dev_samples = train_dev_samples[train_dev_samples.shape[0]-120:]
assert traindev_samples_size == train_dev_samples.shape[0]
# normalize the train_samples
series_max = train_samples.max(axis=0)
series_min = train_samples.min(axis=0)
# Normalize each series to the range between -1 and 1
train_samples = 2 * (train_samples - series_min) / \
(series_max - series_min) - 1
dev_samples = 2 * (dev_samples - series_min) / \
(series_max - series_min) - 1
test_samples = pd.DataFrame()
appended_file_path = data_path+decomposer+"-test/"
for k in range(start, stop+1):
# Load data from local disk
appended_decompositions = pd.read_csv(
appended_file_path+decomposer+'_appended_test'+str(k)+'.csv')
# Drop NaN
appended_decompositions.dropna()
# Get the input data (the decompositions)
input_data = appended_decompositions[input_columns]
# Get the output data (the original time series)
output_data = appended_decompositions[output_column]
# Get the number of input features
subsignals_num = input_data.shape[1]
# Get the data size
data_size = input_data.shape[0]
# Compute the samples size
samples_size = data_size-max_lag
# Generate input columns for each subsignal
appended_samples = pd.DataFrame()
for i in range(subsignals_num):
# Get one subsignal
one_in = (input_data[input_columns[i]]).values
lag = lags_dict[input_columns[i]]
oness = pd.DataFrame()
for j in range(lag):
x = pd.DataFrame(
one_in[j:data_size-(lag-j)], columns=['X' + str(j + 1)])['X' + str(j + 1)]
x = x.reset_index(drop=True)
oness = pd.DataFrame(pd.concat([oness, x], axis=1))
oness = oness.iloc[oness.shape[0]-samples_size:]
oness = oness.reset_index(drop=True)
appended_samples = pd.DataFrame(
pd.concat([appended_samples, oness], axis=1))
# Get the target
target = (output_data.values)[max_lag+lead_time-1:]
target = pd.DataFrame(target, columns=['Y'])
appended_samples = appended_samples[:
appended_samples.shape[0]-(lead_time-1)]
appended_samples = appended_samples.reset_index(drop=True)
# Concat the features and target
appended_samples = pd.concat([appended_samples, target], axis=1)
appended_samples = pd.DataFrame(
appended_samples.values, columns=samples_cols)
# Get the last sample of full samples
last_sample = appended_samples.iloc[appended_samples.shape[0]-1:]
test_samples = pd.concat([test_samples, last_sample], axis=0)
test_samples = test_samples.reset_index(drop=True)
test_samples.to_csv(save_path+'test_samples.csv')
test_samples = 2*(test_samples-series_min)/(series_max-series_min)-1
assert test_len == test_samples.shape[0]
logger.info('Save path:{}'.format(save_path))
logger.info('The size of training samples:{}'.format(
train_samples.shape[0]))
logger.info('The size of development samples:{}'.format(
dev_samples.shape[0]))
logger.info('The size of testing samples:{}'.format(test_samples.shape[0]))
series_max = pd.DataFrame(series_max, columns=['series_max'])
series_min = pd.DataFrame(series_min, columns=['series_min'])
normalize_indicators = pd.concat([series_max, series_min], axis=1)
normalize_indicators.to_csv(save_path+"norm_unsample_id.csv")
train_samples.to_csv(save_path+'minmax_unsample_train.csv', index=None)
dev_samples.to_csv(save_path+'minmax_unsample_dev.csv', index=None)
test_samples.to_csv(save_path+'minmax_unsample_test.csv', index=None)
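# --- Illustrative helper (an assumption for clarity, not part of the original module) ---
# The samples above are scaled to [-1, 1] with the training min/max, which are written to
# norm_unsample_id.csv. A prediction made in the normalized space can be mapped back to the
# original scale with the inverse transform sketched below; 'norm_id_path' is a hypothetical path.
def _demo_inverse_minmax(norm_values, norm_id_path):
    """Map normalized predictions back to the original scale (illustration only)."""
    norm_id = pd.read_csv(norm_id_path)
    y_max = norm_id['series_max'].values[-1]  # the last row holds the statistics of the 'Y' column
    y_min = norm_id['series_min'].values[-1]
    return (np.asarray(norm_values) + 1) / 2 * (y_max - y_min) + y_min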
def gen_one_step_forecast_samples(station, decomposer, lags_dict, input_columns, output_column, start, stop, test_len,
wavelet_level="db10-2", lead_time=1, mode='PACF', pre_times=20, filter_boundary=0.2, n_components=None,regen=False):
"""
Generate one-step forecast decomposition-ensemble samples based on the
partial autocorrelation function (PACF) or the Pearson correlation coefficient (Pearson).
Set n_components to 'mle' or an integer to perform principal component analysis (PCA).
Args:
'station'-- ['string'] The station where the original time series come from.
'decomposer'-- ['string'] The decomposition algorithm used for decomposing the original time series.
'lags_dict'-- ['int dict'] The lagged time for subsignals in 'PACF' mode.
'input_columns'-- ['string list'] The input column names used to read the source data with pandas.
'output_column'-- ['string'] The output column name used to read the source data with pandas.
'start'-- ['int'] The start index of the appended decomposition file.
'stop'-- ['int'] The stop index of the appended decomposition file.
'test_len'-- ['int'] The size of the development and testing samples.
'wavelet_level'-- ['String'] The mother wavelet and decomposition level of DWT.
'lead_time'-- ['int'] The lead time for auto regression models.
'mode'-- ['String'] The samples generation mode, i.e., "PACF" and "Pearson", for auto regression models.
'pre_times'-- ['int'] The number of lag times used to compute the Pearson correlation coefficient.
'filter_boundary'-- ['float'] The filter threshold of the Pearson correlation coefficient for selecting input predictors.
'n_components'-- ['String or int'] The number of retained components in PCA. If n_components is set to None, PCA will not be performed.
"""
logger.info(
"Generateing one-step decomposition ensemble forecasting samples (train-devtest pattern)")
logger.info('Station:{}'.format(station))
logger.info('Decomposer:{}'.format(decomposer))
logger.info('Lags_dict:{}'.format(lags_dict))
logger.info('Input columns:{}'.format(input_columns))
logger.info('Output column:{}'.format(output_column))
logger.info('Validation start index:{}'.format(start))
logger.info('Validation stop index:{}'.format(stop))
logger.info('Testing sample length:{}'.format(test_len))
logger.info(
'Mother wavelet and decomposition level:{}'.format(wavelet_level))
logger.info('Lead time:{}'.format(lead_time))
logger.info('Generation mode:{}'.format(mode))
logger.info('Selected previous lag times:{}'.format(pre_times))
logger.info(
'Filter threshold of predictors selection:{}'.format(filter_boundary))
logger.info('Number of components for PCA:{}'.format(n_components))
# Load data from local disk
if decomposer == "dwt" or decomposer == 'modwt':
data_path = root_path+"/"+station+"_"+decomposer+"/data/"+wavelet_level+"/"
else:
data_path = root_path+"/"+station+"_"+decomposer+"/data/"
if mode == 'PACF' and n_components == None:
save_path = data_path+"one_step_" + \
str(lead_time)+"_ahead_forecast_pacf/"
elif mode == 'PACF' and n_components != None:
save_path = data_path+"one_step_" + \
str(lead_time)+"_ahead_forecast_pacf_pca"+str(n_components)+"/"
elif mode == 'Pearson' and n_components == None:
save_path = data_path+"one_step_" + \
str(lead_time)+"_ahead_forecast_pearson"+str(filter_boundary)+"/"
elif mode == 'Pearson' and n_components != None:
save_path = data_path+"one_step_" + \
str(lead_time)+"_ahead_forecast_pearson" + \
str(filter_boundary)+"_pca"+str(n_components)+"/"
if not os.path.exists(save_path):
os.makedirs(save_path)
if len(os.listdir(save_path))>0 and not regen:
logger.info('Learning samples have been generated!')
else:
# !!!!!!Generate training samples
if mode == 'PACF':
train_decompose_file = data_path+decomposer.upper()+"_TRAIN.csv"
train_decompositions = pd.read_csv(train_decompose_file)
# Drop NaN
train_decompositions.dropna()
# Get the input data (the decompositions)
train_input_data = train_decompositions[input_columns]
# Get the output data (the original time series)
train_output_data = train_decompositions[output_column]
# Get the number of input features
subsignals_num = train_input_data.shape[1]
# Get the data size
train_data_size = train_input_data.shape[0]
# Compute the samples size
max_lag = max(lags_dict.values())
logger.debug('max lag:{}'.format(max_lag))
train_samples_size = train_data_size-max_lag
# Generate feature columns
samples_cols = []
for i in range(sum(lags_dict.values())):
samples_cols.append('X'+str(i+1))
samples_cols.append('Y')
# Generate input columns for each input feature
train_samples = pd.DataFrame()
for i in range(subsignals_num):
# Get one input feature
one_in = (train_input_data[input_columns[i]]).values # subsignal
lag = lags_dict[input_columns[i]]
logger.debug('lag:{}'.format(lag))
oness = pd.DataFrame() # restore input features
for j in range(lag):
x = pd.DataFrame(one_in[j:train_data_size-(lag-j)], columns=['X' + str(j + 1)])
x = x.reset_index(drop=True)
oness = pd.concat([oness, x], axis=1, sort=False)
logger.debug("oness:\n{}".format(oness))
oness = oness.iloc[oness.shape[0]-train_samples_size:]
oness = oness.reset_index(drop=True)
train_samples = pd.concat([train_samples, oness], axis=1, sort=False)
# Get the target
target = (train_output_data.values)[max_lag+lead_time-1:]
target = pd.DataFrame(target, columns=['Y'])
# Concat the features and target
train_samples = train_samples[:train_samples.shape[0]-(lead_time-1)]
train_samples = train_samples.reset_index(drop=True)
train_samples = pd.concat([train_samples, target], axis=1)
train_samples = pd.DataFrame(train_samples.values, columns=samples_cols)
train_samples.to_csv(save_path+'train_samples.csv')
# assert train_samples_size == train_samples.shape[0]
# !!!!!!!!!!!Generate development and testing samples
dev_test_samples = pd.DataFrame()
appended_file_path = data_path+decomposer+"-test/"
for k in range(start, stop+1):
# Load data from local disk
appended_decompositions = pd.read_csv(
appended_file_path+decomposer+'_appended_test'+str(k)+'.csv')
# Drop NaN
appended_decompositions.dropna()
# Get the input data (the decompositions)
input_data = appended_decompositions[input_columns]
# Get the output data (the original time series)
output_data = appended_decompositions[output_column]
# Get the number of input features
subsignals_num = input_data.shape[1]
# Get the data size
data_size = input_data.shape[0]
# Compute the samples size
samples_size = data_size-max_lag
# Generate input columns for each subsignal
appended_samples = pd.DataFrame()
for i in range(subsignals_num):
# Get one subsignal
one_in = (input_data[input_columns[i]]).values
lag = lags_dict[input_columns[i]]
oness = pd.DataFrame()
for j in range(lag):
x = pd.DataFrame(
one_in[j:data_size-(lag-j)], columns=['X' + str(j + 1)])
x = x.reset_index(drop=True)
oness = pd.concat([oness, x], axis=1, sort=False)
oness = oness.iloc[oness.shape[0]-samples_size:]
oness = oness.reset_index(drop=True)
appended_samples = pd.concat(
[appended_samples, oness], axis=1, sort=False)
# Get the target
target = (output_data.values)[max_lag+lead_time-1:]
target = pd.DataFrame(target, columns=['Y'])
# Concat the features and target
appended_samples = appended_samples[:
appended_samples.shape[0]-(lead_time-1)]
appended_samples = appended_samples.reset_index(drop=True)
appended_samples = pd.concat(
[appended_samples, target], axis=1, sort=False)
appended_samples = pd.DataFrame(
appended_samples.values, columns=samples_cols)
# Get the last sample of full samples
last_sample = appended_samples.iloc[appended_samples.shape[0]-1:]
dev_test_samples = pd.concat(
[dev_test_samples, last_sample], axis=0)
dev_test_samples = dev_test_samples.reset_index(drop=True)
dev_test_samples.to_csv(save_path+'dev_test_samples.csv')
dev_samples = dev_test_samples.iloc[0: dev_test_samples.shape[0]-test_len]
test_samples = dev_test_samples.iloc[dev_test_samples.shape[0]-test_len:]
if n_components != None:
logger.info('Perform PCA on samples based on PACF')
samples = pd.concat([train_samples, dev_samples, test_samples], axis=0, sort=False)
samples = samples.reset_index(drop=True)
y = samples['Y']
X = samples.drop('Y', axis=1)
logger.debug('X contains Nan:{}'.format(X.isnull().values.any()))
logger.debug("Input features before PAC:\n{}".format(X))
pca = decomposition.PCA(n_components=n_components)
pca.fit(X)
pca_X = pca.transform(X)
columns = []
for i in range(1, pca_X.shape[1]+1):
columns.append('X'+str(i))
pca_X = pd.DataFrame(pca_X, columns=columns)
logger.debug("Input features after PAC:\n{}".format(pca_X.tail()))
pca_samples = pd.concat([pca_X, y], axis=1)
train_samples = pca_samples.iloc[:train_samples.shape[0]]
train_samples = train_samples.reset_index(drop=True)
logger.debug('Training samples after PCA:\n{}'.format(train_samples))
dev_samples = pca_samples.iloc[train_samples.shape[0]:train_samples.shape[0]+dev_samples.shape[0]]
dev_samples = dev_samples.reset_index(drop=True)
logger.debug('Development samples after PCA:\n{}'.format(dev_samples))
test_samples = pca_samples.iloc[train_samples.shape[0] +dev_samples.shape[0]:]
test_samples = test_samples.reset_index(drop=True)
logger.debug('Testing samples after PCA:\n{}'.format(test_samples))
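# Illustrative addition (not in the original code): log how much variance the retained
# principal components explain; explained_variance_ratio_ is provided by scikit-learn's PCA.
logger.debug('PCA explained variance ratio:\n{}'.format(pca.explained_variance_ratio_))
logger.debug('PCA total explained variance:{}'.format(pca.explained_variance_ratio_.sum()))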
# Normalize each series to the range between -1 and 1
series_max = train_samples.max(axis=0)
series_min = train_samples.min(axis=0)
train_samples = 2 * (train_samples - series_min) / \
(series_max - series_min) - 1
dev_samples = 2 * (dev_samples-series_min) / \
(series_max-series_min) - 1
test_samples = 2 * (test_samples-series_min) / \
(series_max-series_min) - 1
logger.info('Save path:{}'.format(save_path))
logger.info('The size of training samples:{}'.format(
train_samples.shape[0]))
logger.info('The size of development samples:{}'.format(
dev_samples.shape[0]))
logger.info('The size of testing samples:{}'.format(
test_samples.shape[0]))
series_max = pd.DataFrame(series_max, columns=['series_max'])
series_min =
|
pd.DataFrame(series_min, columns=['series_min'])
|
pandas.DataFrame
|
from numpy import dtype
def estado_civil_dummy():
dic_estado={"Separado(a) o divorciado(a)":0,
"Soltero(a)":0,"Casado":1,"En unión libre":1,
"Viudo(a)":0,1.0:1,2.0:1,3.0:0,4.0:0,5.0:0}
return dic_estado
def dic_etnia():
import numpy as np
dic_etnia={"Mestizo":1,'Ninguno de los anteriores':0,"Blanco":1,"Indígena":0,"Negro, mulato (afro descendiente)":1,
"Palenquero":1,np.NaN:0,1.0:1,2.0:1,3.0:1,4.0:1,5.0:1,6.0:1,7.0:1,8.0:0}
return dic_etnia
def cols_names():
names_cols={"actividad_ppal":"employment","sexo":"sex","edad":"age","estado_civil":"couple",
"hijos":"sons","etnia":"ethnicity","Discapacidad":"Disability","educ_años":"educ_years",
"embarazo_hoy":"w_pregnant","lee_escribe":"read_write","estudia":"student",
"n_internet":"internet","Urbano":"Urban"}
return names_cols
def creador_id(data):
try:
data.insert(0,"id",data["DIRECTORIO"]+data["SECUENCIA_P"]+data["ORDEN"]+data["HOGAR"])
data.insert(1,"id_hogar",data["DIRECTORIO"]+data["SECUENCIA_P"])
except:
data.insert(0,"id_hogar",data["DIRECTORIO"]+data["SECUENCIA_P"])
def dic_dtypes():
dtype={"DIRECTORIO":"str",
"SECUENCIA_P":"str",
"ORDEN":"str",
"HOGAR":"str"}
return dtype
def variables_modelo():
variables=["id","id_hogar","ocupado","desocupado","P6020","P6040","ESC","P6080","P6070","P6170","P4030S1A1","P5210S16","P5210S3","P6081","P6083","DPTO_x"]
return variables
def procces_data_month(mes,variables):
import pandas as pd
dtype=dic_dtypes()
Ac=pd.read_csv(f"sets_model/{mes}/Acaracteristicas.csv",sep=";",dtype=dtype)
Ao=
|
pd.read_csv(f"sets_model/{mes}/Aocupados.csv",sep=";",dtype=dtype)
|
pandas.read_csv
|
import os
os.environ["MKL_NUM_THREADS"] = "1" # must be done before numpy import!!
os.environ["NUMEXPR_NUM_THREADS"] = "1" # must be done before numpy import!!
os.environ["OMP_NUM_THREADS"] = "1" # must be done before numpy import!!
import sys
import time
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.metrics import accuracy_score
from sklearn.model_selection import cross_val_predict, train_test_split
import sktime.classifiers.ensemble as ensemble
import sktime.contrib.dictionary_based.boss_ensemble as db
import sktime.contrib.frequency_based.rise as fb
import sktime.contrib.interval_based.tsf as ib
from sktime.classifiers.proximity import ProximityForest
from sktime.utils.load_data import load_from_tsfile_to_dataframe as load_ts
__author__ = "<NAME>"
""" Prototype mechanism for testing classifiers on the UCR format. This mirrors the mechanism use in Java,
https://github.com/TonyBagnall/uea-tsc/tree/master/src/main/java/experiments
but is not yet as engineered. However, if you generate results using the method recommended here, they can be directly
and automatically compared to the results generated in java
Will have both low level version and high level orchestration version soon.
"""
datasets = [
"GunPoint",
"ItalyPowerDemand",
"ArrowHead",
"Coffee",
"Adiac",
"Beef",
"BeetleFly",
"BirdChicken",
"Car",
"CBF",
"ChlorineConcentration",
"CinCECGTorso",
"Computers",
"CricketX",
"CricketY",
"CricketZ",
"DiatomSizeReduction",
"DistalPhalanxOutlineCorrect",
"DistalPhalanxOutlineAgeGroup",
"DistalPhalanxTW",
"Earthquakes",
"ECG200",
"ECG5000",
"ECGFiveDays",
# "ElectricDevices",
"FaceAll",
"FaceFour",
"FacesUCR",
"FiftyWords",
"Fish",
# "FordA",
# "FordB",
"Ham",
# "HandOutlines",
"Haptics",
"Herring",
"InlineSkate",
"InsectWingbeatSound",
"LargeKitchenAppliances",
"Lightning2",
"Lightning7",
"Mallat",
"Meat",
"MedicalImages",
"MiddlePhalanxOutlineCorrect",
"MiddlePhalanxOutlineAgeGroup",
"MiddlePhalanxTW",
"MoteStrain",
"NonInvasiveFetalECGThorax1",
"NonInvasiveFetalECGThorax2",
"OliveOil",
"OSULeaf",
"PhalangesOutlinesCorrect",
"Phoneme",
"Plane",
"ProximalPhalanxOutlineCorrect",
"ProximalPhalanxOutlineAgeGroup",
"ProximalPhalanxTW",
"RefrigerationDevices",
"ScreenType",
"ShapeletSim",
"ShapesAll",
"SmallKitchenAppliances",
"SonyAIBORobotSurface1",
"SonyAIBORobotSurface2",
# "StarlightCurves",
"Strawberry",
"SwedishLeaf",
"Symbols",
"SyntheticControl",
"ToeSegmentation1",
"ToeSegmentation2",
"Trace",
"TwoLeadECG",
"TwoPatterns",
"UWaveGestureLibraryX",
"UWaveGestureLibraryY",
"UWaveGestureLibraryZ",
"UWaveGestureLibraryAll",
"Wafer",
"Wine",
"WordSynonyms",
"Worms",
"WormsTwoClass",
"Yoga",
]
def set_classifier(cls, resampleId):
"""
Basic way of determining the classifier to build. To differentiate settings just add another elif. So, for example, if
you wanted tuned TSF, you just pass TuneTSF and set up the tuning mechanism in the elif.
This may well get superseded, it is just how we have always done it.
:param cls: String indicating which classifier you want
:return: A classifier.
"""
if cls.lower() == 'pf':
return ProximityForest(rand = resampleId)
if cls == 'RISE' or cls == 'rise':
return fb.RandomIntervalSpectralForest(random_state = resampleId)
elif cls == 'TSF' or cls == 'tsf':
return ib.TimeSeriesForest(random_state = resampleId)
elif cls == 'BOSS' or cls == 'boss':
return db.BOSSEnsemble()
# elif classifier == 'EE' or classifier == 'ElasticEnsemble':
# return dist.ElasticEnsemble()
elif cls == 'TSF_Markus':
return ensemble.TimeSeriesForestClassifier()
else:
return 'UNKNOWN CLASSIFIER'
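# --- Illustrative usage sketch (not part of the original script) ---
# The classifier name and resample id below are arbitrary examples:
# clf = set_classifier('TSF', 0)
# clf.fit(trainX, trainY) and clf.predict_proba(testX) then follow the usual sktime/sklearn pattern.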
def run_experiment(problem_path, results_path, cls_name, dataset, resampleID=0, overwrite=False, format=".ts", train_file=False):
"""
Method to run a basic experiment and write the results to files called testFold<resampleID>.csv and, if required,
trainFold<resampleID>.csv.
:param problem_path: Location of problem files, full path.
:param results_path: Location of where to write results. Any required directories will be created
:param cls_name: determines which classifier to use, as defined in set_classifier. This assumes predict_proba is
implemented, to avoid predicting twice. May break some classifiers though
:param dataset: Name of problem. Files must be <problem_path>/<dataset>/<dataset>+"_TRAIN"+format, same for "_TEST"
:param resampleID: Seed for resampling. If set to 0, the default train/test split from file is used. Also used in output file name.
:param overwrite: if set to False, this will only build results if there is not a result file already present. If
True, it will overwrite anything already there
:param format: Valid formats are ".ts", ".arff" and ".long". For more info on format, see
https://github.com/alan-turing-institute/sktime/blob/master/examples/Loading%20Data%20Examples.ipynb
:param train_file: whether to generate train files or not. If true, it performs a 10x CV on the train set and saves the resulting train predictions.
:return:
"""
cls_name = cls_name.upper()
build_test = True
if not overwrite:
full_path = str(results_path)+"/"+str(cls_name)+"/Predictions/" + str(dataset) +"/testFold"+str(resampleID)+".csv"
if os.path.exists(full_path):
print(full_path+" Already exists and overwrite set to false, not building Test")
build_test=False
if train_file:
full_path = str(results_path) + "/" + str(cls_name) + "/Predictions/" + str(dataset) + "/trainFold" + str(
resampleID) + ".csv"
if os.path.exists(full_path):
print(full_path + " Already exists and overwrite set to false, not building Train")
train_file = False
if train_file == False and build_test ==False:
return
# TO DO: Automatically differentiate between problem types, currently only works with .ts
trainX, trainY = load_ts(problem_path + dataset + '/' + dataset + '_TRAIN' + format)
testX, testY = load_ts(problem_path + dataset + '/' + dataset + '_TEST' + format)
if resampleID != 0:
allLabels = np.concatenate((trainY, testY), axis = None)
allData =
|
pd.concat([trainX, testX])
|
pandas.concat
|
"""
This module contains main flow.
To generate submission run: `python main.py`
This is a regression approach explained here: https://www.kaggle.com/c/petfinder-adoption-prediction/discussion/76107
"""
import pandas as pd
import numpy as np
from data_utils import get_dataset
from preprocessing import remove_object_cols
from models import kfold_lgb, get_logistic
from submission_utils import OptimizedRounder, generate_submission
from evaluation_utils import sklearn_quadratic_kappa
TARGET_COL = 'AdoptionSpeed'
if __name__ == '__main__':
# step 1 - load and transform data
# load train and test tabular datasets
datasets = {dataset_type: get_dataset(dataset_type) for dataset_type in ('train', 'test')}
# remove all string columns from dataset
# todo: investigate if there are no int/float categorical cols left that haven't been one-hot encoded
cleaned_datasets = {dataset_type: remove_object_cols(dataset) for dataset_type, dataset in datasets.items()}
# extract training labels
y_train = cleaned_datasets['train'][TARGET_COL]
print(cleaned_datasets)
# step 2 - train a model and get its outputs
# get outputs from k-fold CV LGBM training
outputs = kfold_lgb(cleaned_datasets)
outputs1 = get_logistic(cleaned_datasets)
# step 3 - round the outputs, compute quadratic kappa and generate submission
# initialize and train OptimizedRounder
optR = OptimizedRounder()
optR.fit(outputs['train'], y_train.values)
# get rounding coefficients
coefficients = optR.coefficients()
# round outputs for training/test set
rounded_train_outputs = optR.predict(outputs['train'], coefficients).astype(int)
rounded_test_outputs = optR.predict(outputs['test'].mean(axis=1), coefficients).astype(int)
# compute quadratic kappa for train set and print it
qwk_train = sklearn_quadratic_kappa(y_train.values, rounded_train_outputs)
print(f"\nTrain QWK: {qwk_train}")
# compare models using chi sq metrics
model_comparison = compare_models_chisq(rounded_test_outputs, np.around(outputs1['test']))
# print distributions of predictions vs. true distributions
print("\nTrue Distribution:")
print(pd.value_counts(y_train, normalize=True).sort_index())
print("\nTrain Predicted Distribution:")
print(
|
pd.value_counts(rounded_train_outputs, normalize=True)
|
pandas.value_counts
|
from __future__ import print_function
from __future__ import division
from builtins import str
from builtins import zip
from builtins import range
import pickle
import codecs
import numpy as np
import scipy.sparse as sparse
import subprocess
import tempfile
from collections import namedtuple
from pandas import DataFrame, Series
from fonduer.snorkel.annotations import FeatureAnnotator
from fonduer.snorkel.models import Candidate
from fonduer.snorkel.models.meta import *
from fonduer.snorkel.udf import UDF, UDFRunner
from fonduer.snorkel.utils import (
matrix_conflicts,
matrix_coverage,
matrix_overlaps,
matrix_tp,
matrix_fp,
matrix_fn,
matrix_tn
)
from fonduer.snorkel.utils import remove_files
from fonduer.features.features import get_all_feats, get_organic_image_feats
# Used to conform to existing annotation key API call
# Note that this annotation matrix class cannot be replaced with the snorkel one
# since we do not have ORM-backed key objects but rather a simple python list.
_TempKey = namedtuple('TempKey', ['id', 'name'])
def _to_annotation_generator(fns):
""""
Generic method which takes a set of functions, and returns a generator that yields
function.__name__, function result pairs.
"""
def fn_gen(c):
for f in fns:
yield f.__name__, f(c)
return fn_gen
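# --- Illustrative usage sketch (not part of the original module) ---
# fn_gen yields (function name, function result) pairs for a candidate c, e.g.:
# def has_digit(c): return any(ch.isdigit() for ch in str(c))
# def length(c): return len(str(c))
# gen = _to_annotation_generator([has_digit, length])
# list(gen("abc123")) -> [('has_digit', True), ('length', 6)]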
class csr_AnnotationMatrix(sparse.csr_matrix):
"""
An extension of the scipy.sparse.csr_matrix class for holding sparse annotation matrices
and related helper methods.
"""
def __init__(self, arg1, **kwargs):
# # Note: Currently these need to return None if unset, otherwise matrix copy operations break...
# self.session = SnorkelSession()
# Map candidate id to row id
self.candidate_index = kwargs.pop('candidate_index', {})
# Map row id to candidate id
self.row_index = kwargs.pop('row_index', [])
# Map col id to key str
self.keys = kwargs.pop('keys', [])
# Map key str to col number
self.key_index = kwargs.pop('key_index', {})
# Note that scipy relies on the first three letters of the class to define matrix type...
super(csr_AnnotationMatrix, self).__init__(arg1, **kwargs)
def get_candidate(self, session, i):
"""Return the Candidate object corresponding to row i"""
return session.query(Candidate)\
.filter(Candidate.id == self.row_index[i]).one()
def get_row_index(self, candidate):
"""Return the row index of the Candidate"""
return self.candidate_index[candidate.id]
def get_key(self, j):
"""Return the AnnotationKey object corresponding to column j"""
return _TempKey(j, self.keys[j])
def get_col_index(self, key):
"""Return the cow index of the AnnotationKey"""
return self.key_index[key.id]
def stats(self):
"""Return summary stats about the annotations"""
raise NotImplementedError()
def lf_stats(self, labels=None, est_accs=None):
"""Returns a pandas DataFrame with the LFs and various per-LF statistics"""
lf_names = self.keys
# Default LF stats
col_names = ['j', 'Coverage', 'Overlaps', 'Conflicts']
d = {
'j' : list(range(self.shape[1])),
'Coverage' : Series(data=matrix_coverage(self), index=lf_names),
'Overlaps' : Series(data=matrix_overlaps(self), index=lf_names),
'Conflicts' : Series(data=matrix_conflicts(self), index=lf_names)
}
if labels is not None:
col_names.extend(['TP', 'FP', 'FN', 'TN', 'Empirical Acc.'])
ls = np.ravel(labels.todense() if sparse.issparse(labels) else labels)
tp = matrix_tp(self, ls)
fp = matrix_fp(self, ls)
fn = matrix_fn(self, ls)
tn = matrix_tn(self, ls)
ac = (tp+tn) / (tp+tn+fp+fn)
d['Empirical Acc.'] = Series(data=ac, index=lf_names)
d['TP'] = Series(data=tp, index=lf_names)
d['FP'] = Series(data=fp, index=lf_names)
d['FN'] = Series(data=fn, index=lf_names)
d['TN'] = Series(data=tn, index=lf_names)
if est_accs is not None:
col_names.append('Learned Acc.')
d['Learned Acc.'] =
|
Series(data=est_accs, index=lf_names)
|
pandas.Series
|
# Map exists which all robot particles operate in
# Particles each have a motion model and a measurement model
# Need to sample:
# Motion model for particle (given location of particle, map)
# Motion model (in this case) comes from log + noise.
# Measurement model for particle (given location, map)
# True measurements come from log
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import copy
from scipy.spatial import distance
import base64
from IPython.display import HTML
import montecarlo_localization as mcl
plt.style.use('ggplot')
import matplotlib.animation as animation
wean_hall_map = mcl.occupancy_map('data/map/wean.dat')
logdata = mcl.load_log('data/log/robotdata1.log.gz')
# Initialize 30000 particles uniformly in valid locations on the map
laser = mcl.laser_sensor(stdv_cm=20, uniform_weight=0.2)
particle_list = [mcl.robot_particle(wean_hall_map, laser, log_prob_descale=50,
sigma_fwd_pct=0.3, sigma_theta_pct=0.2)
for _ in range(30000)]
scan_data_gen = (msg for msg in logdata.query('type > 0.1').values)
fig, ax = plt.subplots(figsize=(40,40))
#mcl.draw_map_state(wean_hall_map, particle_list[::20], ax=ax)
new_particle_list = mcl.mcl_update(particle_list, next(scan_data_gen))
weights =
|
pd.Series([p.weight for p in new_particle_list])
|
pandas.Series
|
import pandas as pd
import torch
from sklearn.preprocessing import StandardScaler
from Load_data import LoadDataset
import matplotlib.pyplot as plt
import numpy as np
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset
import torch
import plotly.graph_objects as go
def getDLo(x, y, params):
"""Given the inputs, labels and dataloader parameters, returns a pytorch dataloader
Args:
x ([list]): [inputs list]
y ([list]): [target variable list]
params ([dict]): [Parameters pertaining to dataloader eg. batch size]
"""
training_set = LoadDataset(x, y)
training_generator = torch.utils.data.DataLoader(training_set, **params)
return training_generator
def train_val_split(x, y, train_pct):
"""Given the input x and output labels y, splits the dataset into train, validation and test datasets
Args:
x ([list]): [A list of all the input sequences]
y ([list]): [A list of all the outputs (floats)]
train_pct ([float]): [% of data in the training set]
"""
# Perform a train/validation split (it is sequential here since we're working with time series data)
N = len(x)
trainX = x[:int(train_pct * N)]
trainY = y[:int(train_pct * N)]
valX = x[int(train_pct * N):]
valY = y[int(train_pct * N):]
trainX = torch.from_numpy(trainX).float()
trainY = torch.from_numpy(trainY).float()
valX = torch.from_numpy(valX).float()
valY = torch.from_numpy(valY).float()
return (trainX, trainY, valX, valY)
def standardizeData(X, SS = None, train = False):
"""Given a list of input features, standardizes them to bring them onto a homogenous scale
Args:
X ([dataframe]): [A dataframe of all the input values]
SS ([object], optional): [A StandardScaler object that holds mean and std of a standardized dataset]. Defaults to None.
train (bool, optional): [If False, means validation set to be loaded and SS needs to be passed to scale it]. Defaults to False.
"""
if train:
SS = StandardScaler()
new_X = SS.fit_transform(X)
return (new_X, SS)
else:
new_X = SS.transform(X)
return (new_X, None)
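# --- Illustrative usage sketch (not part of the original module; the arrays below are hypothetical) ---
# train_X = np.array([[1.0], [2.0], [3.0]])
# val_X = np.array([[1.5], [2.5]])
# train_scaled, scaler = standardizeData(train_X, train=True)
# val_scaled, _ = standardizeData(val_X, SS=scaler)  # reuse the scaler fitted on the training data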
def time_series_plot(census_path, theft_path = "./Data/Bicycle_Thefts_Toronto_geo.csv", year = None, threshold = None, isprint = True):
# Read data and clean
df_census = pd.read_csv(census_path, index_col = 0, dtype = {"GeoUID":str})
df_census = df_census[df_census["Region Name"] == "Toronto"]
df_census = df_census[["GeoUID", "Area (sq km)", "v_CA16_5807: Bicycle"]].replace({"x":np.nan, "F":np.nan}) # Keep important variables
df_census["GeoUID"] = df_census["GeoUID"].apply(lambda x: x if len(x.split(".")[-1]) > 1 else x + "0") # Fix important bug
for col in df_census: # Type change
df_census[col] = df_census[col].astype(float) if col != "GeoUID" else df_census[col]
df_theft = pd.read_csv(theft_path, dtype = {"GeoUID":str})
df_theft["GeoUID"] = df_theft["GeoUID"].apply(lambda x: x if len(x.split(".")[-1]) > 1 else x + "0") # Fix important bug
# Data process: Get Total theft by sum registers and mean cost of bike by CT and date
df_theft["Occurrence_Date"] =
|
pd.to_datetime(df_theft["Occurrence_Date"])
|
pandas.to_datetime
|
import numpy as np
import pytest
from pandas._libs.tslibs import iNaT
from pandas._libs.tslibs.period import IncompatibleFrequency
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import PeriodArray, period_array
@pytest.mark.parametrize(
"data, freq, expected",
[
([pd.Period("2017", "D")], None, [17167]),
([pd.Period("2017", "D")], "D", [17167]),
([2017], "D", [17167]),
(["2017"], "D", [17167]),
([pd.Period("2017", "D")], pd.tseries.offsets.Day(), [17167]),
([pd.Period("2017", "D"), None], None, [17167, iNaT]),
(pd.Series(pd.date_range("2017", periods=3)), None, [17167, 17168, 17169]),
(pd.date_range("2017", periods=3), None, [17167, 17168, 17169]),
(pd.period_range("2017", periods=4, freq="Q"), None, [188, 189, 190, 191]),
],
)
def test_period_array_ok(data, freq, expected):
result = period_array(data, freq=freq).asi8
expected = np.asarray(expected, dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
def test_period_array_readonly_object():
# https://github.com/pandas-dev/pandas/issues/25403
pa = period_array([pd.Period("2019-01-01")])
arr = np.asarray(pa, dtype="object")
arr.setflags(write=False)
result = period_array(arr)
tm.assert_period_array_equal(result, pa)
result = pd.Series(arr)
tm.assert_series_equal(result, pd.Series(pa))
result = pd.DataFrame({"A": arr})
tm.assert_frame_equal(result, pd.DataFrame({"A": pa}))
def test_from_datetime64_freq_changes():
# https://github.com/pandas-dev/pandas/issues/23438
arr = pd.date_range("2017", periods=3, freq="D")
result = PeriodArray._from_datetime64(arr, freq="M")
expected = period_array(["2017-01-01", "2017-01-01", "2017-01-01"], freq="M")
tm.assert_period_array_equal(result, expected)
@pytest.mark.parametrize(
"data, freq, msg",
[
(
[
|
pd.Period("2017", "D")
|
pandas.Period
|
# Copyright (c) 2022. RadonPy developers. All rights reserved.
# Use of this source code is governed by a BSD-3-style
# license that can be found in the LICENSE file.
# ******************************************************************************
# sim.helper module
# ******************************************************************************
import os
import re
import datetime
import shutil
import platform
import importlib
import pandas as pd
import rdkit
from ..core import utils
from ..__init__ import __version__ as radonpy_ver
from .lammps import LAMMPS
psi4_ver = None
try:
from psi4 import __version__ as psi4_ver
except ImportError:
psi4_ver = None
__version__ = '0.2.0'
class Pipeline_Helper():
def __init__(self, read_csv=None, load_monomer=False, **kwargs):
# Set default values
self.data = {
'DBID': os.environ.get('RadonPy_DBID'),
'monomer_ID': None,
'smiles_list': None,
'smiles_ter_1': '*C',
'smiles_ter_2': None,
'ter_ID_1': 'CH3',
'ter_ID_2': None,
'qm_method': 'wb97m-d3bj',
'charge': 'RESP',
'monomer_dir': None,
'copoly_ratio_list': '1',
'copoly_type': 'random',
'input_natom': 1000,
'input_nchain': 10,
'ini_density': 0.05,
'temp': 300.0,
'press': 1.0,
'input_tacticity': 'atactic',
'tacticity': None,
'remarks': '',
'date': None,
'Python_ver': None,
'RadonPy_ver': None,
'RDKit_ver': None,
'Psi4_ver': None,
'LAMMPS_ver': None,
}
# Set number of parallel
self.omp = int(os.environ.get('RadonPy_OMP', 1))
self.mpi = int(os.environ.get('RadonPy_MPI', utils.cpu_count()))
self.gpu = int(os.environ.get('RadonPy_GPU', 0))
self.retry_eq = int(os.environ.get('RadonPy_RetryEQ', 3))
self.psi4_omp = int(os.environ.get('RadonPy_OMP_Psi4', 4))
self.psi4_mem = int(os.environ.get('RadonPy_MEM_Psi4', 1000))
self.conf_mm_omp = int(os.environ.get('RadonPy_Conf_MM_OMP', 1))
self.conf_mm_mpi = int(os.environ.get('RadonPy_Conf_MM_MPI', utils.cpu_count()))
self.conf_mm_gpu = int(os.environ.get('RadonPy_Conf_MM_GPU', 0))
self.conf_mm_mp = int(os.environ.get('RadonPy_Conf_MM_MP', 0))
self.conf_psi4_omp = int(os.environ.get('RadonPy_Conf_Psi4_OMP', self.psi4_omp))
self.conf_psi4_mp = int(os.environ.get('RadonPy_Conf_Psi4_MP', 0))
# Set work, save, temp directories
self.work_dir = './%s' % self.data['DBID']
if not os.path.exists(self.work_dir):
os.makedirs(self.work_dir)
self.save_dir = os.path.join(self.work_dir, os.environ.get('RadonPy_Save_Dir', 'analyze'))
if not os.path.exists(self.save_dir):
os.makedirs(self.save_dir)
self.tmp_dir = os.environ.get('RadonPy_TMP_Dir', None)
if self.tmp_dir is not None and not os.path.exists(self.tmp_dir):
os.makedirs(self.tmp_dir)
self.mols = []
self.monomer_data = []
# Read csv data
if read_csv is not None:
self.read_csv(read_csv)
# Set input data
now = datetime.datetime.now()
self.indata = {
'DBID': os.environ.get('RadonPy_DBID'),
# 'Monomer_ID': os.environ.get('RadonPy_Monomer_ID', self.data['monomer_ID']),
# 'smiles_list': os.environ.get('RadonPy_SMILES', self.data['smiles_list']),
# 'smiles_ter_1': os.environ.get('RadonPy_SMILES_TER', self.data['smiles_ter_1']),
# 'smiles_ter_2': os.environ.get('RadonPy_SMILES_TER2', self.data['smiles_ter_2']),
# 'ter_ID_1': os.environ.get('RadonPy_TER_ID', self.data['ter_ID_1']),
# 'ter_ID_2': os.environ.get('RadonPy_TER_ID2', self.data['ter_ID_2']),
# 'qm_method': os.environ.get('RadonPy_QM_Method', self.data['qm_method']),
# 'charge': os.environ.get('RadonPy_Charge', self.data['charge']),
# 'monomer_dir': os.environ.get('RadonPy_Monomer_Dir', self.data['monomer_dir']),
# 'copoly_ratio_list': os.environ.get('RadonPy_Copoly_Ratio', self.data['copoly_ratio_list']),
# 'copoly_type': os.environ.get('RadonPy_Copoly_Type', self.data['copoly_type']),
# 'input_natom': int(os.environ.get('RadonPy_NAtom', self.data['input_natom'])),
# 'input_nchain': int(os.environ.get('RadonPy_NChain', self.data['input_nchain'])),
# 'ini_density': float(os.environ.get('RadonPy_Ini_Density', self.data['ini_density'])),
# 'temp': float(os.environ.get('RadonPy_Temp', self.data['temp'])),
# 'press': float(os.environ.get('RadonPy_Press', self.data['press'])),
# 'input_tacticity': os.environ.get('RadonPy_Tacticity', self.data['input_tacticity']),
# 'tacticity': str(''),
# Metadata are always overwritten by new information
'remarks': os.environ.get('RadonPy_Remarks', str('')),
'date': '%04i-%02i-%02i-%02i-%02i-%02i' % (now.year, now.month, now.day, now.hour, now.minute, now.second),
'Python_ver': platform.python_version(),
'RadonPy_ver': radonpy_ver,
'RDKit_ver': rdkit.__version__,
'Psi4_ver': psi4_ver,
'LAMMPS_ver': LAMMPS().get_version(),
}
envkeys = {
'RadonPy_Monomer_ID': 'monomer_ID',
'RadonPy_SMILES': 'smiles_list',
'RadonPy_SMILES_TER': 'smiles_ter_1',
'RadonPy_SMILES_TER2': 'smiles_ter_2',
'RadonPy_TER_ID': 'ter_ID_1',
'RadonPy_TER_ID2': 'ter_ID_2',
'RadonPy_QM_Method': 'qm_method',
'RadonPy_Charge': 'charge',
'RadonPy_Monomer_Dir': 'monomer_dir',
'RadonPy_TER_Dir': 'ter_dir',
'RadonPy_Copoly_Ratio': 'copoly_ratio_list',
'RadonPy_Copoly_Type': 'copoly_type',
'RadonPy_NAtom': 'input_natom',
'RadonPy_NChain': 'input_nchain',
'RadonPy_Ini_Density': 'ini_density',
'RadonPy_Temp': 'temp',
'RadonPy_Press': 'press',
'RadonPy_Tacticity': 'input_tacticity',
}
for k, v in envkeys.items():
if os.environ.get(k):
self.indata[v] = os.environ.get(k)
# Import preset modules
self.preset = type('', (), {})()
preset_dir = 'preset'
preset_files = os.listdir(os.path.join(os.path.dirname(os.path.realpath(__file__)), preset_dir))
for pf in preset_files:
if pf.endswith('py'):
path = os.path.join(preset_dir, pf)
mname = os.path.splitext(os.path.basename(pf))[0]
if mname == '__init__':
continue
mpath = '..' + os.path.splitext(path)[0].replace(os.path.sep, '.')
try:
m = importlib.import_module(mpath, package=__name__)
except ImportError:
utils.radon_print('Cannot import %s' % mpath)
continue
setattr(self.preset, mname, m)
# Initialize preset options
if hasattr(m, '__version__'):
self.indata['preset_%s_ver' % mname] = m.__version__
if hasattr(m, 'helper_options'):
mop = m.helper_options()
for k, v in mop.items():
if k not in self.data.keys():
self.data[k] = v
if os.environ.get('RadonPy_%s' % k):
self.indata[k] = v
# Overwritten by input data
self.data.update(self.indata)
# Set monomer dir
self.monomer_dir = self.data['monomer_dir'] if self.data['monomer_dir'] else self.save_dir
# Parse smiles list
self.smi_list = self.data['smiles_list'].split(',') if self.data['smiles_list'] else []
# Set input of copolymer
if len(self.smi_list) == 1:
self.data['copoly_ratio_list'] = '1'
self.copoly_ratio = [1]
self.data['copoly_type'] = ''
else:
self.copoly_ratio = [float(x) for x in str(self.data['copoly_ratio_list']).split(',')]
# Set monomer ID
if self.data['monomer_ID']:
self.monomer_id = self.data['monomer_ID'].split(',')
else:
self.monomer_id = [None]*len(self.smi_list)
# Initialize monomer data
if load_monomer:
self.load_monomer_data()
elif len(self.monomer_data) == 0:
for i, smi in enumerate(self.smi_list):
mon = {
'monomer_ID': None,
'smiles': smi,
'qm_method': self.data['qm_method'],
'charge': self.data['charge'],
'copoly_ratio': self.copoly_ratio[i],
'remarks': self.data['remarks'],
'date': self.data['date'],
'Python_ver': self.data['Python_ver'],
'RadonPy_ver': self.data['RadonPy_ver'],
'RDKit_ver': self.data['RDKit_ver'],
'Psi4_ver': self.data['Psi4_ver'],
}
if self.data['monomer_ID']:
mon['monomer_ID'] = self.monomer_id[i]
self.data['monomer_ID_%i' % (i+1)] = self.monomer_id[i]
self.data['smiles_%i' % (i+1)] = smi
self.monomer_data.append(mon)
def read_csv(self, file='results.csv', overwrite=True):
if not os.path.isfile(os.path.join(self.save_dir, file)):
utils.radon_print('Cannot find monomer data.', level=3)
df = pd.read_csv(os.path.join(self.save_dir, file), index_col=0)
data = df.iloc[0].to_dict()
data['DBID'] = df.index.tolist()[0]
if 'remarks' in data.keys() and data['remarks'] is None:
data['remarks'] = str('')
if data['DBID'] != self.data['DBID']:
utils.radon_print('DBID in %s (%s) does not match the input DBID (%s).'
% (file, data['DBID'], self.data['DBID']), level=3)
if overwrite:
self.data = {**self.data, **data}
else:
self.data = {**data, **self.data}
if len(self.monomer_data) == 0:
now = datetime.datetime.now()
mon = {
'monomer_ID': None,
'smiles': None,
'qm_method': 'wb97m-d3bj',
'charge': 'RESP',
'copoly_ratio': None,
'remarks': '',
'date': '%i-%i-%i-%i-%i-%i' % (now.year, now.month, now.day, now.hour, now.minute, now.second),
'Python_ver': None,
'RadonPy_ver': None,
'RDKit_ver': None,
'Psi4_ver': None,
}
smi_list = self.data['smiles_list'].split(',')
self.monomer_data = [dict(mon) for x in range(len(smi_list))]  # copy per monomer so later per-index updates stay independent
for k, v in self.data.items():
if re.search('_monomer\d+', k):
m = re.search('(.+)_monomer(\d+)', k)
key = str(m.group(1))
idx = int(m.group(2))-1
self.monomer_data[idx][key] = v
elif re.search('smiles_\d+', k):
m = re.search('smiles_(\d+)', k)
idx = int(m.group(1))-1
self.monomer_data[idx]['smiles'] = v
elif re.search('monomer_ID_\d+', k):
m = re.search('monomer_ID_(\d+)', k)
idx = int(m.group(1))-1
self.monomer_data[idx]['monomer_ID'] = v
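# Illustrative note (an assumption for clarity, not part of the original class): the loop above
# unflattens per-monomer columns of results.csv back into monomer_data, e.g.
# 'qm_method_monomer2' -> self.monomer_data[1]['qm_method']
# 'smiles_1' -> self.monomer_data[0]['smiles']
# 'monomer_ID_3' -> self.monomer_data[2]['monomer_ID']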
def to_csv(self, file='results.csv'):
now = datetime.datetime.now()
self.data['date'] = '%04i-%02i-%02i-%02i-%02i-%02i' % (now.year, now.month, now.day, now.hour, now.minute, now.second)
if os.path.isfile(os.path.join(self.save_dir, file)):
shutil.copyfile(os.path.join(self.save_dir, file),
os.path.join(self.save_dir, '%s_%04i-%02i-%02i-%02i-%02i-%02i.csv' % (file, now.year, now.month, now.day, now.hour, now.minute, now.second)))
df = pd.DataFrame(self.data, index=[0]).set_index('DBID')
df.to_csv(os.path.join(self.save_dir, file))
def read_monomer_csv(self, idx, file, overwrite=True):
monomer_df = pd.read_csv(os.path.join(self.monomer_dir, file), index_col=0)
mon_data = {'monomer_ID': monomer_df.index.tolist()[0], **monomer_df.iloc[0].to_dict()}
if overwrite:
self.monomer_data[idx] = {**self.monomer_data[idx], **mon_data}
else:
self.monomer_data[idx] = {**mon_data, **self.monomer_data[idx]}
for k in self.monomer_data[idx].keys():
if k == 'monomer_ID':
self.data['monomer_ID_%i' % (idx+1)] = self.monomer_data[idx]['monomer_ID']
elif k == 'smiles':
self.data['smiles_%i' % (idx+1)] = self.monomer_data[idx]['smiles']
else:
self.data['%s_monomer%i' % (k, idx+1)] = self.monomer_data[idx][k]
def to_monomer_csv(self, idx, file=None):
now = datetime.datetime.now()
self.monomer_data[idx]['date'] = '%04i-%02i-%02i-%02i-%02i-%02i' % (now.year, now.month, now.day, now.hour, now.minute, now.second)
if self.data['monomer_ID']:
data_df =
|
pd.DataFrame(self.monomer_data[idx], index=[0])
|
pandas.DataFrame
|
from io import StringIO
from copy import deepcopy
import numpy as np
import pandas as pd
import re
from glypnirO_GUI.get_uniprot import UniprotParser
from sequal.sequence import Sequence
from sequal.resources import glycan_block_dict
sequence_column_name = "Peptide\n< ProteinMetrics Confidential >"
glycans_column_name = "Glycans\nNHFAGNa"
starting_position_column_name = "Starting\nposition"
modifications_column_name = "Modification Type(s)"
observed_mz = "Calc.\nmass (M+H)"
protein_column_name = "Protein Name"
rt = "Scan Time"
selected_aa = {"N", "S", "T"}
regex_glycan_number_pattern = "\d+"
glycan_number_regex = re.compile(regex_glycan_number_pattern)
regex_pattern = "\.[\[\]\w\.\+\-]*\."
sequence_regex = re.compile(regex_pattern)
uniprot_regex = re.compile("(?P<accession>[OPQ][0-9][A-Z0-9]{3}[0-9]|[A-NR-Z][0-9]([A-Z][A-Z0-9]{2}[0-9]){1,2})(?P<isoform>-\d)?")
glycan_regex = re.compile("(\w+)\((\d+)\)")
def filter_U_only(df):
unique_glycan = df["Glycans"].unique()
if len(unique_glycan) > 1 or True not in np.isin(unique_glycan, "U"):
# print(unique_glycan)
return True
return False
def filter_with_U(df):
unique_glycan = df["Glycans"].unique()
if len(unique_glycan) > 1 \
and \
True in np.isin(unique_glycan, "U"):
return True
return False
def get_mod_value(amino_acid):
if amino_acid.mods:
if amino_acid.mods[0].value.startswith("+"):
return float(amino_acid.mods[0].value[1:])
else:
return -float(amino_acid.mods[0].value[1:])
else:
return 0
def load_fasta(fasta_file_path, selected=None, selected_prefix=""):
with open(fasta_file_path, "rt") as fasta_file:
result = {}
current_seq = ""
for line in fasta_file:
line = line.strip()
if line.startswith(">"):
if selected:
if selected_prefix + line[1:] in selected:
result[line[1:]] = ""
current_seq = line[1:]
else:
result[line[1:]] = ""
current_seq = line[1:]
else:
result[current_seq] += line
return result
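# --- Illustrative usage sketch (not part of the original module; the path and header are hypothetical) ---
# load_fasta returns a dict mapping each FASTA header (without '>') to its full sequence:
# sequences = load_fasta("proteins.fasta")
# subset = load_fasta("proteins.fasta", selected=[">sp|P12345|EXAMPLE"], selected_prefix=">")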
class Result:
def __init__(self, df):
self.df = df
self.empty = df.empty
def calculate_proportion(self, occupancy=True):
df = self.df.copy()
#print(df)
if not occupancy:
df = df[df["Glycans"] != "U"]
if "Peptides" in df.columns:
gr = [# "Isoform",
"Peptides", "Position"]
else:
gr = [# "Isoform",
"Position"]
for _, g in df.groupby(gr):
total = g["Value"].sum()
for i, r in g.iterrows():
df.at[i, "Value"] = r["Value"] / total
return df
def to_summary(self, df=None, name="", trust_byonic=False, occupancy=True):
if df is None:
df = self.df
if not occupancy:
df = df[df["Glycans"] != "U"]
if trust_byonic:
temp = df.set_index([# "Isoform",
"Position", "Glycans"])
else:
temp = df.set_index([# "Isoform",
"Peptides", "Glycans", "Position"])
temp.rename(columns={"Value": name}, inplace=True)
return temp
class GlypnirOComponent:
def __init__(self, filename, area_filename, replicate_id, condition_id, protein_name, minimum_score=0, trust_byonic=False, legacy=False):
if type(filename) == pd.DataFrame:
data = filename.copy()
else:
data = pd.read_excel(filename, sheet_name="Spectra")
if type(area_filename) == pd.DataFrame:
file_with_area = area_filename
else:
if area_filename.endswith("xlsx"):
file_with_area = pd.read_excel(area_filename)
else:
file_with_area = pd.read_csv(area_filename, sep="\t")
data["Scan number"] = pd.to_numeric(data["Scan #"].str.extract("scan=(\d+)", expand=False))
data = pd.merge(data, file_with_area, left_on="Scan number", right_on="First Scan")
self.protein_name = protein_name
self.data = data.sort_values(by=['Area'], ascending=False)
self.replicate_id = replicate_id
self.condition_id = condition_id
self.data = data[data["Area"].notnull()]
self.data = self.data[(self.data["Score"] >= minimum_score) &
(self.data[protein_column_name].str.contains(protein_name))
# (data["Protein Name"] == ">"+protein_name) &
]
self.data = self.data[~self.data[protein_column_name].str.contains(">Reverse")]
if len(self.data.index) > 0:
self.empty = False
else:
self.empty = True
self.row_to_glycans = {}
self.glycan_to_row = {}
self.trust_byonic = trust_byonic
self.legacy = legacy
self.sequon_glycosites = set()
self.glycosylated_seq = set()
def calculate_glycan(self, glycan):
current_mass = 0
current_string = ""
for i in glycan:
current_string += i
if i == ")":
s = glycan_regex.search(current_string)
if s:
name = s.group(1)
amount = s.group(2)
current_mass += glycan_block_dict[name]*int(amount)
current_string = ""
return current_mass
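    # Illustrative example (actual residue names depend on glycan_block_dict):
    # calculate_glycan("HexNAc(2)Hex(3)") returns
    # 2 * glycan_block_dict["HexNAc"] + 3 * glycan_block_dict["Hex"].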
def process(self):
# entries_number = len(self.data.index)
# if analysis == "N-glycan":
# expand_window = 2
# self.data["total_number_of_asn"] = pd.Series([0]*entries_number, index=self.data.index, dtype=int)
# self.data["total_number_of_n-linked_sequon"] = pd.Series([0]*entries_number, index=self.data.index, dtype=int)
# self.data["total_number_of_hexnac"] = pd.Series([0]*entries_number, index=self.data.index, dtype=int)
# self.data["total_number_of_deamidation"] = pd.Series([0]*entries_number, index=self.data.index, dtype=int)
# self.data["total_number_of_modded_asn"] = pd.Series([0]*entries_number, index=self.data.index, dtype=int)
# self.data["total_number_of_unmodded_asn"] = pd.Series([0] * entries_number, index=self.data.index, dtype=int)
# elif analysis == "O-glycan":
# self.data["total_number_of_hex"] = pd.Series([0]*entries_number, index=self.data.index, dtype=int)
# self.data["total_number_of_modded_ser_thr"] = pd.Series([0]*entries_number, index=self.data.index, dtype=int)
# self.data["total_number_of_unmodded_ser_or_thr"] = pd.Series([0]*entries_number, index=self.data.index, dtype=int)
# self.data["o_glycosylation_status"] = pd.Series([False]*entries_number, index=self.data.index, dtype=bool)
for i, r in self.data.iterrows():
glycan_dict = {}
search = sequence_regex.search(r[sequence_column_name])
seq = Sequence(search.group(0))
stripped_seq = seq.to_stripped_string()
# modifications = {}
# if pd.notnull(r[modifications_column_name]):
#
# for mod in r[modifications_column_name].split(","):
# number = 1
# if "*" in mod:
# m = mod.split("*")
# minimod = Sequence(m[0].strip())
# number = int(m[1].strip())
#
# else:
# minimod = Sequence(mod.strip())
# for mo in minimod[0].mods:
# if mo.value not in modifications:
# modifications[mo.value] = {}
# modifications[mo.value][minimod[0].value] = {"mod": deepcopy(mo),
# "number": number}
# #if minimod[0].mods[0].value not in modifications:
# # modifications[minimod[0].mods[0].value] = {}
# #modifications[minimod[0].mods[0].value][minimod[0].value] = {"mod": deepcopy(minimod[0].mods[0]),
# # "number": number}
#
# if minimod[0].value == "N":
# if analysis == "N-glycan":
# for mo in minimod[0].mods:
# if mo.value == 1:
# #if minimod[0].mods[0].value == 1:
# self.data.at[i, "total_number_of_deamidation"] += number
# self.data.at[i, "total_number_of_modded_asn"] += number
# elif minimod[0].value in "ST":
# if analysis == "O-glycan":
# for mo in minimod[0].mods:
# self.data.at[i, "total_number_of_modded_ser_thr"] += number
glycans = []
if pd.notnull(r[glycans_column_name]):
glycans = r[glycans_column_name].split(",")
if search:
self.data.at[i, "stripped_seq"] = stripped_seq.rstrip(".").lstrip(".")
origin_seq = r[starting_position_column_name] - 1
glycan_reordered = []
self.data.at[i, "origin_start"] = origin_seq
self.data.at[i, "Ending Position"] = r[starting_position_column_name] + len(self.data.at[i, "stripped_seq"])
self.data.at[i, "position_to_glycan"] = ""
if self.trust_byonic:
n_site_status = {}
p_n = r[protein_column_name].lstrip(">")
# print(self.protein_name, p_n)
# motifs = [match for match in seq.find_with_regex(motif, ignore=seq.gaps())]
# if self.analysis == "N-glycan":
# if len(fasta_library[p_n]) >= origin_seq + expand_window:
# if expand_window:
# expanded_window = Sequence(fasta_library[p_n][origin_seq: origin_seq + len(self.data.at[i, "stripped_seq"]) + expand_window])
# expanded_window_motifs = [match for match in expanded_window.find_with_regex(motif, ignore=expanded_window.gaps())]
# origin_map = [i.start + origin_seq for i in expanded_window_motifs]
# if len(expanded_window_motifs) > len(motifs):
# self.data.at[i, "expanded_motif"] = str(expanded_window[expanded_window_motifs[-1]])
# self.data.at[i, "expanded_aa"] = str(expanded_window[-expand_window:])
#
# else:
# origin_map = [i.start + origin_seq for i in motifs]
# else:
# origin_map = [i.start + origin_seq for i in motifs]
#
# if analysis == "N-glycan":
# self.data.at[i, "total_number_of_asn"] = seq.count("N", 0, len(seq))
# if expand_window:
# self.data.at[i, "total_number_of_n-linked_sequon"] = len(expanded_window_motifs)
# else:
# self.data.at[i, "total_number_of_n-linked_sequon"] = len(motifs)
# self.data.at[i, "total_number_of_unmodded_asn"] = self.data.at[i, "total_number_of_asn"] - self.data.at[i, "total_number_of_modded_asn"]
# elif analysis == "O-glycan":
# self.data.at[i, "total_number_of_ser_thr"] = seq.count("S", 0, len(seq)) + seq.count("T", 0, len(seq))
# self.data.at[i, "total_number_of_unmodded_ser_or_thr"] = self.data.at[i, "total_number_of_modded_ser_thr"] - self.data.at[i, "total_number_of_modded_ser_thr"]
# current_glycan = 0
max_glycans = len(glycans)
glycosylation_count = 1
if max_glycans:
self.row_to_glycans[i] = np.sort(glycans)
for g in glycans:
data_gly = self.calculate_glycan(g)
glycan_dict[str(round(data_gly, 3))] = g
self.glycan_to_row[g] = i
glycosylated_site = []
for aa in range(1, len(seq) - 1):
if seq[aa].mods:
mod_value = float(seq[aa].mods[0].value)
round_mod_value = round(mod_value)
# str_mod_value = seq[aa].mods[0].value[0] + str(round_mod_value)
#if str_mod_value in modifications:
# if seq[aa].value in "ST" and analysis == "O-glycan":
# if round_mod_value == 80:
# continue
# if seq[aa].value in modifications[str_mod_value]:
# if seq[aa].value == "N" and round_mod_value == 1:
# seq[aa].extra = "Deamidated"
# continue
# if modifications[str_mod_value][seq[aa].value]['number'] > 0:
# modifications[str_mod_value][seq[aa].value]['number'] -= 1
# seq[aa].mods[0].mass = mod_value
round_3 = round(mod_value, 3)
if str(round_3) in glycan_dict:
seq[aa].extra = "Glycosylated"
pos = int(r[starting_position_column_name]) + aa - 2
self.sequon_glycosites.add(pos + 1)
position = "{}_position".format(str(glycosylation_count))
self.data.at[i, position] = seq[aa].value + str(pos + 1)
glycosylated_site.append(self.data.at[i, position] + "_" + str(round_mod_value))
glycosylation_count += 1
glycan_reordered.append(glycan_dict[str(round_3)])
if glycan_reordered:
self.data.at[i, "position_to_glycan"] = ",".join(glycan_reordered)
self.data.at[i, "glycoprofile"] = ";".join(glycosylated_site)
# if seq[aa].value == "N":
# if analysis == "N-glycan":
# if self.trust_byonic:
# if not in origin_map:
#
# # position = "{}_position".format(str(glycosylation_count))
# # self.data.at[i, position] = seq[aa].value + str(
# # r[starting_position_column_name]+aa)
# # self.data.at[i, position + "_match"] = "H"
# # glycosylation_count += 1
# self.data.at[i, "total_number_of_hexnac"] += 1
# elif seq[aa].value in "ST":
# if analysis == "O-glycan":
# self.data.at[i, "total_number_of_hex"] += 1
# if mod_value in modifications:
# if seq[aa].value in "ST" and analysis == "O-glycan":
# if round_mod_value == 80:
# continue
#
# if seq[aa].value in modifications[mod_value]:
# if seq[aa].value == "N" and round_mod_value == 1:
# seq[aa].extra = "Deamidated"
# continue
# if modifications[mod_value][seq[aa].value]['number'] > 0:
# modifications[mod_value][seq[aa].value]['number'] -= 1
# seq[aa].mods[0].mass = float(seq[aa].mods[0].value)
#
# if max_glycans and current_glycan != max_glycans:
#
# seq[aa].mods[0].value = glycans[current_glycan]
# seq[aa].extra = "Glycosylated"
#
# if seq[aa].value == "N":
# if analysis == "N-glycan":
# if "hexnac" in glycans[current_glycan].lower():
# self.data.at[i, "total_number_of_hexnac"] += 1
#
# elif seq[aa].value in "ST":
# if analysis == "O-glycan":
# self.data.at[i, "total_number_of_hex"] += 1
#
# current_glycan += 1
#if current_glycan == max_glycans:
#break
# for n in origin_map:
# position = "{}_position".format(str(glycosylation_count))
# self.data.at[i, position] = seq[n-origin_seq+1].value + str(
# n + 1)
#
# if seq[n-origin_seq+1].extra == "Glycosylated":
# self.data.at[i, position + "_match"] = "H"
# elif seq[n-origin_seq+1].extra == "Deamidated":
# self.data.at[i, position + "_match"] = "D"
# else:
# self.data.at[i, position + "_match"] = "U"
#
# if analysis == "N-glycan":
# if self.legacy:
# if self.data.at[i, "total_number_of_n-linked_sequon"] != self.data.at[i, "total_number_of_hexnac"]:
# if seq[n-origin_seq+1].extra == "Deamidated":
# if self.data.at[i, "total_number_of_hexnac"] > 0:
# self.data.at[i, position + "_match"] = "D/H"
# if self.data.at[i, "total_number_of_unmodded_asn"] > 0:
# self.data.at[i, position + "_match"] = "D/H/U"
# else:
# self.data.at[i, position + "_match"] = "D"
# else:
# if self.data.at[i, "total_number_of_hexnac"] > 0:
# if self.data.at[i, "total_number_of_deamidation"] == 0:
# self.data.at[i, position + "_match"] = "H"
# else:
# self.data.at[i, position + "_match"] ="D/H"
# if self.data.at[i, "total_number_of_unmodded_asn"] > 0:
# self.data.at[i, position + "_match"] = "D/H/U"
# if not seq[n-origin_seq+1].extra:
# if self.data.at[i, "total_number_of_hexnac"] > 0 and self.data.at[i, "total_number_of_deamidation"]> 0:
# self.data.at[i, position + "_match"] = "D/H"
# if self.data.at[i, "total_number_of_unmodded_asn"] > 0:
# self.data.at[i, position + "_match"] = "D/H/U"
# elif self.data.at[i, "total_number_of_hexnac"] > 0:
# self.data.at[i, position + "_match"] = "H"
# if self.data.at[i, "total_number_of_unmodded_asn"] > 0:
# self.data.at[i, position + "_match"] = "D/H/U"
# else:
# self.data.at[i, position + "_match"] = "U"
# glycosylation_count += 1
else:
if pd.notnull(r[glycans_column_name]):
glycans = r[glycans_column_name].split(",")
glycans.sort()
self.data.at[i, glycans_column_name] = ",".join(glycans)
self.data.at[i, "glycosylation_status"] = True
self.glycosylated_seq.add(self.data.at[i, "stripped_seq"])
def analyze(self, max_sites=0, combine_d_u=True, splitting_sites=False):
result = []
temp = self.data.sort_values(["Area", "Score"], ascending=False)
temp[glycans_column_name] = temp[glycans_column_name].fillna("None")
out = []
if self.trust_byonic:
seq_glycosites = list(self.sequon_glycosites)
seq_glycosites.sort()
# print(seq_glycosites)
# if self.analysis == "N-glycan":
# if max_sites == 0:
# temp = temp[(0 < temp["total_number_of_n-linked_sequon"])]
# else:
# temp = temp[(0 < temp["total_number_of_n-linked_sequon"]) & (temp["total_number_of_n-linked_sequon"]<= max_sites) ]
for i, g in temp.groupby(["stripped_seq", "z", "glycoprofile", observed_mz]):
seq_within = []
unique_row = g.loc[g["Area"].idxmax()]
#
# glycan = 0
# first_site = ""
if seq_glycosites:
for n in seq_glycosites:
if unique_row[starting_position_column_name] <= n < unique_row["Ending Position"]:
# print(unique_row["stripped_seq"], n, unique_row[starting_position_column_name])
seq_within.append(
unique_row["stripped_seq"][n-unique_row[starting_position_column_name]]+str(n))
# print(unique_row)
# if self.legacy:
# for c in range(len(unique_row.index)):
# if unique_row.index[c].endswith("_position"):
#
# if pd.notnull(unique_row[unique_row.index[c]]):
# if not first_site:
# first_site = unique_row[unique_row.index[c]]
# if unique_row[unique_row.index[c]] not in result:
# result[unique_row[unique_row.index[c]]] = {}
#
# if "U" in unique_row[unique_row.index[c+1]]:
# if "U" not in result[unique_row[unique_row.index[c]]]:
# result[unique_row[unique_row.index[c]]]["U"] = 0
# result[unique_row[unique_row.index[c]]]["U"] += unique_row["Area"]
# elif "D" in unique_row[unique_row.index[c+1]]:
# if combine_d_u:
# if "U" not in result[unique_row[unique_row.index[c]]]:
# result[unique_row[unique_row.index[c]]]["U"] = 0
# result[unique_row[unique_row.index[c]]]["U"] += unique_row["Area"]
# else:
# if "D" not in result[unique_row[unique_row.index[c]]]:
# result[unique_row[unique_row.index[c]]]["D"] = 0
# result[unique_row[unique_row.index[c]]]["D"] += unique_row["Area"]
# else:
# if splitting_sites or unique_row["total_number_of_hexnac"] == 1:
#
# if self.row_to_glycans[unique_row.name][glycan] not in result[unique_row[unique_row.index[c]]]:
# result[unique_row[unique_row.index[c]]][self.row_to_glycans[unique_row.name][glycan]] = 0
# result[unique_row[unique_row.index[c]]][
# self.row_to_glycans[unique_row.name][glycan]] += unique_row["Area"]
# glycan += 1
#
# else:
# if unique_row["total_number_of_hexnac"] > 1 and not splitting_sites:
# temporary_glycan = ";".join(self.row_to_glycans[unique_row.name][glycan])
#
# if temporary_glycan not in result[unique_row[unique_row.index[c]]]:
# result[unique_row[unique_row.index[c]]][temporary_glycan] = unique_row["Area"]
# break
# else:
glycosylation_count = 0
glycans = unique_row["position_to_glycan"].split(",")
for c in range(len(unique_row.index)):
if unique_row.index[c].endswith("_position"):
if pd.notnull(unique_row[unique_row.index[c]]):
pos = unique_row[unique_row.index[c]]
result.append({"Position": pos, "Glycans": glycans[glycosylation_count], "Value": unique_row["Area"]})
ind = seq_within.index(pos)
seq_within.pop(ind)
glycosylation_count += 1
if seq_within:
for s in seq_within:
result.append({"Position": s, "Glycans": "U", "Value": unique_row["Area"]})
# if N_combo:
#
# N_combo.sort()
# sequons = ";".join(N_combo)
#
# # working_isoform = unique_row["isoform"]
# # if working_isoform not in result:
# # # if working_isoform != 1.0 and 1.0 in result:
# # # if sequons in result[working_isoform][1.0]:
# # # if unique_row[glycans_column_name] in result[working_isoform][1.0][sequons] or "U" in result[working_isoform][1.0][sequons]:
# # # working_isoform = 1.0
# # # else:
# # result[working_isoform] = {}
# if sequons not in result[working_isoform]:
# result[working_isoform][sequons] = {}
# #if pd.notnull(unique_row[glycans_column_name]):
# if unique_row[glycans_column_name] != "None":
# if unique_row[glycans_column_name] not in result[working_isoform][sequons]:
# result[working_isoform][sequons][unique_row[glycans_column_name]] = 0
# result[working_isoform][sequons][unique_row[glycans_column_name]] += unique_row["Area"]
# else:
# if "U" not in result[working_isoform][sequons]:
# result[working_isoform][sequons]["U"] = 0
# result[working_isoform][sequons]["U"] += unique_row["Area"]
# #print(result)
if result:
result = pd.DataFrame(result)
group = result.groupby(["Position", "Glycans"])
out = group.agg(np.sum).reset_index()
else:
                out = pd.DataFrame([], columns=["Position", "Glycans", "Value"])
# for k in result:
# for k2 in result[k]:
# for k3 in result[k][k2]:
# out.append({"Isoform": k, "Position": k2, "Glycans": k3, "Value": result[k][k2][k3]})
else:
# result_total = {}
# if max_sites != 0:
# temp = temp[temp['total_number_of_hex'] <= max_sites]
for i, g in temp.groupby(["stripped_seq", "z", glycans_column_name, starting_position_column_name, observed_mz]):
unique_row = g.loc[g["Area"].idxmax()]
if unique_row[glycans_column_name] != "None":
result.append({"Peptides": i[0], "Glycans": i[2], "Value": unique_row["Area"], "Position": i[3]})
else:
result.append({"Peptides": i[0], "Glycans": "U", "Value": unique_row["Area"], "Position": i[3]})
result = pd.DataFrame(result)
group = result.groupby(["Peptides", "Position", "Glycans"])
out = group.agg(np.sum).reset_index()
# working_isoform = unique_row["isoform"]
# if working_isoform not in result:
# # if working_isoform != 1.0 and 1.0 in result:
# # if unique_row["stripped_seq"] in result[working_isoform][1.0]:
# # #if i[3] in result[working_isoform][1.0][unique_row["stripped_seq"]]:
# # # if unique_row[glycans_column_name] in result[working_isoform][1.0][unique_row["stripped_seq"]][i[3]] or "U" in \
# # # result[working_isoform][1.0][unique_row["stripped_seq"]][i[3]]:
# # working_isoform = 1.0
# # else:
# result[working_isoform] = {}
#
# if unique_row["stripped_seq"] not in result[working_isoform]:
# result[working_isoform][unique_row["stripped_seq"]] = {}
# # result_total[unique_row["isoform"]][unique_row["stripped_seq"]] = 0
# if i[3] not in result[working_isoform][unique_row["stripped_seq"]]:
# result[working_isoform][unique_row["stripped_seq"]][i[3]] = {}
# if i[2] == "None":
# if "U" not in result[working_isoform][unique_row["stripped_seq"]][i[3]]:
# result[working_isoform][unique_row["stripped_seq"]][i[3]]["U"] = 0
# result[working_isoform][unique_row["stripped_seq"]][i[3]]["U"] += unique_row["Area"]
#
# else:
# # if splitting_sites:
# # for gly in self.row_to_glycans[unique_row.name]:
# # if gly not in result[working_isoform][unique_row["stripped_seq"]][i[3]]:
# # result[working_isoform][unique_row["stripped_seq"]][i[3]][gly] = 0
# # result[working_isoform][unique_row["stripped_seq"]][i[3]][gly] += unique_row["Area"]
# # else:
# if unique_row[glycans_column_name] not in result[working_isoform][unique_row["stripped_seq"]][i[3]]:
# result[working_isoform][unique_row["stripped_seq"]][i[3]][unique_row[glycans_column_name]] = 0
# result[working_isoform][unique_row["stripped_seq"]][i[3]][unique_row[glycans_column_name]] += unique_row["Area"]
#
# for k in result:
# for k2 in result[k]:
# for k3 in result[k][k2]:
# for k4 in result[k][k2][k3]:
# out.append({"Isoform": k, "Peptides": k2, "Glycans": k4, "Value": result[k][k2][k3][k4], "Position": k3})
return Result(out)
class GlypnirO:
def __init__(self, trust_byonic=False, get_uniprot=False):
self.trust_byonic = trust_byonic
self.components = None
self.uniprot_parsed_data =
|
pd.DataFrame([])
|
pandas.DataFrame
|
import numpy as np
import pandas
from scipy.stats import rankdata
from bayesian_benchmarks.database_utils import Database
from bayesian_benchmarks.data import regression_datasets, classification_datasets
from bayesian_benchmarks.data import _ALL_REGRESSION_DATATSETS, _ALL_CLASSIFICATION_DATATSETS
_ALL_DATASETS = {}
_ALL_DATASETS.update(_ALL_REGRESSION_DATATSETS)
_ALL_DATASETS.update(_ALL_CLASSIFICATION_DATATSETS)
def sort_data_by_N(datasets):
Ns = [_ALL_DATASETS[dataset].N for dataset in datasets]
order = np.argsort(Ns)
return list(np.array(datasets)[order])
regression_datasets = sort_data_by_N(regression_datasets)
classification_datasets = sort_data_by_N(classification_datasets)
database_path = 'results/results.db'
def rank_array(A):
res = []
for a in A.reshape([np.prod(A.shape[:-1]), A.shape[-1]]):
a[np.isnan(a)] = -1e10
res.append(rankdata(a))
res = np.array(res)
return res.reshape(A.shape)
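# rank_array ranks entries along the last axis (rank 1 = smallest value); NaNs are replaced
# with -1e10 first so missing results always get the lowest ranks. Note the replacement can
# also modify the input array in place when reshape returns a view.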
dataset_colors = {
'challenger': [0,0],
'fertility': [0,0],
'concreteslump':[0,0],
'autos': [0,0],
'servo': [0,0],
'breastcancer': [0,0],
'machine': [0,0],
'yacht': [0,0],
'autompg': [0,0],
'boston': [0,0],
'forest': [1,0],
'stock': [0,0],
'pendulum': [0,0],
'energy': [0,0],
'concrete': [0,0],
'solar': [1,0],
'airfoil': [0,0],
'winered': [0,0],
'gas': [0,0],
'skillcraft': [0,0],
'sml': [0,1],
'winewhite': [0,0],
'parkinsons': [0,0],
'kin8nm': [0,1],
'pumadyn32nm': [0,0],
'power': [1,0],
'naval': [0,0],
'pol': [1,1],
'elevators': [0,0],
'bike': [1,1],
'kin40k': [0,1],
'protein': [0,0],
'tamielectric': [0,0],
'keggdirected': [1,1],
'slice': [0,1],
'keggundirected':[1,0],
'3droad': [0,0],
'song': [0,0],
'buzz': [0,0],
'nytaxi': [0,0],
'houseelectric':[1,1]
}
def read(datasets, models, splits, table, field, extra_text='', highlight_max=True, highlight_non_gaussian=True, use_error_bars=True):
results = []
results_test_shapiro_W_median = []
with Database(database_path) as db:
for dataset in datasets:
for dd in models:
for split in splits:
d = {'dataset': dataset,
'split' : split}
d.update({'iterations':100000})
d.update({k:dd[k] for k in ['configuration', 'mode']})
if True:# _ALL_REGRESSION_DATATSETS[dataset].N < 1000:
res = db.read(table, [field, 'test_shapiro_W_median'], d)
else:
res = []
if len(res) > 0:
try:
results.append(float(res[0][0]))
results_test_shapiro_W_median.append(float(res[0][1]))
except:
print(res, d, dataset)
# results.append(np.nan)
# results_test_shapiro_W_median.append(np.nan)
else:
results.append(np.nan)
results_test_shapiro_W_median.append(np.nan)
results = np.array(results).reshape(len(datasets), len(models), len(splits))
results_test_shapiro_W_median = np.array(results_test_shapiro_W_median).reshape(len(datasets), len(models), len(splits))
results_test_shapiro_W_median = np.average(results_test_shapiro_W_median, -1)
results_mean = np.nanmean(results, -1)
results_std_err = np.nanstd(results, -1)/float(len(splits))**0.5
argmax = np.argmax(results_mean, 1)
lower_pts = [m[a]-e[a] for m, e, a in zip(results_mean, results_std_err, argmax)]
high_pts = results_mean + results_std_err
argmaxes = [np.where(h>l)[0] for h, l in zip(high_pts, lower_pts)]
rs = rank_array(np.transpose(results, [0, 2, 1]))
rs_flat = rs.reshape(len(datasets) * len(splits), len(models))
avg_ranks = np.average(rs_flat, 0)
std_ranks = np.std(rs_flat, 0) / float(len(datasets) * len(splits))**0.5
r = ['{:.2f} ({:.2f})'.format(m, s) for m, s in zip(avg_ranks, std_ranks)]
res_combined = []
for i, (ms, es, Ws) in enumerate(zip(results_mean, results_std_err, results_test_shapiro_W_median)):
for j, (m, e, W) in enumerate(zip(ms, es, Ws)):
if field == 'test_shapiro_W_median':
if m < 0.999:
res_combined.append('{:.4f}'.format(m))
else:
res_combined.append(r' ')
else:
if m > -1000:
if use_error_bars:
if m > -10:
t = '{:.2f} ({:.2f})'.format(m, e)
else:
t = '{:.0f} ({:.0f})'.format(m, e)
else:
if m > -10:
t = '{:.2f}'.format(m)
else:
t = '{:.0f}'.format(m)
if highlight_max and (j in argmaxes[i]):
t = r'\textbf{' + t + '}'
if highlight_non_gaussian and (W<0.99):
t = r'\textit{' + t + '}'
res_combined.append(t)
else:
                    res_combined.append(r'$-\infty$')
results_pandas = np.array(res_combined).reshape(results_mean.shape)
extra_fields = []
extra_fields.append('Avg ranks')
results_pandas = np.concatenate([results_pandas, np.array(r).reshape(1, -1)], 0)
extra_fields.append('Median diff from gp')
ind = np.where(np.array([mm['nice_name'] for mm in models])=='G')[0][0]
median = np.nanmedian(np.transpose(results - results[:, ind, :][:, None, :], [0, 2, 1]).reshape(len(datasets)*len(splits), len(models)), 0)
median = ['{:.2f}'.format(m) for m in median]
results_pandas = np.concatenate([results_pandas, np.array(median).reshape(1, -1)], 0)
_datasets = []
for d in datasets:
if 'wilson' in d:
nd = d[len('wilson_'):]
else:
nd = d
if (dataset_colors[nd][0] == 0) and (dataset_colors[nd][1] == 0):
_d = nd
elif (dataset_colors[nd][0] == 1) and (dataset_colors[nd][1] == 0):
            _d = r'{\color{myAcolor} \textbf{' + nd + r'}\myAcolormarker}'
        elif (dataset_colors[nd][0] == 0) and (dataset_colors[nd][1] == 1):
            _d = r'{\color{myBcolor} \textbf{' + nd + r'}\myBcolormarker}'
        elif (dataset_colors[nd][0] == 1) and (dataset_colors[nd][1] == 1):
            _d = r'{\color{myCcolor} \textbf{' + nd + r'}\myCcolormarker}'
_datasets.append(_d)
res =
|
pandas.DataFrame(data=results_pandas, index=_datasets + extra_fields, columns=[m['nice_name'] for m in models])
|
pandas.DataFrame
|
"""
This script contains helper functions to make plots presented in the paper
"""
from itertools import product
from itertools import compress
import copy
from pickle import UnpicklingError
import dill as pickle
from adaptive.saving import *
from IPython.display import display, HTML
import scipy.stats as stats
from glob import glob
from time import time
from scipy.stats import norm
import seaborn as sns
from adaptive.compute import collect
import matplotlib.pyplot as plt
import pandas as pd
from matplotlib import cm
from matplotlib.lines import Line2D
import numpy as np
from matplotlib.ticker import FormatStrFormatter
np.seterr(all='raise')
def read_files(file_name):
files = glob(file_name)
print(f'Found {len(files)} files.')
results = []
for file in files:
try:
with open(file, 'rb') as f:
r = pickle.load(f)
results.extend(r)
        except Exception:  # e.g. UnpicklingError from a corrupted file
print(f"Skipping corrupted file: {file}")
return results
def add_config(dfs, r):
dfs = pd.concat(dfs)
for key in r['config']:
if key == 'policy_names':
continue
dfs[key] = r['config'][key]
return dfs
def save_data_timepoints(data, timepoints, method, K, order):
data = data[timepoints, :]
return pd.DataFrame({
"time": np.tile(timepoints, K),
"policy": np.repeat(np.arange(K), len(timepoints)),
"value": data.flatten(order=order),
"method": [method] * data.size,
})
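# save_data_timepoints converts the selected rows of a (timepoints x K policies) array into a
# long-format DataFrame with one row per (time, policy) pair; `order` must match the array's
# memory layout so the flattened values line up with the tiled time/policy indices.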
def generate_data_frames(results):
"""
Generate DataFrames from the raw saving results.
"""
df_stats = []
df_probs = []
df_covs = []
for r in results:
CONFIG_COLS = list(r['config'].keys())
CONFIG_COLS.remove('policy_value')
# get statistics table
tabs_stats = []
T = r['config']['T']
for weight, stats in r['stats'].items():
statistics = ['Bias', 'Var']
tab_stat = pd.DataFrame({"statistic": statistics,
"value": stats.flatten(),
'weight': [weight] * len(statistics)
})
tabs_stats.append(tab_stat)
df_stats.append(add_config(tabs_stats, r))
df_stats = pd.concat(df_stats)
# add true standard error, relative variance, relerrors and coverage in df_stats
confidence_level = np.array([0.9, 0.95])
quantile = norm.ppf(0.5+confidence_level/2)
new_stats = []
# group_keys = [*CONFIG_COLS, 'policy', 'weight',]
group_keys = ['experiment', 'policy', 'weight']
for *config, df_cfg in df_stats.groupby(group_keys):
weight = config[0][group_keys.index('weight')]
df_bias = df_cfg.query("statistic=='Bias'")
df_var = df_cfg.query("statistic=='Var'")
true_se = np.std(df_bias['value'])
if true_se < 1e-6:
print(
f"For config {dict(zip([*CONFIG_COLS, 'policy', 'weight'], config))} data is not sufficient, only has {len(df_bias)} samples.")
continue
# relative S.E.
df_relse = pd.DataFrame.copy(df_var)
df_relse['value'] = np.sqrt(np.array(df_relse['value'])) / true_se
df_relse['statistic'] = 'relative S.E.'
# true S.E.
df_truese = pd.DataFrame.copy(df_var)
df_truese['value'] = true_se
df_truese['statistic'] = 'true S.E.'
# relative error
df_relerror = pd.DataFrame.copy(df_bias)
df_relerror['value'] = np.array(df_relerror['value']) / true_se
df_relerror['statistic'] = 'R.E.'
# tstat
df_tstat = pd.DataFrame.copy(df_bias)
df_tstat['value'] = np.array(
df_tstat['value']) / np.sqrt(np.array(df_var['value']))
df_tstat['statistic'] = 't-stat'
new_stats.extend([df_relse, df_truese, df_relerror, df_tstat])
# coverage
for p, q in zip(confidence_level, quantile):
df_relerror_cov = pd.DataFrame.copy(df_relerror)
df_relerror_cov['value'] = (
np.abs(np.array(df_relerror['value'])) < q).astype(float)
df_relerror_cov['statistic'] = f'{int(p*100)}% coverage of R.E.'
df_tstat_cov =
|
pd.DataFrame.copy(df_tstat)
|
pandas.DataFrame.copy
|